| repo_name | ref | path | copies | content |
|---|---|---|---|---|
nitzmahone/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/google/gcp_healthcheck.py
|
48
|
#!/usr/bin/python
# Copyright 2017 Google Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_healthcheck
version_added: "2.4"
short_description: Create, Update or Destroy a Healthcheck.
description:
- Create, Update or Destroy a Healthcheck. Currently only HTTP and
HTTPS Healthchecks are supported. Healthchecks are used to monitor
individual instances, managed instance groups and/or backend
services. Healthchecks are reusable.
- Visit
U(https://cloud.google.com/compute/docs/load-balancing/health-checks)
for an overview of Healthchecks on GCP.
- See
U(https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks) for
API details on HTTP Healthchecks.
- See
U(https://cloud.google.com/compute/docs/reference/latest/httpsHealthChecks)
for more details on the HTTPS Healthcheck API.
requirements:
- "python >= 2.6"
- "google-api-python-client >= 1.6.2"
- "google-auth >= 0.9.0"
- "google-auth-httplib2 >= 0.0.2"
notes:
- Only supports HTTP and HTTPS Healthchecks currently.
author:
- "Tom Melendez (@supertom) <tom@supertom.com>"
options:
check_interval:
description:
- How often (in seconds) to send a health check.
default: 5
healthcheck_name:
description:
- Name of the Healthcheck.
required: true
healthcheck_type:
description:
- Type of Healthcheck.
required: true
choices: ["HTTP", "HTTPS"]
host_header:
description:
- The value of the host header in the health check request. If left
empty, the public IP on behalf of which this health
check is performed will be used.
required: false
default: ""
port:
description:
- The TCP port number for the health check request. The default value is
443 for HTTPS and 80 for HTTP.
request_path:
description:
- The request path of the health check request.
required: false
default: "/"
state:
description: State of the Healthcheck.
required: false
default: "present"
choices: ["present", "absent"]
timeout:
description:
- How long (in seconds) to wait for a response before claiming
failure. It is invalid for timeout
to have a greater value than check_interval.
default: 5
unhealthy_threshold:
description:
- A so-far healthy instance will be marked unhealthy after this
many consecutive failures.
default: 2
healthy_threshold:
description:
- A so-far unhealthy instance will be marked healthy after this
many consecutive successes.
default: 2
service_account_email:
description:
- Service account email
service_account_permissions:
version_added: "2.0"
description:
- Service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"useraccounts-ro", "useraccounts-rw", "datastore", "logging-write",
"monitoring", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
credentials_file:
description:
- Path to the JSON file associated with the service account email
project_id:
description:
- Your GCP project ID
'''
EXAMPLES = '''
- name: Create Minimum HealthCheck
gcp_healthcheck:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
healthcheck_name: my-healthcheck
healthcheck_type: HTTP
state: present
- name: Create HTTP HealthCheck
gcp_healthcheck:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
healthcheck_name: my-healthcheck
healthcheck_type: HTTP
host_header: my-host
request_path: /hc
check_interval: 10
timeout: 30
unhealthy_threshold: 2
healthy_threshold: 1
state: present
- name: Create HTTPS HealthCheck
gcp_healthcheck:
service_account_email: "{{ service_account_email }}"
credentials_file: "{{ credentials_file }}"
project_id: "{{ project_id }}"
healthcheck_name: "{{ https_healthcheck }}"
healthcheck_type: HTTPS
host_header: my-host
request_path: /hc
check_interval: 5
timeout: 5
unhealthy_threshold: 2
healthy_threshold: 1
state: present
'''
RETURN = '''
state:
description: state of the Healthcheck
returned: Always.
type: str
sample: present
healthcheck_name:
description: Name of the Healthcheck
returned: Always
type: str
sample: my-healthcheck
healthcheck_type:
description: Type of the Healthcheck
returned: Always
type: str
sample: HTTP
healthcheck:
description: GCP Healthcheck dictionary
returned: Always. Refer to GCP documentation for detailed field descriptions.
type: dict
sample: { "name": "my-hc", "port": 443, "requestPath": "/foo" }
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gcp import get_google_api_client, GCPUtils
USER_AGENT_PRODUCT = 'ansible-healthcheck'
USER_AGENT_VERSION = '0.0.1'
def _validate_healthcheck_params(params):
"""
Validate healthcheck params.
Basic type validation has already been performed by AnsibleModule.
:param params: Ansible dictionary containing configuration.
:type params: ``dict``
:return: (True, '') or raises ValueError
:rtype: ``tuple`` or `class:ValueError`
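    Example (illustrative): {'timeout': 10, 'check_interval': 5} raises
    ValueError, while {'timeout': 5, 'check_interval': 5} is accepted.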
"""
if params['timeout'] > params['check_interval']:
raise ValueError("timeout (%s) is greater than check_interval (%s)" % (
params['timeout'], params['check_interval']))
return (True, '')
def _build_healthcheck_dict(params):
"""
Reformat services in Ansible Params for GCP.
:param params: Params from AnsibleModule object
:type params: ``dict``
:return: dictionary suitable for submission to GCP
HealthCheck (HTTP/HTTPS) API.
:rtype: ``dict``
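    Example (illustrative): {'healthcheck_name': 'my-hc', 'timeout': 5,
    'host_header': 'app.example.com'} becomes
    {'name': 'my-hc', 'timeoutSec': 5, 'host': 'app.example.com'}.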
"""
gcp_dict = GCPUtils.params_to_gcp_dict(params, 'healthcheck_name')
if 'timeout' in gcp_dict:
gcp_dict['timeoutSec'] = gcp_dict['timeout']
del gcp_dict['timeout']
if 'checkInterval' in gcp_dict:
gcp_dict['checkIntervalSec'] = gcp_dict['checkInterval']
del gcp_dict['checkInterval']
if 'hostHeader' in gcp_dict:
gcp_dict['host'] = gcp_dict['hostHeader']
del gcp_dict['hostHeader']
if 'healthcheckType' in gcp_dict:
del gcp_dict['healthcheckType']
return gcp_dict
def _get_req_resource(client, resource_type):
if resource_type == 'HTTPS':
return (client.httpsHealthChecks(), 'httpsHealthCheck')
else:
return (client.httpHealthChecks(), 'httpHealthCheck')
def get_healthcheck(client, name, project_id=None, resource_type='HTTP'):
"""
Get a Healthcheck from GCP.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
:param name: Name of the Healthcheck.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: The response dict from the respective GCP 'get' request.
:rtype: ``dict``
"""
try:
resource, entity_name = _get_req_resource(client, resource_type)
args = {'project': project_id, entity_name: name}
req = resource.get(**args)
return GCPUtils.execute_api_client_req(req, raise_404=False)
except:
raise
def create_healthcheck(client, params, project_id, resource_type='HTTP'):
"""
Create a new Healthcheck.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
:param params: Dictionary of arguments from AnsibleModule.
:type params: ``dict``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
gcp_dict = _build_healthcheck_dict(params)
try:
resource, _ = _get_req_resource(client, resource_type)
args = {'project': project_id, 'body': gcp_dict}
req = resource.insert(**args)
return_data = GCPUtils.execute_api_client_req(req, client, raw=False)
if not return_data:
return_data = get_healthcheck(client,
name=params['healthcheck_name'],
project_id=project_id)
return (True, return_data)
except:
raise
def delete_healthcheck(client, name, project_id, resource_type='HTTP'):
"""
Delete a Healthcheck.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
:param name: Name of the Healthcheck.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
try:
resource, entity_name = _get_req_resource(client, resource_type)
args = {'project': project_id, entity_name: name}
req = resource.delete(**args)
return_data = GCPUtils.execute_api_client_req(req, client)
return (True, return_data)
except:
raise
def update_healthcheck(client, healthcheck, params, name, project_id,
resource_type='HTTP'):
"""
Update a Healthcheck.
If the healthcheck has not changed, the update will not occur.
:param client: An initialized GCE Compute Discovery resource.
:type client: :class: `googleapiclient.discovery.Resource`
:param healthcheck: The existing Healthcheck dict as returned by GCP.
:type healthcheck: ``dict``
:param params: Dictionary of arguments from AnsibleModule.
:type params: ``dict``
:param name: Name of the Healthcheck.
:type name: ``str``
:param project_id: The GCP project ID.
:type project_id: ``str``
:return: Tuple with changed status and response dict
:rtype: ``tuple`` in the format of (bool, dict)
"""
gcp_dict = _build_healthcheck_dict(params)
ans = GCPUtils.are_params_equal(healthcheck, gcp_dict)
if ans:
return (False, 'no update necessary')
try:
resource, entity_name = _get_req_resource(client, resource_type)
args = {'project': project_id, entity_name: name, 'body': gcp_dict}
req = resource.update(**args)
return_data = GCPUtils.execute_api_client_req(
req, client=client, raw=False)
return (True, return_data)
except:
raise
def main():
module = AnsibleModule(argument_spec=dict(
healthcheck_name=dict(required=True),
healthcheck_type=dict(required=True,
choices=['HTTP', 'HTTPS']),
request_path=dict(required=False, default='/'),
check_interval=dict(required=False, type='int', default=5),
healthy_threshold=dict(required=False, type='int', default=2),
unhealthy_threshold=dict(required=False, type='int', default=2),
host_header=dict(required=False, type='str', default=''),
timeout=dict(required=False, type='int', default=5),
port=dict(required=False, type='int'),
state=dict(choices=['absent', 'present'], default='present'),
service_account_email=dict(),
service_account_permissions=dict(type='list'),
credentials_file=dict(),
project_id=dict(), ), )
client, conn_params = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
user_agent_version=USER_AGENT_VERSION)
params = {}
params['healthcheck_name'] = module.params.get('healthcheck_name')
params['healthcheck_type'] = module.params.get('healthcheck_type')
params['request_path'] = module.params.get('request_path')
params['check_interval'] = module.params.get('check_interval')
params['healthy_threshold'] = module.params.get('healthy_threshold')
params['unhealthy_threshold'] = module.params.get('unhealthy_threshold')
params['host_header'] = module.params.get('host_header')
params['timeout'] = module.params.get('timeout')
params['port'] = module.params.get('port', None)
params['state'] = module.params.get('state')
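    # If no port was supplied, default it (80 for HTTP, 443 for HTTPS).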
if not params['port']:
params['port'] = 80
if params['healthcheck_type'] == 'HTTPS':
params['port'] = 443
try:
_validate_healthcheck_params(params)
except Exception as e:
module.fail_json(msg=str(e), changed=False)
changed = False
json_output = {'state': params['state']}
healthcheck = get_healthcheck(client,
name=params['healthcheck_name'],
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
if not healthcheck:
if params['state'] == 'absent':
# Doesn't exist in GCE, and state==absent.
changed = False
module.fail_json(
msg="Cannot delete unknown healthcheck: %s" %
(params['healthcheck_name']))
else:
# Create
changed, json_output['healthcheck'] = create_healthcheck(client,
params=params,
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
elif params['state'] == 'absent':
# Delete
changed, json_output['healthcheck'] = delete_healthcheck(client,
name=params['healthcheck_name'],
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
else:
changed, json_output['healthcheck'] = update_healthcheck(client,
healthcheck=healthcheck,
params=params,
name=params['healthcheck_name'],
project_id=conn_params['project_id'],
resource_type=params['healthcheck_type'])
json_output['changed'] = changed
json_output.update(params)
module.exit_json(**json_output)
if __name__ == '__main__':
main()
|
adamtiger/tensorflow
|
refs/heads/master
|
tensorflow/python/estimator/canned/dnn_test.py
|
31
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.estimator.canned import dnn
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.estimator.inputs import pandas_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import queue_runner
try:
# pylint: disable=g-import-not-at-top
import pandas as pd
HAS_PANDAS = True
except IOError:
# Pandas writes a temporary file during import. If it fails, don't use pandas.
HAS_PANDAS = False
except ImportError:
HAS_PANDAS = False
def _dnn_classifier_fn(*args, **kwargs):
return dnn.DNNClassifier(*args, **kwargs)
class DNNModelFnTest(dnn_testing_utils.BaseDNNModelFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNModelFnTest.__init__(self, dnn._dnn_model_fn)
class DNNLogitFnTest(dnn_testing_utils.BaseDNNLogitFnTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNLogitFnTest.__init__(self,
dnn._dnn_logit_fn_builder)
class DNNClassifierEvaluateTest(
dnn_testing_utils.BaseDNNClassifierEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierEvaluateTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierPredictTest(
dnn_testing_utils.BaseDNNClassifierPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierPredictTest.__init__(
self, _dnn_classifier_fn)
class DNNClassifierTrainTest(
dnn_testing_utils.BaseDNNClassifierTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNClassifierTrainTest.__init__(
self, _dnn_classifier_fn)
def _dnn_regressor_fn(*args, **kwargs):
return dnn.DNNRegressor(*args, **kwargs)
class DNNRegressorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_regressor_fn)
class DNNRegressorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_regressor_fn)
def _queue_parsed_features(feature_map):
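  """Pipes the parsed feature tensors through a FIFO queue and returns the dequeued tensors keyed by feature name."""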
tensors_to_enqueue = []
keys = []
for key, tensor in six.iteritems(feature_map):
keys.append(key)
tensors_to_enqueue.append(tensor)
queue_dtypes = [x.dtype for x in tensors_to_enqueue]
input_queue = data_flow_ops.FIFOQueue(capacity=100, dtypes=queue_dtypes)
queue_runner.add_queue_runner(
queue_runner.QueueRunner(
input_queue,
[input_queue.enqueue(tensors_to_enqueue)]))
dequeued_tensors = input_queue.dequeue()
return {keys[i]: dequeued_tensors[i] for i in range(len(dequeued_tensors))}
class DNNRegressorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNRegressor(
hidden_units=(2, 2),
feature_columns=feature_columns,
label_dimension=label_dimension,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
label_dimension = 1
batch_size = 10
data = np.linspace(0., 2., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(data)
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
'y': feature_pb2.Feature(
float_list=feature_pb2.FloatList(value=datum)),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([label_dimension], dtypes.float32),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
class DNNClassifierIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _as_label(self, data_in_float):
return np.rint(data_in_float).astype(np.int64)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
n_classes, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNClassifier(
hidden_units=(2, 2),
feature_columns=feature_columns,
n_classes=n_classes,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUATE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predicted_proba = np.array([
x[prediction_keys.PredictionKeys.PROBABILITIES]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, n_classes), predicted_proba.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
n_classes = 3
input_dimension = 2
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
x_data = data.reshape(batch_size, input_dimension)
y_data = np.reshape(self._as_label(data[:batch_size]), (batch_size, 1))
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
y=y_data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': x_data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_pandas_input_fn(self):
"""Tests complete flow with pandas_input_fn."""
if not HAS_PANDAS:
return
input_dimension = 1
n_classes = 3
batch_size = 10
data = np.linspace(0., n_classes - 1., batch_size, dtype=np.float32)
x = pd.DataFrame({'x': data})
y = pd.Series(self._as_label(data))
train_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = pandas_io.pandas_input_fn(
x=x,
y=y,
batch_size=batch_size,
shuffle=False)
predict_input_fn = pandas_io.pandas_input_fn(
x=x,
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
def test_input_fn_from_parse_example(self):
"""Tests complete flow with input_fn constructed from parse_example."""
input_dimension = 2
n_classes = 3
batch_size = 10
data = np.linspace(
0., n_classes - 1., batch_size * input_dimension, dtype=np.float32)
data = data.reshape(batch_size, input_dimension)
serialized_examples = []
for datum in data:
example = example_pb2.Example(features=feature_pb2.Features(
feature={
'x':
feature_pb2.Feature(float_list=feature_pb2.FloatList(
value=datum)),
'y':
feature_pb2.Feature(int64_list=feature_pb2.Int64List(
value=self._as_label(datum[:1]))),
}))
serialized_examples.append(example.SerializeToString())
feature_spec = {
'x': parsing_ops.FixedLenFeature([input_dimension], dtypes.float32),
'y': parsing_ops.FixedLenFeature([1], dtypes.int64),
}
def _train_input_fn():
feature_map = parsing_ops.parse_example(serialized_examples, feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _eval_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
labels = features.pop('y')
return features, labels
def _predict_input_fn():
feature_map = parsing_ops.parse_example(
input_lib.limit_epochs(serialized_examples, num_epochs=1),
feature_spec)
features = _queue_parsed_features(feature_map)
features.pop('y')
return features, None
self._test_complete_flow(
train_input_fn=_train_input_fn,
eval_input_fn=_eval_input_fn,
predict_input_fn=_predict_input_fn,
input_dimension=input_dimension,
n_classes=n_classes,
batch_size=batch_size)
if __name__ == '__main__':
test.main()
|
lubegamark/senkumba
|
refs/heads/master
|
loans/migrations/0003_auto_20160721_1556.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-21 12:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('loans', '0002_auto_20160721_1546'),
]
operations = [
migrations.RenameField(
model_name='loan',
old_name='interest',
new_name='interest_rate',
),
migrations.RemoveField(
model_name='interesttype',
name='period',
),
migrations.AddField(
model_name='interesttype',
name='days',
field=models.IntegerField(default=1),
preserve_default=False,
),
migrations.DeleteModel(
name='Period',
),
]
|
JayVora-SerpentCS/e-commerce
|
refs/heads/9.0
|
website_sale_require_login/__openerp__.py
|
1
|
# -*- coding: utf-8 -*-
# © 2015 Antiun Ingeniería, S.L. - Jairo Llopis
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Require login to checkout",
"summary": "Force users to login for buying",
"version": "9.0.1.0.0",
"category": "Website",
"website": "http://www.antiun.com",
"author": "Tecnativa, "
"LasLabs, "
"Odoo Community Association (OCA)",
"license": "AGPL-3",
"installable": True,
"depends": [
"website_sale_suggest_create_account",
],
"data": [
"views/assets.xml",
"views/website_sale.xml",
],
}
|
munyirik/python
|
refs/heads/develop
|
cpython/Lib/unittest/test/testmock/testpatch.py
|
8
|
# Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import os
import sys
import unittest
from unittest.test.testmock import support
from unittest.test.testmock.support import SomeClass, is_instance
from unittest.mock import (
    NonCallableMock, CallableMixin, patch, sentinel,
    MagicMock, Mock, NonCallableMagicMock, _patch,
    DEFAULT, call, _get_target
)
builtin_string = 'builtins'
PTModule = sys.modules[__name__]
MODNAME = '%s.PTModule' % __name__
def _get_proxy(obj, get_only=True):
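    """Returns a proxy that forwards attribute access to ``obj``; attribute set/delete are forwarded too when get_only=False."""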
class Proxy(object):
def __getattr__(self, name):
return getattr(obj, name)
if not get_only:
def __setattr__(self, name, value):
setattr(obj, name, value)
def __delattr__(self, name):
delattr(obj, name)
Proxy.__setattr__ = __setattr__
Proxy.__delattr__ = __delattr__
return Proxy()
# for use in the test
something = sentinel.Something
something_else = sentinel.SomethingElse
class Foo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
foo_name = '%s.Foo' % __name__
def function(a, b=Foo):
pass
class Container(object):
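    """Minimal dict-like object used to exercise patch.dict on non-dict containers."""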
def __init__(self):
self.values = {}
def __getitem__(self, name):
return self.values[name]
def __setitem__(self, name, value):
self.values[name] = value
def __delitem__(self, name):
del self.values[name]
def __iter__(self):
return iter(self.values)
class PatchTest(unittest.TestCase):
def assertNotCallable(self, obj, magic=True):
MockClass = NonCallableMagicMock
if not magic:
MockClass = NonCallableMock
self.assertRaises(TypeError, obj)
self.assertTrue(is_instance(obj, MockClass))
self.assertFalse(is_instance(obj, CallableMixin))
def test_single_patchobject(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patchobject_with_none(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', None)
def test():
self.assertIsNone(Something.attribute, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_multiple_patchobject(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'next_attribute', sentinel.Patched2)
def test():
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
self.assertEqual(Something.next_attribute, sentinel.Patched2,
"unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(Something.next_attribute, sentinel.Original2,
"patch not restored")
def test_object_lookup_is_quite_lazy(self):
global something
original = something
@patch('%s.something' % __name__, sentinel.Something2)
def test():
pass
try:
something = sentinel.replacement_value
test()
self.assertEqual(something, sentinel.replacement_value)
finally:
something = original
def test_patch(self):
@patch('%s.something' % __name__, sentinel.Something2)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
@patch('%s.something' % __name__, sentinel.Something2)
@patch('%s.something_else' % __name__, sentinel.SomethingElse)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"unpatched")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
# Test the patching and restoring works a second time
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
mock = Mock()
mock.return_value = sentinel.Handle
@patch('%s.open' % builtin_string, mock)
def test():
self.assertEqual(open('filename', 'r'), sentinel.Handle,
"open not patched")
test()
test()
self.assertNotEqual(open, mock, "patch not restored")
def test_patch_class_attribute(self):
@patch('%s.SomeClass.class_attribute' % __name__,
sentinel.ClassAttribute)
def test():
self.assertEqual(PTModule.SomeClass.class_attribute,
sentinel.ClassAttribute, "unpatched")
test()
self.assertIsNone(PTModule.SomeClass.class_attribute,
"patch not restored")
def test_patchobject_with_default_mock(self):
class Test(object):
something = sentinel.Original
something2 = sentinel.Original2
@patch.object(Test, 'something')
def test(mock):
self.assertEqual(mock, Test.something,
"Mock not passed into test function")
self.assertIsInstance(mock, MagicMock,
"patch with two arguments did not create a mock")
test()
@patch.object(Test, 'something')
@patch.object(Test, 'something2')
def test(this1, this2, mock1, mock2):
self.assertEqual(this1, sentinel.this1,
"Patched function didn't receive initial argument")
self.assertEqual(this2, sentinel.this2,
"Patched function didn't receive second argument")
self.assertEqual(mock1, Test.something2,
"Mock not passed into test function")
self.assertEqual(mock2, Test.something,
"Second Mock not passed into test function")
self.assertIsInstance(mock1, MagicMock,
"patch with two arguments did not create a mock")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
# A hack to test that new mocks are passed the second time
self.assertNotEqual(outerMock1, mock1, "unexpected value for mock1")
self.assertNotEqual(outerMock2, mock2, "unexpected value for mock2")
return mock1, mock2
outerMock1 = outerMock2 = None
outerMock1, outerMock2 = test(sentinel.this1, sentinel.this2)
# Test that executing a second time creates new mocks
test(sentinel.this1, sentinel.this2)
def test_patch_with_spec(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec(self):
@patch.object(SomeClass, 'class_attribute', spec=SomeClass)
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_patch_with_spec_as_list(self):
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec_as_list(self):
@patch.object(SomeClass, 'class_attribute', spec=['wibble'])
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_nested_patch_with_spec_as_list(self):
# regression test for nested decorators
@patch('%s.open' % builtin_string)
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass, MockOpen):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patch_with_spec_as_boolean(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_object_with_spec_as_boolean(self):
@patch.object(PTModule, 'SomeClass', spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_class_acts_with_spec_is_inherited(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertTrue(is_instance(MockSomeClass, MagicMock))
instance = MockSomeClass()
self.assertNotCallable(instance)
# Should not raise attribute error
instance.wibble
self.assertRaises(AttributeError, lambda: instance.not_wibble)
test()
def test_patch_with_create_mocks_non_existent_attributes(self):
@patch('%s.frooble' % builtin_string, sentinel.Frooble, create=True)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_with_create_mocks_non_existent_attributes(self):
@patch.object(SomeClass, 'frooble', sentinel.Frooble, create=True)
def test():
self.assertEqual(SomeClass.frooble, sentinel.Frooble)
test()
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_wont_create_by_default(self):
try:
@patch('%s.frooble' % builtin_string, sentinel.Frooble)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_wont_create_by_default(self):
try:
@patch.object(SomeClass, 'ord', sentinel.Frooble)
def test():
self.fail('Patching non existent attributes should fail')
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertFalse(hasattr(SomeClass, 'ord'))
def test_patch_builtins_without_create(self):
@patch(__name__+'.ord')
def test_ord(mock_ord):
mock_ord.return_value = 101
return ord('c')
@patch(__name__+'.open')
def test_open(mock_open):
m = mock_open.return_value
m.read.return_value = 'abcd'
fobj = open('doesnotexists.txt')
data = fobj.read()
fobj.close()
return data
self.assertEqual(test_ord(), 101)
self.assertEqual(test_open(), 'abcd')
def test_patch_with_static_methods(self):
class Foo(object):
@staticmethod
def woot():
return sentinel.Static
@patch.object(Foo, 'woot', staticmethod(lambda: sentinel.Patched))
def anonymous():
self.assertEqual(Foo.woot(), sentinel.Patched)
anonymous()
self.assertEqual(Foo.woot(), sentinel.Static)
def test_patch_local(self):
foo = sentinel.Foo
@patch.object(sentinel, 'Foo', 'Foo')
def anonymous():
self.assertEqual(sentinel.Foo, 'Foo')
anonymous()
self.assertEqual(sentinel.Foo, foo)
def test_patch_slots(self):
class Foo(object):
__slots__ = ('Foo',)
foo = Foo()
foo.Foo = sentinel.Foo
@patch.object(foo, 'Foo', 'Foo')
def anonymous():
self.assertEqual(foo.Foo, 'Foo')
anonymous()
self.assertEqual(foo.Foo, sentinel.Foo)
def test_patchobject_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
def not_test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Original,
"non-test method patched")
Foo = patch.object(Something, 'attribute', sentinel.Patched)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self, mock_something):
self.assertEqual(PTModule.something, mock_something,
"unpatched")
def not_test_method(other_self):
self.assertEqual(PTModule.something, sentinel.Something,
"non-test method patched")
Foo = patch('%s.something' % __name__)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
def test_patchobject_twice(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_dict(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')])
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo.values), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo.values, original)
def test_patch_dict_with_clear(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')], clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object_and_clear(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo.values, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo.values, {'a': 'b'})
test()
self.assertEqual(foo.values, original)
def test_name_preserved(self):
foo = {}
@patch('%s.SomeClass' % __name__, object())
@patch('%s.SomeClass' % __name__, object(), autospec=True)
@patch.object(SomeClass, 'class_attribute', object())
@patch.dict(foo)
def some_name():
pass
self.assertEqual(some_name.__name__, 'some_name')
def test_patch_with_exception(self):
foo = {}
@patch.dict(foo, {'a': 'b'})
def test():
raise NameError('Konrad')
try:
test()
except NameError:
pass
else:
self.fail('NameError not raised by test')
self.assertEqual(foo, {})
def test_patch_dict_with_string(self):
@patch.dict('os.environ', {'konrad_delong': 'some value'})
def test():
self.assertIn('konrad_delong', os.environ)
test()
def test_patch_descriptor(self):
# would be some effort to fix this - we could special case the
# builtin descriptors: classmethod, property, staticmethod
return
class Nothing(object):
foo = None
class Something(object):
foo = {}
@patch.object(Nothing, 'foo', 2)
@classmethod
def klass(cls):
self.assertIs(cls, Something)
@patch.object(Nothing, 'foo', 2)
@staticmethod
def static(arg):
return arg
@patch.dict(foo)
@classmethod
def klass_dict(cls):
self.assertIs(cls, Something)
@patch.dict(foo)
@staticmethod
def static_dict(arg):
return arg
# these will raise exceptions if patching descriptors is broken
self.assertEqual(Something.static('f00'), 'f00')
Something.klass()
self.assertEqual(Something.static_dict('f00'), 'f00')
Something.klass_dict()
something = Something()
self.assertEqual(something.static('f00'), 'f00')
something.klass()
self.assertEqual(something.static_dict('f00'), 'f00')
something.klass_dict()
def test_patch_spec_set(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
def test_spec_set_inherit(self):
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
instance = MockClass()
instance.z = 'foo'
self.assertRaises(AttributeError, test)
def test_patch_start_stop(self):
original = something
patcher = patch('%s.something' % __name__)
self.assertIs(something, original)
mock = patcher.start()
try:
self.assertIsNot(mock, original)
self.assertIs(something, mock)
finally:
patcher.stop()
self.assertIs(something, original)
def test_stop_without_start(self):
patcher = patch(foo_name, 'bar', 3)
# calling stop without start used to produce a very obscure error
self.assertRaises(RuntimeError, patcher.stop)
def test_patchobject_start_stop(self):
original = something
patcher = patch.object(PTModule, 'something', 'foo')
self.assertIs(something, original)
replaced = patcher.start()
try:
self.assertEqual(replaced, 'foo')
self.assertIs(something, replaced)
finally:
patcher.stop()
self.assertIs(something, original)
def test_patch_dict_start_stop(self):
d = {'foo': 'bar'}
original = d.copy()
patcher = patch.dict(d, [('spam', 'eggs')], clear=True)
self.assertEqual(d, original)
patcher.start()
try:
self.assertEqual(d, {'spam': 'eggs'})
finally:
patcher.stop()
self.assertEqual(d, original)
def test_patch_dict_class_decorator(self):
this = self
d = {'spam': 'eggs'}
original = d.copy()
class Test(object):
def test_first(self):
this.assertEqual(d, {'foo': 'bar'})
def test_second(self):
this.assertEqual(d, {'foo': 'bar'})
Test = patch.dict(d, {'foo': 'bar'}, clear=True)(Test)
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
def test_get_only_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
for thing in Something, SomethingElse, Something(), SomethingElse:
proxy = _get_proxy(thing)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_get_set_delete_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
for thing in Something, SomethingElse, Something(), SomethingElse:
proxy = _get_proxy(Something, get_only=False)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_patch_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch(foo_name, **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_object_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch.object(Foo, 'f', **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_dict_keyword_args(self):
original = {'foo': 'bar'}
copy = original.copy()
patcher = patch.dict(original, foo=3, bar=4, baz=5)
patcher.start()
try:
self.assertEqual(original, dict(foo=3, bar=4, baz=5))
finally:
patcher.stop()
self.assertEqual(original, copy)
def test_autospec(self):
class Boo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
def _test(mock):
mock(1)
mock.assert_called_with(1)
self.assertRaises(TypeError, mock)
def _test2(mock):
mock.f(1)
mock.f.assert_called_with(1)
self.assertRaises(TypeError, mock.f)
mock.g()
mock.g.assert_called_with()
self.assertRaises(TypeError, mock.g, 1)
self.assertRaises(AttributeError, getattr, mock, 'h')
mock.foo.lower()
mock.foo.lower.assert_called_with()
self.assertRaises(AttributeError, getattr, mock.foo, 'bar')
mock.Bar()
mock.Bar.assert_called_with()
mock.Bar.a()
mock.Bar.a.assert_called_with()
self.assertRaises(TypeError, mock.Bar.a, 1)
mock.Bar().a()
mock.Bar().a.assert_called_with()
self.assertRaises(TypeError, mock.Bar().a, 1)
self.assertRaises(AttributeError, getattr, mock.Bar, 'b')
self.assertRaises(AttributeError, getattr, mock.Bar(), 'b')
def function(mock):
_test(mock)
_test2(mock)
_test2(mock(1))
self.assertIs(mock, Foo)
return mock
test = patch(foo_name, autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
module = sys.modules[__name__]
test = patch.object(module, 'Foo', autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
def test_autospec_function(self):
@patch('%s.function' % __name__, autospec=True)
def test(mock):
function(1)
function.assert_called_with(1)
function(2, 3)
function.assert_called_with(2, 3)
self.assertRaises(TypeError, function)
self.assertRaises(AttributeError, getattr, function, 'foo')
test()
def test_autospec_keywords(self):
@patch('%s.function' % __name__, autospec=True,
return_value=3)
def test(mock_function):
#self.assertEqual(function.abc, 'foo')
return function(1, 2)
result = test()
self.assertEqual(result, 3)
def test_autospec_with_new(self):
patcher = patch('%s.function' % __name__, new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
module = sys.modules[__name__]
patcher = patch.object(module, 'function', new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
def test_autospec_with_object(self):
class Bar(Foo):
extra = []
patcher = patch(foo_name, autospec=Bar)
mock = patcher.start()
try:
self.assertIsInstance(mock, Bar)
self.assertIsInstance(mock.extra, list)
finally:
patcher.stop()
def test_autospec_inherits(self):
FooClass = Foo
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIsInstance(mock, FooClass)
self.assertIsInstance(mock(3), FooClass)
finally:
patcher.stop()
def test_autospec_name(self):
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIn(" name='Foo'", repr(mock))
self.assertIn(" name='Foo.f'", repr(mock.f))
self.assertIn(" name='Foo()'", repr(mock(None)))
self.assertIn(" name='Foo().f'", repr(mock(None).f))
finally:
patcher.stop()
def test_tracebacks(self):
@patch.object(Foo, 'f', object())
def test():
raise AssertionError
try:
test()
except:
err = sys.exc_info()
result = unittest.TextTestResult(None, None, 0)
traceback = result._exc_info_to_string(err, self)
self.assertIn('raise AssertionError', traceback)
def test_new_callable_patch(self):
patcher = patch(foo_name, new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(m1)
def test_new_callable_patch_object(self):
patcher = patch.object(Foo, 'f', new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(m1)
def test_new_callable_keyword_arguments(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, arg1=1, arg2=2)
m = patcher.start()
try:
self.assertIs(type(m), Bar)
self.assertEqual(Bar.kwargs, dict(arg1=1, arg2=2))
finally:
patcher.stop()
def test_new_callable_spec(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, spec=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec=Bar))
finally:
patcher.stop()
patcher = patch(foo_name, new_callable=Bar, spec_set=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec_set=Bar))
finally:
patcher.stop()
def test_new_callable_create(self):
non_existent_attr = '%s.weeeee' % foo_name
p = patch(non_existent_attr, new_callable=NonCallableMock)
self.assertRaises(AttributeError, p.start)
p = patch(non_existent_attr, new_callable=NonCallableMock,
create=True)
m = p.start()
try:
self.assertNotCallable(m, magic=False)
finally:
p.stop()
def test_new_callable_incompatible_with_new(self):
self.assertRaises(
ValueError, patch, foo_name, new=object(), new_callable=MagicMock
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new=object(),
new_callable=MagicMock
)
def test_new_callable_incompatible_with_autospec(self):
self.assertRaises(
ValueError, patch, foo_name, new_callable=MagicMock,
autospec=True
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new_callable=MagicMock,
autospec=True
)
def test_new_callable_inherit_for_mocks(self):
class MockSub(Mock):
pass
MockClasses = (
NonCallableMock, NonCallableMagicMock, MagicMock, Mock, MockSub
)
for Klass in MockClasses:
for arg in 'spec', 'spec_set':
kwargs = {arg: True}
p = patch(foo_name, new_callable=Klass, **kwargs)
m = p.start()
try:
instance = m.return_value
self.assertRaises(AttributeError, getattr, instance, 'x')
finally:
p.stop()
def test_new_callable_inherit_non_mock(self):
class NotAMock(object):
def __init__(self, spec):
self.spec = spec
p = patch(foo_name, new_callable=NotAMock, spec=True)
m = p.start()
try:
self.assertTrue(is_instance(m, NotAMock))
self.assertRaises(AttributeError, getattr, m, 'return_value')
finally:
p.stop()
self.assertEqual(m.spec, Foo)
def test_new_callable_class_decorating(self):
test = self
original = Foo
class SomeTest(object):
def _test(self, mock_foo):
test.assertIsNot(Foo, original)
test.assertIs(Foo, mock_foo)
test.assertIsInstance(Foo, SomeClass)
def test_two(self, mock_foo):
self._test(mock_foo)
def test_one(self, mock_foo):
self._test(mock_foo)
SomeTest = patch(foo_name, new_callable=SomeClass)(SomeTest)
SomeTest().test_one()
SomeTest().test_two()
self.assertIs(Foo, original)
def test_patch_multiple(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher1 = patch.multiple(foo_name, f=1, g=2)
patcher2 = patch.multiple(Foo, f=1, g=2)
for patcher in patcher1, patcher2:
patcher.start()
try:
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 1)
self.assertEqual(Foo.g, 2)
finally:
patcher.stop()
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
@patch.multiple(foo_name, f=3, g=4)
def test():
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 3)
self.assertEqual(Foo.g, 4)
test()
def test_patch_multiple_no_kwargs(self):
self.assertRaises(ValueError, patch.multiple, foo_name)
self.assertRaises(ValueError, patch.multiple, Foo)
def test_patch_multiple_create_mocks(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
def test(f, foo):
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertEqual(Foo.g, 3)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_different_order(self):
# bug revealed by Jython!
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 3)
patcher.attribute_name = 'f'
other = patch.object(Foo, 'g', DEFAULT)
other.attribute_name = 'g'
patcher.additional_patchers = [other]
@patcher
def test(g):
self.assertIs(Foo.g, g)
self.assertEqual(Foo.f, 3)
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_stacked_decorators(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
@patch(foo_name + '.g')
def test1(g, **kwargs):
_test(g, **kwargs)
@patch.multiple(foo_name, f=DEFAULT)
@patch(foo_name + '.g')
@patch.multiple(foo_name, foo=DEFAULT)
def test2(g, **kwargs):
_test(g, **kwargs)
@patch(foo_name + '.g')
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
def test3(g, **kwargs):
_test(g, **kwargs)
def _test(g, **kwargs):
f = kwargs.pop('f')
foo = kwargs.pop('foo')
self.assertFalse(kwargs)
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.g, g)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(g, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test1()
test2()
test3()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_patcher(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher = patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
result = patcher.start()
try:
f = result['f']
foo = result['foo']
self.assertEqual(set(result), set(['f', 'foo']))
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
finally:
patcher.stop()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_decorating_class(self):
test = self
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
class SomeTest(object):
def _test(self, f, foo):
test.assertIs(Foo, original_foo)
test.assertIs(Foo.f, f)
test.assertEqual(Foo.g, 3)
test.assertIs(Foo.foo, foo)
test.assertTrue(is_instance(f, MagicMock))
test.assertTrue(is_instance(foo, MagicMock))
def test_two(self, f, foo):
self._test(f, foo)
def test_one(self, f, foo):
self._test(f, foo)
SomeTest = patch.multiple(
foo_name, f=DEFAULT, g=3, foo=DEFAULT
)(SomeTest)
thing = SomeTest()
thing.test_one()
thing.test_two()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create(self):
patcher = patch.multiple(Foo, blam='blam')
self.assertRaises(AttributeError, patcher.start)
patcher = patch.multiple(Foo, blam='blam', create=True)
patcher.start()
try:
self.assertEqual(Foo.blam, 'blam')
finally:
patcher.stop()
self.assertFalse(hasattr(Foo, 'blam'))
def test_patch_multiple_spec_set(self):
# if spec_set works then we can assume that spec and autospec also
# work as the underlying machinery is the same
patcher = patch.multiple(Foo, foo=DEFAULT, spec_set=['a', 'b'])
result = patcher.start()
try:
self.assertEqual(Foo.foo, result['foo'])
Foo.foo.a(1)
Foo.foo.b(2)
Foo.foo.a.assert_called_with(1)
Foo.foo.b.assert_called_with(2)
self.assertRaises(AttributeError, setattr, Foo.foo, 'c', None)
finally:
patcher.stop()
def test_patch_multiple_new_callable(self):
class Thing(object):
pass
patcher = patch.multiple(
Foo, f=DEFAULT, g=DEFAULT, new_callable=Thing
)
result = patcher.start()
try:
self.assertIs(Foo.f, result['f'])
self.assertIs(Foo.g, result['g'])
self.assertIsInstance(Foo.f, Thing)
self.assertIsInstance(Foo.g, Thing)
self.assertIsNot(Foo.f, Foo.g)
finally:
patcher.stop()
def test_nested_patch_failure(self):
original_f = Foo.f
original_g = Foo.g
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'missing', 1)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_failure(self):
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'missing', 1)
bad.attribute_name = 'missing'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'foo', new_callable=crasher)
bad.attribute_name = 'foo'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_string_subclasses(self):
Foo = type('Foo', (str,), {'fish': 'tasty'})
foo = Foo()
@patch.multiple(foo, fish='nearly gone')
def test():
self.assertEqual(foo.fish, 'nearly gone')
test()
self.assertEqual(foo.fish, 'tasty')
@patch('unittest.mock.patch.TEST_PREFIX', 'foo')
def test_patch_test_prefix(self):
class Foo(object):
thing = 'original'
def foo_one(self):
return self.thing
def foo_two(self):
return self.thing
def test_one(self):
return self.thing
def test_two(self):
return self.thing
Foo = patch.object(Foo, 'thing', 'changed')(Foo)
foo = Foo()
self.assertEqual(foo.foo_one(), 'changed')
self.assertEqual(foo.foo_two(), 'changed')
self.assertEqual(foo.test_one(), 'original')
self.assertEqual(foo.test_two(), 'original')
@patch('unittest.mock.patch.TEST_PREFIX', 'bar')
def test_patch_dict_test_prefix(self):
class Foo(object):
def bar_one(self):
return dict(the_dict)
def bar_two(self):
return dict(the_dict)
def test_one(self):
return dict(the_dict)
def test_two(self):
return dict(the_dict)
the_dict = {'key': 'original'}
Foo = patch.dict(the_dict, key='changed')(Foo)
foo = Foo()
self.assertEqual(foo.bar_one(), {'key': 'changed'})
self.assertEqual(foo.bar_two(), {'key': 'changed'})
self.assertEqual(foo.test_one(), {'key': 'original'})
self.assertEqual(foo.test_two(), {'key': 'original'})
def test_patch_with_spec_mock_repr(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
self.assertIn(" name='SomeClass'", repr(m))
self.assertIn(" name='SomeClass.class_attribute'",
repr(m.class_attribute))
self.assertIn(" name='SomeClass()'", repr(m()))
self.assertIn(" name='SomeClass().class_attribute'",
repr(m().class_attribute))
finally:
p.stop()
def test_patch_nested_autospec_repr(self):
with patch('unittest.test.testmock.support', autospec=True) as m:
self.assertIn(" name='support.SomeClass.wibble()'",
repr(m.SomeClass.wibble()))
self.assertIn(" name='support.SomeClass().wibble()'",
repr(m.SomeClass().wibble()))
def test_mock_calls_with_patch(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
m.wibble()
kalls = [call.wibble()]
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(m.method_calls, kalls)
self.assertEqual(m.wibble.mock_calls, [call()])
result = m()
kalls.append(call())
self.assertEqual(m.mock_calls, kalls)
result.wibble()
kalls.append(call().wibble())
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(result.mock_calls, [call.wibble()])
self.assertEqual(result.wibble.mock_calls, [call()])
self.assertEqual(result.method_calls, [call.wibble()])
finally:
p.stop()
def test_patch_imports_lazily(self):
sys.modules.pop('squizz', None)
p1 = patch('squizz.squozz')
self.assertRaises(ImportError, p1.start)
squizz = Mock()
squizz.squozz = 6
sys.modules['squizz'] = squizz
p1 = patch('squizz.squozz')
squizz.squozz = 3
p1.start()
p1.stop()
self.assertEqual(squizz.squozz, 3)
def test_patch_propagates_exc_on_exit(self):
class holder:
exc_info = None, None, None
class custom_patch(_patch):
def __exit__(self, etype=None, val=None, tb=None):
_patch.__exit__(self, etype, val, tb)
holder.exc_info = etype, val, tb
stop = __exit__
def with_custom_patch(target):
getter, attribute = _get_target(target)
return custom_patch(
getter, attribute, DEFAULT, None, False, None,
None, None, {}
)
@with_custom_patch('squizz.squozz')
def test(mock):
raise RuntimeError
self.assertRaises(RuntimeError, test)
self.assertIs(holder.exc_info[0], RuntimeError)
self.assertIsNotNone(holder.exc_info[1],
'exception value not propagated')
self.assertIsNotNone(holder.exc_info[2],
'exception traceback not propagated')
def test_create_and_specs(self):
for kwarg in ('spec', 'spec_set', 'autospec'):
p = patch('%s.doesnotexist' % __name__, create=True,
**{kwarg: True})
self.assertRaises(TypeError, p.start)
self.assertRaises(NameError, lambda: doesnotexist)
# check that spec with create is innocuous if the original exists
p = patch(MODNAME, create=True, **{kwarg: True})
p.start()
p.stop()
def test_multiple_specs(self):
original = PTModule
for kwarg in ('spec', 'spec_set'):
p = patch(MODNAME, autospec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec_set', 'autospec'):
p = patch(MODNAME, spec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
def test_specs_false_instead_of_none(self):
p = patch(MODNAME, spec=False, spec_set=False, autospec=False)
mock = p.start()
try:
# no spec should have been set, so attribute access should not fail
mock.does_not_exist
mock.does_not_exist = 3
finally:
p.stop()
def test_falsey_spec(self):
for kwarg in ('spec', 'autospec', 'spec_set'):
p = patch(MODNAME, **{kwarg: 0})
m = p.start()
try:
self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_spec_set_true(self):
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=True, **{kwarg: True})
m = p.start()
try:
self.assertRaises(AttributeError, setattr, m,
'doesnotexist', 'something')
self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_callable_spec_as_list(self):
spec = ('__call__',)
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertTrue(callable(m))
finally:
p.stop()
def test_not_callable_spec_as_list(self):
spec = ('foo', 'bar')
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertFalse(callable(m))
finally:
p.stop()
def test_patch_stopall(self):
unlink = os.unlink
chdir = os.chdir
path = os.path
patch('os.unlink', something).start()
patch('os.chdir', something_else).start()
@patch('os.path')
def patched(mock_path):
patch.stopall()
self.assertIs(os.path, mock_path)
self.assertIs(os.unlink, unlink)
self.assertIs(os.chdir, chdir)
patched()
self.assertIs(os.path, path)
def test_stopall_lifo(self):
stopped = []
class thing(object):
one = two = three = None
def get_patch(attribute):
class mypatch(_patch):
def stop(self):
stopped.append(attribute)
return super(mypatch, self).stop()
return mypatch(lambda: thing, attribute, None, None,
False, None, None, None, {})
[get_patch(val).start() for val in ("one", "two", "three")]
patch.stopall()
self.assertEqual(stopped, ["three", "two", "one"])
if __name__ == '__main__':
unittest.main()
|
bartsidee/bartsidee-boxee
|
refs/heads/master
|
sources/tv/external/workerpool/exceptions.py
|
8
|
# exceptions.py - Exceptions used in the operation of a worker pool
# Copyright (c) 2008 Andrey Petrov
#
# This module is part of workerpool and is released under
# the MIT license: http://www.opensource.org/licenses/mit-license.php
class TerminationNotice(Exception):
"This exception is raised inside a thread when it's time for it to die."
pass
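# A minimal usage sketch (not part of the original module): a worker loop
# that treats TerminationNotice as its shutdown signal. The `jobs` queue
# and the job's `run()` method are illustrative assumptions, not
# guarantees about the workerpool API.
def _example_worker_loop(jobs):
    """Drain `jobs` until one of them raises TerminationNotice."""
    while True:
        job = jobs.get()
        try:
            job.run()
        except TerminationNotice:
            break  # the pool has asked this thread to die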
|
mattvick/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/lru_cache.py
|
134
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class Node():
def __init__(self, key, value):
self.key = key
self.value = value
self.prev = None
self.next = None
class LRUCache():
"""An implementation of Least Recently Used (LRU) Cache."""
def __init__(self, capacity):
"""Initializes a lru cache with the given capacity.
Args:
capacity: The capacity of the cache.
"""
assert capacity > 0, "capacity (%s) must be greater than zero." % capacity
self._first = None
self._last = None
self._dict = {}
self._capacity = capacity
def __setitem__(self, key, value):
if key in self._dict:
self.__delitem__(key)
if not self._first:
self._one_node(key, value)
return
if len(self._dict) >= self._capacity:
del self._dict[self._last.key]
if self._capacity == 1:
self._one_node(key, value)
return
self._last = self._last.next
self._last.prev = None
node = Node(key, value)
node.prev = self._first
self._first.next = node
self._first = node
self._dict[key] = node
def _one_node(self, key, value):
node = Node(key, value)
self._dict[key] = node
self._first = node
self._last = node
def __getitem__(self, key):
if not self._first:
raise KeyError(str(key))
if self._first.key == key:
return self._first.value
if self._last.key == key:
next_last = self._last.next
next_last.prev = None
next_first = self._last
next_first.prev = self._first
next_first.next = None
self._first.next = next_first
self._first = next_first
self._last = next_last
return self._first.value
node = self._dict[key]
node.next.prev = node.prev
node.prev.next = node.next
node.prev = self._first
node.next = None
self._first.next = node
self._first = node
return self._first.value
def __delitem__(self, key):
node = self._dict[key]
del self._dict[key]
if self._first is self._last:
self._last = None
self._first = None
return
if self._first is node:
self._first = node.prev
self._first.next = None
return
if self._last is node:
self._last = node.next
self._last.prev = None
return
node.next.prev = node.prev
node.prev.next = node.next
def __len__(self):
return len(self._dict)
def __contains__(self, key):
return key in self._dict
def __iter__(self):
return iter(self._dict)
def items(self):
return [(key, node.value) for key, node in self._dict.items()]
def values(self):
return [node.value for node in self._dict.values()]
def keys(self):
return self._dict.keys()
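# A minimal usage sketch (not part of the original module) demonstrating
# the eviction order implemented above: with capacity 2, touching 'a'
# makes 'b' the least recently used entry, so inserting 'c' evicts 'b'.
if __name__ == '__main__':
    cache = LRUCache(2)
    cache['a'] = 1
    cache['b'] = 2
    cache['a']       # touch 'a'; 'b' is now least recently used
    cache['c'] = 3   # evicts 'b'
    assert 'b' not in cache
    assert cache['a'] == 1 and cache['c'] == 3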
|
Awesomeomics/webserver
|
refs/heads/master
|
env/lib/python2.7/site-packages/setuptools/tests/test_build_ext.py
|
166
|
"""build_ext tests
"""
import unittest
from distutils.command.build_ext import build_ext as distutils_build_ext
from setuptools.command.build_ext import build_ext
from setuptools.dist import Distribution
class TestBuildExtTest(unittest.TestCase):
def test_get_ext_filename(self):
# setuptools needs to give back the same
# result as distutils, even if the fullname
# is not in ext_map
dist = Distribution()
cmd = build_ext(dist)
cmd.ext_map['foo/bar'] = ''
res = cmd.get_ext_filename('foo')
wanted = distutils_build_ext.get_ext_filename(cmd, 'foo')
assert res == wanted
|
miptliot/edx-platform
|
refs/heads/ginkgo_openedu_docker
|
cms/djangoapps/contentstore/push_notification.py
|
8
|
"""
Helper methods for push notifications from Studio.
"""
from logging import exception as log_exception
from uuid import uuid4
from django.conf import settings
from contentstore.models import PushNotificationConfig
from contentstore.tasks import push_course_update_task
from parse_rest.connection import register
from parse_rest.core import ParseError
from parse_rest.installation import Push
from xmodule.modulestore.django import modulestore
def push_notification_enabled():
"""
Returns whether the push notification feature is enabled.
"""
return PushNotificationConfig.is_enabled()
def enqueue_push_course_update(update, course_key):
"""
Enqueues a task for push notification for the given update for the given course if
(1) the feature is enabled and
(2) push_notification is selected for the update
"""
if push_notification_enabled() and update.get("push_notification_selected"):
course = modulestore().get_course(course_key)
if course:
push_course_update_task.delay(
unicode(course_key),
course.clean_id(padding_char='_'),
course.display_name
)
def send_push_course_update(course_key_string, course_subscription_id, course_display_name):
"""
Sends a push notification for a course update, given the course's subscription_id and display_name.
"""
if settings.PARSE_KEYS:
try:
register(
settings.PARSE_KEYS["APPLICATION_ID"],
settings.PARSE_KEYS["REST_API_KEY"],
)
push_payload = {
"action": "course.announcement",
"notification-id": unicode(uuid4()),
"course-id": course_key_string,
"course-name": course_display_name,
}
push_channels = [course_subscription_id]
# Push to all Android devices
Push.alert(
data=push_payload,
channels={"$in": push_channels},
where={"deviceType": "android"},
)
# Push to all iOS devices
# With additional payload so that
# 1. The push is displayed automatically
# 2. The app gets it even in the background.
# See http://stackoverflow.com/questions/19239737/silent-push-notification-in-ios-7-does-not-work
push_payload.update({
"alert": "",
"content-available": 1
})
Push.alert(
data=push_payload,
channels={"$in": push_channels},
where={"deviceType": "ios"},
)
except ParseError as error:
log_exception(error.message)
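# A minimal usage sketch (not part of the original module). The values
# below are hypothetical; in production send_push_course_update() is
# invoked via the push_course_update_task enqueued by
# enqueue_push_course_update(), not called directly.
def _example_send_update():
    send_push_course_update(
        course_key_string=u"course-v1:edX+DemoX+Demo_2017",
        course_subscription_id=u"edX_DemoX_Demo_2017",
        course_display_name=u"Demonstration Course",
    )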
|
luchesar/emibg
|
refs/heads/master
|
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/make.py
|
896
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last between different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
from gyp.common import GypError
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
'PRODUCT_DIR': '$(builddir)',
'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
'RULE_INPUT_PATH': '$(abspath $<)',
'RULE_INPUT_EXT': '$(suffix $<)',
'RULE_INPUT_NAME': '$(notdir $<)',
'CONFIGURATION_NAME': '$(BUILDTYPE)',
}
# Make supports multiple toolsets
generator_supports_multiple_toolsets = True
# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
flavor = gyp.common.GetFlavor(params)
if flavor == 'mac':
default_variables.setdefault('OS', 'mac')
default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
default_variables.setdefault('SHARED_LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
default_variables.setdefault('LIB_DIR',
generator_default_variables['PRODUCT_DIR'])
# Copy additional generator configuration data from Xcode, which is shared
# by the Mac Make generator.
import gyp.generator.xcode as xcode_generator
global generator_additional_non_configuration_keys
generator_additional_non_configuration_keys = getattr(xcode_generator,
'generator_additional_non_configuration_keys', [])
global generator_additional_path_sections
generator_additional_path_sections = getattr(xcode_generator,
'generator_additional_path_sections', [])
global generator_extra_sources_for_rules
generator_extra_sources_for_rules = getattr(xcode_generator,
'generator_extra_sources_for_rules', [])
COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
else:
operating_system = flavor
if flavor == 'android':
operating_system = 'linux' # Keep this legacy behavior for now.
default_variables.setdefault('OS', operating_system)
default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
default_variables.setdefault('SHARED_LIB_DIR', '$(builddir)/lib.$(TOOLSET)')
default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
"""Calculate the generator specific info that gets fed to input (called by
gyp)."""
generator_flags = params.get('generator_flags', {})
android_ndk_version = generator_flags.get('android_ndk_version', None)
# Android NDK requires a strict link order.
if android_ndk_version:
global generator_wants_sorted_dependencies
generator_wants_sorted_dependencies = True
output_dir = params['options'].generator_output or \
params['options'].toplevel_dir
builddir_name = generator_flags.get('output_dir', 'out')
qualified_out_dir = os.path.normpath(os.path.join(
output_dir, builddir_name, 'gypfiles'))
global generator_filelist_paths
generator_filelist_paths = {
'toplevel': params['options'].toplevel_dir,
'qualified_out_dir': qualified_out_dir,
}
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
# Chromium\ Framework.framework/foo
# is for example
# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
SPACE_REPLACEMENT = '?'
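# A minimal sketch (not part of the original file) of the substitution
# described above; the make-level replace_spaces/unreplace_spaces
# functions defined later in SHARED_HEADER do the same thing inside make.
def _example_replace_spaces(path):
    """E.g. 'Chromium Framework.framework/foo' ->
    'Chromium?Framework.framework/foo'."""
    return path.replace(' ', SPACE_REPLACEMENT)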
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
LINK ?= $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host)
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host)
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = rm -rf "$@" && cp %(copy_archive_args)s "$<" "$@"
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
writer.write('# Suffix rules, putting all outputs into $(obj).\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n# Try building from generated source, too.\n')
for ext in extensions:
writer.write(
'$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
for ext in extensions:
writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
writer.write('\n')
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 'cc',
'.S': 'cc',
}
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
for res in (filename.endswith(e) for e in COMPILABLE_EXTENSIONS):
if res:
return True
return False
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def Target(filename):
"""Translate a compilable filename to its .o target."""
return os.path.splitext(filename)[0] + '.o'
def EscapeShellArgument(s):
"""Quotes an argument so that it will be interpreted literally by a POSIX
shell. Taken from
http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
"""
return "'" + s.replace("'", "'\\''") + "'"
def EscapeMakeVariableExpansion(s):
"""Make has its own variable expansion syntax using $. We must escape it for
string to be interpreted literally."""
return s.replace('$', '$$')
def EscapeCppDefine(s):
"""Escapes a CPP define so that it will reach the compiler unaltered."""
s = EscapeShellArgument(s)
s = EscapeMakeVariableExpansion(s)
# '#' characters must be escaped even embedded in a string, else Make will
# treat them as the start of a comment.
return s.replace('#', r'\#')
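# A minimal illustration (not part of the original file) of the escaping
# pipeline above: shell-quote the define, double '$' for make, then
# backslash-escape '#'. The define below is a hypothetical example.
assert EscapeCppDefine('VERSION="1.0 #beta"') == "'VERSION=\"1.0 \\#beta\"'"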
def QuoteIfNecessary(string):
"""TODO: Should this ideally be replaced with one or more of the above
functions?"""
if '"' in string:
string = '"' + string.replace('"', '\\"') + '"'
return string
def StringToMakefileVariable(string):
"""Convert a string to a value that is acceptable as a make variable name."""
return re.sub('[^a-zA-Z0-9_]', '_', string)
srcdir_prefix = ''
def Sourceify(path):
"""Convert a path to its source directory form."""
if '$(' in path:
return path
if os.path.isabs(path):
return path
return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
return s.replace(' ', quote)
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'libtool on OS X will generate' +
' warnings for them.')
raise GypError('Duplicate basenames in sources section, see list above')
# Map from qualified target to path to output.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class MakefileWriter(object):
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
def __init__(self, generator_flags, flavor):
self.generator_flags = generator_flags
self.flavor = flavor
self.suffix_rules_srcdir = {}
self.suffix_rules_objdir1 = {}
self.suffix_rules_objdir2 = {}
# Generate suffix rules for all compilable extensions.
for ext in COMPILABLE_EXTENSIONS.keys():
# Suffix rules for source folder.
self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
# Suffix rules for generated source files.
self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
def Write(self, qualified_target, base_path, output_filename, spec, configs,
part_of_all):
"""The main entry point: writes a .mk file for a single target.
Arguments:
qualified_target: target we're generating
base_path: path relative to source root we're building in, used to resolve
target-relative paths
output_filename: output .mk file name to write
spec, configs: gyp info
part_of_all: flag indicating this target is part of 'all'
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
self.qualified_target = qualified_target
self.path = base_path
self.target = spec['target_name']
self.type = spec['type']
self.toolset = spec['toolset']
self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
if self.flavor == 'mac':
self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
else:
self.xcode_settings = None
deps, link_deps = self.ComputeDeps(spec)
# Some of the generation below can add extra output, sources, or
# link dependencies. All of the out params of the functions that
# follow use names like extra_foo.
extra_outputs = []
extra_sources = []
extra_link_deps = []
extra_mac_bundle_resources = []
mac_bundle_deps = []
if self.is_mac_bundle:
self.output = self.ComputeMacBundleOutput(spec)
self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
else:
self.output = self.output_binary = self.ComputeOutput(spec)
self.is_standalone_static_library = bool(
spec.get('standalone_static_library', 0))
self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
'shared_library')
if (self.is_standalone_static_library or
self.type in self._INSTALLABLE_TARGETS):
self.alias = os.path.basename(self.output)
install_path = self._InstallableTargetInstallPath()
else:
self.alias = self.output
install_path = self.output
self.WriteLn("TOOLSET := " + self.toolset)
self.WriteLn("TARGET := " + self.target)
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
self.WriteActions(spec['actions'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
# Rules must be early like actions.
if 'rules' in spec:
self.WriteRules(spec['rules'], extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all)
if 'copies' in spec:
self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
# Bundle resources.
if self.is_mac_bundle:
all_mac_bundle_resources = (
spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
self.WriteMacInfoPlist(mac_bundle_deps)
# Sources.
all_sources = spec.get('sources', []) + extra_sources
if all_sources:
if self.flavor == 'mac':
# libtool on OS X generates warnings for duplicate basenames in the same
# target.
_ValidateSourcesForOSX(spec, all_sources)
self.WriteSources(
configs, deps, all_sources, extra_outputs,
extra_link_deps, part_of_all,
gyp.xcode_emulation.MacPrefixHeader(
self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
self.Pchify))
sources = filter(Compilable, all_sources)
if sources:
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
extensions = set([os.path.splitext(s)[1] for s in sources])
for ext in extensions:
if ext in self.suffix_rules_srcdir:
self.WriteLn(self.suffix_rules_srcdir[ext])
self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
for ext in extensions:
if ext in self.suffix_rules_objdir1:
self.WriteLn(self.suffix_rules_objdir1[ext])
for ext in extensions:
if ext in self.suffix_rules_objdir2:
self.WriteLn(self.suffix_rules_objdir2[ext])
self.WriteLn('# End of this set of suffix rules')
# Add dependency from bundle to bundle binary.
if self.is_mac_bundle:
mac_bundle_deps.append(self.output_binary)
self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
mac_bundle_deps, extra_outputs, part_of_all)
# Update global list of target outputs, used in dependency tracking.
target_outputs[qualified_target] = install_path
# Update global list of link dependencies.
if self.type in ('static_library', 'shared_library'):
target_link_deps[qualified_target] = self.output_binary
# Currently any versions have the same effect, but in future the behavior
# could be different.
if self.generator_flags.get('android_ndk_version', None):
self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small, wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
def WriteActions(self, actions, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'actions' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
actions (used to make other pieces dependent on these
actions)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for action in actions:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
action['action_name']))
self.WriteLn('### Rules for action "%s":' % action['action_name'])
inputs = action['inputs']
outputs = action['outputs']
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set()
for out in outputs:
dir = os.path.split(out)[0]
if dir:
dirs.add(dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
# Write the actual command.
action_commands = action['action']
if self.flavor == 'mac':
action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action_commands]
command = gyp.common.EncodePOSIXShellList(action_commands)
if 'message' in action:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
else:
self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
if len(dirs) > 0:
command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# command and cd_action get written to a toplevel variable called
# cmd_foo. Toplevel variables can't handle things that change per
# makefile like $(TARGET), so hardcode the target.
command = command.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the action runs an executable from this
# build which links to shared libs from this build.
# actions run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
'$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
'export LD_LIBRARY_PATH; '
'%s%s'
% (name, cd_action, command))
self.WriteLn()
outputs = map(self.Absolutify, outputs)
# The makefile rules are all relative to the top dir, but the gyp actions
# are defined relative to their containing dir. This replaces the obj
# variable for the action rule with an absolute version so that the output
# goes in the right place.
# Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
# it's superfluous for the "extra outputs", and this avoids accidentally
# writing duplicate dummy rules for those outputs.
# Same for environment.
self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
for input in inputs:
assert ' ' not in input, (
"Spaces in action input filenames not supported (%s)" % input)
for output in outputs:
assert ' ' not in output, (
"Spaces in action output filenames not supported (%s)" % output)
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
part_of_all=part_of_all, command=name)
# Stuff the outputs in a variable so we can refer to them later.
outputs_variable = 'action_%s_outputs' % name
self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn()
self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs,
extra_mac_bundle_resources, part_of_all):
"""Write Makefile code for any 'rules' from the gyp input.
extra_sources: a list that will be filled in with newly generated source
files, if any
extra_outputs: a list that will be filled in with any outputs of these
rules (used to make other pieces dependent on these rules)
part_of_all: flag indicating this target is part of 'all'
"""
env = self.GetSortedXcodeEnv()
for rule in rules:
name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
rule['rule_name']))
count = 0
self.WriteLn('### Generated for rule %s:' % name)
all_outputs = []
for rule_source in rule.get('rule_sources', []):
dirs = set()
(rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
(rule_source_root, rule_source_ext) = \
os.path.splitext(rule_source_basename)
outputs = [self.ExpandInputRoot(out, rule_source_root,
rule_source_dirname)
for out in rule['outputs']]
for out in outputs:
dir = os.path.dirname(out)
if dir:
dirs.add(dir)
if int(rule.get('process_outputs_as_sources', False)):
extra_sources += outputs
if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
extra_mac_bundle_resources += outputs
inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
rule.get('inputs', [])))
actions = ['$(call do_cmd,%s_%d)' % (name, count)]
if name == 'resources_grit':
# HACK: This is ugly. Grit intentionally doesn't touch the
# timestamp of its output file when the file doesn't change,
# which is fine in hash-based dependency systems like scons
# and forge, but not kosher in the make world. After some
# discussion, hacking around it here seems like the least
# amount of pain.
actions += ['@touch --no-create $@']
# See the comment in WriteCopies about expanding env vars.
outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
outputs = map(self.Absolutify, outputs)
all_outputs += outputs
# Only write the 'obj' and 'builddir' rules for the "primary" output
# (:1); it's superfluous for the "extra outputs", and this avoids
# accidentally writing duplicate dummy rules for those outputs.
self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
self.WriteMakeRule(outputs, inputs, actions,
command="%s_%d" % (name, count))
# Spaces in rule filenames are not supported, but rule variables have
# spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
# The spaces within the variables are valid, so remove the variables
# before checking.
variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
for output in outputs:
output = re.sub(variables_with_spaces, '', output)
assert ' ' not in output, (
"Spaces in rule filenames not yet supported (%s)" % output)
self.WriteLn('all_deps += %s' % ' '.join(outputs))
action = [self.ExpandInputRoot(ac, rule_source_root,
rule_source_dirname)
for ac in rule['action']]
mkdirs = ''
if len(dirs) > 0:
mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
cd_action = 'cd %s; ' % Sourceify(self.path or '.')
# action, cd_action, and mkdirs get written to a toplevel variable
# called cmd_foo. Toplevel variables can't handle things that change
# per makefile like $(TARGET), so hardcode the target.
if self.flavor == 'mac':
action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
for command in action]
action = gyp.common.EncodePOSIXShellList(action)
action = action.replace('$(TARGET)', self.target)
cd_action = cd_action.replace('$(TARGET)', self.target)
mkdirs = mkdirs.replace('$(TARGET)', self.target)
# Set LD_LIBRARY_PATH in case the rule runs an executable from this
# build which links to shared libs from this build.
# rules run on the host, so they should in theory only use host
# libraries, but until everything is made cross-compile safe, also use
# target libraries.
# TODO(piman): when everything is cross-compile safe, remove lib.target
self.WriteLn(
"cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
"$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
"export LD_LIBRARY_PATH; "
"%(cd_action)s%(mkdirs)s%(action)s" % {
'action': action,
'cd_action': cd_action,
'count': count,
'mkdirs': mkdirs,
'name': name,
})
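        # The emitted assignment looks roughly like this (hypothetical rule
        # name 'mytarget_myrule' with count 0, cd into 'src', one created dir):
        #   cmd_mytarget_myrule_0 = LD_LIBRARY_PATH=$(builddir)/lib.host:
        #       $(builddir)/lib.target:$$LD_LIBRARY_PATH; export LD_LIBRARY_PATH;
        #       cd src; mkdir -p gen; python gen.py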
self.WriteLn(
'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
'count': count,
'name': name,
})
self.WriteLn()
count += 1
outputs_variable = 'rule_%s_outputs' % name
self.WriteList(all_outputs, outputs_variable)
extra_outputs.append('$(%s)' % outputs_variable)
self.WriteLn('### Finished generating for rule: %s' % name)
self.WriteLn()
self.WriteLn('### Finished generating for all rules')
self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
"""Write Makefile code for any 'copies' from the gyp input.
extra_outputs: a list that will be filled in with any outputs of this action
(used to make other pieces dependent on this action)
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Generated for copy rule.')
variable = StringToMakefileVariable(self.qualified_target + '_copies')
outputs = []
for copy in copies:
for path in copy['files']:
# Absolutify() may call normpath, and will strip trailing slashes.
path = Sourceify(self.Absolutify(path))
filename = os.path.split(path)[1]
output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
filename)))
# If the output path has variables in it, which happens in practice for
# 'copies', writing the environment as target-local doesn't work,
# because the variables are already needed for the target name.
# Copying the environment variables into global make variables doesn't
# work either, because then the .d files will potentially contain spaces
# after variable expansion, and .d file handling cannot handle spaces.
# As a workaround, manually expand variables at gyp time. Since 'copies'
# can't run scripts, there's no need to write the env then.
# WriteDoCmd() will escape spaces for .d files.
env = self.GetSortedXcodeEnv()
output = gyp.xcode_emulation.ExpandEnvVars(output, env)
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
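        # For example, a 'copies' destination under '$(BUILT_PRODUCTS_DIR)'
        # is expanded here at gyp time instead of being left for make.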
self.WriteDoCmd([output], [path], 'copy', part_of_all)
outputs.append(output)
self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
extra_outputs.append('$(%s)' % variable)
self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
"""Writes Makefile code for 'mac_bundle_resources'."""
self.WriteLn('### Generated for mac_bundle_resources')
for output, res in gyp.xcode_emulation.GetMacBundleResources(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
map(Sourceify, map(self.Absolutify, resources))):
_, ext = os.path.splitext(output)
if ext != '.xcassets':
        # Make does not support '.xcassets' emulation.
self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
part_of_all=True)
bundle_deps.append(output)
def WriteMacInfoPlist(self, bundle_deps):
"""Write Makefile code for bundle Info.plist files."""
info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
lambda p: Sourceify(self.Absolutify(p)))
if not info_plist:
return
if defines:
# Create an intermediate file to store preprocessed results.
intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
os.path.basename(info_plist))
self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
quoter=EscapeCppDefine)
self.WriteMakeRule([intermediate_plist], [info_plist],
['$(call do_cmd,infoplist)',
# "Convert" the plist so that any weird whitespace changes from the
# preprocessor do not affect the XML parser in mac_tool.
'@plutil -convert xml1 $@ $@'])
info_plist = intermediate_plist
    # plists can contain envvars; export the env so they get substituted when
    # the plist is copied.
self.WriteSortedXcodeEnv(
out, self.GetSortedXcodeEnv(additional_settings=extra_env))
self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
part_of_all=True)
bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
extra_outputs, extra_link_deps,
part_of_all, precompiled_header):
"""Write Makefile code for any 'sources' from the gyp input.
These are source files necessary to build the current target.
configs, deps, sources: input from gyp.
extra_outputs: a list of extra outputs this action should be dependent on;
used to serialize action/rules before compilation
extra_link_deps: a list that will be filled in with any outputs of
compilation (to be used in link lines)
part_of_all: flag indicating this target is part of 'all'
"""
# Write configuration-specific variables for CFLAGS, etc.
for configname in sorted(configs.keys()):
config = configs[configname]
self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
quoter=EscapeCppDefine)
if self.flavor == 'mac':
cflags = self.xcode_settings.GetCflags(configname)
cflags_c = self.xcode_settings.GetCflagsC(configname)
cflags_cc = self.xcode_settings.GetCflagsCC(configname)
cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
else:
cflags = config.get('cflags')
cflags_c = config.get('cflags_c')
cflags_cc = config.get('cflags_cc')
self.WriteLn("# Flags passed to all source files.");
self.WriteList(cflags, 'CFLAGS_%s' % configname)
self.WriteLn("# Flags passed to only C files.");
self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
self.WriteLn("# Flags passed to only C++ files.");
self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
if self.flavor == 'mac':
self.WriteLn("# Flags passed to only ObjC files.");
self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
self.WriteLn("# Flags passed to only ObjC++ files.");
self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
includes = config.get('include_dirs')
if includes:
includes = map(Sourceify, map(self.Absolutify, includes))
self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
compilable = filter(Compilable, sources)
objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
self.WriteList(objs, 'OBJS')
for obj in objs:
assert ' ' not in obj, (
"Spaces in object filenames not supported (%s)" % obj)
self.WriteLn('# Add to the list of files we specially track '
'dependencies for.')
self.WriteLn('all_deps += $(OBJS)')
self.WriteLn()
# Make sure our dependencies are built first.
if deps:
self.WriteMakeRule(['$(OBJS)'], deps,
comment = 'Make sure our dependencies are built '
'before any of us.',
order_only = True)
# Make sure the actions and rules run first.
# If they generate any extra headers etc., the per-.o file dep tracking
# will catch the proper rebuilds, so order only is still ok here.
if extra_outputs:
self.WriteMakeRule(['$(OBJS)'], extra_outputs,
comment = 'Make sure our actions/rules run '
'before any of us.',
order_only = True)
    pchdeps = precompiled_header.GetObjDependencies(compilable, objs)
if pchdeps:
self.WriteLn('# Dependencies from obj files to their precompiled headers')
for source, obj, gch in pchdeps:
self.WriteLn('%s: %s' % (obj, gch))
self.WriteLn('# End precompiled header dependencies')
if objs:
extra_link_deps.append('$(OBJS)')
self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
self.WriteLn("$(OBJS): GYP_CFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('c') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('cc') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE))")
if self.flavor == 'mac':
self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('m') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_C_$(BUILDTYPE)) "
"$(CFLAGS_OBJC_$(BUILDTYPE))")
self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"%s " % precompiled_header.GetInclude('mm') +
"$(CFLAGS_$(BUILDTYPE)) "
"$(CFLAGS_CC_$(BUILDTYPE)) "
"$(CFLAGS_OBJCC_$(BUILDTYPE))")
self.WritePchTargets(precompiled_header.GetPchBuildCommands())
# If there are any object files in our input file list, link them into our
# output.
extra_link_deps += filter(Linkable, sources)
self.WriteLn()
def WritePchTargets(self, pch_commands):
"""Writes make rules to compile prefix headers."""
if not pch_commands:
return
for gch, lang_flag, lang, input in pch_commands:
extra_flags = {
'c': '$(CFLAGS_C_$(BUILDTYPE))',
'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
}[lang]
var_name = {
'c': 'GYP_PCH_CFLAGS',
'cc': 'GYP_PCH_CXXFLAGS',
'm': 'GYP_PCH_OBJCFLAGS',
'mm': 'GYP_PCH_OBJCXXFLAGS',
}[lang]
self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
"$(DEFS_$(BUILDTYPE)) "
"$(INCS_$(BUILDTYPE)) "
"$(CFLAGS_$(BUILDTYPE)) " +
extra_flags)
self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
self.WriteLn('')
assert ' ' not in gch, (
"Spaces in gch filenames not supported (%s)" % gch)
self.WriteLn('all_deps += %s' % gch)
self.WriteLn('')
def ComputeOutputBasename(self, spec):
"""Return the 'output basename' of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'libfoobar.so'
"""
assert not self.is_mac_bundle
if self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module'):
return self.xcode_settings.GetExecutablePath()
target = spec['target_name']
target_prefix = ''
target_ext = ''
if self.type == 'static_library':
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.a'
elif self.type in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = 'lib'
target_ext = '.so'
elif self.type == 'none':
target = '%s.stamp' % target
elif self.type != 'executable':
print ("ERROR: What output file should be generated?",
"type", self.type, "target", target)
target_prefix = spec.get('product_prefix', target_prefix)
target = spec.get('product_name', target)
product_ext = spec.get('product_extension')
if product_ext:
target_ext = '.' + product_ext
return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
"""Return the 'output' (full output path) of a gyp spec.
E.g., the loadable module 'foobar' in directory 'baz' will produce
'$(obj)/baz/libfoobar.so'
"""
assert not self.is_mac_bundle
path = os.path.join('$(obj).' + self.toolset, self.path)
if self.type == 'executable' or self._InstallImmediately():
path = '$(builddir)'
path = spec.get('product_dir', path)
return os.path.join(path, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
"""Return the 'output' (full output path) to a bundle output directory."""
assert self.is_mac_bundle
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
"""Return the 'output' (full output path) to the binary in a bundle."""
path = generator_default_variables['PRODUCT_DIR']
return os.path.join(path, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
"""Compute the dependencies of a gyp spec.
Returns a tuple (deps, link_deps), where each is a list of
filenames that will need to be put in front of make for either
building (deps) or linking (link_deps).
"""
deps = []
link_deps = []
if 'dependencies' in spec:
deps.extend([target_outputs[dep] for dep in spec['dependencies']
if target_outputs[dep]])
for dep in spec['dependencies']:
if dep in target_link_deps:
link_deps.append(target_link_deps[dep])
deps.extend(link_deps)
# TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
# This hack makes it work:
# link_deps.extend(spec.get('libraries', []))
return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
self.WriteMakeRule([self.output_binary], extra_outputs,
comment = 'Build our special outputs first.',
order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
extra_outputs, part_of_all):
"""Write Makefile code to produce the final target of the gyp spec.
spec, configs: input from gyp.
deps, link_deps: dependency lists; see ComputeDeps()
extra_outputs: any extra outputs that our target should depend on
part_of_all: flag indicating this target is part of 'all'
"""
self.WriteLn('### Rules for final target.')
if extra_outputs:
self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
self.WriteMakeRule(extra_outputs, deps,
comment=('Preserve order dependency of '
'special output on deps.'),
order_only = True)
target_postbuilds = {}
if self.type != 'none':
for configname in sorted(configs.keys()):
config = configs[configname]
if self.flavor == 'mac':
ldflags = self.xcode_settings.GetLdflags(configname,
generator_default_variables['PRODUCT_DIR'],
lambda p: Sourceify(self.Absolutify(p)))
# TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
gyp_to_build = gyp.common.InvertRelativePath(self.path)
target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
configname,
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output))),
QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
self.output_binary))))
if target_postbuild:
target_postbuilds[configname] = target_postbuild
else:
ldflags = config.get('ldflags', [])
# Compute an rpath for this output if needed.
if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
# We want to get the literal string "$ORIGIN" into the link command,
# so we need lots of escaping.
ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
self.toolset)
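            # Make collapses '$$' to '$', leaving '\$'; the shell then strips
            # the backslash, so the linker sees a literal '$ORIGIN/lib.target/'
            # (for the 'target' toolset) as the runtime search path.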
library_dirs = config.get('library_dirs', [])
ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
if self.flavor == 'mac':
self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
'LIBTOOLFLAGS_%s' % configname)
libraries = spec.get('libraries')
if libraries:
# Remove duplicate entries
libraries = gyp.common.uniquer(libraries)
if self.flavor == 'mac':
libraries = self.xcode_settings.AdjustLibraries(libraries)
self.WriteList(libraries, 'LIBS')
self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
if self.flavor == 'mac':
self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
QuoteSpaces(self.output_binary))
# Postbuild actions. Like actions, but implicitly depend on the target's
# output.
postbuilds = []
if self.flavor == 'mac':
if target_postbuilds:
postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
postbuilds.extend(
gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
if postbuilds:
# Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
# so we must output its definition first, since we declare variables
# using ":=".
self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
for configname in target_postbuilds:
self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
(QuoteSpaces(self.output),
configname,
gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
# Postbuilds expect to be run in the gyp file's directory, so insert an
# implicit postbuild to cd to there.
postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
for i in xrange(len(postbuilds)):
if not postbuilds[i].startswith('$'):
postbuilds[i] = EscapeShellArgument(postbuilds[i])
self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
self.WriteLn('%s: POSTBUILDS := %s' % (
QuoteSpaces(self.output), ' '.join(postbuilds)))
# A bundle directory depends on its dependencies such as bundle resources
# and bundle binary. When all dependencies have been built, the bundle
# needs to be packaged.
if self.is_mac_bundle:
# If the framework doesn't contain a binary, then nothing depends
# on the actions -- make the framework depend on them directly too.
self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
# Bundle dependencies. Note that the code below adds actions to this
# target, so if you move these two lines, move the lines below as well.
self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
# After the framework is built, package it. Needs to happen before
# postbuilds, since postbuilds depend on this.
if self.type in ('shared_library', 'loadable_module'):
self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
self.xcode_settings.GetFrameworkVersion())
# Bundle postbuilds can depend on the whole bundle, so run them after
      # the bundle is packaged, not merely after the bundle binary is done.
if postbuilds:
self.WriteLn('\t@$(call do_postbuilds)')
postbuilds = [] # Don't write postbuilds for target's output.
# Needed by test/mac/gyptest-rebuild.py.
self.WriteLn('\t@true # No-op, used by tests')
# Since this target depends on binary and resources which are in
      # nested subfolders, the framework directory will usually be older
      # than its dependencies. To prevent this rule from executing
      # on every build (expensive, especially with postbuilds), explicitly
# update the time on the framework directory.
self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
if postbuilds:
assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
'on the bundle, not the binary (target \'%s\')' % self.target)
assert 'product_dir' not in spec, ('Postbuilds do not work with '
'custom product_dir')
if self.type == 'executable':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
postbuilds=postbuilds)
elif self.type == 'static_library':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in alink input filenames not supported (%s)" % link_dep)
if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
self.is_standalone_static_library):
self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'shared_library':
self.WriteLn('%s: LD_INPUTS := %s' % (
QuoteSpaces(self.output_binary),
' '.join(map(QuoteSpaces, link_deps))))
self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
postbuilds=postbuilds)
elif self.type == 'loadable_module':
for link_dep in link_deps:
assert ' ' not in link_dep, (
"Spaces in module input filenames not supported (%s)" % link_dep)
if self.toolset == 'host' and self.flavor == 'android':
self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
part_of_all, postbuilds=postbuilds)
else:
self.WriteDoCmd(
[self.output_binary], link_deps, 'solink_module', part_of_all,
postbuilds=postbuilds)
elif self.type == 'none':
# Write a stamp line.
self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
postbuilds=postbuilds)
else:
print "WARNING: no output for", self.type, target
# Add an alias for each target (if there are any outputs).
# Installable target aliases are created below.
if ((self.output and self.output != self.target) and
(self.type not in self._INSTALLABLE_TARGETS)):
self.WriteMakeRule([self.target], [self.output],
comment='Add target alias', phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [self.target],
comment = 'Add target alias to "all" target.',
phony = True)
# Add special-case rules for our installable targets.
# 1) They need to install to the build dir or "product" dir.
# 2) They get shortcuts for building (e.g. "make chrome").
# 3) They are part of "make all".
if (self.type in self._INSTALLABLE_TARGETS or
self.is_standalone_static_library):
if self.type == 'shared_library':
file_desc = 'shared library'
elif self.type == 'static_library':
file_desc = 'static library'
else:
file_desc = 'executable'
install_path = self._InstallableTargetInstallPath()
installable_deps = [self.output]
      if (self.flavor == 'mac' and 'product_dir' not in spec and
self.toolset == 'target'):
# On mac, products are created in install_path immediately.
assert install_path == self.output, '%s != %s' % (
install_path, self.output)
# Point the target alias to the final binary output.
self.WriteMakeRule([self.target], [install_path],
comment='Add target alias', phony = True)
if install_path != self.output:
assert not self.is_mac_bundle # See comment a few lines above.
self.WriteDoCmd([install_path], [self.output], 'copy',
comment = 'Copy this to the %s output path.' %
file_desc, part_of_all=part_of_all)
installable_deps.append(install_path)
if self.output != self.alias and self.alias != self.target:
self.WriteMakeRule([self.alias], installable_deps,
comment = 'Short alias for building this %s.' %
file_desc, phony = True)
if part_of_all:
self.WriteMakeRule(['all'], [install_path],
comment = 'Add %s to "all" target.' % file_desc,
phony = True)
def WriteList(self, value_list, variable=None, prefix='',
quoter=QuoteIfNecessary):
"""Write a variable definition that is a list of values.
E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
         foo := blaha blahb
but in a pretty-printed style.
"""
values = ''
if value_list:
value_list = [quoter(prefix + l) for l in value_list]
values = ' \\\n\t' + ' \\\n\t'.join(value_list)
self.fp.write('%s :=%s\n\n' % (variable, values))
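    # A minimal sketch of the emitted text for WriteList(['a', 'b'], 'foo',
    # prefix='-D'):
    #   foo := \
    #       -Da \
    #       -Db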
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
postbuilds=False):
"""Write a Makefile rule that uses do_cmd.
    This makes the outputs dependent on the command line that was run,
    as well as supporting the V= make command line flag.
"""
suffix = ''
if postbuilds:
assert ',' not in command
suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
self.WriteMakeRule(outputs, inputs,
actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
comment = comment,
command = command,
force = True)
# Add our outputs to the list of targets we read depfiles from.
# all_deps is only used for deps file reading, and for deps files we replace
# spaces with ? because escaping doesn't work with make's $(sort) and
# other functions.
outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
self.WriteLn('all_deps += %s' % ' '.join(outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
order_only=False, force=False, phony=False, command=None):
"""Write a Makefile rule, with some extra tricks.
outputs: a list of outputs for the rule (note: this is not directly
supported by make; see comments below)
inputs: a list of inputs for the rule
actions: a list of shell commands to run for the rule
comment: a comment to put in the Makefile above the rule (also useful
for making this Python script's code self-documenting)
order_only: if true, makes the dependency order-only
force: if true, include FORCE_DO_CMD as an order-only dep
    phony: if true, the rule does not actually generate the named output; the
           output is just a name used to run the rule
command: (optional) command name to generate unambiguous labels
"""
outputs = map(QuoteSpaces, outputs)
inputs = map(QuoteSpaces, inputs)
if comment:
self.WriteLn('# ' + comment)
if phony:
self.WriteLn('.PHONY: ' + ' '.join(outputs))
if actions:
self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
force_append = ' FORCE_DO_CMD' if force else ''
if order_only:
# Order only rule: Just write a simple rule.
# TODO(evanm): just make order_only a list of deps instead of this hack.
self.WriteLn('%s: | %s%s' %
(' '.join(outputs), ' '.join(inputs), force_append))
elif len(outputs) == 1:
# Regular rule, one output: Just write a simple rule.
self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
else:
# Regular rule, more than one output: Multiple outputs are tricky in
# make. We will write three rules:
# - All outputs depend on an intermediate file.
# - Make .INTERMEDIATE depend on the intermediate.
# - The intermediate file depends on the inputs and executes the
# actual command.
# - The intermediate recipe will 'touch' the intermediate file.
      # - The multi-output rule will have a do-nothing recipe.
intermediate = "%s.intermediate" % (command if command else self.target)
self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
      self.WriteLn('\t@:')
self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
self.WriteLn('%s: %s%s' %
(intermediate, ' '.join(inputs), force_append))
actions.insert(0, '$(call do_cmd,touch)')
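      # Illustrative result for outputs ['a.h', 'a.cc'], input 'a.y' and
      # command 'bison' (all hypothetical names):
      #   a.h a.cc: bison.intermediate
      #       @:
      #   .INTERMEDIATE: bison.intermediate
      #   bison.intermediate: a.y FORCE_DO_CMD
      #       $(call do_cmd,touch)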
if actions:
for action in actions:
self.WriteLn('\t%s' % action)
self.WriteLn()
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
"""Write a set of LOCAL_XXX definitions for Android NDK.
These variable definitions will be used by Android NDK but do nothing for
non-Android applications.
Arguments:
module_name: Android NDK module name, which must be unique among all
module names.
all_sources: A list of source files (will be filtered by Compilable).
link_deps: A list of link dependencies, which must be sorted in
the order from dependencies to dependents.
"""
if self.type not in ('executable', 'shared_library', 'static_library'):
return
self.WriteLn('# Variable definitions for Android applications')
self.WriteLn('include $(CLEAR_VARS)')
self.WriteLn('LOCAL_MODULE := ' + module_name)
self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
'$(DEFS_$(BUILDTYPE)) '
                 # LOCAL_CFLAGS is applied to both C and C++. There is
# no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
# sources.
'$(CFLAGS_C_$(BUILDTYPE)) '
# $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
# LOCAL_C_INCLUDES does not expect it. So put it in
# LOCAL_CFLAGS.
'$(INCS_$(BUILDTYPE))')
# LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
self.WriteLn('LOCAL_C_INCLUDES :=')
self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')
# Detect the C++ extension.
cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
default_cpp_ext = '.cpp'
for filename in all_sources:
ext = os.path.splitext(filename)[1]
if ext in cpp_ext:
cpp_ext[ext] += 1
if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
default_cpp_ext = ext
self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)
self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
'LOCAL_SRC_FILES')
# Filter out those which do not match prefix and suffix and produce
# the resulting list without prefix and suffix.
def DepsToModules(deps, prefix, suffix):
modules = []
for filepath in deps:
filename = os.path.basename(filepath)
if filename.startswith(prefix) and filename.endswith(suffix):
modules.append(filename[len(prefix):-len(suffix)])
return modules
# Retrieve the default value of 'SHARED_LIB_SUFFIX'
params = {'flavor': 'linux'}
default_variables = {}
CalculateVariables(default_variables, params)
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['SHARED_LIB_PREFIX'],
default_variables['SHARED_LIB_SUFFIX']),
'LOCAL_SHARED_LIBRARIES')
self.WriteList(
DepsToModules(link_deps,
generator_default_variables['STATIC_LIB_PREFIX'],
generator_default_variables['STATIC_LIB_SUFFIX']),
'LOCAL_STATIC_LIBRARIES')
if self.type == 'executable':
self.WriteLn('include $(BUILD_EXECUTABLE)')
elif self.type == 'shared_library':
self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
elif self.type == 'static_library':
self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
self.WriteLn()
def WriteLn(self, text=''):
self.fp.write(text + '\n')
def GetSortedXcodeEnv(self, additional_settings=None):
return gyp.xcode_emulation.GetSortedXcodeEnv(
self.xcode_settings, "$(abs_builddir)",
os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
additional_settings)
def GetSortedXcodePostbuildEnv(self):
# CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
# TODO(thakis): It would be nice to have some general mechanism instead.
strip_save_file = self.xcode_settings.GetPerTargetSetting(
'CHROMIUM_STRIP_SAVE_FILE', '')
# Even if strip_save_file is empty, explicitly write it. Else a postbuild
# might pick up an export from an earlier target.
return self.GetSortedXcodeEnv(
additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
for k, v in env:
# For
# foo := a\ b
# the escaped space does the right thing. For
# export foo := a\ b
# it does not -- the backslash is written to the env as literal character.
# So don't escape spaces in |env[k]|.
self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
"""Convert a path to its output directory form."""
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
    if '$(obj)' not in path:
path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
return path
def Pchify(self, path, lang):
"""Convert a prefix header path to its output directory form."""
path = self.Absolutify(path)
if '$(' in path:
path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
(self.toolset, lang))
return path
return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
"""Convert a subdirectory-relative path into a base-relative path.
Skips over paths that contain variables."""
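    # For example, with self.path == 'foo/bar', 'baz.cc' becomes
    # 'foo/bar/baz.cc', while '$(obj)/gen/' is only stripped to '$(obj)/gen'.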
if '$(' in path:
# Don't call normpath in this case, as it might collapse the
# path too aggressively if it features '..'. However it's still
# important to strip trailing slashes.
return path.rstrip('/')
return os.path.normpath(os.path.join(self.path, path))
def ExpandInputRoot(self, template, expansion, dirname):
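    # For example (illustrative), ExpandInputRoot('gen/%(INPUT_ROOT)s.cc',
    # 'foo', 'src') returns 'gen/foo.cc'.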
if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
return template
path = template % {
'INPUT_ROOT': expansion,
'INPUT_DIRNAME': dirname,
}
return path
def _InstallableTargetInstallPath(self):
"""Returns the location of the final output for an installable target."""
# Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
# rely on this. Emulate this behavior for mac.
# XXX(TooTallNate): disabling this code since we don't want this behavior...
#if (self.type == 'shared_library' and
# (self.flavor != 'mac' or self.toolset != 'target')):
# # Install all shared libs into a common directory (per toolset) for
# # convenient access with LD_LIBRARY_PATH.
# return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
build_files):
"""Write the target to regenerate the Makefile."""
options = params['options']
build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
for filename in params['build_files_arg']]
gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
options.toplevel_dir)
if not gyp_binary.startswith(os.sep):
gyp_binary = os.path.join('.', gyp_binary)
root_makefile.write(
"quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
"cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
"%(makefile_name)s: %(deps)s\n"
"\t$(call do_cmd,regen_makefile)\n\n" % {
'makefile_name': makefile_name,
'deps': ' '.join(map(Sourceify, build_files)),
'cmd': gyp.common.EncodePOSIXShellList(
[gyp_binary, '-fmake'] +
gyp.RegenerateFlags(options) +
build_files_args)})
def PerformBuild(data, configurations, params):
options = params['options']
for config in configurations:
arguments = ['make']
if options.toplevel_dir and options.toplevel_dir != '.':
arguments += '-C', options.toplevel_dir
arguments.append('BUILDTYPE=' + config)
print 'Building [%s]: %s' % (config, arguments)
subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
flavor = gyp.common.GetFlavor(params)
generator_flags = params.get('generator_flags', {})
builddir_name = generator_flags.get('output_dir', 'out')
android_ndk_version = generator_flags.get('android_ndk_version', None)
default_target = generator_flags.get('default_target', 'all')
def CalculateMakefilePath(build_file, base_name):
"""Determine where to write a Makefile for a given gyp file."""
# Paths in gyp files are relative to the .gyp file, but we want
# paths relative to the source root for the master makefile. Grab
# the path of the .gyp file as the base to relativize against.
# E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.depth)
# We write the file in the base_path directory.
output_file = os.path.join(options.depth, base_path, base_name)
if options.generator_output:
output_file = os.path.join(
options.depth, options.generator_output, base_path, base_name)
base_path = gyp.common.RelativePath(os.path.dirname(build_file),
options.toplevel_dir)
return base_path, output_file
# TODO: search for the first non-'Default' target. This can go
# away when we add verification that all targets have the
# necessary configurations.
default_configuration = None
toolsets = set([target_dicts[target]['toolset'] for target in target_list])
for target in target_list:
spec = target_dicts[target]
if spec['default_configuration'] != 'Default':
default_configuration = spec['default_configuration']
break
if not default_configuration:
default_configuration = 'Default'
srcdir = '.'
makefile_name = 'Makefile' + options.suffix
makefile_path = os.path.join(options.toplevel_dir, makefile_name)
if options.generator_output:
global srcdir_prefix
makefile_path = os.path.join(
options.toplevel_dir, options.generator_output, makefile_name)
srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
srcdir_prefix = '$(srcdir)/'
  flock_command = 'flock'
copy_archive_arguments = '-af'
header_params = {
'default_target': default_target,
'builddir': builddir_name,
'default_configuration': default_configuration,
'flock': flock_command,
'flock_index': 1,
'link_commands': LINK_COMMANDS_LINUX,
'extra_commands': '',
'srcdir': srcdir,
'copy_archive_args': copy_archive_arguments,
}
if flavor == 'mac':
flock_command = './gyp-mac-tool flock'
header_params.update({
'flock': flock_command,
'flock_index': 2,
'link_commands': LINK_COMMANDS_MAC,
'extra_commands': SHARED_HEADER_MAC_COMMANDS,
})
elif flavor == 'android':
header_params.update({
'link_commands': LINK_COMMANDS_ANDROID,
})
elif flavor == 'solaris':
header_params.update({
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
elif flavor == 'freebsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
header_params.update({
'flock': 'lockf',
})
elif flavor == 'openbsd':
copy_archive_arguments = '-pPRf'
header_params.update({
'copy_archive_args': copy_archive_arguments,
})
elif flavor == 'aix':
copy_archive_arguments = '-pPRf'
header_params.update({
'copy_archive_args': copy_archive_arguments,
'link_commands': LINK_COMMANDS_AIX,
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
header_params.update({
'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
'CC.host': GetEnvironFallback(('CC_host', 'CC'), 'gcc'),
'AR.host': GetEnvironFallback(('AR_host', 'AR'), 'ar'),
'CXX.host': GetEnvironFallback(('CXX_host', 'CXX'), 'g++'),
'LINK.host': GetEnvironFallback(('LINK_host', 'LINK'), '$(CXX.host)'),
})
build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
make_global_settings_array = data[build_file].get('make_global_settings', [])
wrappers = {}
for key, value in make_global_settings_array:
if key.endswith('_wrapper'):
wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
make_global_settings = ''
for key, value in make_global_settings_array:
if re.match('.*_wrapper', key):
continue
if value[0] != '$':
value = '$(abspath %s)' % value
wrapper = wrappers.get(key)
if wrapper:
value = '%s %s' % (wrapper, value)
del wrappers[key]
if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
make_global_settings += (
'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
# Let gyp-time envvars win over global settings.
env_key = key.replace('.', '_') # CC.host -> CC_host
if env_key in os.environ:
value = os.environ[env_key]
make_global_settings += ' %s = %s\n' % (key, value)
make_global_settings += 'endif\n'
else:
make_global_settings += '%s ?= %s\n' % (key, value)
# TODO(ukai): define cmd when only wrapper is specified in
# make_global_settings.
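  # Illustrative result for a setting ('CC', '/usr/bin/clang'), assuming no
  # CC in the gyp-time environment:
  #   ifneq (,$(filter $(origin CC), undefined default))
  #     CC = $(abspath /usr/bin/clang)
  #   endif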
header_params['make_global_settings'] = make_global_settings
gyp.common.EnsureDirExists(makefile_path)
root_makefile = open(makefile_path, 'w')
root_makefile.write(SHARED_HEADER % header_params)
  # Currently all versions have the same effect, but in the future the
  # behavior could differ.
if android_ndk_version:
root_makefile.write(
'# Define LOCAL_PATH for build of Android applications.\n'
'LOCAL_PATH := $(call my-dir)\n'
'\n')
for toolset in toolsets:
root_makefile.write('TOOLSET := %s\n' % toolset)
WriteRootHeaderSuffixRules(root_makefile)
# Put build-time support tools next to the root Makefile.
dest_path = os.path.dirname(makefile_path)
gyp.common.CopyTool(flavor, dest_path)
# Find the list of targets that derive from the gyp file(s) being built.
needed_targets = set()
for build_file in params['build_files']:
for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
needed_targets.add(target)
build_files = set()
include_list = set()
for qualified_target in target_list:
build_file, target, toolset = gyp.common.ParseQualifiedTarget(
qualified_target)
this_make_global_settings = data[build_file].get('make_global_settings', [])
assert make_global_settings_array == this_make_global_settings, (
"make_global_settings needs to be the same for all targets. %s vs. %s" %
(this_make_global_settings, make_global_settings))
build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
included_files = data[build_file]['included_files']
for included_file in included_files:
# The included_files entries are relative to the dir of the build file
# that included them, so we have to undo that and then make them relative
# to the root dir.
relative_include_file = gyp.common.RelativePath(
gyp.common.UnrelativePath(included_file, build_file),
options.toplevel_dir)
abs_include_file = os.path.abspath(relative_include_file)
# If the include file is from the ~/.gyp dir, we should use absolute path
# so that relocating the src dir doesn't break the path.
if (params['home_dot_gyp'] and
abs_include_file.startswith(params['home_dot_gyp'])):
build_files.add(abs_include_file)
else:
build_files.add(relative_include_file)
base_path, output_file = CalculateMakefilePath(build_file,
target + '.' + toolset + options.suffix + '.mk')
spec = target_dicts[qualified_target]
configs = spec['configurations']
if flavor == 'mac':
gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
writer = MakefileWriter(generator_flags, flavor)
writer.Write(qualified_target, base_path, output_file, spec, configs,
part_of_all=qualified_target in needed_targets)
# Our root_makefile lives at the source root. Compute the relative path
# from there to the output_file for including.
mkfile_rel_path = gyp.common.RelativePath(output_file,
os.path.dirname(makefile_path))
include_list.add(mkfile_rel_path)
# Write out per-gyp (sub-project) Makefiles.
depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
for build_file in build_files:
# The paths in build_files were relativized above, so undo that before
# testing against the non-relativized items in target_list and before
# calculating the Makefile path.
build_file = os.path.join(depth_rel_path, build_file)
gyp_targets = [target_dicts[target]['target_name'] for target in target_list
if target.startswith(build_file) and
target in needed_targets]
# Only generate Makefiles for gyp files with targets.
if not gyp_targets:
continue
base_path, output_file = CalculateMakefilePath(build_file,
os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
os.path.dirname(output_file))
writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
builddir_name)
# Write out the sorted list of includes.
root_makefile.write('\n')
for include_file in sorted(include_list):
# We wrap each .mk include in an if statement so users can tell make to
    # not load a file by setting NO_LOAD. The make code below says: only
# load the .mk file if the .mk filename doesn't start with a token in
# NO_LOAD.
root_makefile.write(
"ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
" $(findstring $(join ^,$(prefix)),\\\n"
" $(join ^," + include_file + ")))),)\n")
root_makefile.write(" include " + include_file + "\n")
root_makefile.write("endif\n")
root_makefile.write('\n')
if (not generator_flags.get('standalone')
and generator_flags.get('auto_regeneration', True)):
WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
root_makefile.write(SHARED_FOOTER)
root_makefile.close()
|
gkoelln/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/rtlnl.py
|
10
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_duration,
)
class RtlNlIE(InfoExtractor):
IE_NAME = 'rtl.nl'
IE_DESC = 'rtl.nl and rtlxl.nl'
_VALID_URL = r'''(?x)
https?://(?:(?:www|static)\.)?
(?:
rtlxl\.nl/[^\#]*\#!/[^/]+/|
rtl\.nl/(?:(?:system/videoplayer/(?:[^/]+/)+(?:video_)?embed\.html|embed)\b.+?\buuid=|video/)
)
(?P<id>[0-9a-f-]+)'''
_TESTS = [{
'url': 'http://www.rtlxl.nl/#!/rtl-nieuws-132237/82b1aad1-4a14-3d7b-b554-b0aed1b2c416',
'md5': '473d1946c1fdd050b2c0161a4b13c373',
'info_dict': {
'id': '82b1aad1-4a14-3d7b-b554-b0aed1b2c416',
'ext': 'mp4',
'title': 'RTL Nieuws',
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
'timestamp': 1461951000,
'upload_date': '20160429',
'duration': 1167.96,
},
}, {
        # best format available a3t
'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed/autoplay=false',
'md5': 'dea7474214af1271d91ef332fb8be7ea',
'info_dict': {
'id': '84ae5571-ac25-4225-ae0c-ef8d9efb2aed',
'ext': 'mp4',
'timestamp': 1424039400,
'title': 'RTL Nieuws - Nieuwe beelden Kopenhagen: chaos direct na aanslag',
'thumbnail': r're:^https?://screenshots\.rtl\.nl/(?:[^/]+/)*sz=[0-9]+x[0-9]+/uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed$',
'upload_date': '20150215',
'description': 'Er zijn nieuwe beelden vrijgegeven die vlak na de aanslag in Kopenhagen zijn gemaakt. Op de video is goed te zien hoe omstanders zich bekommeren om één van de slachtoffers, terwijl de eerste agenten ter plaatse komen.',
}
}, {
# empty synopsis and missing episodes (see https://github.com/rg3/youtube-dl/issues/6275)
# best format available nettv
'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a/autoplay=false',
'info_dict': {
'id': 'f536aac0-1dc3-4314-920e-3bd1c5b3811a',
'ext': 'mp4',
'title': 'RTL Nieuws - Meer beelden van overval juwelier',
'thumbnail': r're:^https?://screenshots\.rtl\.nl/(?:[^/]+/)*sz=[0-9]+x[0-9]+/uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a$',
'timestamp': 1437233400,
'upload_date': '20150718',
'duration': 30.474,
},
'params': {
'skip_download': True,
},
}, {
# encrypted m3u8 streams, georestricted
'url': 'http://www.rtlxl.nl/#!/afl-2-257632/52a74543-c504-4cde-8aa8-ec66fe8d68a7',
'only_matching': True,
}, {
'url': 'http://www.rtl.nl/system/videoplayer/derden/embed.html#!/uuid=bb0353b0-d6a4-1dad-90e9-18fe75b8d1f0',
'only_matching': True,
}, {
'url': 'http://rtlxl.nl/?_ga=1.204735956.572365465.1466978370#!/rtl-nieuws-132237/3c487912-023b-49ac-903e-2c5d79f8410f',
'only_matching': True,
}, {
'url': 'https://www.rtl.nl/video/c603c9c2-601d-4b5e-8175-64f1e942dc7d/',
'only_matching': True,
}, {
'url': 'https://static.rtl.nl/embed/?uuid=1a2970fc-5c0b-43ff-9fdc-927e39e6d1bc&autoplay=false&publicatiepunt=rtlnieuwsnl',
'only_matching': True,
}]
def _real_extract(self, url):
uuid = self._match_id(url)
info = self._download_json(
'http://www.rtl.nl/system/s4m/vfd/version=2/uuid=%s/fmt=adaptive/' % uuid,
uuid)
material = info['material'][0]
title = info['abstracts'][0]['name']
subtitle = material.get('title')
if subtitle:
title += ' - %s' % subtitle
description = material.get('synopsis')
meta = info.get('meta', {})
videopath = material['videopath']
m3u8_url = meta.get('videohost', 'http://manifest.us.rtl.nl') + videopath
formats = self._extract_m3u8_formats(
m3u8_url, uuid, 'mp4', m3u8_id='hls', fatal=False)
self._sort_formats(formats)
thumbnails = []
        for p in ('poster_base_url', 'thumb_base_url'):
if not meta.get(p):
continue
thumbnails.append({
'url': self._proto_relative_url(meta[p] + uuid),
'width': int_or_none(self._search_regex(
r'/sz=([0-9]+)', meta[p], 'thumbnail width', fatal=False)),
'height': int_or_none(self._search_regex(
r'/sz=[0-9]+x([0-9]+)',
meta[p], 'thumbnail height', fatal=False))
})
return {
'id': uuid,
'title': title,
'formats': formats,
'timestamp': material['original_date'],
'description': description,
'duration': parse_duration(material.get('duration')),
'thumbnails': thumbnails,
}
|
izzyalonso/tndata_backend
|
refs/heads/master
|
tndata_backend/officehours/admin.py
|
2
|
from django.contrib import admin
from . import models
@admin.register(models.OfficeHours)
class OfficeHoursAdmin(admin.ModelAdmin):
list_display = (
'email', 'full_name', '__str__',
'expires_on', 'created_on'
)
search_fields = ('user__email', 'user__first_name', 'user__last_name')
raw_id_fields = ('user', )
def email(self, obj):
return obj.user.email
def full_name(self, obj):
return obj.user.get_full_name()
@admin.register(models.Course)
class CourseAdmin(admin.ModelAdmin):
list_display = ('code', 'full_name', 'name', 'start_time', 'location')
list_filter = ('name', )
search_fields = (
'name', 'user__email', 'user__first_name', 'user__last_name', 'code'
)
raw_id_fields = ('user', 'students')
def full_name(self, obj):
return obj.user.get_full_name()
|
djenniex/CouchPotatoServer
|
refs/heads/master
|
couchpotato/core/media/movie/providers/automation/trakt/__init__.py
|
35
|
from .main import Trakt
def autoload():
return Trakt()
config = [{
'name': 'trakt',
'groups': [
{
'tab': 'automation',
'list': 'watchlist_providers',
'name': 'trakt_automation',
'label': 'Trakt',
'description': 'Import movies from your own watchlist',
'options': [
{
'name': 'automation_enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'automation_oauth_token',
'label': 'Auth Token',
'advanced': 1
},
{
'name': 'automation_oauth_refresh',
'label': 'Refresh Token',
'description': ('Used to automatically refresh your oauth token every 3 months',
'To get a refresh token, reconnect with trakt'),
'advanced': 1
},
],
},
],
}]
|
IIIIIHIIIII/metamarket
|
refs/heads/master
|
setup.py
|
2
|
# Copyright (C) 2015 Marc D. Wood
#
# This file is part of the METAMARKET project.
#
# It is subject to the license terms in the LICENSE file found in the top-level
# directory of this distribution.
#
# No part of METAMARKET, including this file, may be copied, modified,
# propagated, or distributed except according to the terms contained in the
# LICENSE file.
from distutils.core import setup
from glob import glob
import py2exe
data_files = [("Microsoft.VC90.CRT", glob(r'C:\WINDOWS\WinSxS\x86_Microsoft.VC90.CRT_1fc8b3b9a1e18e3b_9.0.21022.8_x-ww_d08d0375\*.*'))]
setup(
name='METAMARKET',
data_files=data_files,
console=['mmcli.py', 'modserver.py', 'vendserver.py'],
options = {
'py2exe': {
'packages': ['bitcoin', 'pycoin'],
'includes': ['simplejson', 'simplecrypt', '_scrypt'],
} # Changed scrypt.py to import C module from current directory.
}
)
|
incaser/odoo-odoo
|
refs/heads/8.0
|
addons/calendar/calendar.py
|
81
|
# -*- coding: utf-8 -*-
import pytz
import re
import time
import openerp
import openerp.service.report
import uuid
import collections
import babel.dates
from werkzeug.exceptions import BadRequest
from datetime import datetime, timedelta
from dateutil import parser
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from openerp import api
from openerp import tools, SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
from openerp.http import request
from operator import itemgetter
import logging
_logger = logging.getLogger(__name__)
def calendar_id2real_id(calendar_id=None, with_date=False):
"""
Convert a "virtual/recurring event id" (type string) into a real event id (type int).
E.g. virtual/recurring event id is 4-20091201100000, so it will return 4.
@param calendar_id: id of calendar
    @param with_date: if a value is passed to this param, it will return dates based on the value of with_date + calendar_id
@return: real event id
"""
    if calendar_id and isinstance(calendar_id, basestring):
res = calendar_id.split('-')
if len(res) >= 2:
real_id = res[0]
if with_date:
real_date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT, time.strptime(res[1], "%Y%m%d%H%M%S"))
start = datetime.strptime(real_date, DEFAULT_SERVER_DATETIME_FORMAT)
end = start + timedelta(hours=with_date)
return (int(real_id), real_date, end.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
return int(real_id)
return calendar_id and int(calendar_id) or calendar_id
def get_real_ids(ids):
if isinstance(ids, (basestring, int, long)):
return calendar_id2real_id(ids)
if isinstance(ids, (list, tuple)):
return [calendar_id2real_id(id) for id in ids]
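# For example (illustrative), get_real_ids(['4-20091201100000', 7])
# returns [4, 7].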
class calendar_attendee(osv.Model):
"""
Calendar Attendee Information
"""
_name = 'calendar.attendee'
_rec_name = 'cn'
_description = 'Attendee information'
def _compute_data(self, cr, uid, ids, name, arg, context=None):
"""
Compute data on function fields for attendee values.
@param ids: list of calendar attendee's IDs
@param name: name of field
        @return: dictionary of the form {id: {'field_name': value}}
"""
name = name[0]
result = {}
for attdata in self.browse(cr, uid, ids, context=context):
id = attdata.id
result[id] = {}
if name == 'cn':
if attdata.partner_id:
result[id][name] = attdata.partner_id.name or False
else:
result[id][name] = attdata.email or ''
return result
STATE_SELECTION = [
('needsAction', 'Needs Action'),
('tentative', 'Uncertain'),
('declined', 'Declined'),
('accepted', 'Accepted'),
]
_columns = {
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True, help="Status of the attendee's participation"),
'cn': fields.function(_compute_data, string='Common name', type="char", multi='cn', store=True),
        'partner_id': fields.many2one('res.partner', 'Contact', readonly=True),
        'email': fields.char('Email', help="Email of Invited Person"),
        'availability': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Free/Busy', readonly=True),
'access_token': fields.char('Invitation Token'),
'event_id': fields.many2one('calendar.event', 'Meeting linked', ondelete='cascade'),
}
_defaults = {
'state': 'needsAction',
}
def copy(self, cr, uid, id, default=None, context=None):
raise osv.except_osv(_('Warning!'), _('You cannot duplicate a calendar attendee.'))
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
"""
Make entry on email and availability on change of partner_id field.
@param partner_id: changed value of partner id
"""
if not partner_id:
return {'value': {'email': ''}}
partner = self.pool['res.partner'].browse(cr, uid, partner_id, context=context)
return {'value': {'email': partner.email}}
def get_ics_file(self, cr, uid, event_obj, context=None):
"""
Returns iCalendar file for the event invitation.
@param event_obj: event object (browse record)
@return: .ics file content
"""
res = None
def ics_datetime(idate, allday=False):
if idate:
if allday:
return openerp.fields.Date.from_string(idate)
else:
return openerp.fields.Datetime.from_string(idate).replace(tzinfo=pytz.timezone('UTC'))
return False
try:
# FIXME: why isn't this in CalDAV?
import vobject
except ImportError:
return res
cal = vobject.iCalendar()
event = cal.add('vevent')
if not event_obj.start or not event_obj.stop:
raise osv.except_osv(_('Warning!'), _("First you have to specify the date of the invitation."))
event.add('created').value = ics_datetime(time.strftime(DEFAULT_SERVER_DATETIME_FORMAT))
event.add('dtstart').value = ics_datetime(event_obj.start, event_obj.allday)
event.add('dtend').value = ics_datetime(event_obj.stop, event_obj.allday)
event.add('summary').value = event_obj.name
if event_obj.description:
event.add('description').value = event_obj.description
if event_obj.location:
event.add('location').value = event_obj.location
if event_obj.rrule:
event.add('rrule').value = event_obj.rrule
if event_obj.alarm_ids:
for alarm in event_obj.alarm_ids:
valarm = event.add('valarm')
interval = alarm.interval
duration = alarm.duration
trigger = valarm.add('TRIGGER')
trigger.params['related'] = ["START"]
if interval == 'days':
delta = timedelta(days=duration)
elif interval == 'hours':
delta = timedelta(hours=duration)
elif interval == 'minutes':
delta = timedelta(minutes=duration)
trigger.value = delta
valarm.add('DESCRIPTION').value = alarm.name or 'Odoo'
for attendee in event_obj.attendee_ids:
attendee_add = event.add('attendee')
attendee_add.value = 'MAILTO:' + (attendee.email or '')
res = cal.serialize()
return res
def _send_mail_to_attendees(self, cr, uid, ids, email_from=tools.config.get('email_from', False),
template_xmlid='calendar_template_meeting_invitation', force=False, context=None):
"""
Send mail for event invitation to event attendees.
@param email_from: email address for user sending the mail
        @param force: If set to True, the email will be sent to the user himself. Useful for example for alerts, ...
"""
res = False
if self.pool['ir.config_parameter'].get_param(cr, uid, 'calendar.block_mail', default=False) or context.get("no_mail_to_attendees"):
return res
mail_ids = []
data_pool = self.pool['ir.model.data']
mailmess_pool = self.pool['mail.message']
mail_pool = self.pool['mail.mail']
template_pool = self.pool['email.template']
local_context = context.copy()
color = {
'needsAction': 'grey',
'accepted': 'green',
'tentative': '#FFFF00',
'declined': 'red'
}
if not isinstance(ids, (tuple, list)):
ids = [ids]
dummy, template_id = data_pool.get_object_reference(cr, uid, 'calendar', template_xmlid)
dummy, act_id = data_pool.get_object_reference(cr, uid, 'calendar', "view_calendar_event_calendar")
local_context.update({
'color': color,
'action_id': self.pool['ir.actions.act_window'].search(cr, uid, [('view_id', '=', act_id)], context=context)[0],
'dbname': cr.dbname,
'base_url': self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url', default='http://localhost:8069', context=context)
})
for attendee in self.browse(cr, uid, ids, context=context):
if attendee.email and email_from and (attendee.email != email_from or force):
ics_file = self.get_ics_file(cr, uid, attendee.event_id, context=context)
mail_id = template_pool.send_mail(cr, uid, template_id, attendee.id, context=local_context)
vals = {}
if ics_file:
vals['attachment_ids'] = [(0, 0, {'name': 'invitation.ics',
'datas_fname': 'invitation.ics',
'datas': str(ics_file).encode('base64')})]
                vals['model'] = None  # We don't want to have the mail in the chatter while in queue!
the_mailmess = mail_pool.browse(cr, uid, mail_id, context=context).mail_message_id
mailmess_pool.write(cr, uid, [the_mailmess.id], vals, context=context)
mail_ids.append(mail_id)
if mail_ids:
res = mail_pool.send(cr, uid, mail_ids, context=context)
return res
def onchange_user_id(self, cr, uid, ids, user_id, *args, **argv):
"""
Make entry on email and availability on change of user_id field.
@param ids: list of attendee's IDs
@param user_id: changed value of User id
@return: dictionary of values which put value in email and availability fields
"""
if not user_id:
return {'value': {'email': ''}}
user = self.pool['res.users'].browse(cr, uid, user_id, *args)
return {'value': {'email': user.email, 'availability': user.availability}}
def do_tentative(self, cr, uid, ids, context=None, *args):
"""
Makes event invitation as Tentative.
@param ids: list of attendee's IDs
"""
return self.write(cr, uid, ids, {'state': 'tentative'}, context)
def do_accept(self, cr, uid, ids, context=None, *args):
"""
Marks event invitation as Accepted.
@param ids: list of attendee's IDs
"""
if context is None:
context = {}
meeting_obj = self.pool['calendar.event']
res = self.write(cr, uid, ids, {'state': 'accepted'}, context)
for attendee in self.browse(cr, uid, ids, context=context):
meeting_obj.message_post(cr, uid, attendee.event_id.id, body=_("%s has accepted invitation") % (attendee.cn,),
subtype="calendar.subtype_invitation", context=context)
return res
def do_decline(self, cr, uid, ids, context=None, *args):
"""
Marks event invitation as Declined.
@param ids: list of calendar attendee's IDs
"""
if context is None:
context = {}
meeting_obj = self.pool['calendar.event']
res = self.write(cr, uid, ids, {'state': 'declined'}, context)
for attendee in self.browse(cr, uid, ids, context=context):
meeting_obj.message_post(cr, uid, attendee.event_id.id, body=_("%s has declined invitation") % (attendee.cn,), subtype="calendar.subtype_invitation", context=context)
return res
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if not vals.get("email") and vals.get("cn"):
cnval = vals.get("cn").split(':')
email = [x for x in cnval if '@' in x]  # keep only the email-looking part(s)
vals['email'] = email and email[0] or ''
res = super(calendar_attendee, self).create(cr, uid, vals, context=context)
return res
class res_partner(osv.Model):
_inherit = 'res.partner'
_columns = {
'calendar_last_notif_ack': fields.datetime('Last notification marked as read from base Calendar'),
}
def get_attendee_detail(self, cr, uid, ids, meeting_id, context=None):
"""
Return a list of tuple (id, name, status)
Used by web_calendar.js : Many2ManyAttendee
"""
datas = []
meeting = None
if meeting_id:
meeting = self.pool['calendar.event'].browse(cr, uid, get_real_ids(meeting_id), context=context)
for partner in self.browse(cr, uid, ids, context=context):
data = self.name_get(cr, uid, [partner.id], context)[0]
if meeting:
for attendee in meeting.attendee_ids:
if attendee.partner_id.id == partner.id:
data = (data[0], data[1], attendee.state)
datas.append(data)
return datas
def _set_calendar_last_notif_ack(self, cr, uid, context=None):
partner = self.pool['res.users'].browse(cr, uid, uid, context=context).partner_id
self.write(cr, uid, partner.id, {'calendar_last_notif_ack': datetime.now()}, context=context)
return
class calendar_alarm_manager(osv.AbstractModel):
_name = 'calendar.alarm_manager'
def get_next_potential_limit_alarm(self, cr, uid, seconds, notif=True, mail=True, partner_id=None, context=None):
res = {}
base_request = """
SELECT
cal.id,
cal.start - interval '1' minute * calcul_delta.max_delta AS first_alarm,
CASE
WHEN cal.recurrency THEN cal.final_date - interval '1' minute * calcul_delta.min_delta
ELSE cal.stop - interval '1' minute * calcul_delta.min_delta
END as last_alarm,
cal.start as first_event_date,
CASE
WHEN cal.recurrency THEN cal.final_date
ELSE cal.stop
END as last_event_date,
calcul_delta.min_delta,
calcul_delta.max_delta,
cal.rrule AS rule
FROM
calendar_event AS cal
RIGHT JOIN
(
SELECT
rel.calendar_event_id, max(alarm.duration_minutes) AS max_delta,min(alarm.duration_minutes) AS min_delta
FROM
calendar_alarm_calendar_event_rel AS rel
LEFT JOIN calendar_alarm AS alarm ON alarm.id = rel.calendar_alarm_id
WHERE alarm.type in %s
GROUP BY rel.calendar_event_id
) AS calcul_delta ON calcul_delta.calendar_event_id = cal.id
"""
filter_user = """
RIGHT JOIN calendar_event_res_partner_rel AS part_rel ON part_rel.calendar_event_id = cal.id
AND part_rel.res_partner_id = %s
"""
# Add filter on alarm type
type_to_read = ()
if notif:
type_to_read += ('notification',)
if mail:
type_to_read += ('email',)
tuple_params = (type_to_read,)
# Add filter on partner_id
if partner_id:
base_request += filter_user
tuple_params += (partner_id, )
# Add the look-ahead window (in seconds)
tuple_params += (seconds,)
cr.execute("""SELECT *
FROM ( %s WHERE cal.active = True ) AS ALL_EVENTS
WHERE ALL_EVENTS.first_alarm < (now() at time zone 'utc' + interval '%%s' second )
AND ALL_EVENTS.last_event_date > (now() at time zone 'utc')
""" % base_request, tuple_params)
for event_id, first_alarm, last_alarm, first_meeting, last_meeting, min_duration, max_duration, rule in cr.fetchall():
res[event_id] = {
'event_id': event_id,
'first_alarm': first_alarm,
'last_alarm': last_alarm,
'first_meeting': first_meeting,
'last_meeting': last_meeting,
'min_duration': min_duration,
'max_duration': max_duration,
'rrule': rule
}
return res
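# Sketch of the structure returned above (all values hypothetical): one entry
# per event having at least one alarm of the requested type(s), e.g.
#
#     {42: {'event_id': 42,
#           'first_alarm': ..., 'last_alarm': ...,
#           'first_meeting': ..., 'last_meeting': ...,
#           'min_duration': 15, 'max_duration': 60,
#           'rrule': 'FREQ=WEEKLY;INTERVAL=1;COUNT=3'}}
#
# where min_duration/max_duration are minutes taken from
# calendar_alarm.duration_minutes.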
def do_check_alarm_for_one_date(self, cr, uid, one_date, event, event_maxdelta, in_the_next_X_seconds, after=False, notif=True, mail=True, missing=False, context=None):
# one_date: date of the occurrence to check (differs from the browse record's date for recurrent events)
# event: event browse record
# event_maxdelta: longest alarm duration (in minutes) for this event
# in_the_next_X_seconds: how far to look into the future (in seconds)
# after: if set, only return alerts scheduled after this date (date as string - todo: change in master)
# missing: if True, also return alerts whose notification time is already past
# notif: look for alarms of type notification
# mail: look for alarms of type email
res = []
# TODO: replace notif and email in master by alarm_type + remove event_maxdelta and if using it
alarm_type = []
if notif:
alarm_type.append('notification')
if mail:
alarm_type.append('email')
if one_date - timedelta(minutes=(missing and 0 or event_maxdelta)) < datetime.now() + timedelta(seconds=in_the_next_X_seconds): # if an alarm is possible for this date
for alarm in event.alarm_ids:
if alarm.type in alarm_type and \
one_date - timedelta(minutes=(missing and 0 or alarm.duration_minutes)) < datetime.now() + timedelta(seconds=in_the_next_X_seconds) and \
(not after or one_date - timedelta(minutes=alarm.duration_minutes) > openerp.fields.Datetime.from_string(after)):
alert = {
'alarm_id': alarm.id,
'event_id': event.id,
'notify_at': one_date - timedelta(minutes=alarm.duration_minutes),
}
res.append(alert)
return res
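# Minimal worked example (times hypothetical): for an occurrence at
# 2014-06-01 10:00 with a single 30-minute alarm, calling this method at
# 2014-06-01 09:45 with in_the_next_X_seconds=3600 returns
# [{'alarm_id': ..., 'event_id': ..., 'notify_at': datetime(2014, 6, 1, 9, 30)}],
# since 09:30 falls inside the look-ahead window.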
def get_next_mail(self, cr, uid, context=None):
now = openerp.fields.Datetime.to_string(datetime.now())
icp = self.pool['ir.config_parameter']
last_notif_mail = icp.get_param(cr, SUPERUSER_ID, 'calendar.last_notif_mail', default=False) or now
try:
cron = self.pool['ir.model.data'].get_object(cr, uid, 'calendar', 'ir_cron_scheduler_alarm', context=context)
except ValueError:
_logger.error("Cron for " + self._name + " can not be identified !")
return False
interval_to_second = {
"weeks": 7 * 24 * 60 * 60,
"days": 24 * 60 * 60,
"hours": 60 * 60,
"minutes": 60,
"seconds": 1
}
if cron.interval_type not in interval_to_second.keys():
_logger.error("Cron delay can not be computed !")
return False
cron_interval = cron.interval_number * interval_to_second[cron.interval_type]
all_events = self.get_next_potential_limit_alarm(cr, uid, cron_interval, notif=False, context=context)
for curEvent in self.pool.get('calendar.event').browse(cr, uid, all_events.keys(), context=context):
max_delta = all_events[curEvent.id]['max_duration']
if curEvent.recurrency:
at_least_one = False
last_found = False
for one_date in self.pool.get('calendar.event').get_recurrent_date_by_event(cr, uid, curEvent, context=context):
in_date_format = one_date.replace(tzinfo=None)
last_found = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, 0, after=last_notif_mail, notif=False, missing=True, context=context)
for alert in last_found:
self.do_mail_reminder(cr, uid, alert, context=context)
at_least_one = True  # at least one alarm was found for this recurrent event
if at_least_one and not last_found:  # the preceding occurrence had an alarm but this one does not, so stop searching for this event
break
else:
in_date_format = datetime.strptime(curEvent.start, DEFAULT_SERVER_DATETIME_FORMAT)
last_found = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, 0, after=last_notif_mail, notif=False, missing=True, context=context)
for alert in last_found:
self.do_mail_reminder(cr, uid, alert, context=context)
icp.set_param(cr, SUPERUSER_ID, 'calendar.last_notif_mail', now)
def get_next_notif(self, cr, uid, context=None):
ajax_check_every_seconds = 300
partner = self.pool['res.users'].read(cr, SUPERUSER_ID, uid, ['partner_id', 'calendar_last_notif_ack'], context=context)
all_notif = []
if not partner:
return []
all_events = self.get_next_potential_limit_alarm(cr, uid, ajax_check_every_seconds, partner_id=partner['partner_id'][0], mail=False, context=context)
for event in all_events:  # iterate over event ids (dict keys)
max_delta = all_events[event]['max_duration']
curEvent = self.pool.get('calendar.event').browse(cr, uid, event, context=context)
if curEvent.recurrency:
bFound = False
LastFound = False
for one_date in self.pool.get("calendar.event").get_recurrent_date_by_event(cr, uid, curEvent, context=context):
in_date_format = one_date.replace(tzinfo=None)
LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, after=partner['calendar_last_notif_ack'], mail=False, context=context)
if LastFound:
for alert in LastFound:
all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
if not bFound:  # at least one alarm was found for this recurrent event
bFound = True
if bFound and not LastFound:  # the preceding occurrence had an alarm but this one does not, so stop searching for this event
break
else:
in_date_format = datetime.strptime(curEvent.start, DEFAULT_SERVER_DATETIME_FORMAT)
LastFound = self.do_check_alarm_for_one_date(cr, uid, in_date_format, curEvent, max_delta, ajax_check_every_seconds, after=partner['calendar_last_notif_ack'], mail=False, context=context)
if LastFound:
for alert in LastFound:
all_notif.append(self.do_notif_reminder(cr, uid, alert, context=context))
return all_notif
def do_mail_reminder(self, cr, uid, alert, context=None):
if context is None:
context = {}
res = False
event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)
alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)
if alarm.type == 'email':
res = self.pool['calendar.attendee']._send_mail_to_attendees(
cr,
uid,
[att.id for att in event.attendee_ids],
email_from=event.user_id.partner_id.email,
template_xmlid='calendar_template_meeting_reminder',
force=True,
context=context
)
return res
def do_notif_reminder(self, cr, uid, alert, context=None):
alarm = self.pool['calendar.alarm'].browse(cr, uid, alert['alarm_id'], context=context)
event = self.pool['calendar.event'].browse(cr, uid, alert['event_id'], context=context)
if alarm.type == 'notification':
message = event.display_time
delta = alert['notify_at'] - datetime.now()
delta = delta.seconds + delta.days * 3600 * 24
return {
'event_id': event.id,
'title': event.name,
'message': message,
'timer': delta,
'notify_at': alert['notify_at'].strftime(DEFAULT_SERVER_DATETIME_FORMAT),
}
class calendar_alarm(osv.Model):
_name = 'calendar.alarm'
_description = 'Event alarm'
def _get_duration(self, cr, uid, ids, field_name, arg, context=None):
res = {}
for alarm in self.browse(cr, uid, ids, context=context):
if alarm.interval == "minutes":
res[alarm.id] = alarm.duration
elif alarm.interval == "hours":
res[alarm.id] = alarm.duration * 60
elif alarm.interval == "days":
res[alarm.id] = alarm.duration * 60 * 24
else:
res[alarm.id] = 0
return res
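# Conversion sketch for the mapping above: (duration=2, interval='hours')
# yields duration_minutes == 120 and (duration=1, interval='days') yields
# 1440; any other interval falls back to 0.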
_columns = {
'name': fields.char('Name', required=True),
'type': fields.selection([('notification', 'Notification'), ('email', 'Email')], 'Type', required=True),
'duration': fields.integer('Amount', required=True),
'interval': fields.selection([('minutes', 'Minutes'), ('hours', 'Hours'), ('days', 'Days')], 'Unit', required=True),
'duration_minutes': fields.function(_get_duration, type='integer', string='Duration in minutes', store=True),
}
_defaults = {
'type': 'notification',
'duration': 1,
'interval': 'hours',
}
def _update_cron(self, cr, uid, context=None):
try:
cron = self.pool['ir.model.data'].get_object(
cr, SUPERUSER_ID, 'calendar', 'ir_cron_scheduler_alarm', context=context)
except ValueError:
return False
return cron.toggle(model=self._name, domain=[('type', '=', 'email')])
def create(self, cr, uid, values, context=None):
res = super(calendar_alarm, self).create(cr, uid, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def write(self, cr, uid, ids, values, context=None):
res = super(calendar_alarm, self).write(cr, uid, ids, values, context=context)
self._update_cron(cr, uid, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
res = super(calendar_alarm, self).unlink(cr, uid, ids, context=context)
self._update_cron(cr, uid, context=context)
return res
class ir_values(osv.Model):
_inherit = 'ir.values'
def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
new_model = []
for data in models:
if type(data) in (list, tuple):
new_model.append((data[0], calendar_id2real_id(data[1])))
else:
new_model.append(data)
return super(ir_values, self).set(cr, uid, key, key2, name, new_model,
value, replace, isobject, meta, preserve_user, company)
def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True):
if context is None:
context = {}
new_model = []
for data in models:
if type(data) in (list, tuple):
new_model.append((data[0], calendar_id2real_id(data[1])))
else:
new_model.append(data)
return super(ir_values, self).get(cr, uid, key, key2, new_model,
meta, context, res_id_req, without_user, key2_req)
class ir_model(osv.Model):
_inherit = 'ir.model'
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
new_ids = isinstance(ids, (basestring, int, long)) and [ids] or ids
if context is None:
context = {}
data = super(ir_model, self).read(cr, uid, new_ids, fields=fields, context=context, load=load)
if data:
for val in data:
val['id'] = calendar_id2real_id(val['id'])
return isinstance(ids, (basestring, int, long)) and data[0] or data
original_exp_report = openerp.service.report.exp_report
def exp_report(db, uid, object, ids, datas=None, context=None):
"""
Export Report
"""
if object == 'printscreen.list':
original_exp_report(db, uid, object, ids, datas, context)
new_ids = []
for id in ids:
new_ids.append(calendar_id2real_id(id))
if datas.get('id', False):
datas['id'] = calendar_id2real_id(datas['id'])
return original_exp_report(db, uid, object, new_ids, datas, context)
openerp.service.report.exp_report = exp_report
class calendar_event_type(osv.Model):
_name = 'calendar.event.type'
_description = 'Meeting Type'
_columns = {
'name': fields.char('Name', required=True, translate=True),
}
class calendar_event(osv.Model):
""" Model for Calendar Event """
_name = 'calendar.event'
_description = "Event"
_order = "id desc"
_inherit = ["mail.thread", "ir.needaction_mixin"]
def do_run_scheduler(self, cr, uid, id, context=None):
self.pool['calendar.alarm_manager'].get_next_mail(cr, uid, context=context)
def get_recurrent_date_by_event(self, cr, uid, event, context=None):
"""Get recurrent dates based on Rule string and all event where recurrent_id is child
"""
def todate(date):
val = parser.parse(''.join(re.findall(r'\d', date)))
## Dates are localized to saved timezone if any, else current timezone.
if not val.tzinfo:
val = pytz.UTC.localize(val)
return val.astimezone(timezone)
if context is None:
context = {}
timezone = pytz.timezone(context.get('tz') or 'UTC')
startdate = pytz.UTC.localize(datetime.strptime(event.start, DEFAULT_SERVER_DATETIME_FORMAT)) # Add "+hh:mm" timezone
if not startdate:
startdate = datetime.now()
## Convert the start date to saved timezone (or context tz) as it'll
## define the correct hour/day asked by the user to repeat for recurrence.
startdate = startdate.astimezone(timezone) # transform "+hh:mm" timezone
rset1 = rrule.rrulestr(str(event.rrule), dtstart=startdate, forceset=True)
ids_depending = self.search(cr, uid, [('recurrent_id', '=', event.id), '|', ('active', '=', False), ('active', '=', True)], context=context)
all_events = self.browse(cr, uid, ids_depending, context=context)
for ev in all_events:
rset1._exdate.append(todate(ev.recurrent_id_date))
return [d.astimezone(pytz.UTC) for d in rset1]
def _get_recurrency_end_date(self, cr, uid, id, context=None):
data = self.read(cr, uid, id, ['final_date', 'recurrency', 'rrule_type', 'count', 'end_type', 'stop'], context=context)
if not data.get('recurrency'):
return False
end_type = data.get('end_type')
final_date = data.get('final_date')
if end_type == 'count' and all(data.get(key) for key in ['count', 'rrule_type', 'stop']):
count = data['count'] + 1
delay, mult = {
'daily': ('days', 1),
'weekly': ('days', 7),
'monthly': ('months', 1),
'yearly': ('years', 1),
}[data['rrule_type']]
deadline = datetime.strptime(data['stop'], tools.DEFAULT_SERVER_DATETIME_FORMAT)
return deadline + relativedelta(**{delay: count * mult})
return final_date
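# Worked example under the rules above (dates hypothetical): a weekly
# recurrence with count=3 and stop='2014-06-01 10:00:00' ends at
# stop + (3 + 1) * 7 days = 2014-06-29 10:00:00; one extra period is added so
# the last occurrence remains searchable in calendar views.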
def _find_my_attendee(self, cr, uid, meeting_ids, context=None):
"""
Return the first attendee where the user connected has been invited from all the meeting_ids in parameters
"""
user = self.pool['res.users'].browse(cr, uid, uid, context=context)
for meeting_id in meeting_ids:
for attendee in self.browse(cr, uid, meeting_id, context).attendee_ids:
if user.partner_id.id == attendee.partner_id.id:
return attendee
return False
def get_date_formats(self, cr, uid, context):
lang = context.get("lang")
res_lang = self.pool.get('res.lang')
lang_params = {}
if lang:
ids = res_lang.search(cr, uid, [("code", "=", lang)])
if ids:
lang_params = res_lang.read(cr, uid, ids[0], ["date_format", "time_format"])
# formats will be used for str{f,p}time() which do not support unicode in Python 2, coerce to str
format_date = lang_params.get("date_format", '%B-%d-%Y').encode('utf-8')
format_time = lang_params.get("time_format", '%I-%M %p').encode('utf-8')
return (format_date, format_time)
def get_display_time_tz(self, cr, uid, ids, tz=False, context=None):
context = dict(context or {})
if tz:
context["tz"] = tz
ev = self.browse(cr, uid, ids, context=context)[0]
return self._get_display_time(cr, uid, ev.start, ev.stop, ev.duration, ev.allday, context=context)
def _get_display_time(self, cr, uid, start, stop, zduration, zallday, context=None):
"""
Return date and time (from to from) based on duration with timezone in string :
eg.
1) if user add duration for 2 hours, return : August-23-2013 at (04-30 To 06-30) (Europe/Brussels)
2) if event all day ,return : AllDay, July-31-2013
"""
context = dict(context or {})
tz = context.get('tz', False)
if not tz:  # tz may be stored as False, so don't rely on the default value of get()!
context['tz'] = self.pool.get('res.users').read(cr, SUPERUSER_ID, uid, ['tz'])['tz']
tz = context['tz']
tz = tools.ustr(tz).encode('utf-8') # make safe for str{p,f}time()
format_date, format_time = self.get_date_formats(cr, uid, context=context)
date = fields.datetime.context_timestamp(cr, uid, datetime.strptime(start, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
date_deadline = fields.datetime.context_timestamp(cr, uid, datetime.strptime(stop, tools.DEFAULT_SERVER_DATETIME_FORMAT), context=context)
event_date = date.strftime(format_date)
display_time = date.strftime(format_time)
if zallday:
time = _("AllDay , %s") % (event_date)
elif zduration < 24:
duration = date + timedelta(hours=zduration)
time = ("%s at (%s To %s) (%s)") % (event_date, display_time, duration.strftime(format_time), tz)
else:
time = ("%s at %s To\n %s at %s (%s)") % (event_date, display_time, date_deadline.strftime(format_date), date_deadline.strftime(format_time), tz)
return time
def _compute(self, cr, uid, ids, fields, arg, context=None):
res = {}
if not isinstance(fields, list):
fields = [fields]
for meeting in self.browse(cr, uid, ids, context=context):
meeting_data = {}
res[meeting.id] = meeting_data
attendee = self._find_my_attendee(cr, uid, [meeting.id], context)
for field in fields:
if field == 'is_attendee':
meeting_data[field] = bool(attendee)
elif field == 'attendee_status':
meeting_data[field] = attendee.state if attendee else 'needsAction'
elif field == 'display_time':
meeting_data[field] = self._get_display_time(cr, uid, meeting.start, meeting.stop, meeting.duration, meeting.allday, context=context)
elif field == "display_start":
meeting_data[field] = meeting.start_date if meeting.allday else meeting.start_datetime
elif field == 'start':
meeting_data[field] = meeting.start_date if meeting.allday else meeting.start_datetime
elif field == 'stop':
meeting_data[field] = meeting.stop_date if meeting.allday else meeting.stop_datetime
return res
def _get_rulestring(self, cr, uid, ids, name, arg, context=None):
"""
Gets Recurrence rule string according to value type RECUR of iCalendar from the values given.
@return: dictionary of rrule value.
"""
result = {}
if not isinstance(ids, list):
ids = [ids]
# read these fields as SUPERUSER because a normal read could raise an error if the record is private
events = self.read(cr, SUPERUSER_ID, ids,
['id', 'byday', 'recurrency', 'final_date', 'rrule_type', 'month_by',
'interval', 'count', 'end_type', 'mo', 'tu', 'we', 'th', 'fr', 'sa',
'su', 'day', 'week_list'], context=context)
for event in events:
if event['recurrency']:
result[event['id']] = self.compute_rule_string(event)
else:
result[event['id']] = ''
return result
# retro compatibility function
def _rrule_write(self, cr, uid, ids, field_name, field_value, args, context=None):
return self._set_rulestring(cr, uid, ids, field_name, field_value, args, context=context)
def _set_rulestring(self, cr, uid, ids, field_name, field_value, args, context=None):
if not isinstance(ids, list):
ids = [ids]
data = self._get_empty_rrule_data()
if field_value:
data['recurrency'] = True
for event in self.browse(cr, uid, ids, context=context):
rdate = event.start
update_data = self._parse_rrule(field_value, dict(data), rdate)
data.update(update_data)
self.write(cr, uid, ids, data, context=context)
return True
def _set_date(self, cr, uid, values, id=False, context=None):
if context is None:
context = {}
if values.get('start_datetime') or values.get('start_date') or values.get('start') \
or values.get('stop_datetime') or values.get('stop_date') or values.get('stop'):
allday = values.get("allday", None)
event = self.browse(cr, uid, id, context=context)
if allday is None:
if id:
allday = event.allday
else:
allday = False
_logger.warning("Calendar - All day is not specified, arbitrarily set to False")
#raise osv.except_osv(_('Error!'), ("Need to know if it's an allday or not..."))
key = "date" if allday else "datetime"
notkey = "datetime" if allday else "date"
for fld in ('start', 'stop'):
if values.get('%s_%s' % (fld, key)) or values.get(fld):
values['%s_%s' % (fld, key)] = values.get('%s_%s' % (fld, key)) or values.get(fld)
values['%s_%s' % (fld, notkey)] = None
if fld not in values:
values[fld] = values['%s_%s' % (fld, key)]
diff = False
if allday and (values.get('stop_date') or values.get('start_date')):
stop_date = values.get('stop_date') or event.stop_date
start_date = values.get('start_date') or event.start_date
if stop_date and start_date:
diff = openerp.fields.Date.from_string(stop_date) - openerp.fields.Date.from_string(start_date)
elif values.get('stop_datetime') or values.get('start_datetime'):
stop_datetime = values.get('stop_datetime') or event.stop_datetime
start_datetime = values.get('start_datetime') or event.start_datetime
if stop_datetime and start_datetime:
diff = openerp.fields.Datetime.from_string(stop_datetime) - openerp.fields.Datetime.from_string(start_datetime)
if diff:
duration = float(diff.days) * 24 + (float(diff.seconds) / 3600)
values['duration'] = round(duration, 2)
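# Duration sketch for the computation above: an all-day span from
# start_date='2014-06-01' to stop_date='2014-06-03' gives diff.days == 2,
# hence values['duration'] == 48.0 hours; datetime events add
# diff.seconds / 3600 on top, rounded to two decimals.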
_track = {
'location': {
'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
},
'start': {
'calendar.subtype_invitation': lambda self, cr, uid, obj, ctx=None: True,
},
}
_columns = {
'id': fields.integer('ID', readonly=True),
'state': fields.selection([('draft', 'Unconfirmed'), ('open', 'Confirmed')], string='Status', readonly=True, track_visibility='onchange'),
'name': fields.char('Meeting Subject', required=True, states={'done': [('readonly', True)]}),
'is_attendee': fields.function(_compute, string='Attendee', type="boolean", multi='attendee'),
'attendee_status': fields.function(_compute, string='Attendee Status', type="selection", selection=calendar_attendee.STATE_SELECTION, multi='attendee'),
'display_time': fields.function(_compute, string='Event Time', type="char", multi='attendee'),
'display_start': fields.function(_compute, string='Date', type="char", multi='attendee', store=True),
'allday': fields.boolean('All Day', states={'done': [('readonly', True)]}),
'start': fields.function(_compute, string='Calculated start', type="datetime", multi='attendee', store=True, required=True),
'stop': fields.function(_compute, string='Calculated stop', type="datetime", multi='attendee', store=True, required=True),
'start_date': fields.date('Start Date', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'start_datetime': fields.datetime('Start DateTime', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'stop_date': fields.date('End Date', states={'done': [('readonly', True)]}, track_visibility='onchange'),
'stop_datetime': fields.datetime('End Datetime', states={'done': [('readonly', True)]}, track_visibility='onchange'), # old date_deadline
'duration': fields.float('Duration', states={'done': [('readonly', True)]}),
'description': fields.text('Description', states={'done': [('readonly', True)]}),
'class': fields.selection([('public', 'Public'), ('private', 'Private'), ('confidential', 'Public for Employees')], 'Privacy', states={'done': [('readonly', True)]}),
'location': fields.char('Location', help="Location of Event", track_visibility='onchange', states={'done': [('readonly', True)]}),
'show_as': fields.selection([('free', 'Free'), ('busy', 'Busy')], 'Show Time as', states={'done': [('readonly', True)]}),
# RECURRENCE FIELD
'rrule': fields.function(_get_rulestring, type='char', fnct_inv=_set_rulestring, store=True, string='Recurrent Rule'),
'rrule_type': fields.selection([('daily', 'Day(s)'), ('weekly', 'Week(s)'), ('monthly', 'Month(s)'), ('yearly', 'Year(s)')], 'Recurrency', states={'done': [('readonly', True)]}, help="Let the event automatically repeat at that interval"),
'recurrency': fields.boolean('Recurrent', help="Recurrent Meeting"),
'recurrent_id': fields.integer('Recurrent ID'),
'recurrent_id_date': fields.datetime('Recurrent ID date'),
'end_type': fields.selection([('count', 'Number of repetitions'), ('end_date', 'End date')], 'Recurrence Termination'),
'interval': fields.integer('Repeat Every', help="Repeat every (Days/Week/Month/Year)"),
'count': fields.integer('Repeat', help="Repeat x times"),
'mo': fields.boolean('Mon'),
'tu': fields.boolean('Tue'),
'we': fields.boolean('Wed'),
'th': fields.boolean('Thu'),
'fr': fields.boolean('Fri'),
'sa': fields.boolean('Sat'),
'su': fields.boolean('Sun'),
'month_by': fields.selection([('date', 'Date of month'), ('day', 'Day of month')], 'Option', oldname='select1'),
'day': fields.integer('Date of month'),
'week_list': fields.selection([('MO', 'Monday'), ('TU', 'Tuesday'), ('WE', 'Wednesday'), ('TH', 'Thursday'), ('FR', 'Friday'), ('SA', 'Saturday'), ('SU', 'Sunday')], 'Weekday'),
'byday': fields.selection([('1', 'First'), ('2', 'Second'), ('3', 'Third'), ('4', 'Fourth'), ('5', 'Fifth'), ('-1', 'Last')], 'By day'),
'final_date': fields.date('Repeat Until'), # The last event of a recurrence
'user_id': fields.many2one('res.users', 'Responsible', states={'done': [('readonly', True)]}),
'color_partner_id': fields.related('user_id', 'partner_id', 'id', type="integer", string="colorize", store=False), # Color of creator
'active': fields.boolean('Active', help="If the active field is set to false, it will allow you to hide the event alarm information without removing it."),
'categ_ids': fields.many2many('calendar.event.type', 'meeting_category_rel', 'event_id', 'type_id', 'Tags'),
'attendee_ids': fields.one2many('calendar.attendee', 'event_id', 'Attendees', ondelete='cascade'),
'partner_ids': fields.many2many('res.partner', 'calendar_event_res_partner_rel', string='Attendees', states={'done': [('readonly', True)]}),
'alarm_ids': fields.many2many('calendar.alarm', 'calendar_alarm_calendar_event_rel', string='Reminders', ondelete="restrict", copy=False),
}
def _get_default_partners(self, cr, uid, ctx=None):
ret = [self.pool['res.users'].browse(cr, uid, uid, context=ctx).partner_id.id]
active_id = ctx.get('active_id')
if ctx.get('active_model') == 'res.partner' and active_id:
if active_id not in ret:
ret.append(active_id)
return ret
_defaults = {
'end_type': 'count',
'count': 1,
'rrule_type': False,
'allday': False,
'state': 'draft',
'class': 'public',
'show_as': 'busy',
'month_by': 'date',
'interval': 1,
'active': 1,
'user_id': lambda self, cr, uid, ctx: uid,
'partner_ids': _get_default_partners,
}
def _check_closing_date(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context=context):
if event.start_datetime and event.stop_datetime < event.start_datetime:
return False
if event.start_date and event.stop_date < event.start_date:
return False
return True
_constraints = [
(_check_closing_date, 'Error ! End date cannot be set before start date.', ['start_datetime', 'stop_datetime', 'start_date', 'stop_date'])
]
def onchange_allday(self, cr, uid, ids, start=False, end=False, starttime=False, endtime=False, startdatetime=False, enddatetime=False, checkallday=False, context=None):
value = {}
if not ((starttime and endtime) or (start and end)):  # on first initialization, no datetime is set yet
return value
if checkallday: # from datetime to date
startdatetime = startdatetime or start
if startdatetime:
start = datetime.strptime(startdatetime, DEFAULT_SERVER_DATETIME_FORMAT)
value['start_date'] = datetime.strftime(start, DEFAULT_SERVER_DATE_FORMAT)
enddatetime = enddatetime or end
if enddatetime:
end = datetime.strptime(enddatetime, DEFAULT_SERVER_DATETIME_FORMAT)
value['stop_date'] = datetime.strftime(end, DEFAULT_SERVER_DATE_FORMAT)
else: # from date to datetime
user = self.pool['res.users'].browse(cr, uid, uid, context)
tz = pytz.timezone(user.tz) if user.tz else pytz.utc
if starttime:
start = openerp.fields.Datetime.from_string(starttime)
startdate = tz.localize(start) # Add "+hh:mm" timezone
startdate = startdate.replace(hour=8) # Set 8 AM in localtime
startdate = startdate.astimezone(pytz.utc) # Convert to UTC
value['start_datetime'] = datetime.strftime(startdate, DEFAULT_SERVER_DATETIME_FORMAT)
elif start:
value['start_datetime'] = start
if endtime:
end = datetime.strptime(endtime.split(' ')[0], DEFAULT_SERVER_DATE_FORMAT)
enddate = tz.localize(end).replace(hour=18).astimezone(pytz.utc)
value['stop_datetime'] = datetime.strftime(enddate, DEFAULT_SERVER_DATETIME_FORMAT)
elif end:
value['stop_datetime'] = end
return {'value': value}
def onchange_dates(self, cr, uid, ids, fromtype, start=False, end=False, checkallday=False, allday=False, context=None):
"""Returns duration and end date based on values passed
@param ids: List of calendar event's IDs.
"""
value = {}
if checkallday != allday:
return value
value['allday'] = checkallday  # force the value to be rewritten
if allday:
if fromtype == 'start' and start:
start = datetime.strptime(start, DEFAULT_SERVER_DATE_FORMAT)
value['start_datetime'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
value['start'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
if fromtype == 'stop' and end:
end = datetime.strptime(end, DEFAULT_SERVER_DATE_FORMAT)
value['stop_datetime'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
value['stop'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
else:
if fromtype == 'start' and start:
start = datetime.strptime(start, DEFAULT_SERVER_DATETIME_FORMAT)
value['start_date'] = datetime.strftime(start, DEFAULT_SERVER_DATE_FORMAT)
value['start'] = datetime.strftime(start, DEFAULT_SERVER_DATETIME_FORMAT)
if fromtype == 'stop' and end:
end = datetime.strptime(end, DEFAULT_SERVER_DATETIME_FORMAT)
value['stop_date'] = datetime.strftime(end, DEFAULT_SERVER_DATE_FORMAT)
value['stop'] = datetime.strftime(end, DEFAULT_SERVER_DATETIME_FORMAT)
return {'value': value}
def new_invitation_token(self, cr, uid, record, partner_id):
return uuid.uuid4().hex
def create_attendees(self, cr, uid, ids, context=None):
if context is None:
context = {}
user_obj = self.pool['res.users']
current_user = user_obj.browse(cr, uid, uid, context=context)
res = {}
for event in self.browse(cr, uid, ids, context):
attendees = {}
for att in event.attendee_ids:
attendees[att.partner_id.id] = True
new_attendees = []
new_att_partner_ids = []
for partner in event.partner_ids:
if partner.id in attendees:
continue
access_token = self.new_invitation_token(cr, uid, event, partner.id)
values = {
'partner_id': partner.id,
'event_id': event.id,
'access_token': access_token,
'email': partner.email,
}
if partner.id == current_user.partner_id.id:
values['state'] = 'accepted'
att_id = self.pool['calendar.attendee'].create(cr, uid, values, context=context)
new_attendees.append(att_id)
new_att_partner_ids.append(partner.id)
if not current_user.email or current_user.email != partner.email:
mail_from = current_user.email or tools.config.get('email_from', False)
if not context.get('no_email'):
if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, att_id, email_from=mail_from, context=context):
self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee %s") % (partner.name,), subtype="calendar.subtype_invitation", context=context)
if new_attendees:
self.write(cr, uid, [event.id], {'attendee_ids': [(4, att) for att in new_attendees]}, context=context)
if new_att_partner_ids:
self.message_subscribe(cr, uid, [event.id], new_att_partner_ids, context=context)
# We remove old attendees who are not in partner_ids now.
all_partner_ids = [part.id for part in event.partner_ids]
all_part_attendee_ids = [att.partner_id.id for att in event.attendee_ids]
all_attendee_ids = [att.id for att in event.attendee_ids]
partner_ids_to_remove = list(set(all_part_attendee_ids + new_att_partner_ids) - set(all_partner_ids))
attendee_ids_to_remove = []
if partner_ids_to_remove:
attendee_ids_to_remove = self.pool["calendar.attendee"].search(cr, uid, [('partner_id.id', 'in', partner_ids_to_remove), ('event_id.id', '=', event.id)], context=context)
if attendee_ids_to_remove:
self.pool['calendar.attendee'].unlink(cr, uid, attendee_ids_to_remove, context)
res[event.id] = {
'new_attendee_ids': new_attendees,
'old_attendee_ids': all_attendee_ids,
'removed_attendee_ids': attendee_ids_to_remove
}
return res
def get_search_fields(self, browse_event, order_fields, r_date=None):
sort_fields = {}
for ord in order_fields:
if ord == 'id' and r_date:
sort_fields[ord] = '%s-%s' % (browse_event[ord], r_date.strftime("%Y%m%d%H%M%S"))
else:
sort_fields[ord] = browse_event[ord]
if type(browse_event[ord]) is openerp.osv.orm.browse_record:
name_get = browse_event[ord].name_get()
if len(name_get) and len(name_get[0]) >= 2:
sort_fields[ord] = name_get[0][1]
if r_date:
sort_fields['sort_start'] = r_date.strftime("%Y%m%d%H%M%S")
else:
sort_fields['sort_start'] = browse_event['display_start'].replace(' ', '').replace('-', '')
return sort_fields
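# Sorting-key sketch (values hypothetical): for a recurring event with id 7
# and an occurrence on 2014-06-01 10:00, the dict above contains
# {'id': '7-20140601100000', 'sort_start': '20140601100000', ...} whenever
# 'id' is among the requested order fields, so virtual occurrences sort
# chronologically alongside plain events.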
def get_recurrent_ids(self, cr, uid, event_id, domain, order=None, context=None):
"""Gives virtual event ids for recurring events
This method gives ids of dates that comes between start date and end date of calendar views
@param order: The fields (comma separated, format "FIELD {DESC|ASC}") on which the events should be sorted
"""
if not context:
context = {}
if isinstance(event_id, (basestring, int, long)):
ids_to_browse = [event_id] # keep select for return
else:
ids_to_browse = event_id
if order:
order_fields = [field.split()[0] for field in order.split(',')]
else:
# fallback on self._order defined on the model
order_fields = [field.split()[0] for field in self._order.split(',')]
if 'id' not in order_fields:
order_fields.append('id')
result_data = []
result = []
for ev in self.browse(cr, uid, ids_to_browse, context=context):
if not ev.recurrency or not ev.rrule:
result.append(ev.id)
result_data.append(self.get_search_fields(ev, order_fields))
continue
rdates = self.get_recurrent_date_by_event(cr, uid, ev, context=context)
for r_date in rdates:
# evaluate the domain against this occurrence date:
# step 1: for date conditions, replace the expression by its boolean result; replace all other expressions by True
# step 2: evaluate the & and | operators
# then check whether any leaf evaluated to False
pile = []
ok = True
for arg in domain:
if str(arg[0]) in ('start', 'stop', 'final_date'):
if (arg[1] == '='):
ok = r_date.strftime('%Y-%m-%d') == arg[2]
if (arg[1] == '>'):
ok = r_date.strftime('%Y-%m-%d') > arg[2]
if (arg[1] == '<'):
ok = r_date.strftime('%Y-%m-%d') < arg[2]
if (arg[1] == '>='):
ok = r_date.strftime('%Y-%m-%d') >= arg[2]
if (arg[1] == '<='):
ok = r_date.strftime('%Y-%m-%d') <= arg[2]
pile.append(ok)
elif str(arg) == str('&') or str(arg) == str('|'):
pile.append(arg)
else:
pile.append(True)
pile.reverse()
new_pile = []
for item in pile:
if not isinstance(item, basestring):
res = item
elif str(item) == str('&'):
first = new_pile.pop()
second = new_pile.pop()
res = first and second
elif str(item) == str('|'):
first = new_pile.pop()
second = new_pile.pop()
res = first or second
new_pile.append(res)
if [True for item in new_pile if not item]:
continue
result_data.append(self.get_search_fields(ev, order_fields, r_date=r_date))
if order_fields:
uniq = lambda it: collections.OrderedDict((id(x), x) for x in it).values()
def comparer(left, right):
for fn, mult in comparers:
result = cmp(fn(left), fn(right))
if result:
return mult * result
return 0
sort_params = [key.split()[0] if key[-4:].lower() != 'desc' else '-%s' % key.split()[0] for key in (order or self._order).split(',')]
sort_params = uniq([comp if comp not in ['start', 'start_date', 'start_datetime'] else 'sort_start' for comp in sort_params])
sort_params = uniq([comp if comp not in ['-start', '-start_date', '-start_datetime'] else '-sort_start' for comp in sort_params])
comparers = [((itemgetter(col[1:]), -1) if col[0] == '-' else (itemgetter(col), 1)) for col in sort_params]
ids = [r['id'] for r in sorted(result_data, cmp=comparer)]
if isinstance(event_id, (basestring, int, long)):
return ids and ids[0] or False
else:
return ids
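# Usage sketch (event id and dates hypothetical): expanding one recurring
# event against a calendar-view domain returns one virtual id per matching
# occurrence, e.g.
#
#     self.get_recurrent_ids(cr, uid, [7],
#         [('start', '>=', '2014-06-01'), ('stop', '<=', '2014-06-30')])
#     # -> ['7-20140602100000', '7-20140609100000', ...]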
def compute_rule_string(self, data):
"""
Compute rule string according to value type RECUR of iCalendar from the values given.
@param self: the object pointer
@param data: dictionary of freq and interval value
@return: string containing recurring rule (empty if no rule)
"""
if data['interval'] and data['interval'] < 0:
raise osv.except_osv(_('Warning!'), _('Interval cannot be negative.'))
if data['count'] and data['count'] <= 0:
raise osv.except_osv(_('Warning!'), _('Count cannot be negative or 0.'))
def get_week_string(freq, data):
weekdays = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
if freq == 'weekly':
byday = map(lambda x: x.upper(), filter(lambda x: data.get(x) and x in weekdays, data))
if byday:
return ';BYDAY=' + ','.join(byday)
return ''
def get_month_string(freq, data):
if freq == 'monthly':
if data.get('month_by') == 'date' and (data.get('day') < 1 or data.get('day') > 31):
raise osv.except_osv(_('Error!'), ("Please select a proper day of the month."))
if data.get('month_by') == 'day': # Eg : Second Monday of the month
return ';BYDAY=' + data.get('byday') + data.get('week_list')
elif data.get('month_by') == 'date': # Eg : 16th of the month
return ';BYMONTHDAY=' + str(data.get('day'))
return ''
def get_end_date(data):
if data.get('final_date'):
data['end_date_new'] = ''.join(re.findall(r'\d', data.get('final_date'))) + 'T235959Z'
return (data.get('end_type') == 'count' and (';COUNT=' + str(data.get('count'))) or '') +\
((data.get('end_date_new') and data.get('end_type') == 'end_date' and (';UNTIL=' + data.get('end_date_new'))) or '')
freq = data.get('rrule_type', False) # day/week/month/year
res = ''
if freq:
interval_string = data.get('interval') and (';INTERVAL=' + str(data.get('interval'))) or ''
res = 'FREQ=' + freq.upper() + get_week_string(freq, data) + interval_string + get_end_date(data) + get_month_string(freq, data)
return res
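# Worked example of the builder above (field values hypothetical): a weekly
# rule on Monday and Friday, repeated 10 times with interval 1, produces
#
#     'FREQ=WEEKLY;BYDAY=MO,FR;INTERVAL=1;COUNT=10'
#
# (the BYDAY order follows Python 2 dict iteration order, so it is not
# guaranteed to be stable).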
def _get_empty_rrule_data(self):
return {
'byday': False,
'recurrency': False,
'final_date': False,
'rrule_type': False,
'month_by': False,
'interval': 0,
'count': False,
'end_type': False,
'mo': False,
'tu': False,
'we': False,
'th': False,
'fr': False,
'sa': False,
'su': False,
'day': False,
'week_list': False
}
def _parse_rrule(self, rule, data, date_start):
day_list = ['mo', 'tu', 'we', 'th', 'fr', 'sa', 'su']
rrule_type = ['yearly', 'monthly', 'weekly', 'daily']
r = rrule.rrulestr(rule, dtstart=datetime.strptime(date_start, DEFAULT_SERVER_DATETIME_FORMAT))
if r._freq > 0 and r._freq < 4:
data['rrule_type'] = rrule_type[r._freq]
data['count'] = r._count
data['interval'] = r._interval
data['final_date'] = r._until and r._until.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
#repeat weekly
if r._byweekday:
for i in xrange(0, 7):
if i in r._byweekday:
data[day_list[i]] = True
data['rrule_type'] = 'weekly'
#repeat monthly by nweekday ((weekday, weeknumber), )
if r._bynweekday:
data['week_list'] = day_list[r._bynweekday[0][0]].upper()
data['byday'] = str(r._bynweekday[0][1])
data['month_by'] = 'day'
data['rrule_type'] = 'monthly'
if r._bymonthday:
data['day'] = r._bymonthday[0]
data['month_by'] = 'date'
data['rrule_type'] = 'monthly'
# yearly repetition is mapped to monthly in OpenERP: take the same information as monthly but multiply the interval by 12
if r._bymonth:
data['interval'] = data['interval'] * 12
# FIXME: handle the "forever" case properly
# end of recurrence: unbounded repetition is not supported yet, so cap it at 100 occurrences
if not (data.get('count') or data.get('final_date')):
data['count'] = 100
if data.get('count'):
data['end_type'] = 'count'
else:
data['end_type'] = 'end_date'
return data
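# Inverse-mapping sketch: parsing 'FREQ=WEEKLY;INTERVAL=2;COUNT=5;BYDAY=MO,WE'
# with the helper above yields data['rrule_type'] == 'weekly',
# data['interval'] == 2, data['count'] == 5, data['mo'] == data['we'] == True
# and data['end_type'] == 'count'; rules with neither COUNT nor UNTIL are
# capped at count=100, since unbounded recurrence is not supported.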
def message_get_subscription_data(self, cr, uid, ids, user_pid=None, context=None):
res = {}
for virtual_id in ids:
real_id = calendar_id2real_id(virtual_id)
result = super(calendar_event, self).message_get_subscription_data(cr, uid, [real_id], user_pid=None, context=context)
res[virtual_id] = result[real_id]
return res
def onchange_partner_ids(self, cr, uid, ids, value, context=None):
""" The basic purpose of this method is to check that destination partners
effectively have email addresses. Otherwise a warning is thrown.
:param value: value format: [[6, 0, [3, 4]]]
"""
res = {'value': {}}
if not value or not value[0] or not value[0][0] == 6:
return res
res.update(self.check_partners_email(cr, uid, value[0][2], context=context))
return res
def check_partners_email(self, cr, uid, partner_ids, context=None):
""" Verify that selected partner_ids have an email_address defined.
Otherwise throw a warning. """
partner_wo_email_lst = []
for partner in self.pool['res.partner'].browse(cr, uid, partner_ids, context=context):
if not partner.email:
partner_wo_email_lst.append(partner)
if not partner_wo_email_lst:
return {}
warning_msg = _('The following contacts have no email address:')
for partner in partner_wo_email_lst:
warning_msg += '\n- %s' % (partner.name)
return {'warning': {
'title': _('Email addresses not found'),
'message': warning_msg,
}}
# shows events of the day for this user
def _needaction_domain_get(self, cr, uid, context=None):
return [
('stop', '<=', time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 23:59:59')),
('start', '>=', time.strftime(DEFAULT_SERVER_DATE_FORMAT + ' 00:00:00')),
('user_id', '=', uid),
]
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, **kwargs):
if isinstance(thread_id, basestring):
thread_id = get_real_ids(thread_id)
if context.get('default_date'):
del context['default_date']
return super(calendar_event, self).message_post(cr, uid, thread_id, body=body, subject=subject, type=type, subtype=subtype, parent_id=parent_id, attachments=attachments, context=context, **kwargs)
def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
return super(calendar_event, self).message_subscribe(cr, uid, get_real_ids(ids), partner_ids, subtype_ids=subtype_ids, context=context)
def message_unsubscribe(self, cr, uid, ids, partner_ids, context=None):
return super(calendar_event, self).message_unsubscribe(cr, uid, get_real_ids(ids), partner_ids, context=context)
def do_sendmail(self, cr, uid, ids, context=None):
for event in self.browse(cr, uid, ids, context):
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if current_user.email:
if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, [att.id for att in event.attendee_ids], email_from=current_user.email, context=context):
self.message_post(cr, uid, event.id, body=_("An invitation email has been sent to attendee(s)"), subtype="calendar.subtype_invitation", context=context)
return
def get_attendee(self, cr, uid, meeting_id, context=None):
# Used for view in controller
invitation = {'meeting': {}, 'attendee': []}
meeting = self.browse(cr, uid, int(meeting_id), context=context)
invitation['meeting'] = {
'event': meeting.name,
'where': meeting.location,
'when': meeting.display_time
}
for attendee in meeting.attendee_ids:
invitation['attendee'].append({'name': attendee.cn, 'status': attendee.state})
return invitation
def get_interval(self, cr, uid, ids, date, interval, tz=None, context=None):
''' Format and localize some dates to be used in email templates
:param string date: date/time to be formatted
:param string interval: Among 'day', 'month', 'dayname' and 'time' indicating the desired formatting
:param string tz: Timezone indicator (optional)
:return unicode: Formatted date or time (as unicode string, to prevent jinja2 crash)
(Function used only in calendar_event_data.xml) '''
if context is None: context = {}
res = u''  # fallback for unknown interval values
date = openerp.fields.Datetime.from_string(date)
if tz:
timezone = pytz.timezone(tz or 'UTC')
date = date.replace(tzinfo=pytz.timezone('UTC')).astimezone(timezone)
if interval == 'day':
# Day number (1-31)
res = unicode(date.day)
elif interval == 'month':
# Localized month name and year
res = babel.dates.format_date(date=date, format='MMMM y', locale=context.get('lang', 'en_US'))
elif interval == 'dayname':
# Localized day name
res = babel.dates.format_date(date=date, format='EEEE', locale=context.get('lang', 'en_US'))
elif interval == 'time':
# Localized time
dummy, format_time = self.get_date_formats(cr, uid, context=context)
res = tools.ustr(date.strftime(format_time + " %Z"))
return res
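# Template usage sketch (date value hypothetical): with lang 'en_US',
# get_interval(cr, uid, ids, '2014-06-01 14:00:00', 'month') returns
# u'June 2014' via babel, while interval='day' simply returns u'1'.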
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('mymeetings', False):
partner_id = self.pool['res.users'].browse(cr, uid, uid, context).partner_id.id
args += [('partner_ids', 'in', [partner_id])]
new_args = []
for arg in args:
new_arg = arg
if arg[0] in ('start_date', 'start_datetime', 'start',) and arg[1] == ">=":
if context.get('virtual_id', True):
new_args += ['|', '&', ('recurrency', '=', 1), ('final_date', arg[1], arg[2])]
elif arg[0] == "id":
new_id = get_real_ids(arg[2])
new_arg = (arg[0], arg[1], new_id)
new_args.append(new_arg)
if not context.get('virtual_id', True):
return super(calendar_event, self).search(cr, uid, new_args, offset=offset, limit=limit, order=order, count=count, context=context)
# offset, limit, order and count must be treated separately as we may need to deal with virtual ids
res = super(calendar_event, self).search(cr, uid, new_args, offset=0, limit=0, order=None, context=context, count=False)
res = self.get_recurrent_ids(cr, uid, res, args, order=order, context=context)
if count:
return len(res)
elif limit:
return res[offset: offset + limit]
return res
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
self._set_date(cr, uid, default, id=default.get('id'), context=context)
return super(calendar_event, self).copy(cr, uid, calendar_id2real_id(id), default, context)
def _detach_one_event(self, cr, uid, id, values=None, context=None):
values = dict(values or {})  # avoid the mutable default argument
real_event_id = calendar_id2real_id(id)
data = self.read(cr, uid, id, ['allday', 'start', 'stop', 'rrule', 'duration'])
data['start_date' if data['allday'] else 'start_datetime'] = data['start']
data['stop_date' if data['allday'] else 'stop_datetime'] = data['stop']
if data.get('rrule'):
data.update(
values,
recurrent_id=real_event_id,
recurrent_id_date=data.get('start'),
rrule_type=False,
rrule='',
recurrency=False,
final_date=datetime.strptime(data.get('start'), DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(hours=values.get('duration', False) or data.get('duration'))
)
#do not copy the id
if data.get('id'):
del(data['id'])
new_id = self.copy(cr, uid, real_event_id, default=data, context=context)
return new_id
def open_after_detach_event(self, cr, uid, ids, context=None):
if context is None:
context = {}
new_id = self._detach_one_event(cr, uid, ids[0], context=context)
return {
'type': 'ir.actions.act_window',
'res_model': 'calendar.event',
'view_mode': 'form',
'res_id': new_id,
'target': 'current',
'flags': {'form': {'action_buttons': True, 'options': {'mode': 'edit'}}}
}
def _name_search(self, cr, user, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
for arg in args:
if arg[0] == 'id':
for n, calendar_id in enumerate(arg[2]):
if isinstance(calendar_id, basestring):
arg[2][n] = calendar_id.split('-')[0]
return super(calendar_event, self)._name_search(cr, user, name=name, args=args, operator=operator, context=context, limit=limit, name_get_uid=name_get_uid)
def write(self, cr, uid, ids, values, context=None):
def _only_changes_to_apply_on_real_ids(field_names):
''' return True if changes are only to be made on the real ids'''
for field in field_names:
if field in ['start', 'start_date', 'start_datetime', 'stop', 'stop_date', 'stop_datetime', 'active']:
return True
return False
if not isinstance(ids, (tuple, list)):
ids = [ids]
context = context or {}
self._set_date(cr, uid, values, id=ids[0], context=context)
for one_ids in ids:
if isinstance(one_ids, (basestring, int, long)):
if len(str(one_ids).split('-')) == 1:
ids = [int(one_ids)]
else:
ids = [one_ids]
res = False
new_id = False
# Special write of complex IDS
for event_id in list(ids):
if len(str(event_id).split('-')) == 1:
continue
ids.remove(event_id)
real_event_id = calendar_id2real_id(event_id)
# if we are setting the recurrency flag to False or if we are only changing fields that
# should be only updated on the real ID and not on the virtual (like message_follower_ids):
# then set real ids to be updated.
if not values.get('recurrency', True) or not _only_changes_to_apply_on_real_ids(values.keys()):
ids.append(real_event_id)
continue
else:
data = self.read(cr, uid, event_id, ['start', 'stop', 'rrule', 'duration'])
if data.get('rrule'):
new_id = self._detach_one_event(cr, uid, event_id, values, context=context)
res = super(calendar_event, self).write(cr, uid, [int(event_id) for event_id in ids], values, context=context)
# set end_date for calendar searching
if values.get('recurrency', True) and values.get('end_type', 'count') == 'count' and \
(values.get('rrule_type') or values.get('count') or values.get('start') or values.get('stop')):
for id in ids:
final_date = self._get_recurrency_end_date(cr, uid, id, context=context)
super(calendar_event, self).write(cr, uid, [id], {'final_date': final_date}, context=context)
attendees_create = False
if values.get('partner_ids', False):
attendees_create = self.create_attendees(cr, uid, ids, context)
if (values.get('start_date') or values.get('start_datetime', False)) and values.get('active', True):
the_id = new_id or (ids and int(ids[0]))
if the_id:
if attendees_create:
attendees_create = attendees_create[the_id]
mail_to_ids = list(set(attendees_create['old_attendee_ids']) - set(attendees_create['removed_attendee_ids']))
else:
mail_to_ids = [att.id for att in self.browse(cr, uid, the_id, context=context).attendee_ids]
if mail_to_ids:
current_user = self.pool['res.users'].browse(cr, uid, uid, context=context)
if self.pool['calendar.attendee']._send_mail_to_attendees(cr, uid, mail_to_ids, template_xmlid='calendar_template_meeting_changedate', email_from=current_user.email, context=context):
self.message_post(cr, uid, the_id, body=_("A email has been send to specify that the date has been changed !"), subtype="calendar.subtype_invitation", context=context)
return res
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
self._set_date(cr, uid, vals, id=False, context=context)
if 'user_id' not in vals:  # otherwise quick_create breaks when the view is filtered on another user
vals['user_id'] = uid
res = super(calendar_event, self).create(cr, uid, vals, context=context)
final_date = self._get_recurrency_end_date(cr, uid, res, context=context)
self.write(cr, uid, [res], {'final_date': final_date}, context=context)
self.create_attendees(cr, uid, [res], context=context)
return res
def export_data(self, cr, uid, ids, *args, **kwargs):
""" Override to convert virtual ids to ids """
real_ids = []
for real_id in get_real_ids(ids):
if real_id not in real_ids:
real_ids.append(real_id)
return super(calendar_event, self).export_data(cr, uid, real_ids, *args, **kwargs)
def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True):
context = dict(context or {})
if 'date' in groupby:
raise osv.except_osv(_('Warning!'), _('Group by date is not supported, use the calendar view instead.'))
virtual_id = context.get('virtual_id', True)
context.update({'virtual_id': False})
res = super(calendar_event, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy)
return res
def read(self, cr, uid, ids, fields=None, context=None, load='_classic_read'):
if context is None:
context = {}
fields2 = fields and fields[:] or None
EXTRAFIELDS = ('class', 'user_id', 'duration', 'allday', 'start', 'start_date', 'start_datetime', 'rrule')
for f in EXTRAFIELDS:
if fields and (f not in fields):
fields2.append(f)
if isinstance(ids, (basestring, int, long)):
select = [ids]
else:
select = ids
select = map(lambda x: (x, calendar_id2real_id(x)), select)
result = []
real_data = super(calendar_event, self).read(cr, uid, [real_id for calendar_id, real_id in select], fields=fields2, context=context, load=load)
real_data = dict(zip([x['id'] for x in real_data], real_data))
for calendar_id, real_id in select:
res = real_data[real_id].copy()
ls = calendar_id2real_id(calendar_id, with_date=res and res.get('duration', 0) > 0 and res.get('duration') or 1)
if not isinstance(ls, (basestring, int, long)) and len(ls) >= 2:
res['start'] = ls[1]
res['stop'] = ls[2]
if res['allday']:
res['start_date'] = ls[1]
res['stop_date'] = ls[2]
else:
res['start_datetime'] = ls[1]
res['stop_datetime'] = ls[2]
if 'display_time' in fields:
res['display_time'] = self._get_display_time(cr, uid, ls[1], ls[2], res['duration'], res['allday'], context=context)
res['id'] = calendar_id
result.append(res)
for r in result:
if r['user_id']:
user_id = type(r['user_id']) in (tuple, list) and r['user_id'][0] or r['user_id']
if user_id == uid:
continue
if r['class'] == 'private':
for f in r.keys():
if f not in ('id', 'allday', 'start', 'stop', 'duration', 'user_id', 'state', 'interval', 'count', 'recurrent_id_date', 'rrule'):
if isinstance(r[f], list):
r[f] = []
else:
r[f] = False
if f == 'name':
r[f] = _('Busy')
for r in result:
for k in EXTRAFIELDS:
if (k in r) and (fields and (k not in fields)):
del r[k]
if isinstance(ids, (basestring, int, long)):
return result and result[0] or False
return result
def unlink(self, cr, uid, ids, can_be_deleted=True, context=None):
if not isinstance(ids, list):
ids = [ids]
res = False
ids_to_exclure = []
ids_to_unlink = []
for event_id in ids:
if can_be_deleted and len(str(event_id).split('-')) == 1: # if ID REAL
if self.browse(cr, uid, int(event_id), context).recurrent_id:
ids_to_exclure.append(event_id)
else:
ids_to_unlink.append(int(event_id))
else:
ids_to_exclure.append(event_id)
if ids_to_unlink:
res = super(calendar_event, self).unlink(cr, uid, ids_to_unlink, context=context)
if ids_to_exclure:
for id_to_exclure in ids_to_exclure:
res = self.write(cr, uid, id_to_exclure, {'active': False}, context=context)
return res
class mail_message(osv.Model):
_inherit = "mail.message"
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
'''
Convert a search on virtual ids into a search on the corresponding real ids, then call super().
'''
for index in range(len(args)):
if args[index][0] == "res_id" and isinstance(args[index][2], basestring):
args[index][2] = get_real_ids(args[index][2])
return super(mail_message, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
def _find_allowed_model_wise(self, cr, uid, doc_model, doc_dict, context=None):
if context is None:
context = {}
if doc_model == 'calendar.event':
order = context.get('order', self._order)
for virtual_id in self.pool[doc_model].get_recurrent_ids(cr, uid, doc_dict.keys(), [], order=order, context=context):
doc_dict.setdefault(virtual_id, doc_dict[get_real_ids(virtual_id)])
return super(mail_message, self)._find_allowed_model_wise(cr, uid, doc_model, doc_dict, context=context)
class ir_attachment(osv.Model):
_inherit = "ir.attachment"
def search(self, cr, uid, args, offset=0, limit=0, order=None, context=None, count=False):
'''
        Convert any virtual ids in the search domain to real ids, then call super().
'''
for index in range(len(args)):
if args[index][0] == "res_id" and isinstance(args[index][2], basestring):
args[index][2] = get_real_ids(args[index][2])
return super(ir_attachment, self).search(cr, uid, args, offset=offset, limit=limit, order=order, context=context, count=count)
def write(self, cr, uid, ids, vals, context=None):
'''
        When writing an attachment (new or not), convert any virtual ids to real ids.
'''
if isinstance(vals.get('res_id'), basestring):
vals['res_id'] = get_real_ids(vals.get('res_id'))
return super(ir_attachment, self).write(cr, uid, ids, vals, context=context)
class ir_http(osv.AbstractModel):
_inherit = 'ir.http'
def _auth_method_calendar(self):
token = request.params['token']
db = request.params['db']
registry = openerp.modules.registry.RegistryManager.get(db)
attendee_pool = registry.get('calendar.attendee')
error_message = False
with registry.cursor() as cr:
attendee_id = attendee_pool.search(cr, openerp.SUPERUSER_ID, [('access_token', '=', token)])
if not attendee_id:
error_message = """Invalid Invitation Token."""
elif request.session.uid and request.session.login != 'anonymous':
                # valid session, but the logged-in user does not match the attendee
attendee = attendee_pool.browse(cr, openerp.SUPERUSER_ID, attendee_id[0])
user = registry.get('res.users').browse(cr, openerp.SUPERUSER_ID, request.session.uid)
if attendee.partner_id.id != user.partner_id.id:
error_message = """Invitation cannot be forwarded via email. This event/meeting belongs to %s and you are logged in as %s. Please ask organizer to add you.""" % (attendee.email, user.email)
if error_message:
raise BadRequest(error_message)
return True
class invite_wizard(osv.osv_memory):
_inherit = 'mail.wizard.invite'
def default_get(self, cr, uid, fields, context=None):
'''
        In case someone clicked the 'invite others' wizard in the followers widget, convert any virtual ids to real ids.
'''
if 'default_res_id' in context:
context = dict(context, default_res_id=get_real_ids(context['default_res_id']))
result = super(invite_wizard, self).default_get(cr, uid, fields, context=context)
if 'res_id' in result:
result['res_id'] = get_real_ids(result['res_id'])
return result
|
pbrod/scipy
|
refs/heads/master
|
benchmarks/benchmarks/signal_filtering.py
|
40
|
from __future__ import division, absolute_import, print_function
import numpy as np
try:
from scipy.signal import lfilter, firwin, decimate
except ImportError:
pass
from .common import Benchmark
class Decimate(Benchmark):
param_names = ['q', 'ftype', 'zero_phase']
params = [
[2, 10, 30],
['iir', 'fir'],
[True, False]
]
def setup(self, q, ftype, zero_phase):
np.random.seed(123456)
sample_rate = 10000.
t = np.arange(int(1e6), dtype=np.float64) / sample_rate
self.sig = np.sin(2*np.pi*500*t) + 0.3 * np.sin(2*np.pi*4e3*t)
def time_decimate(self, q, ftype, zero_phase):
decimate(self.sig, q, ftype=ftype, zero_phase=zero_phase)
class Lfilter(Benchmark):
param_names = ['n_samples', 'numtaps']
params = [
[1e3, 50e3, 1e6],
[9, 23, 51]
]
def setup(self, n_samples, numtaps):
np.random.seed(125678)
sample_rate = 25000.
t = np.arange(n_samples, dtype=np.float64) / sample_rate
nyq_rate = sample_rate / 2.
cutoff_hz = 3000.0
self.sig = np.sin(2*np.pi*500*t) + 0.3 * np.sin(2*np.pi*11e3*t)
self.coeff = firwin(numtaps, cutoff_hz/nyq_rate)
def time_lfilter(self, n_samples, numtaps):
lfilter(self.coeff, 1.0, self.sig)
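# --- Editor's usage sketch (not part of the benchmark suite; assumes SciPy is
# installed). A quick, self-contained illustration of the pattern being timed
# above: design a low-pass FIR with firwin() (cutoff given as a fraction of
# Nyquist) and run it over a two-tone signal with lfilter(); the 11 kHz tone
# should be strongly attenuated while the 500 Hz tone passes. Numbers mirror
# Lfilter.setup().
if __name__ == '__main__':
    sample_rate = 25000.
    t = np.arange(int(1e4), dtype=np.float64) / sample_rate
    sig = np.sin(2*np.pi*500*t) + 0.3 * np.sin(2*np.pi*11e3*t)
    coeff = firwin(51, 3000.0 / (sample_rate / 2.))  # 51-tap low-pass at 3 kHz
    out = lfilter(coeff, 1.0, sig)
    print("RMS in: %.3f  RMS out: %.3f" % (np.sqrt(np.mean(sig**2)),
                                           np.sqrt(np.mean(out**2))))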
|
vipul-sharma20/oh-mainline
|
refs/heads/master
|
vendor/packages/gdata/src/gdata/tlslite/utils/Python_RSAKey.py
|
239
|
"""Pure-Python RSA implementation."""
from cryptomath import *
import xmltools
from ASN1Parser import ASN1Parser
from RSAKey import *
class Python_RSAKey(RSAKey):
def __init__(self, n=0, e=0, d=0, p=0, q=0, dP=0, dQ=0, qInv=0):
if (n and not e) or (e and not n):
raise AssertionError()
self.n = n
self.e = e
self.d = d
self.p = p
self.q = q
self.dP = dP
self.dQ = dQ
self.qInv = qInv
self.blinder = 0
self.unblinder = 0
def hasPrivateKey(self):
return self.d != 0
def hash(self):
s = self.writeXMLPublicKey('\t\t')
return hashAndBase64(s.strip())
def _rawPrivateKeyOp(self, m):
#Create blinding values, on the first pass:
if not self.blinder:
self.unblinder = getRandomNumber(2, self.n)
self.blinder = powMod(invMod(self.unblinder, self.n), self.e,
self.n)
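        # Why blinding works: let r = unblinder; blinder = (r^-1)^e, so the
        # blinded input is m * r^-e (mod n). Raising it to d yields
        # m^d * r^-(e*d) = m^d * r^-1, and multiplying by r below recovers
        # m^d exactly, while the exponentiation visible to an attacker runs on
        # a value unrelated to m (a timing side-channel defence). Squaring
        # both factors afterwards keeps the pair consistent for the next call.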
#Blind the input
m = (m * self.blinder) % self.n
#Perform the RSA operation
c = self._rawPrivateKeyOpHelper(m)
#Unblind the output
c = (c * self.unblinder) % self.n
#Update blinding values
self.blinder = (self.blinder * self.blinder) % self.n
self.unblinder = (self.unblinder * self.unblinder) % self.n
#Return the output
return c
def _rawPrivateKeyOpHelper(self, m):
#Non-CRT version
#c = powMod(m, self.d, self.n)
#CRT version (~3x faster)
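        # Garner's recombination: s1 = m^d mod p and s2 = m^d mod q use the
        # half-size exponents dP = d mod (p-1) and dQ = d mod (q-1) (Fermat),
        # then are stitched together with qInv = q^-1 mod p:
        #   c = s2 + q * (((s1 - s2) * qInv) mod p)  ==  m^d mod n.
        # Two half-size modular exponentiations beat one full-size one ~3-4x.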
s1 = powMod(m, self.dP, self.p)
s2 = powMod(m, self.dQ, self.q)
h = ((s1 - s2) * self.qInv) % self.p
c = s2 + self.q * h
return c
def _rawPublicKeyOp(self, c):
m = powMod(c, self.e, self.n)
return m
def acceptsPassword(self): return False
def write(self, indent=''):
if self.d:
s = indent+'<privateKey xmlns="http://trevp.net/rsa">\n'
else:
s = indent+'<publicKey xmlns="http://trevp.net/rsa">\n'
s += indent+'\t<n>%s</n>\n' % numberToBase64(self.n)
s += indent+'\t<e>%s</e>\n' % numberToBase64(self.e)
if self.d:
s += indent+'\t<d>%s</d>\n' % numberToBase64(self.d)
s += indent+'\t<p>%s</p>\n' % numberToBase64(self.p)
s += indent+'\t<q>%s</q>\n' % numberToBase64(self.q)
s += indent+'\t<dP>%s</dP>\n' % numberToBase64(self.dP)
s += indent+'\t<dQ>%s</dQ>\n' % numberToBase64(self.dQ)
s += indent+'\t<qInv>%s</qInv>\n' % numberToBase64(self.qInv)
s += indent+'</privateKey>'
else:
s += indent+'</publicKey>'
#Only add \n if part of a larger structure
if indent != '':
s += '\n'
return s
def writeXMLPublicKey(self, indent=''):
return Python_RSAKey(self.n, self.e).write(indent)
def generate(bits):
key = Python_RSAKey()
p = getRandomPrime(bits/2, False)
q = getRandomPrime(bits/2, False)
t = lcm(p-1, q-1)
key.n = p * q
key.e = 3L #Needed to be long, for Java
key.d = invMod(key.e, t)
key.p = p
key.q = q
key.dP = key.d % (p-1)
key.dQ = key.d % (q-1)
key.qInv = invMod(q, p)
return key
generate = staticmethod(generate)
def parsePEM(s, passwordCallback=None):
"""Parse a string containing a <privateKey> or <publicKey>, or
PEM-encoded key."""
start = s.find("-----BEGIN PRIVATE KEY-----")
if start != -1:
end = s.find("-----END PRIVATE KEY-----")
if end == -1:
raise SyntaxError("Missing PEM Postfix")
s = s[start+len("-----BEGIN PRIVATE KEY -----") : end]
bytes = base64ToBytes(s)
return Python_RSAKey._parsePKCS8(bytes)
else:
start = s.find("-----BEGIN RSA PRIVATE KEY-----")
if start != -1:
end = s.find("-----END RSA PRIVATE KEY-----")
if end == -1:
raise SyntaxError("Missing PEM Postfix")
s = s[start+len("-----BEGIN RSA PRIVATE KEY -----") : end]
bytes = base64ToBytes(s)
return Python_RSAKey._parseSSLeay(bytes)
raise SyntaxError("Missing PEM Prefix")
parsePEM = staticmethod(parsePEM)
def parseXML(s):
element = xmltools.parseAndStripWhitespace(s)
return Python_RSAKey._parseXML(element)
parseXML = staticmethod(parseXML)
def _parsePKCS8(bytes):
p = ASN1Parser(bytes)
version = p.getChild(0).value[0]
if version != 0:
raise SyntaxError("Unrecognized PKCS8 version")
rsaOID = p.getChild(1).value
if list(rsaOID) != [6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0]:
raise SyntaxError("Unrecognized AlgorithmIdentifier")
#Get the privateKey
privateKeyP = p.getChild(2)
#Adjust for OCTET STRING encapsulation
privateKeyP = ASN1Parser(privateKeyP.value)
return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
_parsePKCS8 = staticmethod(_parsePKCS8)
def _parseSSLeay(bytes):
privateKeyP = ASN1Parser(bytes)
return Python_RSAKey._parseASN1PrivateKey(privateKeyP)
_parseSSLeay = staticmethod(_parseSSLeay)
def _parseASN1PrivateKey(privateKeyP):
version = privateKeyP.getChild(0).value[0]
if version != 0:
raise SyntaxError("Unrecognized RSAPrivateKey version")
n = bytesToNumber(privateKeyP.getChild(1).value)
e = bytesToNumber(privateKeyP.getChild(2).value)
d = bytesToNumber(privateKeyP.getChild(3).value)
p = bytesToNumber(privateKeyP.getChild(4).value)
q = bytesToNumber(privateKeyP.getChild(5).value)
dP = bytesToNumber(privateKeyP.getChild(6).value)
dQ = bytesToNumber(privateKeyP.getChild(7).value)
qInv = bytesToNumber(privateKeyP.getChild(8).value)
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
_parseASN1PrivateKey = staticmethod(_parseASN1PrivateKey)
def _parseXML(element):
try:
xmltools.checkName(element, "privateKey")
except SyntaxError:
xmltools.checkName(element, "publicKey")
#Parse attributes
xmltools.getReqAttribute(element, "xmlns", "http://trevp.net/rsa\Z")
xmltools.checkNoMoreAttributes(element)
#Parse public values (<n> and <e>)
n = base64ToNumber(xmltools.getText(xmltools.getChild(element, 0, "n"), xmltools.base64RegEx))
e = base64ToNumber(xmltools.getText(xmltools.getChild(element, 1, "e"), xmltools.base64RegEx))
d = 0
p = 0
q = 0
dP = 0
dQ = 0
qInv = 0
#Parse private values, if present
        if element.childNodes.length >= 3:
d = base64ToNumber(xmltools.getText(xmltools.getChild(element, 2, "d"), xmltools.base64RegEx))
p = base64ToNumber(xmltools.getText(xmltools.getChild(element, 3, "p"), xmltools.base64RegEx))
q = base64ToNumber(xmltools.getText(xmltools.getChild(element, 4, "q"), xmltools.base64RegEx))
dP = base64ToNumber(xmltools.getText(xmltools.getChild(element, 5, "dP"), xmltools.base64RegEx))
dQ = base64ToNumber(xmltools.getText(xmltools.getChild(element, 6, "dQ"), xmltools.base64RegEx))
qInv = base64ToNumber(xmltools.getText(xmltools.getLastChild(element, 7, "qInv"), xmltools.base64RegEx))
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
_parseXML = staticmethod(_parseXML)
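# --- Editor's worked example (standalone sketch, not part of tlslite) --------
# Tiny-parameter check that the CRT private-key path above agrees with the
# textbook m^d mod n. Uses the classic toy key p=61, q=53, e=17, d=2753
# (so e*d == 1 mod lcm(p-1, q-1)); real keys are of course far larger.
if __name__ == '__main__':
    p, q, e, d = 61, 53, 17, 2753
    n = p * q
    dP, dQ = d % (p - 1), d % (q - 1)
    qInv = invMod(q, p)                     # from cryptomath, imported above
    key = Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
    m = 65
    c = key._rawPublicKeyOp(m)              # m^e mod n
    assert key._rawPrivateKeyOpHelper(c) == pow(c, d, n) == m
    print("CRT decryption of %d -> %d (ok)" % (c, m))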
|
botswana-harvard/dmis-models
|
refs/heads/master
|
manage.py
|
1
|
#!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dmis_models.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
mffrench/fabric
|
refs/heads/v1.0.0-alpha2-elixir
|
test/feature/steps/endorser_impl.py
|
1
|
# Copyright IBM Corp. 2016 All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from behave import *
import json
@when(u'a user deploys chaincode at path {path} with {args} with name {name}')
def deploy_impl(context, path, args, name):
pass
@when(u'a user deploys chaincode at path {path} with {args}')
def step_impl(context, path, args):
deploy_impl(context, path, json.loads(args), "mycc")
@when(u'a user deploys chaincode')
def step_impl(context):
deploy_impl(context,
"github.com/hyperledger/fabric//chaincode_example02",
["init", "a", "100" , "b", "200"],
"mycc")
@when(u'a user queries on the chaincode named {name}')
def query_impl(context, name):
pass
@when(u'a user queries on the chaincode')
def step_impl(context):
query_impl(context, "mycc")
@when(u'a user invokes {count} times on the chaincode named {name} with args {args}')
def invokes_impl(context, count, name, args):
pass
@when(u'a user invokes on the chaincode named {name} with args {args}')
def step_impl(context, name, args):
invokes_impl(context, 1, name, json.loads(args))
@when(u'a user invokes {count} times on the chaincode')
def step_impl(context, count):
invokes_impl(context, count, "mycc", ["txId1", "invoke", "a", 5])
@when(u'a user invokes on the chaincode named {name}')
def step_impl(context, name):
invokes_impl(context, 1, name, ["txId1", "invoke", "a", 5])
@when(u'a user invokes on the chaincode')
def step_impl(context):
invokes_impl(context, 1, "mycc", ["txId1", "invoke", "a", 5])
@then(u'the chaincode is deployed')
def step_impl(context):
pass
@then(u'a user receives expected response')
def step_impl(context):
pass
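# --- Editor's note (illustrative, not part of the test suite) ----------------
# behave's default "parse" matcher binds the {placeholders} in the @when/@then
# strings above to step arguments as *strings*; that is why args arrive as a
# JSON string and are decoded with json.loads(), and why count would need an
# int() conversion once invokes_impl() is actually implemented. The repeated
# step_impl names are also fine: behave registers each function through its
# decorator before the module-level name is rebound. A minimal demonstration
# with the 'parse' package behave uses under the hood:
#
#   import parse
#   r = parse.parse('a user invokes {count} times on the chaincode named {name}',
#                   'a user invokes 3 times on the chaincode named mycc')
#   assert r['count'] == '3' and r['name'] == 'mycc'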
|
caramucho/ns3-ndnsim
|
refs/heads/mybranch
|
src/propagation/bindings/modulegen__gcc_ILP32.py
|
24
|
from pybindgen import Module, FileCodeSink, param, retval, cppclass, typehandlers
import pybindgen.settings
import warnings
class ErrorHandler(pybindgen.settings.ErrorHandler):
def handle_error(self, wrapper, exception, traceback_):
warnings.warn("exception %r in wrapper %s" % (exception, wrapper))
return True
pybindgen.settings.error_handler = ErrorHandler()
import sys
def module_init():
root_module = Module('ns.propagation', cpp_namespace='::ns3')
return root_module
def register_types(module):
root_module = module.get_root()
## propagation-environment.h (module 'propagation'): ns3::CitySize [enumeration]
module.add_enum('CitySize', ['SmallCity', 'MediumCity', 'LargeCity'])
## propagation-environment.h (module 'propagation'): ns3::EnvironmentType [enumeration]
module.add_enum('EnvironmentType', ['UrbanEnvironment', 'SubUrbanEnvironment', 'OpenAreasEnvironment'])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList [class]
module.add_class('AttributeConstructionList', import_from_module='ns.core')
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item [struct]
module.add_class('Item', import_from_module='ns.core', outer_class=root_module['ns3::AttributeConstructionList'])
## callback.h (module 'core'): ns3::CallbackBase [class]
module.add_class('CallbackBase', import_from_module='ns.core')
## hash.h (module 'core'): ns3::Hasher [class]
module.add_class('Hasher', import_from_module='ns.core')
## object-base.h (module 'core'): ns3::ObjectBase [class]
module.add_class('ObjectBase', allow_subclassing=True, import_from_module='ns.core')
## object.h (module 'core'): ns3::ObjectDeleter [struct]
module.add_class('ObjectDeleter', import_from_module='ns.core')
## propagation-cache.h (module 'propagation'): ns3::PropagationCache<ns3::JakesProcess> [class]
module.add_class('PropagationCache', template_parameters=['ns3::JakesProcess'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Object', 'ns3::ObjectBase', 'ns3::ObjectDeleter'], parent=root_module['ns3::ObjectBase'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## nstime.h (module 'core'): ns3::TimeWithUnit [class]
module.add_class('TimeWithUnit', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId [class]
module.add_class('TypeId', import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeFlag [enumeration]
module.add_enum('AttributeFlag', ['ATTR_GET', 'ATTR_SET', 'ATTR_CONSTRUCT', 'ATTR_SGC'], outer_class=root_module['ns3::TypeId'], import_from_module='ns.core')
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation [struct]
module.add_class('AttributeInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation [struct]
module.add_class('TraceSourceInformation', import_from_module='ns.core', outer_class=root_module['ns3::TypeId'])
## vector.h (module 'core'): ns3::Vector2D [class]
module.add_class('Vector2D', import_from_module='ns.core')
## vector.h (module 'core'): ns3::Vector3D [class]
module.add_class('Vector3D', import_from_module='ns.core')
## empty.h (module 'core'): ns3::empty [class]
module.add_class('empty', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t [class]
module.add_class('int64x64_t', import_from_module='ns.core')
## int64x64-double.h (module 'core'): ns3::int64x64_t::impl_type [enumeration]
module.add_enum('impl_type', ['int128_impl', 'cairo_impl', 'ld_impl'], outer_class=root_module['ns3::int64x64_t'], import_from_module='ns.core')
## object.h (module 'core'): ns3::Object [class]
module.add_class('Object', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
## object.h (module 'core'): ns3::Object::AggregateIterator [class]
module.add_class('AggregateIterator', import_from_module='ns.core', outer_class=root_module['ns3::Object'])
## propagation-delay-model.h (module 'propagation'): ns3::PropagationDelayModel [class]
module.add_class('PropagationDelayModel', parent=root_module['ns3::Object'])
## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel [class]
module.add_class('PropagationLossModel', parent=root_module['ns3::Object'])
## propagation-delay-model.h (module 'propagation'): ns3::RandomPropagationDelayModel [class]
module.add_class('RandomPropagationDelayModel', parent=root_module['ns3::PropagationDelayModel'])
## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel [class]
module.add_class('RandomPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream [class]
module.add_class('RandomVariableStream', import_from_module='ns.core', parent=root_module['ns3::Object'])
## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel [class]
module.add_class('RangePropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable [class]
module.add_class('SequentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeChecker', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeChecker>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::AttributeValue', 'ns3::empty', 'ns3::DefaultDeleter<ns3::AttributeValue>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::CallbackImplBase', 'ns3::empty', 'ns3::DefaultDeleter<ns3::CallbackImplBase>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::Hash::Implementation', 'ns3::empty', 'ns3::DefaultDeleter<ns3::Hash::Implementation>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > [class]
module.add_class('SimpleRefCount', automatic_type_narrowing=True, import_from_module='ns.core', template_parameters=['ns3::TraceSourceAccessor', 'ns3::empty', 'ns3::DefaultDeleter<ns3::TraceSourceAccessor>'], parent=root_module['ns3::empty'], memory_policy=cppclass.ReferenceCountingMethodsPolicy(incref_method='Ref', decref_method='Unref', peekref_method='GetReferenceCount'))
## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel [class]
module.add_class('ThreeLogDistancePropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## nstime.h (module 'core'): ns3::Time [class]
module.add_class('Time', import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time::Unit [enumeration]
module.add_enum('Unit', ['Y', 'D', 'H', 'MIN', 'S', 'MS', 'US', 'NS', 'PS', 'FS', 'LAST'], outer_class=root_module['ns3::Time'], import_from_module='ns.core')
## nstime.h (module 'core'): ns3::Time [class]
root_module['ns3::Time'].implicitly_converts_to(root_module['ns3::int64x64_t'])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor [class]
module.add_class('TraceSourceAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable [class]
module.add_class('TriangularRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel [class]
module.add_class('TwoRayGroundPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable [class]
module.add_class('UniformRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable [class]
module.add_class('WeibullRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable [class]
module.add_class('ZetaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable [class]
module.add_class('ZipfRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::AttributeAccessor [class]
module.add_class('AttributeAccessor', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
## attribute.h (module 'core'): ns3::AttributeChecker [class]
module.add_class('AttributeChecker', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
## attribute.h (module 'core'): ns3::AttributeValue [class]
module.add_class('AttributeValue', allow_subclassing=False, automatic_type_narrowing=True, import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
## callback.h (module 'core'): ns3::CallbackChecker [class]
module.add_class('CallbackChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## callback.h (module 'core'): ns3::CallbackImplBase [class]
module.add_class('CallbackImplBase', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
## callback.h (module 'core'): ns3::CallbackValue [class]
module.add_class('CallbackValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable [class]
module.add_class('ConstantRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## propagation-delay-model.h (module 'propagation'): ns3::ConstantSpeedPropagationDelayModel [class]
module.add_class('ConstantSpeedPropagationDelayModel', parent=root_module['ns3::PropagationDelayModel'])
## cost231-propagation-loss-model.h (module 'propagation'): ns3::Cost231PropagationLossModel [class]
module.add_class('Cost231PropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## cost231-propagation-loss-model.h (module 'propagation'): ns3::Cost231PropagationLossModel::Environment [enumeration]
module.add_enum('Environment', ['SubUrban', 'MediumCity', 'Metropolitan'], outer_class=root_module['ns3::Cost231PropagationLossModel'])
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable [class]
module.add_class('DeterministicRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable [class]
module.add_class('EmpiricalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## attribute.h (module 'core'): ns3::EmptyAttributeValue [class]
module.add_class('EmptyAttributeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable [class]
module.add_class('ErlangRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable [class]
module.add_class('ExponentialRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel [class]
module.add_class('FixedRssLossModel', parent=root_module['ns3::PropagationLossModel'])
## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel [class]
module.add_class('FriisPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable [class]
module.add_class('GammaRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): ns3::ItuR1411LosPropagationLossModel [class]
module.add_class('ItuR1411LosPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): ns3::ItuR1411NlosOverRooftopPropagationLossModel [class]
module.add_class('ItuR1411NlosOverRooftopPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## jakes-process.h (module 'propagation'): ns3::JakesProcess [class]
module.add_class('JakesProcess', parent=root_module['ns3::Object'])
## jakes-propagation-loss-model.h (module 'propagation'): ns3::JakesPropagationLossModel [class]
module.add_class('JakesPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): ns3::Kun2600MhzPropagationLossModel [class]
module.add_class('Kun2600MhzPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel [class]
module.add_class('LogDistancePropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable [class]
module.add_class('LogNormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel [class]
module.add_class('MatrixPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## mobility-model.h (module 'mobility'): ns3::MobilityModel [class]
module.add_class('MobilityModel', import_from_module='ns.mobility', parent=root_module['ns3::Object'])
## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel [class]
module.add_class('NakagamiPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable [class]
module.add_class('NormalRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## okumura-hata-propagation-loss-model.h (module 'propagation'): ns3::OkumuraHataPropagationLossModel [class]
module.add_class('OkumuraHataPropagationLossModel', parent=root_module['ns3::PropagationLossModel'])
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable [class]
module.add_class('ParetoRandomVariable', import_from_module='ns.core', parent=root_module['ns3::RandomVariableStream'])
## nstime.h (module 'core'): ns3::TimeValue [class]
module.add_class('TimeValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## type-id.h (module 'core'): ns3::TypeIdChecker [class]
module.add_class('TypeIdChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## type-id.h (module 'core'): ns3::TypeIdValue [class]
module.add_class('TypeIdValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector2DChecker [class]
module.add_class('Vector2DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector2DValue [class]
module.add_class('Vector2DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
## vector.h (module 'core'): ns3::Vector3DChecker [class]
module.add_class('Vector3DChecker', import_from_module='ns.core', parent=root_module['ns3::AttributeChecker'])
## vector.h (module 'core'): ns3::Vector3DValue [class]
module.add_class('Vector3DValue', import_from_module='ns.core', parent=root_module['ns3::AttributeValue'])
typehandlers.add_type_alias(u'ns3::Vector3D', u'ns3::Vector')
typehandlers.add_type_alias(u'ns3::Vector3D*', u'ns3::Vector*')
typehandlers.add_type_alias(u'ns3::Vector3D&', u'ns3::Vector&')
module.add_typedef(root_module['ns3::Vector3D'], 'Vector')
typehandlers.add_type_alias(u'ns3::Vector3DValue', u'ns3::VectorValue')
typehandlers.add_type_alias(u'ns3::Vector3DValue*', u'ns3::VectorValue*')
typehandlers.add_type_alias(u'ns3::Vector3DValue&', u'ns3::VectorValue&')
module.add_typedef(root_module['ns3::Vector3DValue'], 'VectorValue')
typehandlers.add_type_alias(u'ns3::Vector3DChecker', u'ns3::VectorChecker')
typehandlers.add_type_alias(u'ns3::Vector3DChecker*', u'ns3::VectorChecker*')
typehandlers.add_type_alias(u'ns3::Vector3DChecker&', u'ns3::VectorChecker&')
module.add_typedef(root_module['ns3::Vector3DChecker'], 'VectorChecker')
## Register a nested module for the namespace FatalImpl
nested_module = module.add_cpp_namespace('FatalImpl')
register_types_ns3_FatalImpl(nested_module)
## Register a nested module for the namespace Hash
nested_module = module.add_cpp_namespace('Hash')
register_types_ns3_Hash(nested_module)
def register_types_ns3_FatalImpl(module):
root_module = module.get_root()
def register_types_ns3_Hash(module):
root_module = module.get_root()
## hash-function.h (module 'core'): ns3::Hash::Implementation [class]
module.add_class('Implementation', import_from_module='ns.core', parent=root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash32Function_ptr')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash32Function_ptr*')
typehandlers.add_type_alias(u'uint32_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash32Function_ptr&')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *', u'ns3::Hash::Hash64Function_ptr')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) **', u'ns3::Hash::Hash64Function_ptr*')
typehandlers.add_type_alias(u'uint64_t ( * ) ( char const *, size_t ) *&', u'ns3::Hash::Hash64Function_ptr&')
## Register a nested module for the namespace Function
nested_module = module.add_cpp_namespace('Function')
register_types_ns3_Hash_Function(nested_module)
def register_types_ns3_Hash_Function(module):
root_module = module.get_root()
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a [class]
module.add_class('Fnv1a', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32 [class]
module.add_class('Hash32', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64 [class]
module.add_class('Hash64', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3 [class]
module.add_class('Murmur3', import_from_module='ns.core', parent=root_module['ns3::Hash::Implementation'])
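# --- Editor's illustrative driver (hypothetical; real ns-3 modulegen files
# end with an equivalent main()). It shows how the pieces above fit together:
# build the root Module, register types and methods, then have pybindgen emit
# the C++ binding code.
def _example_generate_to_stdout():
    out = FileCodeSink(sys.stdout)          # imported at the top of this file
    root_module = module_init()
    register_types(root_module)
    register_methods(root_module)           # defined just below
    root_module.generate(out)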
def register_methods(root_module):
register_Ns3AttributeConstructionList_methods(root_module, root_module['ns3::AttributeConstructionList'])
register_Ns3AttributeConstructionListItem_methods(root_module, root_module['ns3::AttributeConstructionList::Item'])
register_Ns3CallbackBase_methods(root_module, root_module['ns3::CallbackBase'])
register_Ns3Hasher_methods(root_module, root_module['ns3::Hasher'])
register_Ns3ObjectBase_methods(root_module, root_module['ns3::ObjectBase'])
register_Ns3ObjectDeleter_methods(root_module, root_module['ns3::ObjectDeleter'])
register_Ns3PropagationCache__Ns3JakesProcess_methods(root_module, root_module['ns3::PropagationCache< ns3::JakesProcess >'])
register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, root_module['ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter >'])
register_Ns3TimeWithUnit_methods(root_module, root_module['ns3::TimeWithUnit'])
register_Ns3TypeId_methods(root_module, root_module['ns3::TypeId'])
register_Ns3TypeIdAttributeInformation_methods(root_module, root_module['ns3::TypeId::AttributeInformation'])
register_Ns3TypeIdTraceSourceInformation_methods(root_module, root_module['ns3::TypeId::TraceSourceInformation'])
register_Ns3Vector2D_methods(root_module, root_module['ns3::Vector2D'])
register_Ns3Vector3D_methods(root_module, root_module['ns3::Vector3D'])
register_Ns3Empty_methods(root_module, root_module['ns3::empty'])
register_Ns3Int64x64_t_methods(root_module, root_module['ns3::int64x64_t'])
register_Ns3Object_methods(root_module, root_module['ns3::Object'])
register_Ns3ObjectAggregateIterator_methods(root_module, root_module['ns3::Object::AggregateIterator'])
register_Ns3PropagationDelayModel_methods(root_module, root_module['ns3::PropagationDelayModel'])
register_Ns3PropagationLossModel_methods(root_module, root_module['ns3::PropagationLossModel'])
register_Ns3RandomPropagationDelayModel_methods(root_module, root_module['ns3::RandomPropagationDelayModel'])
register_Ns3RandomPropagationLossModel_methods(root_module, root_module['ns3::RandomPropagationLossModel'])
register_Ns3RandomVariableStream_methods(root_module, root_module['ns3::RandomVariableStream'])
register_Ns3RangePropagationLossModel_methods(root_module, root_module['ns3::RangePropagationLossModel'])
register_Ns3SequentialRandomVariable_methods(root_module, root_module['ns3::SequentialRandomVariable'])
register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >'])
register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >'])
register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >'])
register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >'])
register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >'])
register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, root_module['ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >'])
register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, root_module['ns3::ThreeLogDistancePropagationLossModel'])
register_Ns3Time_methods(root_module, root_module['ns3::Time'])
register_Ns3TraceSourceAccessor_methods(root_module, root_module['ns3::TraceSourceAccessor'])
register_Ns3TriangularRandomVariable_methods(root_module, root_module['ns3::TriangularRandomVariable'])
register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, root_module['ns3::TwoRayGroundPropagationLossModel'])
register_Ns3UniformRandomVariable_methods(root_module, root_module['ns3::UniformRandomVariable'])
register_Ns3WeibullRandomVariable_methods(root_module, root_module['ns3::WeibullRandomVariable'])
register_Ns3ZetaRandomVariable_methods(root_module, root_module['ns3::ZetaRandomVariable'])
register_Ns3ZipfRandomVariable_methods(root_module, root_module['ns3::ZipfRandomVariable'])
register_Ns3AttributeAccessor_methods(root_module, root_module['ns3::AttributeAccessor'])
register_Ns3AttributeChecker_methods(root_module, root_module['ns3::AttributeChecker'])
register_Ns3AttributeValue_methods(root_module, root_module['ns3::AttributeValue'])
register_Ns3CallbackChecker_methods(root_module, root_module['ns3::CallbackChecker'])
register_Ns3CallbackImplBase_methods(root_module, root_module['ns3::CallbackImplBase'])
register_Ns3CallbackValue_methods(root_module, root_module['ns3::CallbackValue'])
register_Ns3ConstantRandomVariable_methods(root_module, root_module['ns3::ConstantRandomVariable'])
register_Ns3ConstantSpeedPropagationDelayModel_methods(root_module, root_module['ns3::ConstantSpeedPropagationDelayModel'])
register_Ns3Cost231PropagationLossModel_methods(root_module, root_module['ns3::Cost231PropagationLossModel'])
register_Ns3DeterministicRandomVariable_methods(root_module, root_module['ns3::DeterministicRandomVariable'])
register_Ns3EmpiricalRandomVariable_methods(root_module, root_module['ns3::EmpiricalRandomVariable'])
register_Ns3EmptyAttributeValue_methods(root_module, root_module['ns3::EmptyAttributeValue'])
register_Ns3ErlangRandomVariable_methods(root_module, root_module['ns3::ErlangRandomVariable'])
register_Ns3ExponentialRandomVariable_methods(root_module, root_module['ns3::ExponentialRandomVariable'])
register_Ns3FixedRssLossModel_methods(root_module, root_module['ns3::FixedRssLossModel'])
register_Ns3FriisPropagationLossModel_methods(root_module, root_module['ns3::FriisPropagationLossModel'])
register_Ns3GammaRandomVariable_methods(root_module, root_module['ns3::GammaRandomVariable'])
register_Ns3ItuR1411LosPropagationLossModel_methods(root_module, root_module['ns3::ItuR1411LosPropagationLossModel'])
register_Ns3ItuR1411NlosOverRooftopPropagationLossModel_methods(root_module, root_module['ns3::ItuR1411NlosOverRooftopPropagationLossModel'])
register_Ns3JakesProcess_methods(root_module, root_module['ns3::JakesProcess'])
register_Ns3JakesPropagationLossModel_methods(root_module, root_module['ns3::JakesPropagationLossModel'])
register_Ns3Kun2600MhzPropagationLossModel_methods(root_module, root_module['ns3::Kun2600MhzPropagationLossModel'])
register_Ns3LogDistancePropagationLossModel_methods(root_module, root_module['ns3::LogDistancePropagationLossModel'])
register_Ns3LogNormalRandomVariable_methods(root_module, root_module['ns3::LogNormalRandomVariable'])
register_Ns3MatrixPropagationLossModel_methods(root_module, root_module['ns3::MatrixPropagationLossModel'])
register_Ns3MobilityModel_methods(root_module, root_module['ns3::MobilityModel'])
register_Ns3NakagamiPropagationLossModel_methods(root_module, root_module['ns3::NakagamiPropagationLossModel'])
register_Ns3NormalRandomVariable_methods(root_module, root_module['ns3::NormalRandomVariable'])
register_Ns3OkumuraHataPropagationLossModel_methods(root_module, root_module['ns3::OkumuraHataPropagationLossModel'])
register_Ns3ParetoRandomVariable_methods(root_module, root_module['ns3::ParetoRandomVariable'])
register_Ns3TimeValue_methods(root_module, root_module['ns3::TimeValue'])
register_Ns3TypeIdChecker_methods(root_module, root_module['ns3::TypeIdChecker'])
register_Ns3TypeIdValue_methods(root_module, root_module['ns3::TypeIdValue'])
register_Ns3Vector2DChecker_methods(root_module, root_module['ns3::Vector2DChecker'])
register_Ns3Vector2DValue_methods(root_module, root_module['ns3::Vector2DValue'])
register_Ns3Vector3DChecker_methods(root_module, root_module['ns3::Vector3DChecker'])
register_Ns3Vector3DValue_methods(root_module, root_module['ns3::Vector3DValue'])
register_Ns3HashImplementation_methods(root_module, root_module['ns3::Hash::Implementation'])
register_Ns3HashFunctionFnv1a_methods(root_module, root_module['ns3::Hash::Function::Fnv1a'])
register_Ns3HashFunctionHash32_methods(root_module, root_module['ns3::Hash::Function::Hash32'])
register_Ns3HashFunctionHash64_methods(root_module, root_module['ns3::Hash::Function::Hash64'])
register_Ns3HashFunctionMurmur3_methods(root_module, root_module['ns3::Hash::Function::Murmur3'])
return
def register_Ns3AttributeConstructionList_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList(ns3::AttributeConstructionList const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::AttributeConstructionList() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): void ns3::AttributeConstructionList::Add(std::string name, ns3::Ptr<ns3::AttributeChecker const> checker, ns3::Ptr<ns3::AttributeValue> value) [member function]
cls.add_method('Add',
'void',
[param('std::string', 'name'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker'), param('ns3::Ptr< ns3::AttributeValue >', 'value')])
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::Begin() const [member function]
cls.add_method('Begin',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): std::_List_const_iterator<ns3::AttributeConstructionList::Item> ns3::AttributeConstructionList::End() const [member function]
cls.add_method('End',
'std::_List_const_iterator< ns3::AttributeConstructionList::Item >',
[],
is_const=True)
## attribute-construction-list.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeConstructionList::Find(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('Find',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True)
return
def register_Ns3AttributeConstructionListItem_methods(root_module, cls):
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item() [constructor]
cls.add_constructor([])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::Item(ns3::AttributeConstructionList::Item const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeConstructionList::Item const &', 'arg0')])
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## attribute-construction-list.h (module 'core'): ns3::AttributeConstructionList::Item::value [variable]
cls.add_instance_attribute('value', 'ns3::Ptr< ns3::AttributeValue >', is_const=False)
return
def register_Ns3CallbackBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::CallbackBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::Ptr<ns3::CallbackImplBase> ns3::CallbackBase::GetImpl() const [member function]
cls.add_method('GetImpl',
'ns3::Ptr< ns3::CallbackImplBase >',
[],
is_const=True)
## callback.h (module 'core'): ns3::CallbackBase::CallbackBase(ns3::Ptr<ns3::CallbackImplBase> impl) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::CallbackImplBase >', 'impl')],
visibility='protected')
## callback.h (module 'core'): static std::string ns3::CallbackBase::Demangle(std::string const & mangled) [member function]
cls.add_method('Demangle',
'std::string',
[param('std::string const &', 'mangled')],
is_static=True, visibility='protected')
return
def register_Ns3Hasher_methods(root_module, cls):
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Hasher const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hasher const &', 'arg0')])
## hash.h (module 'core'): ns3::Hasher::Hasher() [constructor]
cls.add_constructor([])
## hash.h (module 'core'): ns3::Hasher::Hasher(ns3::Ptr<ns3::Hash::Implementation> hp) [constructor]
cls.add_constructor([param('ns3::Ptr< ns3::Hash::Implementation >', 'hp')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint32_t ns3::Hasher::GetHash32(std::string const s) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('std::string const', 's')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')])
## hash.h (module 'core'): uint64_t ns3::Hasher::GetHash64(std::string const s) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('std::string const', 's')])
## hash.h (module 'core'): ns3::Hasher & ns3::Hasher::clear() [member function]
cls.add_method('clear',
'ns3::Hasher &',
[])
return
def register_Ns3ObjectBase_methods(root_module, cls):
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase() [constructor]
cls.add_constructor([])
## object-base.h (module 'core'): ns3::ObjectBase::ObjectBase(ns3::ObjectBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectBase const &', 'arg0')])
## object-base.h (module 'core'): void ns3::ObjectBase::GetAttribute(std::string name, ns3::AttributeValue & value) const [member function]
cls.add_method('GetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'value')],
is_const=True)
## object-base.h (module 'core'): bool ns3::ObjectBase::GetAttributeFailSafe(std::string name, ns3::AttributeValue & attribute) const [member function]
cls.add_method('GetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue &', 'attribute')],
is_const=True)
## object-base.h (module 'core'): ns3::TypeId ns3::ObjectBase::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## object-base.h (module 'core'): static ns3::TypeId ns3::ObjectBase::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object-base.h (module 'core'): void ns3::ObjectBase::SetAttribute(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttribute',
'void',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::SetAttributeFailSafe(std::string name, ns3::AttributeValue const & value) [member function]
cls.add_method('SetAttributeFailSafe',
'bool',
[param('std::string', 'name'), param('ns3::AttributeValue const &', 'value')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceConnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceConnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnect(std::string name, std::string context, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnect',
'bool',
[param('std::string', 'name'), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): bool ns3::ObjectBase::TraceDisconnectWithoutContext(std::string name, ns3::CallbackBase const & cb) [member function]
cls.add_method('TraceDisconnectWithoutContext',
'bool',
[param('std::string', 'name'), param('ns3::CallbackBase const &', 'cb')])
## object-base.h (module 'core'): void ns3::ObjectBase::ConstructSelf(ns3::AttributeConstructionList const & attributes) [member function]
cls.add_method('ConstructSelf',
'void',
[param('ns3::AttributeConstructionList const &', 'attributes')],
visibility='protected')
## object-base.h (module 'core'): void ns3::ObjectBase::NotifyConstructionCompleted() [member function]
cls.add_method('NotifyConstructionCompleted',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectDeleter_methods(root_module, cls):
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter() [constructor]
cls.add_constructor([])
## object.h (module 'core'): ns3::ObjectDeleter::ObjectDeleter(ns3::ObjectDeleter const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ObjectDeleter const &', 'arg0')])
## object.h (module 'core'): static void ns3::ObjectDeleter::Delete(ns3::Object * object) [member function]
cls.add_method('Delete',
'void',
[param('ns3::Object *', 'object')],
is_static=True)
return
def register_Ns3PropagationCache__Ns3JakesProcess_methods(root_module, cls):
## propagation-cache.h (module 'propagation'): ns3::PropagationCache<ns3::JakesProcess>::PropagationCache(ns3::PropagationCache<ns3::JakesProcess> const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PropagationCache< ns3::JakesProcess > const &', 'arg0')])
## propagation-cache.h (module 'propagation'): ns3::PropagationCache<ns3::JakesProcess>::PropagationCache() [constructor]
cls.add_constructor([])
## propagation-cache.h (module 'propagation'): void ns3::PropagationCache<ns3::JakesProcess>::AddPathData(ns3::Ptr<ns3::JakesProcess> data, ns3::Ptr<ns3::MobilityModel const> a, ns3::Ptr<ns3::MobilityModel const> b, uint32_t modelUid) [member function]
cls.add_method('AddPathData',
'void',
[param('ns3::Ptr< ns3::JakesProcess >', 'data'), param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b'), param('uint32_t', 'modelUid')])
## propagation-cache.h (module 'propagation'): ns3::Ptr<ns3::JakesProcess> ns3::PropagationCache<ns3::JakesProcess>::GetPathData(ns3::Ptr<ns3::MobilityModel const> a, ns3::Ptr<ns3::MobilityModel const> b, uint32_t modelUid) [member function]
cls.add_method('GetPathData',
'ns3::Ptr< ns3::JakesProcess >',
[param('ns3::Ptr< ns3::MobilityModel const >', 'a'), param('ns3::Ptr< ns3::MobilityModel const >', 'b'), param('uint32_t', 'modelUid')])
return
def register_Ns3SimpleRefCount__Ns3Object_Ns3ObjectBase_Ns3ObjectDeleter_methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::SimpleRefCount(ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter> const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Object, ns3::ObjectBase, ns3::ObjectDeleter>::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3TimeWithUnit_methods(root_module, cls):
cls.add_output_stream_operator()
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::TimeWithUnit const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeWithUnit const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeWithUnit::TimeWithUnit(ns3::Time const time, ns3::Time::Unit const unit) [constructor]
cls.add_constructor([param('ns3::Time const', 'time'), param('ns3::Time::Unit const', 'unit')])
return
def register_Ns3TypeId_methods(root_module, cls):
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('!=')
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('==')
## type-id.h (module 'core'): ns3::TypeId::TypeId(char const * name) [constructor]
cls.add_constructor([param('char const *', 'name')])
## type-id.h (module 'core'): ns3::TypeId::TypeId() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TypeId(ns3::TypeId const & o) [copy constructor]
cls.add_constructor([param('ns3::TypeId const &', 'o')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddAttribute(std::string name, std::string help, uint32_t flags, ns3::AttributeValue const & initialValue, ns3::Ptr<ns3::AttributeAccessor const> accessor, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('AddAttribute',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('uint32_t', 'flags'), param('ns3::AttributeValue const &', 'initialValue'), param('ns3::Ptr< ns3::AttributeAccessor const >', 'accessor'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::AddTraceSource(std::string name, std::string help, ns3::Ptr<ns3::TraceSourceAccessor const> accessor) [member function]
cls.add_method('AddTraceSource',
'ns3::TypeId',
[param('std::string', 'name'), param('std::string', 'help'), param('ns3::Ptr< ns3::TraceSourceAccessor const >', 'accessor')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation ns3::TypeId::GetAttribute(uint32_t i) const [member function]
cls.add_method('GetAttribute',
'ns3::TypeId::AttributeInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetAttributeFullName(uint32_t i) const [member function]
cls.add_method('GetAttributeFullName',
'std::string',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetAttributeN() const [member function]
cls.add_method('GetAttributeN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): ns3::Callback<ns3::ObjectBase*,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty,ns3::empty> ns3::TypeId::GetConstructor() const [member function]
cls.add_method('GetConstructor',
'ns3::Callback< ns3::ObjectBase *, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty, ns3::empty >',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetGroupName() const [member function]
cls.add_method('GetGroupName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetHash() const [member function]
cls.add_method('GetHash',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeId::GetName() const [member function]
cls.add_method('GetName',
'std::string',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::GetParent() const [member function]
cls.add_method('GetParent',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::GetRegistered(uint32_t i) [member function]
cls.add_method('GetRegistered',
'ns3::TypeId',
[param('uint32_t', 'i')],
is_static=True)
## type-id.h (module 'core'): static uint32_t ns3::TypeId::GetRegisteredN() [member function]
cls.add_method('GetRegisteredN',
'uint32_t',
[],
is_static=True)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation ns3::TypeId::GetTraceSource(uint32_t i) const [member function]
cls.add_method('GetTraceSource',
'ns3::TypeId::TraceSourceInformation',
[param('uint32_t', 'i')],
is_const=True)
## type-id.h (module 'core'): uint32_t ns3::TypeId::GetTraceSourceN() const [member function]
cls.add_method('GetTraceSourceN',
'uint32_t',
[],
is_const=True)
## type-id.h (module 'core'): uint16_t ns3::TypeId::GetUid() const [member function]
cls.add_method('GetUid',
'uint16_t',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasConstructor() const [member function]
cls.add_method('HasConstructor',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::HasParent() const [member function]
cls.add_method('HasParent',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::HideFromDocumentation() [member function]
cls.add_method('HideFromDocumentation',
'ns3::TypeId',
[])
## type-id.h (module 'core'): bool ns3::TypeId::IsChildOf(ns3::TypeId other) const [member function]
cls.add_method('IsChildOf',
'bool',
[param('ns3::TypeId', 'other')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::LookupAttributeByName(std::string name, ns3::TypeId::AttributeInformation * info) const [member function]
cls.add_method('LookupAttributeByName',
'bool',
[param('std::string', 'name'), param('ns3::TypeId::AttributeInformation *', 'info', transfer_ownership=False)],
is_const=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByHash(uint32_t hash) [member function]
cls.add_method('LookupByHash',
'ns3::TypeId',
[param('uint32_t', 'hash')],
is_static=True)
## type-id.h (module 'core'): static bool ns3::TypeId::LookupByHashFailSafe(uint32_t hash, ns3::TypeId * tid) [member function]
cls.add_method('LookupByHashFailSafe',
'bool',
[param('uint32_t', 'hash'), param('ns3::TypeId *', 'tid')],
is_static=True)
## type-id.h (module 'core'): static ns3::TypeId ns3::TypeId::LookupByName(std::string name) [member function]
cls.add_method('LookupByName',
'ns3::TypeId',
[param('std::string', 'name')],
is_static=True)
## type-id.h (module 'core'): ns3::Ptr<ns3::TraceSourceAccessor const> ns3::TypeId::LookupTraceSourceByName(std::string name) const [member function]
cls.add_method('LookupTraceSourceByName',
'ns3::Ptr< ns3::TraceSourceAccessor const >',
[param('std::string', 'name')],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::MustHideFromDocumentation() const [member function]
cls.add_method('MustHideFromDocumentation',
'bool',
[],
is_const=True)
## type-id.h (module 'core'): bool ns3::TypeId::SetAttributeInitialValue(uint32_t i, ns3::Ptr<ns3::AttributeValue const> initialValue) [member function]
cls.add_method('SetAttributeInitialValue',
'bool',
[param('uint32_t', 'i'), param('ns3::Ptr< ns3::AttributeValue const >', 'initialValue')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetGroupName(std::string groupName) [member function]
cls.add_method('SetGroupName',
'ns3::TypeId',
[param('std::string', 'groupName')])
## type-id.h (module 'core'): ns3::TypeId ns3::TypeId::SetParent(ns3::TypeId tid) [member function]
cls.add_method('SetParent',
'ns3::TypeId',
[param('ns3::TypeId', 'tid')])
## type-id.h (module 'core'): void ns3::TypeId::SetUid(uint16_t tid) [member function]
cls.add_method('SetUid',
'void',
[param('uint16_t', 'tid')])
return
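# NOTE (illustrative): once these bindings are built, the wrapped TypeId API
# mirrors the C++ one.  A minimal sketch, assuming the standard `ns.core`
# Python module layout of the ns-3 bindings:
#
#   import ns.core
#   tid = ns.core.TypeId.LookupByName('ns3::Object')
#   print tid.GetName(), tid.GetAttributeN()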
def register_Ns3TypeIdAttributeInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::AttributeInformation(ns3::TypeId::AttributeInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::AttributeInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::AttributeAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::checker [variable]
cls.add_instance_attribute('checker', 'ns3::Ptr< ns3::AttributeChecker const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::flags [variable]
cls.add_instance_attribute('flags', 'uint32_t', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::initialValue [variable]
cls.add_instance_attribute('initialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::AttributeInformation::originalInitialValue [variable]
cls.add_instance_attribute('originalInitialValue', 'ns3::Ptr< ns3::AttributeValue const >', is_const=False)
return
def register_Ns3TypeIdTraceSourceInformation_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::TraceSourceInformation(ns3::TypeId::TraceSourceInformation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeId::TraceSourceInformation const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::accessor [variable]
cls.add_instance_attribute('accessor', 'ns3::Ptr< ns3::TraceSourceAccessor const >', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::help [variable]
cls.add_instance_attribute('help', 'std::string', is_const=False)
## type-id.h (module 'core'): ns3::TypeId::TraceSourceInformation::name [variable]
cls.add_instance_attribute('name', 'std::string', is_const=False)
return
def register_Ns3Vector2D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector2D::Vector2D(ns3::Vector2D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D(double _x, double _y) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y')])
## vector.h (module 'core'): ns3::Vector2D::Vector2D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector2D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
return
def register_Ns3Vector3D_methods(root_module, cls):
cls.add_output_stream_operator()
## vector.h (module 'core'): ns3::Vector3D::Vector3D(ns3::Vector3D const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D(double _x, double _y, double _z) [constructor]
cls.add_constructor([param('double', '_x'), param('double', '_y'), param('double', '_z')])
## vector.h (module 'core'): ns3::Vector3D::Vector3D() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3D::x [variable]
cls.add_instance_attribute('x', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::y [variable]
cls.add_instance_attribute('y', 'double', is_const=False)
## vector.h (module 'core'): ns3::Vector3D::z [variable]
cls.add_instance_attribute('z', 'double', is_const=False)
return
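# NOTE (illustrative): Vector2D/Vector3D are plain value types; the x/y/z
# fields registered above are exposed as writable instance attributes.  A
# minimal sketch, assuming the standard `ns.core` module name:
#
#   import ns.core
#   v = ns.core.Vector3D(1.0, 2.0, 3.0)
#   v.z = 0.0
#   print v.x, v.y, v.z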
def register_Ns3Empty_methods(root_module, cls):
## empty.h (module 'core'): ns3::empty::empty() [constructor]
cls.add_constructor([])
## empty.h (module 'core'): ns3::empty::empty(ns3::empty const & arg0) [copy constructor]
cls.add_constructor([param('ns3::empty const &', 'arg0')])
return
def register_Ns3Int64x64_t_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_unary_numeric_operator('-')
cls.add_binary_numeric_operator('/', root_module['ns3::int64x64_t'], root_module['ns3::int64x64_t'], param('ns3::int64x64_t const &', u'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('*=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('+=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::int64x64_t const &', u'right'))
cls.add_inplace_numeric_operator('/=', param('ns3::int64x64_t const &', u'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t() [constructor]
cls.add_constructor([])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long double v) [constructor]
cls.add_constructor([param('long double', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(int64_t hi, uint64_t lo) [constructor]
cls.add_constructor([param('int64_t', 'hi'), param('uint64_t', 'lo')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::int64x64_t(ns3::int64x64_t const & o) [copy constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): double ns3::int64x64_t::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## int64x64-double.h (module 'core'): int64_t ns3::int64x64_t::GetHigh() const [member function]
cls.add_method('GetHigh',
'int64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): uint64_t ns3::int64x64_t::GetLow() const [member function]
cls.add_method('GetLow',
'uint64_t',
[],
is_const=True)
## int64x64-double.h (module 'core'): static ns3::int64x64_t ns3::int64x64_t::Invert(uint64_t v) [member function]
cls.add_method('Invert',
'ns3::int64x64_t',
[param('uint64_t', 'v')],
is_static=True)
## int64x64-double.h (module 'core'): void ns3::int64x64_t::MulByInvert(ns3::int64x64_t const & o) [member function]
cls.add_method('MulByInvert',
'void',
[param('ns3::int64x64_t const &', 'o')])
## int64x64-double.h (module 'core'): ns3::int64x64_t::implementation [variable]
cls.add_static_attribute('implementation', 'ns3::int64x64_t::impl_type const', is_const=True)
return
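# NOTE (illustrative): int64x64_t is the fixed-point type underlying ns3::Time;
# the operators registered above make it usable arithmetically from Python.
# A minimal sketch (exposure under the name `ns.core.int64x64_t` is assumed):
#
#   import ns.core
#   q = ns.core.int64x64_t(1.5)
#   print (q + ns.core.int64x64_t(0.5)).GetDouble()   # -> 2.0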
def register_Ns3Object_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::Object() [constructor]
cls.add_constructor([])
## object.h (module 'core'): void ns3::Object::AggregateObject(ns3::Ptr<ns3::Object> other) [member function]
cls.add_method('AggregateObject',
'void',
[param('ns3::Ptr< ns3::Object >', 'other')])
## object.h (module 'core'): void ns3::Object::Dispose() [member function]
cls.add_method('Dispose',
'void',
[])
## object.h (module 'core'): ns3::Object::AggregateIterator ns3::Object::GetAggregateIterator() const [member function]
cls.add_method('GetAggregateIterator',
'ns3::Object::AggregateIterator',
[],
is_const=True)
## object.h (module 'core'): ns3::TypeId ns3::Object::GetInstanceTypeId() const [member function]
cls.add_method('GetInstanceTypeId',
'ns3::TypeId',
[],
is_const=True, is_virtual=True)
## object.h (module 'core'): static ns3::TypeId ns3::Object::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## object.h (module 'core'): void ns3::Object::Initialize() [member function]
cls.add_method('Initialize',
'void',
[])
## object.h (module 'core'): ns3::Object::Object(ns3::Object const & o) [copy constructor]
cls.add_constructor([param('ns3::Object const &', 'o')],
visibility='protected')
## object.h (module 'core'): void ns3::Object::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::DoInitialize() [member function]
cls.add_method('DoInitialize',
'void',
[],
visibility='protected', is_virtual=True)
## object.h (module 'core'): void ns3::Object::NotifyNewAggregate() [member function]
cls.add_method('NotifyNewAggregate',
'void',
[],
visibility='protected', is_virtual=True)
return
def register_Ns3ObjectAggregateIterator_methods(root_module, cls):
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator(ns3::Object::AggregateIterator const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Object::AggregateIterator const &', 'arg0')])
## object.h (module 'core'): ns3::Object::AggregateIterator::AggregateIterator() [constructor]
cls.add_constructor([])
## object.h (module 'core'): bool ns3::Object::AggregateIterator::HasNext() const [member function]
cls.add_method('HasNext',
'bool',
[],
is_const=True)
## object.h (module 'core'): ns3::Ptr<ns3::Object const> ns3::Object::AggregateIterator::Next() [member function]
cls.add_method('Next',
'ns3::Ptr< ns3::Object const >',
[])
return
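# NOTE (illustrative): GetAggregateIterator/HasNext/Next give Python code a
# way to walk an object's aggregate.  A minimal sketch, assuming
# `ns.core.Object` is directly instantiable as registered above:
#
#   import ns.core
#   obj = ns.core.Object()
#   it = obj.GetAggregateIterator()
#   while it.HasNext():
#       print it.Next().GetInstanceTypeId().GetName()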
def register_Ns3PropagationDelayModel_methods(root_module, cls):
## propagation-delay-model.h (module 'propagation'): ns3::PropagationDelayModel::PropagationDelayModel() [constructor]
cls.add_constructor([])
## propagation-delay-model.h (module 'propagation'): ns3::PropagationDelayModel::PropagationDelayModel(ns3::PropagationDelayModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::PropagationDelayModel const &', 'arg0')])
## propagation-delay-model.h (module 'propagation'): int64_t ns3::PropagationDelayModel::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')])
## propagation-delay-model.h (module 'propagation'): ns3::Time ns3::PropagationDelayModel::GetDelay(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetDelay',
'ns3::Time',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## propagation-delay-model.h (module 'propagation'): static ns3::TypeId ns3::PropagationDelayModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-delay-model.h (module 'propagation'): int64_t ns3::PropagationDelayModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_pure_virtual=True, visibility='private', is_virtual=True)
return
def register_Ns3PropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::PropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::PropagationLossModel::PropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::PropagationLossModel::SetNext(ns3::Ptr<ns3::PropagationLossModel> next) [member function]
cls.add_method('SetNext',
'void',
[param('ns3::Ptr< ns3::PropagationLossModel >', 'next')])
## propagation-loss-model.h (module 'propagation'): ns3::Ptr<ns3::PropagationLossModel> ns3::PropagationLossModel::GetNext() [member function]
cls.add_method('GetNext',
'ns3::Ptr< ns3::PropagationLossModel >',
[])
## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::CalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('CalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True)
## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')])
## propagation-loss-model.h (module 'propagation'): double ns3::PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## propagation-loss-model.h (module 'propagation'): int64_t ns3::PropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
is_pure_virtual=True, visibility='private', is_virtual=True)
return
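# NOTE (illustrative): PropagationLossModel itself is abstract (DoCalcRxPower
# is pure virtual); concrete models registered below can be chained with
# SetNext and evaluated with CalcRxPower.  A minimal sketch, assuming the
# standard `ns.propagation` / `ns.mobility` module names:
#
#   import ns.propagation, ns.mobility
#   loss = ns.propagation.RangePropagationLossModel()
#   loss.SetNext(ns.propagation.RandomPropagationLossModel())
#   a = ns.mobility.ConstantPositionMobilityModel()
#   b = ns.mobility.ConstantPositionMobilityModel()
#   print loss.CalcRxPower(20.0, a, b)   # tx power in dBm -> rx power in dBm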
def register_Ns3RandomPropagationDelayModel_methods(root_module, cls):
## propagation-delay-model.h (module 'propagation'): ns3::RandomPropagationDelayModel::RandomPropagationDelayModel(ns3::RandomPropagationDelayModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::RandomPropagationDelayModel const &', 'arg0')])
## propagation-delay-model.h (module 'propagation'): ns3::RandomPropagationDelayModel::RandomPropagationDelayModel() [constructor]
cls.add_constructor([])
## propagation-delay-model.h (module 'propagation'): ns3::Time ns3::RandomPropagationDelayModel::GetDelay(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetDelay',
'ns3::Time',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True)
## propagation-delay-model.h (module 'propagation'): static ns3::TypeId ns3::RandomPropagationDelayModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-delay-model.h (module 'propagation'): int64_t ns3::RandomPropagationDelayModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
def register_Ns3RandomPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::RandomPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::RandomPropagationLossModel::RandomPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): double ns3::RandomPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
## propagation-loss-model.h (module 'propagation'): int64_t ns3::RandomPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
def register_Ns3RandomVariableStream_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::RandomVariableStream::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::RandomVariableStream::RandomVariableStream() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetStream(int64_t stream) [member function]
cls.add_method('SetStream',
'void',
[param('int64_t', 'stream')])
## random-variable-stream.h (module 'core'): int64_t ns3::RandomVariableStream::GetStream() const [member function]
cls.add_method('GetStream',
'int64_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): void ns3::RandomVariableStream::SetAntithetic(bool isAntithetic) [member function]
cls.add_method('SetAntithetic',
'void',
[param('bool', 'isAntithetic')])
## random-variable-stream.h (module 'core'): bool ns3::RandomVariableStream::IsAntithetic() const [member function]
cls.add_method('IsAntithetic',
'bool',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::RandomVariableStream::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_pure_virtual=True, is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::RandomVariableStream::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_pure_virtual=True, is_virtual=True)
## random-variable-stream.h (module 'core'): ns3::RngStream * ns3::RandomVariableStream::Peek() const [member function]
cls.add_method('Peek',
'ns3::RngStream *',
[],
is_const=True, visibility='protected')
return
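# NOTE (illustrative): RandomVariableStream is the abstract base of the
# concrete variables below; SetStream pins a deterministic RNG stream, which
# is useful for reproducible runs.  A minimal sketch using the
# UniformRandomVariable registered later in this file:
#
#   import ns.core
#   rng = ns.core.UniformRandomVariable()
#   rng.SetStream(7)        # fixed stream => reproducible sequence
#   print rng.GetValue()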
def register_Ns3RangePropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::RangePropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::RangePropagationLossModel::RangePropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): double ns3::RangePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
## propagation-loss-model.h (module 'propagation'): int64_t ns3::RangePropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
def register_Ns3SequentialRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::SequentialRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::SequentialRandomVariable::SequentialRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): ns3::Ptr<ns3::RandomVariableStream> ns3::SequentialRandomVariable::GetIncrement() const [member function]
cls.add_method('GetIncrement',
'ns3::Ptr< ns3::RandomVariableStream >',
[],
is_const=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetConsecutive() const [member function]
cls.add_method('GetConsecutive',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::SequentialRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::SequentialRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter< ns3::AttributeAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeAccessor, ns3::empty, ns3::DefaultDeleter<ns3::AttributeAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeChecker_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeChecker__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter< ns3::AttributeChecker > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeChecker, ns3::empty, ns3::DefaultDeleter<ns3::AttributeChecker> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3AttributeValue_Ns3Empty_Ns3DefaultDeleter__lt__ns3AttributeValue__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::SimpleRefCount(ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter< ns3::AttributeValue > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::AttributeValue, ns3::empty, ns3::DefaultDeleter<ns3::AttributeValue> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3CallbackImplBase_Ns3Empty_Ns3DefaultDeleter__lt__ns3CallbackImplBase__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::SimpleRefCount(ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter< ns3::CallbackImplBase > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::CallbackImplBase, ns3::empty, ns3::DefaultDeleter<ns3::CallbackImplBase> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3HashImplementation_Ns3Empty_Ns3DefaultDeleter__lt__ns3HashImplementation__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::SimpleRefCount(ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter< ns3::Hash::Implementation > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::Hash::Implementation, ns3::empty, ns3::DefaultDeleter<ns3::Hash::Implementation> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3SimpleRefCount__Ns3TraceSourceAccessor_Ns3Empty_Ns3DefaultDeleter__lt__ns3TraceSourceAccessor__gt___methods(root_module, cls):
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount() [constructor]
cls.add_constructor([])
## simple-ref-count.h (module 'core'): ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::SimpleRefCount(ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> > const & o) [copy constructor]
cls.add_constructor([param('ns3::SimpleRefCount< ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter< ns3::TraceSourceAccessor > > const &', 'o')])
## simple-ref-count.h (module 'core'): static void ns3::SimpleRefCount<ns3::TraceSourceAccessor, ns3::empty, ns3::DefaultDeleter<ns3::TraceSourceAccessor> >::Cleanup() [member function]
cls.add_method('Cleanup',
'void',
[],
is_static=True)
return
def register_Ns3ThreeLogDistancePropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ThreeLogDistancePropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::ThreeLogDistancePropagationLossModel::ThreeLogDistancePropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): double ns3::ThreeLogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
## propagation-loss-model.h (module 'propagation'): int64_t ns3::ThreeLogDistancePropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
def register_Ns3Time_methods(root_module, cls):
cls.add_binary_numeric_operator('*', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_binary_numeric_operator('+', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('-', root_module['ns3::Time'], root_module['ns3::Time'], param('ns3::Time const &', u'right'))
cls.add_binary_numeric_operator('/', root_module['ns3::Time'], root_module['ns3::Time'], param('int64_t const &', u'right'))
cls.add_binary_comparison_operator('<')
cls.add_binary_comparison_operator('>')
cls.add_binary_comparison_operator('!=')
cls.add_inplace_numeric_operator('+=', param('ns3::Time const &', u'right'))
cls.add_inplace_numeric_operator('-=', param('ns3::Time const &', u'right'))
cls.add_output_stream_operator()
cls.add_binary_comparison_operator('<=')
cls.add_binary_comparison_operator('==')
cls.add_binary_comparison_operator('>=')
## nstime.h (module 'core'): ns3::Time::Time() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::Time::Time(ns3::Time const & o) [copy constructor]
cls.add_constructor([param('ns3::Time const &', 'o')])
## nstime.h (module 'core'): ns3::Time::Time(double v) [constructor]
cls.add_constructor([param('double', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(int v) [constructor]
cls.add_constructor([param('int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long int v) [constructor]
cls.add_constructor([param('long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long int v) [constructor]
cls.add_constructor([param('long long int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(unsigned int v) [constructor]
cls.add_constructor([param('unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long unsigned int v) [constructor]
cls.add_constructor([param('long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(long long unsigned int v) [constructor]
cls.add_constructor([param('long long unsigned int', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(ns3::int64x64_t const & v) [constructor]
cls.add_constructor([param('ns3::int64x64_t const &', 'v')])
## nstime.h (module 'core'): ns3::Time::Time(std::string const & s) [constructor]
cls.add_constructor([param('std::string const &', 's')])
## nstime.h (module 'core'): ns3::TimeWithUnit ns3::Time::As(ns3::Time::Unit const unit) const [member function]
cls.add_method('As',
'ns3::TimeWithUnit',
[param('ns3::Time::Unit const', 'unit')],
is_const=True)
## nstime.h (module 'core'): int ns3::Time::Compare(ns3::Time const & o) const [member function]
cls.add_method('Compare',
'int',
[param('ns3::Time const &', 'o')],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::From(ns3::int64x64_t const & value, ns3::Time::Unit unit) [member function]
cls.add_method('From',
'ns3::Time',
[param('ns3::int64x64_t const &', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromDouble(double value, ns3::Time::Unit unit) [member function]
cls.add_method('FromDouble',
'ns3::Time',
[param('double', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::FromInteger(uint64_t value, ns3::Time::Unit unit) [member function]
cls.add_method('FromInteger',
'ns3::Time',
[param('uint64_t', 'value'), param('ns3::Time::Unit', 'unit')],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetDays() const [member function]
cls.add_method('GetDays',
'double',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetDouble() const [member function]
cls.add_method('GetDouble',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetFemtoSeconds() const [member function]
cls.add_method('GetFemtoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetHours() const [member function]
cls.add_method('GetHours',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetInteger() const [member function]
cls.add_method('GetInteger',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMicroSeconds() const [member function]
cls.add_method('GetMicroSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetMilliSeconds() const [member function]
cls.add_method('GetMilliSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetMinutes() const [member function]
cls.add_method('GetMinutes',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetNanoSeconds() const [member function]
cls.add_method('GetNanoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetPicoSeconds() const [member function]
cls.add_method('GetPicoSeconds',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time::Unit ns3::Time::GetResolution() [member function]
cls.add_method('GetResolution',
'ns3::Time::Unit',
[],
is_static=True)
## nstime.h (module 'core'): double ns3::Time::GetSeconds() const [member function]
cls.add_method('GetSeconds',
'double',
[],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::GetTimeStep() const [member function]
cls.add_method('GetTimeStep',
'int64_t',
[],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::GetYears() const [member function]
cls.add_method('GetYears',
'double',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsNegative() const [member function]
cls.add_method('IsNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsPositive() const [member function]
cls.add_method('IsPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyNegative() const [member function]
cls.add_method('IsStrictlyNegative',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsStrictlyPositive() const [member function]
cls.add_method('IsStrictlyPositive',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): bool ns3::Time::IsZero() const [member function]
cls.add_method('IsZero',
'bool',
[],
is_const=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Max() [member function]
cls.add_method('Max',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static ns3::Time ns3::Time::Min() [member function]
cls.add_method('Min',
'ns3::Time',
[],
is_static=True)
## nstime.h (module 'core'): static void ns3::Time::SetResolution(ns3::Time::Unit resolution) [member function]
cls.add_method('SetResolution',
'void',
[param('ns3::Time::Unit', 'resolution')],
is_static=True)
## nstime.h (module 'core'): static bool ns3::Time::StaticInit() [member function]
cls.add_method('StaticInit',
'bool',
[],
is_static=True)
## nstime.h (module 'core'): ns3::int64x64_t ns3::Time::To(ns3::Time::Unit unit) const [member function]
cls.add_method('To',
'ns3::int64x64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): double ns3::Time::ToDouble(ns3::Time::Unit unit) const [member function]
cls.add_method('ToDouble',
'double',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
## nstime.h (module 'core'): int64_t ns3::Time::ToInteger(ns3::Time::Unit unit) const [member function]
cls.add_method('ToInteger',
'int64_t',
[param('ns3::Time::Unit', 'unit')],
is_const=True)
return
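# NOTE (illustrative): the string constructor and conversion helpers
# registered above make Time convenient from Python.  A minimal sketch
# (exposure of the unit enum as ns.core.Time.MS is an assumption):
#
#   import ns.core
#   t = ns.core.Time('1500ms')
#   print t.GetSeconds()         # -> 1.5
#   print t.GetMilliSeconds()    # -> 1500
#   print t.As(ns.core.Time.MS)  # TimeWithUnit pretty-printing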
def register_Ns3TraceSourceAccessor_methods(root_module, cls):
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor(ns3::TraceSourceAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TraceSourceAccessor const &', 'arg0')])
## trace-source-accessor.h (module 'core'): ns3::TraceSourceAccessor::TraceSourceAccessor() [constructor]
cls.add_constructor([])
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Connect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Connect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::ConnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('ConnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::Disconnect(ns3::ObjectBase * obj, std::string context, ns3::CallbackBase const & cb) const [member function]
cls.add_method('Disconnect',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('std::string', 'context'), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## trace-source-accessor.h (module 'core'): bool ns3::TraceSourceAccessor::DisconnectWithoutContext(ns3::ObjectBase * obj, ns3::CallbackBase const & cb) const [member function]
cls.add_method('DisconnectWithoutContext',
'bool',
[param('ns3::ObjectBase *', 'obj', transfer_ownership=False), param('ns3::CallbackBase const &', 'cb')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3TriangularRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::TriangularRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::TriangularRandomVariable::TriangularRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue(double mean, double min, double max) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'min'), param('double', 'max')])
## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger(uint32_t mean, uint32_t min, uint32_t max) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'min'), param('uint32_t', 'max')])
## random-variable-stream.h (module 'core'): double ns3::TriangularRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::TriangularRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3TwoRayGroundPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::TwoRayGroundPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::TwoRayGroundPropagationLossModel::TwoRayGroundPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetFrequency(double frequency) [member function]
cls.add_method('SetFrequency',
'void',
[param('double', 'frequency')])
## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetSystemLoss(double systemLoss) [member function]
cls.add_method('SetSystemLoss',
'void',
[param('double', 'systemLoss')])
## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetMinDistance(double minDistance) [member function]
cls.add_method('SetMinDistance',
'void',
[param('double', 'minDistance')])
## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetMinDistance() const [member function]
cls.add_method('GetMinDistance',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetFrequency() const [member function]
cls.add_method('GetFrequency',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::GetSystemLoss() const [member function]
cls.add_method('GetSystemLoss',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): void ns3::TwoRayGroundPropagationLossModel::SetHeightAboveZ(double heightAboveZ) [member function]
cls.add_method('SetHeightAboveZ',
'void',
[param('double', 'heightAboveZ')])
## propagation-loss-model.h (module 'propagation'): double ns3::TwoRayGroundPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
## propagation-loss-model.h (module 'propagation'): int64_t ns3::TwoRayGroundPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
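# NOTE (illustrative): the setters registered above configure the two-ray
# ground model's physical parameters.  A minimal sketch, assuming the
# standard `ns.propagation` module name:
#
#   import ns.propagation
#   tworay = ns.propagation.TwoRayGroundPropagationLossModel()
#   tworay.SetFrequency(5.15e9)     # carrier frequency in Hz
#   tworay.SetHeightAboveZ(1.5)     # antenna height above ground, metres
#   tworay.SetMinDistance(0.5)      # metres; below this, tx power is returned unchanged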
def register_Ns3UniformRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::UniformRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::UniformRandomVariable::UniformRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMin() const [member function]
cls.add_method('GetMin',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetMax() const [member function]
cls.add_method('GetMax',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue(double min, double max) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'min'), param('double', 'max')])
## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger(uint32_t min, uint32_t max) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'min'), param('uint32_t', 'max')])
## random-variable-stream.h (module 'core'): double ns3::UniformRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::UniformRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
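# NOTE (illustrative): the concrete random variables expose both the
# attribute-configured GetValue()/GetInteger() overrides and bounded
# convenience overloads.  A minimal sketch:
#
#   import ns.core
#   rng = ns.core.UniformRandomVariable()
#   print rng.GetValue(0.0, 10.0)   # draw from [0, 10)
#   print rng.GetInteger(1, 6)      # draw from {1, ..., 6}, bounds inclusive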
def register_Ns3WeibullRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::WeibullRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::WeibullRandomVariable::WeibullRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetScale() const [member function]
cls.add_method('GetScale',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetShape() const [member function]
cls.add_method('GetShape',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue(double scale, double shape, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'scale'), param('double', 'shape'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger(uint32_t scale, uint32_t shape, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'scale'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::WeibullRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::WeibullRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ZetaRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZetaRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ZetaRandomVariable::ZetaRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue(double alpha) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger(uint32_t alpha) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'alpha')])
## random-variable-stream.h (module 'core'): double ns3::ZetaRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ZetaRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ZipfRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ZipfRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ZipfRandomVariable::ZipfRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetN() const [member function]
cls.add_method('GetN',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue(uint32_t n, double alpha) [member function]
cls.add_method('GetValue',
'double',
[param('uint32_t', 'n'), param('double', 'alpha')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger(uint32_t n, uint32_t alpha) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'n'), param('uint32_t', 'alpha')])
## random-variable-stream.h (module 'core'): double ns3::ZipfRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ZipfRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3AttributeAccessor_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor(ns3::AttributeAccessor const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeAccessor const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeAccessor::AttributeAccessor() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Get(ns3::ObjectBase const * object, ns3::AttributeValue & attribute) const [member function]
cls.add_method('Get',
'bool',
[param('ns3::ObjectBase const *', 'object'), param('ns3::AttributeValue &', 'attribute')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasGetter() const [member function]
cls.add_method('HasGetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::HasSetter() const [member function]
cls.add_method('HasSetter',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeAccessor::Set(ns3::ObjectBase * object, ns3::AttributeValue const & value) const [member function]
cls.add_method('Set',
'bool',
[param('ns3::ObjectBase *', 'object', transfer_ownership=False), param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeChecker_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker(ns3::AttributeChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeChecker const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeChecker::AttributeChecker() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): bool ns3::AttributeChecker::Check(ns3::AttributeValue const & value) const [member function]
cls.add_method('Check',
'bool',
[param('ns3::AttributeValue const &', 'value')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::Copy(ns3::AttributeValue const & source, ns3::AttributeValue & destination) const [member function]
cls.add_method('Copy',
'bool',
[param('ns3::AttributeValue const &', 'source'), param('ns3::AttributeValue &', 'destination')],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::Create() const [member function]
cls.add_method('Create',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeChecker::CreateValidValue(ns3::AttributeValue const & value) const [member function]
cls.add_method('CreateValidValue',
'ns3::Ptr< ns3::AttributeValue >',
[param('ns3::AttributeValue const &', 'value')],
is_const=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetUnderlyingTypeInformation() const [member function]
cls.add_method('GetUnderlyingTypeInformation',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeChecker::GetValueTypeName() const [member function]
cls.add_method('GetValueTypeName',
'std::string',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeChecker::HasUnderlyingTypeInformation() const [member function]
cls.add_method('HasUnderlyingTypeInformation',
'bool',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3AttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue(ns3::AttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::AttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::AttributeValue::AttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::AttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_pure_virtual=True, is_const=True, is_virtual=True)
## attribute.h (module 'core'): bool ns3::AttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_virtual=True)
## attribute.h (module 'core'): std::string ns3::AttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
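## --- Editor's note (illustrative) ---
## AttributeValue is the abstract base of the concrete *Value classes
## registered below (CallbackValue, TimeValue, TypeIdValue, Vector2DValue,
## Vector3DValue, ...). The SerializeToString/DeserializeFromString pair
## defines a textual round trip mediated by the matching checker:
##
##   text = value.SerializeToString(checker)           # e.g. '1s' or '3.5'
##   ok = value.DeserializeFromString(text, checker)   # True on success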
def register_Ns3CallbackChecker_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackChecker::CallbackChecker(ns3::CallbackChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackChecker const &', 'arg0')])
return
def register_Ns3CallbackImplBase_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackImplBase::CallbackImplBase(ns3::CallbackImplBase const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackImplBase const &', 'arg0')])
## callback.h (module 'core'): bool ns3::CallbackImplBase::IsEqual(ns3::Ptr<ns3::CallbackImplBase const> other) const [member function]
cls.add_method('IsEqual',
'bool',
[param('ns3::Ptr< ns3::CallbackImplBase const >', 'other')],
is_pure_virtual=True, is_const=True, is_virtual=True)
return
def register_Ns3CallbackValue_methods(root_module, cls):
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::CallbackValue const &', 'arg0')])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue() [constructor]
cls.add_constructor([])
## callback.h (module 'core'): ns3::CallbackValue::CallbackValue(ns3::CallbackBase const & base) [constructor]
cls.add_constructor([param('ns3::CallbackBase const &', 'base')])
## callback.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::CallbackValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## callback.h (module 'core'): bool ns3::CallbackValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## callback.h (module 'core'): std::string ns3::CallbackValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## callback.h (module 'core'): void ns3::CallbackValue::Set(ns3::CallbackBase base) [member function]
cls.add_method('Set',
'void',
[param('ns3::CallbackBase', 'base')])
return
def register_Ns3ConstantRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ConstantRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ConstantRandomVariable::ConstantRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetConstant() const [member function]
cls.add_method('GetConstant',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue(double constant) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'constant')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger(uint32_t constant) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'constant')])
## random-variable-stream.h (module 'core'): double ns3::ConstantRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ConstantRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ConstantSpeedPropagationDelayModel_methods(root_module, cls):
## propagation-delay-model.h (module 'propagation'): ns3::ConstantSpeedPropagationDelayModel::ConstantSpeedPropagationDelayModel(ns3::ConstantSpeedPropagationDelayModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::ConstantSpeedPropagationDelayModel const &', 'arg0')])
## propagation-delay-model.h (module 'propagation'): ns3::ConstantSpeedPropagationDelayModel::ConstantSpeedPropagationDelayModel() [constructor]
cls.add_constructor([])
## propagation-delay-model.h (module 'propagation'): ns3::Time ns3::ConstantSpeedPropagationDelayModel::GetDelay(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetDelay',
'ns3::Time',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, is_virtual=True)
## propagation-delay-model.h (module 'propagation'): double ns3::ConstantSpeedPropagationDelayModel::GetSpeed() const [member function]
cls.add_method('GetSpeed',
'double',
[],
is_const=True)
## propagation-delay-model.h (module 'propagation'): static ns3::TypeId ns3::ConstantSpeedPropagationDelayModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-delay-model.h (module 'propagation'): void ns3::ConstantSpeedPropagationDelayModel::SetSpeed(double speed) [member function]
cls.add_method('SetSpeed',
'void',
[param('double', 'speed')])
## propagation-delay-model.h (module 'propagation'): int64_t ns3::ConstantSpeedPropagationDelayModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
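## --- Editor's note (illustrative) ---
## ConstantSpeedPropagationDelayModel returns delay = distance(a, b) / speed;
## the default Speed attribute is the vacuum speed of light (299792458 m/s).
## Sketch, assuming the extensions import as ns.propagation / ns.mobility
## and mob_a/mob_b are existing MobilityModel instances:
##
##   delay_model = ns.propagation.ConstantSpeedPropagationDelayModel()
##   delay_model.SetSpeed(299792458.0)        # m/s
##   t = delay_model.GetDelay(mob_a, mob_b)   # ns3::Time, grows with distance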
def register_Ns3Cost231PropagationLossModel_methods(root_module, cls):
## cost231-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::Cost231PropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## cost231-propagation-loss-model.h (module 'propagation'): ns3::Cost231PropagationLossModel::Cost231PropagationLossModel() [constructor]
cls.add_constructor([])
## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetLoss',
'double',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True)
## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetBSAntennaHeight(double height) [member function]
cls.add_method('SetBSAntennaHeight',
'void',
[param('double', 'height')])
## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetSSAntennaHeight(double height) [member function]
cls.add_method('SetSSAntennaHeight',
'void',
[param('double', 'height')])
## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetEnvironment(ns3::Cost231PropagationLossModel::Environment env) [member function]
cls.add_method('SetEnvironment',
'void',
[param('ns3::Cost231PropagationLossModel::Environment', 'env')])
## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetLambda(double lambda) [member function]
cls.add_method('SetLambda',
'void',
[param('double', 'lambda')])
## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetMinDistance(double minDistance) [member function]
cls.add_method('SetMinDistance',
'void',
[param('double', 'minDistance')])
## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetBSAntennaHeight() const [member function]
cls.add_method('GetBSAntennaHeight',
'double',
[],
is_const=True)
## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetSSAntennaHeight() const [member function]
cls.add_method('GetSSAntennaHeight',
'double',
[],
is_const=True)
## cost231-propagation-loss-model.h (module 'propagation'): ns3::Cost231PropagationLossModel::Environment ns3::Cost231PropagationLossModel::GetEnvironment() const [member function]
cls.add_method('GetEnvironment',
'ns3::Cost231PropagationLossModel::Environment',
[],
is_const=True)
## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetMinDistance() const [member function]
cls.add_method('GetMinDistance',
'double',
[],
is_const=True)
## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetLambda() const [member function]
cls.add_method('GetLambda',
'double',
[],
is_const=True)
## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetLambda(double frequency, double speed) [member function]
cls.add_method('SetLambda',
'void',
[param('double', 'frequency'), param('double', 'speed')])
## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::GetShadowing() [member function]
cls.add_method('GetShadowing',
'double',
[])
## cost231-propagation-loss-model.h (module 'propagation'): void ns3::Cost231PropagationLossModel::SetShadowing(double shadowing) [member function]
cls.add_method('SetShadowing',
'void',
[param('double', 'shadowing')])
## cost231-propagation-loss-model.h (module 'propagation'): double ns3::Cost231PropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
## cost231-propagation-loss-model.h (module 'propagation'): int64_t ns3::Cost231PropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
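## --- Editor's note (illustrative) ---
## Note the two SetLambda overloads registered above: one takes the
## wavelength directly, the other derives it as lambda = speed / frequency.
## The two calls below are therefore (approximately) equivalent:
##
##   m = ns.propagation.Cost231PropagationLossModel()
##   m.SetLambda(0.15)                  # wavelength in metres (~2 GHz)
##   m.SetLambda(2.0e9, 299792458.0)    # frequency in Hz, speed in m/s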
def register_Ns3DeterministicRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::DeterministicRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::DeterministicRandomVariable::DeterministicRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::DeterministicRandomVariable::SetValueArray(double * values, uint64_t length) [member function]
cls.add_method('SetValueArray',
'void',
[param('double *', 'values'), param('uint64_t', 'length')])
## random-variable-stream.h (module 'core'): double ns3::DeterministicRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::DeterministicRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
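## --- Editor's note (illustrative) ---
## DeterministicRandomVariable replays a caller-supplied sequence:
## SetValueArray installs `length` doubles and successive GetValue calls
## return them in order, wrapping at the end. How the C++ `double *`
## parameter is exposed to Python depends on pybindgen's pointer handling,
## so treat the call below as a hypothetical sketch of the C++ contract:
##
##   d = ns.core.DeterministicRandomVariable()
##   d.SetValueArray([1.0, 2.0, 3.0], 3)
##   d.GetValue()   # 1.0, then 2.0, 3.0, 1.0, ...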
def register_Ns3EmpiricalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): ns3::EmpiricalRandomVariable::EmpiricalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::CDF(double v, double c) [member function]
cls.add_method('CDF',
'void',
[param('double', 'v'), param('double', 'c')])
## random-variable-stream.h (module 'core'): uint32_t ns3::EmpiricalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::EmpiricalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): double ns3::EmpiricalRandomVariable::Interpolate(double arg0, double arg1, double arg2, double arg3, double arg4) [member function]
cls.add_method('Interpolate',
'double',
[param('double', 'arg0'), param('double', 'arg1'), param('double', 'arg2'), param('double', 'arg3'), param('double', 'arg4')],
visibility='private', is_virtual=True)
## random-variable-stream.h (module 'core'): void ns3::EmpiricalRandomVariable::Validate() [member function]
cls.add_method('Validate',
'void',
[],
visibility='private', is_virtual=True)
return
def register_Ns3EmptyAttributeValue_methods(root_module, cls):
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue(ns3::EmptyAttributeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::EmptyAttributeValue const &', 'arg0')])
## attribute.h (module 'core'): ns3::EmptyAttributeValue::EmptyAttributeValue() [constructor]
cls.add_constructor([])
## attribute.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::EmptyAttributeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, visibility='private', is_virtual=True)
## attribute.h (module 'core'): bool ns3::EmptyAttributeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
visibility='private', is_virtual=True)
## attribute.h (module 'core'): std::string ns3::EmptyAttributeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, visibility='private', is_virtual=True)
return
def register_Ns3ErlangRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ErlangRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ErlangRandomVariable::ErlangRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetK() const [member function]
cls.add_method('GetK',
'uint32_t',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetLambda() const [member function]
cls.add_method('GetLambda',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue(uint32_t k, double lambda) [member function]
cls.add_method('GetValue',
'double',
[param('uint32_t', 'k'), param('double', 'lambda')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger(uint32_t k, uint32_t lambda) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'k'), param('uint32_t', 'lambda')])
## random-variable-stream.h (module 'core'): double ns3::ErlangRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ErlangRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ExponentialRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ExponentialRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ExponentialRandomVariable::ExponentialRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue(double mean, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger(uint32_t mean, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::ExponentialRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ExponentialRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3FixedRssLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FixedRssLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::FixedRssLossModel::FixedRssLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::FixedRssLossModel::SetRss(double rss) [member function]
cls.add_method('SetRss',
'void',
[param('double', 'rss')])
## propagation-loss-model.h (module 'propagation'): double ns3::FixedRssLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
## propagation-loss-model.h (module 'propagation'): int64_t ns3::FixedRssLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
def register_Ns3FriisPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::FriisPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::FriisPropagationLossModel::FriisPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetFrequency(double frequency) [member function]
cls.add_method('SetFrequency',
'void',
[param('double', 'frequency')])
## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetSystemLoss(double systemLoss) [member function]
cls.add_method('SetSystemLoss',
'void',
[param('double', 'systemLoss')])
## propagation-loss-model.h (module 'propagation'): void ns3::FriisPropagationLossModel::SetMinLoss(double minLoss) [member function]
cls.add_method('SetMinLoss',
'void',
[param('double', 'minLoss')])
## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetMinLoss() const [member function]
cls.add_method('GetMinLoss',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetFrequency() const [member function]
cls.add_method('GetFrequency',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::GetSystemLoss() const [member function]
cls.add_method('GetSystemLoss',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): double ns3::FriisPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
## propagation-loss-model.h (module 'propagation'): int64_t ns3::FriisPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
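## --- Editor's note (illustrative) ---
## The model above implements the standard Friis free-space equation,
##   Pr = Pt * Gt * Gr * lambda^2 / ((4 * pi * d)^2 * L),
## with lambda = c / Frequency and L the configured SystemLoss. MinLoss
## caps how small the path loss may get at very short distances, so the
## received power cannot exceed the transmitted power near d = 0.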
def register_Ns3GammaRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::GammaRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::GammaRandomVariable::GammaRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetAlpha() const [member function]
cls.add_method('GetAlpha',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetBeta() const [member function]
cls.add_method('GetBeta',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue(double alpha, double beta) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'alpha'), param('double', 'beta')])
## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger(uint32_t alpha, uint32_t beta) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'alpha'), param('uint32_t', 'beta')])
## random-variable-stream.h (module 'core'): double ns3::GammaRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::GammaRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3ItuR1411LosPropagationLossModel_methods(root_module, cls):
## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ItuR1411LosPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): ns3::ItuR1411LosPropagationLossModel::ItuR1411LosPropagationLossModel() [constructor]
cls.add_constructor([])
## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): void ns3::ItuR1411LosPropagationLossModel::SetFrequency(double freq) [member function]
cls.add_method('SetFrequency',
'void',
[param('double', 'freq')])
## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): double ns3::ItuR1411LosPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetLoss',
'double',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True)
## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): double ns3::ItuR1411LosPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
## itu-r-1411-los-propagation-loss-model.h (module 'propagation'): int64_t ns3::ItuR1411LosPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
def register_Ns3ItuR1411NlosOverRooftopPropagationLossModel_methods(root_module, cls):
## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::ItuR1411NlosOverRooftopPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): ns3::ItuR1411NlosOverRooftopPropagationLossModel::ItuR1411NlosOverRooftopPropagationLossModel() [constructor]
cls.add_constructor([])
## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): void ns3::ItuR1411NlosOverRooftopPropagationLossModel::SetFrequency(double freq) [member function]
cls.add_method('SetFrequency',
'void',
[param('double', 'freq')])
## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): double ns3::ItuR1411NlosOverRooftopPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetLoss',
'double',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True)
## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): double ns3::ItuR1411NlosOverRooftopPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
## itu-r-1411-nlos-over-rooftop-propagation-loss-model.h (module 'propagation'): int64_t ns3::ItuR1411NlosOverRooftopPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
def register_Ns3JakesProcess_methods(root_module, cls):
## jakes-process.h (module 'propagation'): ns3::JakesProcess::JakesProcess(ns3::JakesProcess const & arg0) [copy constructor]
cls.add_constructor([param('ns3::JakesProcess const &', 'arg0')])
## jakes-process.h (module 'propagation'): ns3::JakesProcess::JakesProcess() [constructor]
cls.add_constructor([])
## jakes-process.h (module 'propagation'): void ns3::JakesProcess::DoDispose() [member function]
cls.add_method('DoDispose',
'void',
[],
is_virtual=True)
## jakes-process.h (module 'propagation'): double ns3::JakesProcess::GetChannelGainDb() const [member function]
cls.add_method('GetChannelGainDb',
'double',
[],
is_const=True)
## jakes-process.h (module 'propagation'): std::complex<double> ns3::JakesProcess::GetComplexGain() const [member function]
cls.add_method('GetComplexGain',
'std::complex< double >',
[],
is_const=True)
## jakes-process.h (module 'propagation'): static ns3::TypeId ns3::JakesProcess::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## jakes-process.h (module 'propagation'): void ns3::JakesProcess::SetPropagationLossModel(ns3::Ptr<const ns3::PropagationLossModel> arg0) [member function]
cls.add_method('SetPropagationLossModel',
'void',
[param('ns3::Ptr< ns3::PropagationLossModel const >', 'arg0')])
return
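## --- Editor's note (illustrative) ---
## JakesProcess produces a time-correlated complex channel gain following
## Jakes' sum-of-sinusoids fading model: GetComplexGain returns the raw
## complex coefficient and GetChannelGainDb its power in dB. It is the
## per-link building block consumed by JakesPropagationLossModel below.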
def register_Ns3JakesPropagationLossModel_methods(root_module, cls):
## jakes-propagation-loss-model.h (module 'propagation'): ns3::JakesPropagationLossModel::PI [variable]
cls.add_static_attribute('PI', 'double const', is_const=True)
## jakes-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::JakesPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## jakes-propagation-loss-model.h (module 'propagation'): ns3::JakesPropagationLossModel::JakesPropagationLossModel() [constructor]
cls.add_constructor([])
## jakes-propagation-loss-model.h (module 'propagation'): double ns3::JakesPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
## jakes-propagation-loss-model.h (module 'propagation'): int64_t ns3::JakesPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
def register_Ns3Kun2600MhzPropagationLossModel_methods(root_module, cls):
## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::Kun2600MhzPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): ns3::Kun2600MhzPropagationLossModel::Kun2600MhzPropagationLossModel() [constructor]
cls.add_constructor([])
## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): double ns3::Kun2600MhzPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetLoss',
'double',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True)
## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): double ns3::Kun2600MhzPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
## kun-2600-mhz-propagation-loss-model.h (module 'propagation'): int64_t ns3::Kun2600MhzPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
def register_Ns3LogDistancePropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::LogDistancePropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::LogDistancePropagationLossModel::LogDistancePropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetPathLossExponent(double n) [member function]
cls.add_method('SetPathLossExponent',
'void',
[param('double', 'n')])
## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::GetPathLossExponent() const [member function]
cls.add_method('GetPathLossExponent',
'double',
[],
is_const=True)
## propagation-loss-model.h (module 'propagation'): void ns3::LogDistancePropagationLossModel::SetReference(double referenceDistance, double referenceLoss) [member function]
cls.add_method('SetReference',
'void',
[param('double', 'referenceDistance'), param('double', 'referenceLoss')])
## propagation-loss-model.h (module 'propagation'): double ns3::LogDistancePropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
## propagation-loss-model.h (module 'propagation'): int64_t ns3::LogDistancePropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
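## --- Editor's note (illustrative) ---
## The model above implements the standard log-distance path-loss law,
##   L(d) = L0 + 10 * n * log10(d / d0)   [dB],
## where n is set via SetPathLossExponent and the reference pair (d0, L0)
## via SetReference(referenceDistance, referenceLoss).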
def register_Ns3LogNormalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::LogNormalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::LogNormalRandomVariable::LogNormalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetMu() const [member function]
cls.add_method('GetMu',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetSigma() const [member function]
cls.add_method('GetSigma',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue(double mu, double sigma) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mu'), param('double', 'sigma')])
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger(uint32_t mu, uint32_t sigma) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mu'), param('uint32_t', 'sigma')])
## random-variable-stream.h (module 'core'): double ns3::LogNormalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::LogNormalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3MatrixPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::MatrixPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::MatrixPropagationLossModel::MatrixPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b, double loss, bool symmetric=true) [member function]
cls.add_method('SetLoss',
'void',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b'), param('double', 'loss'), param('bool', 'symmetric', default_value='true')])
## propagation-loss-model.h (module 'propagation'): void ns3::MatrixPropagationLossModel::SetDefaultLoss(double arg0) [member function]
cls.add_method('SetDefaultLoss',
'void',
[param('double', 'arg0')])
## propagation-loss-model.h (module 'propagation'): double ns3::MatrixPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
## propagation-loss-model.h (module 'propagation'): int64_t ns3::MatrixPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
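## --- Editor's note (illustrative) ---
## MatrixPropagationLossModel is table-driven rather than geometric: loss
## is looked up per (a, b) pair, falling back to the default for unlisted
## pairs. Sketch:
##
##   m = ns.propagation.MatrixPropagationLossModel()
##   m.SetDefaultLoss(200.0)          # dB for pairs not in the table
##   m.SetLoss(mob_a, mob_b, 50.0)    # symmetric=True also covers (b, a)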
def register_Ns3MobilityModel_methods(root_module, cls):
## mobility-model.h (module 'mobility'): ns3::MobilityModel::MobilityModel(ns3::MobilityModel const & arg0) [copy constructor]
cls.add_constructor([param('ns3::MobilityModel const &', 'arg0')])
## mobility-model.h (module 'mobility'): ns3::MobilityModel::MobilityModel() [constructor]
cls.add_constructor([])
## mobility-model.h (module 'mobility'): int64_t ns3::MobilityModel::AssignStreams(int64_t stream) [member function]
cls.add_method('AssignStreams',
'int64_t',
[param('int64_t', 'stream')])
## mobility-model.h (module 'mobility'): double ns3::MobilityModel::GetDistanceFrom(ns3::Ptr<ns3::MobilityModel const> position) const [member function]
cls.add_method('GetDistanceFrom',
'double',
[param('ns3::Ptr< ns3::MobilityModel const >', 'position')],
is_const=True)
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::GetPosition() const [member function]
cls.add_method('GetPosition',
'ns3::Vector',
[],
is_const=True)
## mobility-model.h (module 'mobility'): double ns3::MobilityModel::GetRelativeSpeed(ns3::Ptr<ns3::MobilityModel const> other) const [member function]
cls.add_method('GetRelativeSpeed',
'double',
[param('ns3::Ptr< ns3::MobilityModel const >', 'other')],
is_const=True)
## mobility-model.h (module 'mobility'): static ns3::TypeId ns3::MobilityModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::GetVelocity() const [member function]
cls.add_method('GetVelocity',
'ns3::Vector',
[],
is_const=True)
## mobility-model.h (module 'mobility'): void ns3::MobilityModel::SetPosition(ns3::Vector const & position) [member function]
cls.add_method('SetPosition',
'void',
[param('ns3::Vector const &', 'position')])
## mobility-model.h (module 'mobility'): void ns3::MobilityModel::NotifyCourseChange() const [member function]
cls.add_method('NotifyCourseChange',
'void',
[],
is_const=True, visibility='protected')
## mobility-model.h (module 'mobility'): int64_t ns3::MobilityModel::DoAssignStreams(int64_t start) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'start')],
visibility='private', is_virtual=True)
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::DoGetPosition() const [member function]
cls.add_method('DoGetPosition',
'ns3::Vector',
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## mobility-model.h (module 'mobility'): ns3::Vector ns3::MobilityModel::DoGetVelocity() const [member function]
cls.add_method('DoGetVelocity',
'ns3::Vector',
[],
is_pure_virtual=True, is_const=True, visibility='private', is_virtual=True)
## mobility-model.h (module 'mobility'): void ns3::MobilityModel::DoSetPosition(ns3::Vector const & position) [member function]
cls.add_method('DoSetPosition',
'void',
[param('ns3::Vector const &', 'position')],
is_pure_virtual=True, visibility='private', is_virtual=True)
return
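## --- Editor's note (illustrative) ---
## MobilityModel is the position/velocity interface every loss and delay
## model above consumes; the public getters delegate to the pure-virtual
## DoGetPosition/DoGetVelocity/DoSetPosition hooks registered above.
##
##   d = mob_a.GetDistanceFrom(mob_b)    # metres
##   v = mob_a.GetRelativeSpeed(mob_b)   # m/s
##   p = mob_a.GetPosition()             # ns3::Vector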
def register_Ns3NakagamiPropagationLossModel_methods(root_module, cls):
## propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::NakagamiPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## propagation-loss-model.h (module 'propagation'): ns3::NakagamiPropagationLossModel::NakagamiPropagationLossModel() [constructor]
cls.add_constructor([])
## propagation-loss-model.h (module 'propagation'): double ns3::NakagamiPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
## propagation-loss-model.h (module 'propagation'): int64_t ns3::NakagamiPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
def register_Ns3NormalRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::INFINITE_VALUE [variable]
cls.add_static_attribute('INFINITE_VALUE', 'double const', is_const=True)
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::NormalRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::NormalRandomVariable::NormalRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetVariance() const [member function]
cls.add_method('GetVariance',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue(double mean, double variance, double bound=ns3::NormalRandomVariable::INFINITE_VALUE) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'variance'), param('double', 'bound', default_value='ns3::NormalRandomVariable::INFINITE_VALUE')])
## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger(uint32_t mean, uint32_t variance, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'variance'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::NormalRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::NormalRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
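## --- Editor's note (illustrative) ---
## NormalRandomVariable is parameterized by mean and *variance* (not
## standard deviation). The optional `bound` truncates draws to
## [mean - bound, mean + bound]; its default, INFINITE_VALUE, disables
## truncation.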
def register_Ns3OkumuraHataPropagationLossModel_methods(root_module, cls):
## okumura-hata-propagation-loss-model.h (module 'propagation'): static ns3::TypeId ns3::OkumuraHataPropagationLossModel::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## okumura-hata-propagation-loss-model.h (module 'propagation'): ns3::OkumuraHataPropagationLossModel::OkumuraHataPropagationLossModel() [constructor]
cls.add_constructor([])
## okumura-hata-propagation-loss-model.h (module 'propagation'): double ns3::OkumuraHataPropagationLossModel::GetLoss(ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('GetLoss',
'double',
[param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True)
## okumura-hata-propagation-loss-model.h (module 'propagation'): double ns3::OkumuraHataPropagationLossModel::DoCalcRxPower(double txPowerDbm, ns3::Ptr<ns3::MobilityModel> a, ns3::Ptr<ns3::MobilityModel> b) const [member function]
cls.add_method('DoCalcRxPower',
'double',
[param('double', 'txPowerDbm'), param('ns3::Ptr< ns3::MobilityModel >', 'a'), param('ns3::Ptr< ns3::MobilityModel >', 'b')],
is_const=True, visibility='private', is_virtual=True)
## okumura-hata-propagation-loss-model.h (module 'propagation'): int64_t ns3::OkumuraHataPropagationLossModel::DoAssignStreams(int64_t stream) [member function]
cls.add_method('DoAssignStreams',
'int64_t',
[param('int64_t', 'stream')],
visibility='private', is_virtual=True)
return
def register_Ns3ParetoRandomVariable_methods(root_module, cls):
## random-variable-stream.h (module 'core'): static ns3::TypeId ns3::ParetoRandomVariable::GetTypeId() [member function]
cls.add_method('GetTypeId',
'ns3::TypeId',
[],
is_static=True)
## random-variable-stream.h (module 'core'): ns3::ParetoRandomVariable::ParetoRandomVariable() [constructor]
cls.add_constructor([])
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetMean() const [member function]
cls.add_method('GetMean',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetShape() const [member function]
cls.add_method('GetShape',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetBound() const [member function]
cls.add_method('GetBound',
'double',
[],
is_const=True)
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue(double mean, double shape, double bound) [member function]
cls.add_method('GetValue',
'double',
[param('double', 'mean'), param('double', 'shape'), param('double', 'bound')])
## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger(uint32_t mean, uint32_t shape, uint32_t bound) [member function]
cls.add_method('GetInteger',
'uint32_t',
[param('uint32_t', 'mean'), param('uint32_t', 'shape'), param('uint32_t', 'bound')])
## random-variable-stream.h (module 'core'): double ns3::ParetoRandomVariable::GetValue() [member function]
cls.add_method('GetValue',
'double',
[],
is_virtual=True)
## random-variable-stream.h (module 'core'): uint32_t ns3::ParetoRandomVariable::GetInteger() [member function]
cls.add_method('GetInteger',
'uint32_t',
[],
is_virtual=True)
return
def register_Ns3TimeValue_methods(root_module, cls):
## nstime.h (module 'core'): ns3::TimeValue::TimeValue() [constructor]
cls.add_constructor([])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::TimeValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TimeValue const &', 'arg0')])
## nstime.h (module 'core'): ns3::TimeValue::TimeValue(ns3::Time const & value) [constructor]
cls.add_constructor([param('ns3::Time const &', 'value')])
## nstime.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TimeValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): bool ns3::TimeValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## nstime.h (module 'core'): ns3::Time ns3::TimeValue::Get() const [member function]
cls.add_method('Get',
'ns3::Time',
[],
is_const=True)
## nstime.h (module 'core'): std::string ns3::TimeValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## nstime.h (module 'core'): void ns3::TimeValue::Set(ns3::Time const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Time const &', 'value')])
return
def register_Ns3TypeIdChecker_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdChecker::TypeIdChecker(ns3::TypeIdChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdChecker const &', 'arg0')])
return
def register_Ns3TypeIdValue_methods(root_module, cls):
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue() [constructor]
cls.add_constructor([])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeIdValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::TypeIdValue const &', 'arg0')])
## type-id.h (module 'core'): ns3::TypeIdValue::TypeIdValue(ns3::TypeId const & value) [constructor]
cls.add_constructor([param('ns3::TypeId const &', 'value')])
## type-id.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::TypeIdValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): bool ns3::TypeIdValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## type-id.h (module 'core'): ns3::TypeId ns3::TypeIdValue::Get() const [member function]
cls.add_method('Get',
'ns3::TypeId',
[],
is_const=True)
## type-id.h (module 'core'): std::string ns3::TypeIdValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## type-id.h (module 'core'): void ns3::TypeIdValue::Set(ns3::TypeId const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::TypeId const &', 'value')])
return
def register_Ns3Vector2DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DChecker::Vector2DChecker(ns3::Vector2DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DChecker const &', 'arg0')])
return
def register_Ns3Vector2DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector2DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector2DValue::Vector2DValue(ns3::Vector2D const & value) [constructor]
cls.add_constructor([param('ns3::Vector2D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector2DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector2DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector2D ns3::Vector2DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector2D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector2DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector2DValue::Set(ns3::Vector2D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector2D const &', 'value')])
return
def register_Ns3Vector3DChecker_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DChecker::Vector3DChecker(ns3::Vector3DChecker const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DChecker const &', 'arg0')])
return
def register_Ns3Vector3DValue_methods(root_module, cls):
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue() [constructor]
cls.add_constructor([])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3DValue const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Vector3DValue const &', 'arg0')])
## vector.h (module 'core'): ns3::Vector3DValue::Vector3DValue(ns3::Vector3D const & value) [constructor]
cls.add_constructor([param('ns3::Vector3D const &', 'value')])
## vector.h (module 'core'): ns3::Ptr<ns3::AttributeValue> ns3::Vector3DValue::Copy() const [member function]
cls.add_method('Copy',
'ns3::Ptr< ns3::AttributeValue >',
[],
is_const=True, is_virtual=True)
## vector.h (module 'core'): bool ns3::Vector3DValue::DeserializeFromString(std::string value, ns3::Ptr<ns3::AttributeChecker const> checker) [member function]
cls.add_method('DeserializeFromString',
'bool',
[param('std::string', 'value'), param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_virtual=True)
## vector.h (module 'core'): ns3::Vector3D ns3::Vector3DValue::Get() const [member function]
cls.add_method('Get',
'ns3::Vector3D',
[],
is_const=True)
## vector.h (module 'core'): std::string ns3::Vector3DValue::SerializeToString(ns3::Ptr<ns3::AttributeChecker const> checker) const [member function]
cls.add_method('SerializeToString',
'std::string',
[param('ns3::Ptr< ns3::AttributeChecker const >', 'checker')],
is_const=True, is_virtual=True)
## vector.h (module 'core'): void ns3::Vector3DValue::Set(ns3::Vector3D const & value) [member function]
cls.add_method('Set',
'void',
[param('ns3::Vector3D const &', 'value')])
return
def register_Ns3HashImplementation_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation(ns3::Hash::Implementation const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Implementation const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Implementation::Implementation() [constructor]
cls.add_constructor([])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Implementation::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_pure_virtual=True, is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Implementation::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Implementation::clear() [member function]
cls.add_method('clear',
'void',
[],
is_pure_virtual=True, is_virtual=True)
return
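## --- Editor's note (illustrative) ---
## In Hash::Implementation only GetHash32 and clear() are registered as
## pure virtual; GetHash64 is a plain virtual with a base-class default,
## so a minimal hash implementation needs to supply only the 32-bit
## variant (see Fnv1a/Hash32/Hash64/Murmur3 below for concrete examples).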
def register_Ns3HashFunctionFnv1a_methods(root_module, cls):
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a(ns3::Hash::Function::Fnv1a const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Fnv1a const &', 'arg0')])
## hash-fnv.h (module 'core'): ns3::Hash::Function::Fnv1a::Fnv1a() [constructor]
cls.add_constructor([])
## hash-fnv.h (module 'core'): uint32_t ns3::Hash::Function::Fnv1a::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): uint64_t ns3::Hash::Function::Fnv1a::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-fnv.h (module 'core'): void ns3::Hash::Function::Fnv1a::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash32_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Function::Hash32 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash32 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash32::Hash32(ns3::Hash::Hash32Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash32Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash32::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash32::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionHash64_methods(root_module, cls):
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Function::Hash64 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Hash64 const &', 'arg0')])
## hash-function.h (module 'core'): ns3::Hash::Function::Hash64::Hash64(ns3::Hash::Hash64Function_ptr hp) [constructor]
cls.add_constructor([param('ns3::Hash::Hash64Function_ptr', 'hp')])
## hash-function.h (module 'core'): uint32_t ns3::Hash::Function::Hash64::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): uint64_t ns3::Hash::Function::Hash64::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-function.h (module 'core'): void ns3::Hash::Function::Hash64::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_Ns3HashFunctionMurmur3_methods(root_module, cls):
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3(ns3::Hash::Function::Murmur3 const & arg0) [copy constructor]
cls.add_constructor([param('ns3::Hash::Function::Murmur3 const &', 'arg0')])
## hash-murmur3.h (module 'core'): ns3::Hash::Function::Murmur3::Murmur3() [constructor]
cls.add_constructor([])
## hash-murmur3.h (module 'core'): uint32_t ns3::Hash::Function::Murmur3::GetHash32(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash32',
'uint32_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): uint64_t ns3::Hash::Function::Murmur3::GetHash64(char const * buffer, size_t const size) [member function]
cls.add_method('GetHash64',
'uint64_t',
[param('char const *', 'buffer'), param('size_t const', 'size')],
is_virtual=True)
## hash-murmur3.h (module 'core'): void ns3::Hash::Function::Murmur3::clear() [member function]
cls.add_method('clear',
'void',
[],
is_virtual=True)
return
def register_functions(root_module):
module = root_module
register_functions_ns3_FatalImpl(module.get_submodule('FatalImpl'), root_module)
register_functions_ns3_Hash(module.get_submodule('Hash'), root_module)
return
def register_functions_ns3_FatalImpl(module, root_module):
return
def register_functions_ns3_Hash(module, root_module):
register_functions_ns3_Hash_Function(module.get_submodule('Function'), root_module)
return
def register_functions_ns3_Hash_Function(module, root_module):
return
def main():
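    """Build the root module, register types, methods, and functions, then write the generated bindings to stdout."""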
out = FileCodeSink(sys.stdout)
root_module = module_init()
register_types(root_module)
register_methods(root_module)
register_functions(root_module)
root_module.generate(out)
if __name__ == '__main__':
main()
|
odin1314/security_monkey
|
refs/heads/master
|
security_monkey/monitors.py
|
7
|
"""
.. module: security_monkey.monitors
:platform: Unix
    :synopsis: Monitors are a grouping of a watcher and its associated auditor
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <pkelley@netflix.com> @monkeysecurity
"""
from security_monkey.watchers.iam.iam_role import IAMRole
from security_monkey.watchers.iam.iam_group import IAMGroup
from security_monkey.watchers.iam.iam_ssl import IAMSSL
from security_monkey.watchers.iam.iam_user import IAMUser
from security_monkey.watchers.iam.managed_policy import ManagedPolicy
from security_monkey.auditors.iam.iam_user import IAMUserAuditor
from security_monkey.auditors.iam.iam_group import IAMGroupAuditor
from security_monkey.auditors.iam.iam_ssl import IAMSSLAuditor
from security_monkey.auditors.iam.iam_role import IAMRoleAuditor
from security_monkey.auditors.iam.managed_policy import ManagedPolicyAuditor
from security_monkey.watchers.sns import SNS
from security_monkey.auditors.sns import SNSAuditor
from security_monkey.watchers.sqs import SQS
from security_monkey.watchers.keypair import Keypair
from security_monkey.watchers.security_group import SecurityGroup
from security_monkey.auditors.security_group import SecurityGroupAuditor
from security_monkey.watchers.rds_security_group import RDSSecurityGroup
from security_monkey.auditors.rds_security_group import RDSSecurityGroupAuditor
from security_monkey.watchers.s3 import S3
from security_monkey.auditors.s3 import S3Auditor
from security_monkey.watchers.elb import ELB
from security_monkey.auditors.elb import ELBAuditor
from security_monkey.watchers.redshift import Redshift
from security_monkey.auditors.redshift import RedshiftAuditor
from security_monkey.watchers.elastic_ip import ElasticIP
from security_monkey.watchers.ses import SES
from security_monkey.auditors.ses import SESAuditor
from security_monkey.watchers.vpc.vpc import VPC
from security_monkey.watchers.vpc.subnet import Subnet
from security_monkey.watchers.vpc.route_table import RouteTable
class Monitor(object):
"""Collects a watcher with the associated auditor"""
def __init__(self, index, watcher_class, auditor_class=None):
self.index = index
self.watcher_class = watcher_class
self.auditor_class = auditor_class
def has_auditor(self):
return self.auditor_class is not None
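# Registry of every known monitor, keyed by the watcher's index. Each entry
# pairs a watcher class with its auditor class; a None auditor means items
# are collected but never audited.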
__MONITORS = {
SQS.index:
Monitor(SQS.index, SQS, None),
ELB.index:
Monitor(ELB.index, ELB, ELBAuditor),
IAMSSL.index:
Monitor(IAMSSL.index, IAMSSL, IAMSSLAuditor),
RDSSecurityGroup.index:
Monitor(RDSSecurityGroup.index, RDSSecurityGroup, RDSSecurityGroupAuditor),
SecurityGroup.index:
Monitor(SecurityGroup.index, SecurityGroup, SecurityGroupAuditor),
S3.index:
Monitor(S3.index, S3, S3Auditor),
IAMUser.index:
Monitor(IAMUser.index, IAMUser, IAMUserAuditor),
IAMRole.index:
Monitor(IAMRole.index, IAMRole, IAMRoleAuditor),
IAMGroup.index:
Monitor(IAMGroup.index, IAMGroup, IAMGroupAuditor),
Keypair.index:
Monitor(Keypair.index, Keypair, None),
SNS.index:
Monitor(SNS.index, SNS, SNSAuditor),
Redshift.index:
Monitor(Redshift.index, Redshift, RedshiftAuditor),
ElasticIP.index:
Monitor(ElasticIP.index, ElasticIP, None),
SES.index:
Monitor(SES.index, SES, SESAuditor),
VPC.index:
Monitor(VPC.index, VPC, None),
Subnet.index:
Monitor(Subnet.index, Subnet, None),
RouteTable.index:
Monitor(RouteTable.index, RouteTable, None),
ManagedPolicy.index:
Monitor(ManagedPolicy.index, ManagedPolicy, ManagedPolicyAuditor)
}
def get_monitor(monitor_name):
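    """Return the Monitor registered under monitor_name (a watcher index), or None."""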
return __MONITORS.get(monitor_name)
def all_monitors():
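    """Return an iterator over all registered Monitor objects."""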
return __MONITORS.itervalues()
|
ubc/edx-platform
|
refs/heads/release
|
common/lib/xmodule/xmodule/tests/test_combined_open_ended.py
|
73
|
"""
Tests for the various pieces of the CombinedOpenEndedGrading system
OpenEndedChild
OpenEndedModule
"""
import json
import logging
import unittest
from datetime import datetime
from lxml import etree
from lxml.html import fragment_fromstring
from mock import Mock, MagicMock, patch
from pytz import UTC
from webob.multidict import MultiDict
from xmodule.open_ended_grading_classes.openendedchild import OpenEndedChild
from xmodule.open_ended_grading_classes.open_ended_module import OpenEndedModule
from xmodule.open_ended_grading_classes.self_assessment_module import SelfAssessmentModule
from xmodule.open_ended_grading_classes.combined_open_ended_modulev1 import CombinedOpenEndedV1Module
from xmodule.combined_open_ended_module import CombinedOpenEndedModule
from opaque_keys.edx.locations import Location
from xmodule.tests import get_test_system, test_util_open_ended
from xmodule.progress import Progress
from xmodule.validation import StudioValidationMessage
from xmodule.x_module import STUDENT_VIEW
from xmodule.tests.test_util_open_ended import (
DummyModulestore, TEST_STATE_SA_IN,
MOCK_INSTANCE_STATE, TEST_STATE_SA, TEST_STATE_AI, TEST_STATE_AI2, TEST_STATE_AI2_INVALID,
TEST_STATE_SINGLE, TEST_STATE_PE_SINGLE, MockUploadedFile, INSTANCE_INCONSISTENT_STATE,
INSTANCE_INCONSISTENT_STATE2, INSTANCE_INCONSISTENT_STATE3, INSTANCE_INCONSISTENT_STATE4,
INSTANCE_INCONSISTENT_STATE5
)
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
import capa.xqueue_interface as xqueue_interface
log = logging.getLogger(__name__)
ORG = 'edX'
COURSE = 'open_ended' # name of directory with course data
class OpenEndedChildTest(unittest.TestCase):
"""
Test the open ended child class
"""
location = Location("edX", "sa_test", "2012_Fall", "selfassessment", "SampleQuestion")
metadata = json.dumps({'attempts': '10'})
prompt = etree.XML("<prompt>This is a question prompt</prompt>")
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
<option>Second option</option>
</category>
</rubric></rubric>'''
max_score = 1
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': None,
's3_interface': "",
'open_ended_grading_interface': {},
'skip_basic_checks': False,
'control': {
'required_peer_grading': 1,
'peer_grader_count': 1,
'min_to_calibrate': 3,
'max_to_calibrate': 6,
'peer_grade_finished_submissions_when_none_pending': False,
}
}
definition = Mock()
descriptor = Mock()
def setUp(self):
super(OpenEndedChildTest, self).setUp()
self.test_system = get_test_system()
self.test_system.open_ended_grading_interface = None
self.openendedchild = OpenEndedChild(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
def test_latest_answer_empty(self):
answer = self.openendedchild.latest_answer()
self.assertEqual(answer, "")
def test_latest_score_empty(self):
answer = self.openendedchild.latest_score()
self.assertEqual(answer, None)
def test_latest_post_assessment_empty(self):
answer = self.openendedchild.latest_post_assessment(self.test_system)
self.assertEqual(answer, "")
def test_new_history_entry(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
answer = self.openendedchild.latest_answer()
self.assertEqual(answer, new_answer)
new_answer = "Newer Answer"
self.openendedchild.new_history_entry(new_answer)
answer = self.openendedchild.latest_answer()
self.assertEqual(new_answer, answer)
def test_record_latest_score(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
new_score = 3
self.openendedchild.record_latest_score(new_score)
score = self.openendedchild.latest_score()
self.assertEqual(score, 3)
new_score = 4
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(new_score)
score = self.openendedchild.latest_score()
self.assertEqual(score, 4)
def test_record_latest_post_assessment(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
post_assessment = "Post assessment"
self.openendedchild.record_latest_post_assessment(post_assessment)
self.assertEqual(post_assessment,
self.openendedchild.latest_post_assessment(self.test_system))
def test_get_score(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
score = self.openendedchild.get_score()
self.assertEqual(score['score'], 0)
self.assertEqual(score['total'], self.static_data['max_score'])
new_score = 4
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(new_score)
score = self.openendedchild.get_score()
self.assertEqual(score['score'], new_score)
self.assertEqual(score['total'], self.static_data['max_score'])
def test_reset(self):
self.openendedchild.reset(self.test_system)
state = json.loads(self.openendedchild.get_instance_state())
self.assertEqual(state['child_state'], OpenEndedChild.INITIAL)
def test_is_last_response_correct(self):
new_answer = "New Answer"
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(self.static_data['max_score'])
self.assertEqual(self.openendedchild.is_last_response_correct(),
'correct')
self.openendedchild.new_history_entry(new_answer)
self.openendedchild.record_latest_score(0)
self.assertEqual(self.openendedchild.is_last_response_correct(),
'incorrect')
class OpenEndedModuleTest(unittest.TestCase):
"""
Test the open ended module class
"""
location = Location("edX", "sa_test", "2012_Fall", "selfassessment", "SampleQuestion")
metadata = json.dumps({'attempts': '10'})
prompt = etree.XML("<prompt>This is a question prompt</prompt>")
rubric = etree.XML('''<rubric>
<category>
<description>Response Quality</description>
<option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
</category>
</rubric>''')
max_score = 4
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': None,
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'control': {
'required_peer_grading': 1,
'peer_grader_count': 1,
'min_to_calibrate': 3,
'max_to_calibrate': 6,
'peer_grade_finished_submissions_when_none_pending': False,
}
}
oeparam = etree.XML('''
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
''')
definition = {'oeparam': oeparam}
descriptor = Mock()
feedback = {
"success": True,
"feedback": "Grader Feedback"
}
single_score_msg = {
'correct': True,
'score': 4,
'msg': 'Grader Message',
'feedback': json.dumps(feedback),
'grader_type': 'IN',
'grader_id': '1',
'submission_id': '1',
'success': True,
'rubric_scores': [0],
'rubric_scores_complete': True,
'rubric_xml': etree.tostring(rubric)
}
multiple_score_msg = {
'correct': True,
'score': [0, 1],
'msg': 'Grader Message',
'feedback': [json.dumps(feedback), json.dumps(feedback)],
'grader_type': 'PE',
'grader_id': ['1', '2'],
'submission_id': '1',
'success': True,
'rubric_scores': [[0], [0]],
'rubric_scores_complete': [True, True],
'rubric_xml': [etree.tostring(rubric), etree.tostring(rubric)]
}
def setUp(self):
super(OpenEndedModuleTest, self).setUp()
self.test_system = get_test_system()
self.test_system.open_ended_grading_interface = None
self.test_system.location = self.location
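        # Stub out the xqueue so tests never touch the network; send_to_queue
        # returning (0, msg) signals success, while (1, msg) signals a failure.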
self.mock_xqueue = MagicMock()
self.mock_xqueue.send_to_queue.return_value = (0, "Queued")
def constructed_callback(dispatch="score_update"):
return dispatch
self.test_system.xqueue = {'interface': self.mock_xqueue, 'construct_callback': constructed_callback,
'default_queuename': 'testqueue',
'waittime': 1}
self.openendedmodule = OpenEndedModule(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
def test_message_post(self):
"""Test message_post() sends feedback to xqueue."""
submission_time = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
feedback_post = {
'feedback': 'feedback text',
'submission_id': '1',
'grader_id': '1',
'score': 3
}
result = self.openendedmodule.message_post(feedback_post, self.test_system)
self.assertTrue(result['success'])
# make sure it's actually sending something we want to the queue
mock_send_to_queue_body_arg = json.loads(self.mock_xqueue.send_to_queue.call_args[1]['body'])
self.assertEqual(mock_send_to_queue_body_arg['feedback'], feedback_post['feedback'])
self.assertEqual(mock_send_to_queue_body_arg['submission_id'], int(feedback_post['submission_id']))
self.assertEqual(mock_send_to_queue_body_arg['grader_id'], int(feedback_post['grader_id']))
self.assertEqual(mock_send_to_queue_body_arg['score'], feedback_post['score'])
body_arg_student_info = json.loads(mock_send_to_queue_body_arg['student_info'])
self.assertEqual(body_arg_student_info['anonymous_student_id'], self.test_system.anonymous_student_id)
self.assertGreaterEqual(body_arg_student_info['submission_time'], submission_time)
state = json.loads(self.openendedmodule.get_instance_state())
self.assertEqual(state['child_state'], OpenEndedModule.DONE)
def test_message_post_fail(self):
"""Test message_post() if unable to send feedback to xqueue."""
self.mock_xqueue.send_to_queue.return_value = (1, "Not Queued")
feedback_post = {
'feedback': 'feedback text',
'submission_id': '1',
'grader_id': '1',
'score': 3
}
result = self.openendedmodule.message_post(feedback_post, self.test_system)
self.assertFalse(result['success'])
state = json.loads(self.openendedmodule.get_instance_state())
self.assertNotEqual(state['child_state'], OpenEndedModule.DONE)
def test_send_to_grader(self):
student_response = "This is a student submission"
submission_time = datetime.strftime(datetime.now(UTC), xqueue_interface.dateformat)
result, __ = self.openendedmodule.send_to_grader(student_response, self.test_system)
self.assertTrue(result)
mock_send_to_queue_body_arg = json.loads(self.mock_xqueue.send_to_queue.call_args[1]['body'])
self.assertEqual(mock_send_to_queue_body_arg['student_response'], student_response)
self.assertEqual(mock_send_to_queue_body_arg['max_score'], self.max_score)
body_arg_student_info = json.loads(mock_send_to_queue_body_arg['student_info'])
self.assertEqual(body_arg_student_info['anonymous_student_id'], self.test_system.anonymous_student_id)
self.assertGreaterEqual(body_arg_student_info['submission_time'], submission_time)
def test_send_to_grader_fail(self):
"""Test send_to_grader() if unable to send submission to xqueue."""
student_response = "This is a student submission"
self.mock_xqueue.send_to_queue.return_value = (1, "Not Queued")
result, __ = self.openendedmodule.send_to_grader(student_response, self.test_system)
self.assertFalse(result)
def test_save_answer_fail(self):
"""Test save_answer() if unable to send submission to grader."""
submission = "This is a student submission"
self.openendedmodule.send_to_grader = Mock(return_value=(False, "Failed"))
response = self.openendedmodule.save_answer(
{"student_answer": submission},
get_test_system()
)
self.assertFalse(response['success'])
self.assertNotEqual(self.openendedmodule.latest_answer(), submission)
self.assertEqual(self.openendedmodule.stored_answer, submission)
state = json.loads(self.openendedmodule.get_instance_state())
self.assertEqual(state['child_state'], OpenEndedModule.INITIAL)
self.assertEqual(state['stored_answer'], submission)
def update_score_single(self):
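        """Helper: feed a single-grader score message through update_score()."""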
self.openendedmodule.new_history_entry("New Entry")
get = {'queuekey': "abcd",
'xqueue_body': json.dumps(self.single_score_msg)}
self.openendedmodule.update_score(get, self.test_system)
def update_score_multiple(self):
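        """Helper: feed a multiple-grader (peer) score message through update_score()."""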
self.openendedmodule.new_history_entry("New Entry")
get = {'queuekey': "abcd",
'xqueue_body': json.dumps(self.multiple_score_msg)}
self.openendedmodule.update_score(get, self.test_system)
def test_latest_post_assessment(self):
self.update_score_single()
assessment = self.openendedmodule.latest_post_assessment(self.test_system)
        self.assertNotEqual(assessment, '')
# check for errors
self.assertFalse('errors' in assessment)
def test_update_score_single(self):
self.update_score_single()
score = self.openendedmodule.latest_score()
self.assertEqual(score, 4)
def test_update_score_multiple(self):
"""
        Tests that a score of [0, 1] is aggregated to 1 (a behavior change added by @jbau).
"""
self.update_score_multiple()
score = self.openendedmodule.latest_score()
        self.assertEqual(score, 1)
@patch('xmodule.open_ended_grading_classes.open_ended_module.log.error')
def test_update_score_nohistory(self, error_logger):
"""
Tests error handling when there is no child_history
"""
# NOTE that we are not creating any history items
get = {'queuekey': "abcd",
'xqueue_body': json.dumps(self.multiple_score_msg)}
error_msg = ("Trying to update score without existing studentmodule child_history:\n"
" location: i4x://edX/sa_test/selfassessment/SampleQuestion\n"
" score: 1\n"
" grader_ids: [u'1', u'2']\n"
" submission_ids: [u'1', u'1']")
self.openendedmodule.update_score(get, self.test_system)
(msg,), _ = error_logger.call_args
self.assertTrue(error_logger.called)
self.assertEqual(msg, error_msg)
def test_open_ended_display(self):
"""
Test storing answer with the open ended module.
"""
# Create a module with no state yet. Important that this start off as a blank slate.
test_module = OpenEndedModule(self.test_system, self.location,
self.definition, self.descriptor, self.static_data, self.metadata)
saved_response = "Saved response."
submitted_response = "Submitted response."
# Initially, there will be no stored answer.
self.assertEqual(test_module.stored_answer, None)
# And the initial answer to display will be an empty string.
self.assertEqual(test_module.get_display_answer(), "")
# Now, store an answer in the module.
test_module.handle_ajax("store_answer", {'student_answer': saved_response}, get_test_system())
# The stored answer should now equal our response.
self.assertEqual(test_module.stored_answer, saved_response)
self.assertEqual(test_module.get_display_answer(), saved_response)
# Mock out the send_to_grader function so it doesn't try to connect to the xqueue.
test_module.send_to_grader = Mock(return_value=(True, "Success"))
# Submit a student response to the question.
test_module.handle_ajax(
"save_answer",
{"student_answer": submitted_response},
get_test_system()
)
# Submitting an answer should clear the stored answer.
self.assertEqual(test_module.stored_answer, None)
# Confirm that the answer is stored properly.
self.assertEqual(test_module.latest_answer(), submitted_response)
def test_parse_score_msg(self):
"""
Test _parse_score_msg with empty dict.
"""
assessment = self.openendedmodule._parse_score_msg("{}", self.test_system)
self.assertEqual(assessment.get("valid"), False)
class CombinedOpenEndedModuleTest(unittest.TestCase):
"""
Unit tests for the combined open ended xmodule
"""
location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion")
definition_template = """
<combinedopenended attempts="10000">
{rubric}
{prompt}
<task>
{task1}
</task>
<task>
{task2}
</task>
</combinedopenended>
"""
prompt = "<prompt>This is a question prompt</prompt>"
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
<option>Second option</option>
</category>
</rubric></rubric>'''
max_score = 1
metadata = {'attempts': '10', 'max_score': max_score}
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': "",
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'graded': True,
}
oeparam = etree.XML('''
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
''')
task_xml1 = '''
<selfassessment>
<hintprompt>
What hint about this problem would you give to someone?
</hintprompt>
<submitmessage>
            Save Successful. Thanks for participating!
</submitmessage>
</selfassessment>
'''
task_xml2 = '''
<openended min_score_to_attempt="1" max_score_to_attempt="1">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>'''
definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2)
descriptor = Mock(data=full_definition)
test_system = get_test_system()
test_system.open_ended_grading_interface = None
usage_key = test_system.course_id.make_usage_key('combinedopenended', 'test_loc')
# ScopeIds has 4 fields: user_id, block_type, def_id, usage_id
scope_ids = ScopeIds(1, 'combinedopenended', usage_key, usage_key)
combinedoe_container = CombinedOpenEndedModule(
descriptor=descriptor,
runtime=test_system,
field_data=DictFieldData({
'data': full_definition,
'weight': '1',
}),
scope_ids=scope_ids,
)
def setUp(self):
super(CombinedOpenEndedModuleTest, self).setUp()
self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=self.static_data)
def test_get_tag_name(self):
"""
Test to see if the xml tag name is correct
"""
name = self.combinedoe.get_tag_name("<t>Tag</t>")
self.assertEqual(name, "t")
def test_get_last_response(self):
"""
See if we can parse the last response
"""
response_dict = self.combinedoe.get_last_response(0)
self.assertEqual(response_dict['type'], "selfassessment")
self.assertEqual(response_dict['max_score'], self.max_score)
self.assertEqual(response_dict['state'], CombinedOpenEndedV1Module.INITIAL)
def test_create_task(self):
combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2])
first_task = combinedoe.create_task(combinedoe.task_states[0], combinedoe.task_xml[0])
self.assertIsInstance(first_task, SelfAssessmentModule)
second_task = combinedoe.create_task(combinedoe.task_states[1], combinedoe.task_xml[1])
self.assertIsInstance(second_task, OpenEndedModule)
def test_get_task_number(self):
combinedoe = self.generate_oe_module(TEST_STATE_AI, 1, [self.task_xml1, self.task_xml2])
first_task = combinedoe.get_task_number(0)
self.assertIsInstance(first_task, SelfAssessmentModule)
second_task = combinedoe.get_task_number(1)
self.assertIsInstance(second_task, OpenEndedModule)
third_task = combinedoe.get_task_number(2)
self.assertIsNone(third_task)
def test_update_task_states(self):
"""
See if we can update the task states properly
"""
changed = self.combinedoe.update_task_states()
self.assertFalse(changed)
current_task = self.combinedoe.current_task
current_task.change_state(CombinedOpenEndedV1Module.DONE)
changed = self.combinedoe.update_task_states()
self.assertTrue(changed)
def test_get_max_score(self):
"""
Try to get the max score of the problem
"""
self.combinedoe.update_task_states()
self.combinedoe.state = "done"
self.combinedoe.is_scored = True
max_score = self.combinedoe.max_score()
self.assertEqual(max_score, 1)
def test_container_get_max_score(self):
"""
See if we can get the max score from the actual xmodule
"""
# The progress view requires that this function be exposed
max_score = self.combinedoe_container.max_score()
self.assertEqual(max_score, None)
def test_container_get_progress(self):
"""
See if we can get the progress from the actual xmodule
"""
        progress = self.combinedoe_container.get_progress()
self.assertEqual(progress, None)
def test_get_progress(self):
"""
Test if we can get the correct progress from the combined open ended class
"""
self.combinedoe.update_task_states()
self.combinedoe.state = "done"
self.combinedoe.is_scored = True
progress = self.combinedoe.get_progress()
self.assertIsInstance(progress, Progress)
# progress._a is the score of the xmodule, which is 0 right now.
self.assertEqual(progress._a, 0)
# progress._b is the max_score (which is 1), divided by the weight (which is 1).
self.assertEqual(progress._b, 1)
def test_container_weight(self):
"""
Check the problem weight in the container
"""
weight = self.combinedoe_container.weight
self.assertEqual(weight, 1)
def test_container_child_weight(self):
"""
Test the class to see if it picks up the right weight
"""
weight = self.combinedoe_container.child_module.weight
self.assertEqual(weight, 1)
def test_get_score(self):
"""
See if scoring works
"""
score_dict = self.combinedoe.get_score()
self.assertEqual(score_dict['score'], 0)
self.assertEqual(score_dict['total'], 1)
def test_alternate_orderings(self):
"""
        Try multiple orderings of definitions to see if the problem renders the different steps correctly.
"""
t1 = self.task_xml1
t2 = self.task_xml2
xml_to_test = [[t1], [t2], [t1, t1], [t1, t2], [t2, t2], [t2, t1], [t1, t2, t1]]
for xml in xml_to_test:
definition = {'prompt': etree.XML(self.prompt), 'rubric': etree.XML(self.rubric), 'task_xml': xml}
descriptor = Mock(data=definition)
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=self.static_data)
changed = combinedoe.update_task_states()
self.assertFalse(changed)
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state={'task_states': TEST_STATE_SA})
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state={'task_states': TEST_STATE_SA_IN})
def test_get_score_realistic(self):
"""
Try to parse the correct score from a json instance state
"""
instance_state = json.loads(MOCK_INSTANCE_STATE)
rubric = """
<rubric>
<rubric>
<category>
<description>Response Quality</description>
<option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
<option>The response is a marginal answer to the question. It may contain some elements of a proficient response, but it is inaccurate or incomplete.</option>
<option>The response is a proficient answer to the question. It is generally correct, although it may contain minor inaccuracies. There is limited evidence of higher-order thinking.</option>
<option>The response is correct, complete, and contains evidence of higher-order thinking.</option>
</category>
</rubric>
</rubric>
"""
definition = {'prompt': etree.XML(self.prompt), 'rubric': etree.XML(rubric),
'task_xml': [self.task_xml1, self.task_xml2]}
descriptor = Mock(data=definition)
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=instance_state)
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 15.0)
def generate_oe_module(self, task_state, task_number, task_xml):
"""
Return a combined open ended module with the specified parameters
"""
definition = {
'prompt': etree.XML(self.prompt),
'rubric': etree.XML(self.rubric),
'task_xml': task_xml
}
descriptor = Mock(data=definition)
module = Mock(scope_ids=Mock(usage_id='dummy-usage-id'))
instance_state = {'task_states': task_state, 'graded': True}
if task_number is not None:
instance_state.update({'current_task_number': task_number})
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
definition,
descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=instance_state)
return combinedoe
def ai_state_reset(self, task_state, task_number=None):
"""
See if state is properly reset
"""
combinedoe = self.generate_oe_module(task_state, task_number, [self.task_xml2])
html = combinedoe.get_html()
self.assertIsInstance(html, basestring)
score = combinedoe.get_score()
if combinedoe.is_scored:
self.assertEqual(score['score'], 0)
else:
self.assertEqual(score['score'], None)
def ai_state_success(self, task_state, task_number=None, iscore=2, tasks=None):
"""
See if state stays the same
"""
if tasks is None:
tasks = [self.task_xml1, self.task_xml2]
combinedoe = self.generate_oe_module(task_state, task_number, tasks)
html = combinedoe.get_html()
self.assertIsInstance(html, basestring)
score = combinedoe.get_score()
self.assertEqual(int(score['score']), iscore)
def test_ai_state_reset(self):
self.ai_state_reset(TEST_STATE_AI)
def test_ai_state2_reset(self):
self.ai_state_reset(TEST_STATE_AI2)
def test_ai_invalid_state(self):
self.ai_state_reset(TEST_STATE_AI2_INVALID)
    def test_ai_state_reset_task_number(self):
self.ai_state_reset(TEST_STATE_AI, task_number=2)
self.ai_state_reset(TEST_STATE_AI, task_number=5)
self.ai_state_reset(TEST_STATE_AI, task_number=1)
self.ai_state_reset(TEST_STATE_AI, task_number=0)
def test_ai_state_success(self):
self.ai_state_success(TEST_STATE_AI)
def test_state_single(self):
self.ai_state_success(TEST_STATE_SINGLE, iscore=12)
def test_state_pe_single(self):
self.ai_state_success(TEST_STATE_PE_SINGLE, iscore=0, tasks=[self.task_xml2])
def test_deprecation_message(self):
"""
Test the validation message produced for deprecation.
"""
# pylint: disable=no-member
validation = self.combinedoe_container.validate()
deprecation_msg = "ORA1 is no longer supported. To use this assessment, " \
"replace this ORA1 component with an ORA2 component."
validation.summary.text = deprecation_msg
validation.summary.type = 'error'
self.assertEqual(
validation.summary.text,
deprecation_msg
)
self.assertEqual(validation.summary.type, StudioValidationMessage.ERROR)
class CombinedOpenEndedModuleConsistencyTest(unittest.TestCase):
"""
Unit tests for the combined open ended xmodule rubric scores consistency.
"""
# location, definition_template, prompt, rubric, max_score, metadata, oeparam, task_xml1, task_xml2
# All these variables are used to construct the xmodule descriptor.
location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion")
definition_template = """
<combinedopenended attempts="10000">
{rubric}
{prompt}
<task>
{task1}
</task>
<task>
{task2}
</task>
</combinedopenended>
"""
prompt = "<prompt>This is a question prompt</prompt>"
rubric = '''<rubric><rubric>
<category>
<description>Response Quality</description>
<option>The response is not a satisfactory answer to the question. It either fails to address the question or does so in a limited way, with no evidence of higher-order thinking.</option>
<option>Second option</option>
</category>
</rubric></rubric>'''
max_score = 10
metadata = {'attempts': '10', 'max_score': max_score}
oeparam = etree.XML('''
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
''')
task_xml1 = '''
<selfassessment>
<hintprompt>
What hint about this problem would you give to someone?
</hintprompt>
<submitmessage>
            Save Successful. Thanks for participating!
</submitmessage>
</selfassessment>
'''
task_xml2 = '''
<openended min_score_to_attempt="1" max_score_to_attempt="10">
<openendedparam>
<initial_display>Enter essay here.</initial_display>
<answer_display>This is the answer.</answer_display>
<grader_payload>{"grader_settings" : "ml_grading.conf", "problem_id" : "6.002x/Welcome/OETest"}</grader_payload>
</openendedparam>
</openended>'''
static_data = {
'max_attempts': 20,
'prompt': prompt,
'rubric': rubric,
'max_score': max_score,
'display_name': 'Name',
'accept_file_upload': False,
'close_date': "",
's3_interface': test_util_open_ended.S3_INTERFACE,
'open_ended_grading_interface': test_util_open_ended.OPEN_ENDED_GRADING_INTERFACE,
'skip_basic_checks': False,
'graded': True,
}
definition = {'prompt': etree.XML(prompt), 'rubric': etree.XML(rubric), 'task_xml': [task_xml1, task_xml2]}
full_definition = definition_template.format(prompt=prompt, rubric=rubric, task1=task_xml1, task2=task_xml2)
descriptor = Mock(data=full_definition)
test_system = get_test_system()
test_system.open_ended_grading_interface = None
usage_key = test_system.course_id.make_usage_key('combinedopenended', 'test_loc')
# ScopeIds has 4 fields: user_id, block_type, def_id, usage_id
scope_ids = ScopeIds(1, 'combinedopenended', usage_key, usage_key)
combinedoe_container = CombinedOpenEndedModule(
descriptor=descriptor,
runtime=test_system,
field_data=DictFieldData({
'data': full_definition,
'weight': '1',
}),
scope_ids=scope_ids,
)
def setUp(self):
super(CombinedOpenEndedModuleConsistencyTest, self).setUp()
self.combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE))
def test_get_score(self):
"""
If grader type is ML score should be updated from rubric scores. Aggregate rubric scores = sum([3])*5.
"""
score_dict = self.combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 5.0)
def test_get_score_with_pe_grader(self):
"""
If grader type is PE score should not be updated from rubric scores. Aggregate rubric scores = sum([3])*5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE2))
score_dict = combinedoe.get_score()
self.assertNotEqual(score_dict['score'], 15.0)
def test_get_score_with_different_score_value_in_rubric(self):
"""
If grader type is ML score should be updated from rubric scores. Aggregate rubric scores = sum([5])*5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE3))
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 25.0)
self.assertEqual(score_dict['total'], 5.0)
def test_get_score_with_old_task_states(self):
"""
If grader type is ML and old_task_states are present in instance inconsistent state score should be updated
from rubric scores. Aggregate rubric scores = sum([3])*5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE4))
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 5.0)
def test_get_score_with_score_missing(self):
"""
If grader type is ML and score field is missing in instance inconsistent state score should be updated from
rubric scores. Aggregate rubric scores = sum([3])*5.
"""
combinedoe = CombinedOpenEndedV1Module(self.test_system,
self.location,
self.definition,
self.descriptor,
static_data=self.static_data,
metadata=self.metadata,
instance_state=json.loads(INSTANCE_INCONSISTENT_STATE5))
score_dict = combinedoe.get_score()
self.assertEqual(score_dict['score'], 15.0)
self.assertEqual(score_dict['total'], 5.0)
class OpenEndedModuleXmlTest(unittest.TestCase, DummyModulestore):
"""
Test the student flow in the combined open ended xmodule
"""
problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion")
answer = "blah blah"
assessment = [0, 1]
hint = "blah"
def get_module_system(self, descriptor):
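        """Build a test runtime whose mocked xqueue always reports a successful queue submission."""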
def construct_callback(dispatch="score_update"):
return dispatch
test_system = get_test_system()
test_system.open_ended_grading_interface = None
test_system.xqueue['interface'] = Mock(
send_to_queue=Mock(return_value=(0, "Queued"))
)
test_system.xqueue['construct_callback'] = construct_callback
return test_system
def setUp(self):
super(OpenEndedModuleXmlTest, self).setUp()
self.setup_modulestore(COURSE)
def _handle_ajax(self, dispatch, content):
# Load the module from persistence
module = self._module()
# Call handle_ajax on the module
result = module.handle_ajax(dispatch, content)
# Persist the state
module.save()
return result
def _module(self):
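        """Reload the problem module from the modulestore so saved state is picked up."""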
return self.get_module_from_location(self.problem_location)
def test_open_ended_load_and_save(self):
"""
        See if we can load the module and save an answer.
"""
# Try saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(task_one_json['child_history'][0]['answer'], self.answer)
def test_open_ended_flow_reset(self):
"""
        Test the flow of the module when we complete the self assessment step and then reset.
"""
assessment = [0, 1]
# Simulate a student saving an answer
self._handle_ajax("get_html", {})
self._handle_ajax("save_answer", {"student_answer": self.answer})
self._handle_ajax("get_html", {})
# Mock a student submitting an assessment
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
self._handle_ajax("save_assessment", assessment_dict)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
self._handle_ajax("get_combined_rubric", {})
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 0)
html = self._module().render(STUDENT_VIEW).content
self.assertIsInstance(html, basestring)
rubric = self._handle_ajax("get_combined_rubric", {})
self.assertIsInstance(rubric, basestring)
self.assertEqual(self._module().state, "assessing")
self._handle_ajax("reset", {})
self.assertEqual(self._module().current_task_number, 0)
def test_open_ended_flow_with_xqueue_failure(self):
"""
        Test a two-step problem where the student first completes the self-assessment step and then the
        open-ended step, with the xqueue failing in the first step.
"""
assessment = [1, 1]
# Simulate a student saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
status = self._handle_ajax("get_status", {})
self.assertIsInstance(status, basestring)
# Mock a student submitting an assessment
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
mock_xqueue_interface = Mock(
send_to_queue=Mock(return_value=(1, "Not Queued"))
)
# Call handle_ajax on the module with xqueue down
module = self._module()
with patch.dict(module.xmodule_runtime.xqueue, {'interface': mock_xqueue_interface}):
module.handle_ajax("save_assessment", assessment_dict)
self.assertEqual(module.current_task_number, 1)
            self.assertTrue(module.child_module.get_task_number(1).child_created)
module.save()
# Check that next time the OpenEndedModule is loaded it calls send_to_grader
with patch.object(OpenEndedModule, 'send_to_grader') as mock_send_to_grader:
mock_send_to_grader.return_value = (False, "Not Queued")
module = self._module().child_module.get_score()
self.assertTrue(mock_send_to_grader.called)
            self.assertTrue(self._module().child_module.get_task_number(1).child_created)
# Loading it this time should send submission to xqueue correctly
        self.assertFalse(self._module().child_module.get_task_number(1).child_created)
self.assertEqual(self._module().current_task_number, 1)
self.assertEqual(self._module().state, OpenEndedChild.ASSESSING)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 1)
self._module().render(STUDENT_VIEW)
# Try to get the rubric from the module
self._handle_ajax("get_combined_rubric", {})
self.assertEqual(self._module().state, OpenEndedChild.ASSESSING)
# Make a fake reply from the queue
queue_reply = {
'queuekey': "",
'xqueue_body': json.dumps({
'score': 0,
'feedback': json.dumps({"spelling": "Spelling: Ok.", "grammar": "Grammar: Ok.",
"markup-text": " all of us can think of a book that we hope none of our children or any other children have taken off the shelf . but if i have the right to remove that book from the shelf that work i abhor then you also have exactly the same right and so does everyone else . and then we <bg>have no books left</bg> on the shelf for any of us . <bs>katherine</bs> <bs>paterson</bs> , author write a persuasive essay to a newspaper reflecting your vies on censorship <bg>in libraries . do</bg> you believe that certain materials , such as books , music , movies , magazines , <bg>etc . , should be</bg> removed from the shelves if they are found <bg>offensive ? support your</bg> position with convincing arguments from your own experience , observations <bg>, and or reading .</bg> "}),
'grader_type': "ML",
'success': True,
'grader_id': 1,
'submission_id': 1,
'rubric_xml': "<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>",
'rubric_scores_complete': True,
})
}
self._handle_ajax("check_for_score", {})
# Update the module with the fake queue reply
self._handle_ajax("score_update", queue_reply)
module = self._module()
self.assertFalse(module.ready_to_reset)
self.assertEqual(module.current_task_number, 1)
# Get html and other data client will request
module.render(STUDENT_VIEW)
self._handle_ajax("skip_post_assessment", {})
# Get all results
self._handle_ajax("get_combined_rubric", {})
# reset the problem
self._handle_ajax("reset", {})
self.assertEqual(self._module().state, "initial")
def test_open_ended_flow_correct(self):
"""
        Test a two-step problem where the student first completes the self-assessment step and then the
        open-ended step.
"""
assessment = [1, 1]
# Simulate a student saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
status = self._handle_ajax("get_status", {})
self.assertIsInstance(status, basestring)
# Mock a student submitting an assessment
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
self._handle_ajax("save_assessment", assessment_dict)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 1)
self._module().render(STUDENT_VIEW)
# Try to get the rubric from the module
self._handle_ajax("get_combined_rubric", {})
# Make a fake reply from the queue
queue_reply = {
'queuekey': "",
'xqueue_body': json.dumps({
'score': 0,
'feedback': json.dumps({"spelling": "Spelling: Ok.", "grammar": "Grammar: Ok.",
"markup-text": " all of us can think of a book that we hope none of our children or any other children have taken off the shelf . but if i have the right to remove that book from the shelf that work i abhor then you also have exactly the same right and so does everyone else . and then we <bg>have no books left</bg> on the shelf for any of us . <bs>katherine</bs> <bs>paterson</bs> , author write a persuasive essay to a newspaper reflecting your vies on censorship <bg>in libraries . do</bg> you believe that certain materials , such as books , music , movies , magazines , <bg>etc . , should be</bg> removed from the shelves if they are found <bg>offensive ? support your</bg> position with convincing arguments from your own experience , observations <bg>, and or reading .</bg> "}),
'grader_type': "ML",
'success': True,
'grader_id': 1,
'submission_id': 1,
'rubric_xml': "<rubric><category><description>Writing Applications</description><score>0</score><option points='0'> The essay loses focus, has little information or supporting details, and the organization makes it difficult to follow.</option><option points='1'> The essay presents a mostly unified theme, includes sufficient information to convey the theme, and is generally organized well.</option></category><category><description> Language Conventions </description><score>0</score><option points='0'> The essay demonstrates a reasonable command of proper spelling and grammar. </option><option points='1'> The essay demonstrates superior command of proper spelling and grammar.</option></category></rubric>",
'rubric_scores_complete': True,
})
}
self._handle_ajax("check_for_score", {})
# Update the module with the fake queue reply
self._handle_ajax("score_update", queue_reply)
module = self._module()
self.assertFalse(module.ready_to_reset)
self.assertEqual(module.current_task_number, 1)
# Get html and other data client will request
module.render(STUDENT_VIEW)
self._handle_ajax("skip_post_assessment", {})
# Get all results
self._handle_ajax("get_combined_rubric", {})
# reset the problem
self._handle_ajax("reset", {})
self.assertEqual(self._module().state, "initial")
class OpenEndedModuleXmlAttemptTest(unittest.TestCase, DummyModulestore):
"""
Test if student is able to reset the problem
"""
problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestion1Attempt")
answer = "blah blah"
assessment = [0, 1]
hint = "blah"
def get_module_system(self, descriptor):
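        """Build a test runtime whose mocked xqueue always reports a successful queue submission."""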
test_system = get_test_system()
test_system.open_ended_grading_interface = None
test_system.xqueue['interface'] = Mock(
send_to_queue=Mock(return_value=(0, "Queued"))
)
return test_system
def setUp(self):
super(OpenEndedModuleXmlAttemptTest, self).setUp()
self.setup_modulestore(COURSE)
def _handle_ajax(self, dispatch, content):
# Load the module from persistence
module = self._module()
# Call handle_ajax on the module
result = module.handle_ajax(dispatch, content)
# Persist the state
module.save()
return result
def _module(self):
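        """Reload the problem module from the modulestore so saved state is picked up."""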
return self.get_module_from_location(self.problem_location)
def test_reset_fail(self):
"""
        Test the flow of the module when we complete the self assessment step and then reset.
        Since the problem only allows one attempt, the reset should fail.
"""
assessment = [0, 1]
# Simulate a student saving an answer
self._handle_ajax("save_answer", {"student_answer": self.answer})
# Mock a student submitting an assessment
assessment_dict = MultiDict({'assessment': sum(assessment)})
assessment_dict.extend(('score_list[]', val) for val in assessment)
self._handle_ajax("save_assessment", assessment_dict)
task_one_json = json.loads(self._module().task_states[0])
self.assertEqual(json.loads(task_one_json['child_history'][0]['post_assessment']), assessment)
# Move to the next step in the problem
self._handle_ajax("next_problem", {})
self.assertEqual(self._module().current_task_number, 0)
html = self._module().render(STUDENT_VIEW).content
self.assertIsInstance(html, basestring)
# Module should now be done
rubric = self._handle_ajax("get_combined_rubric", {})
self.assertIsInstance(rubric, basestring)
self.assertEqual(self._module().state, "done")
# Try to reset, should fail because only 1 attempt is allowed
reset_data = json.loads(self._handle_ajax("reset", {}))
self.assertEqual(reset_data['success'], False)
class OpenEndedModuleXmlImageUploadTest(unittest.TestCase, DummyModulestore):
"""
Test if student is able to upload images properly.
"""
problem_location = Location("edX", "open_ended", "2012_Fall", "combinedopenended", "SampleQuestionImageUpload")
answer_text = "Hello, this is my amazing answer."
file_text = "Hello, this is my amazing file."
file_name = "Student file 1"
answer_link = "http://www.edx.org"
autolink_tag = '<a target="_blank" href='
autolink_tag_swapped = '<a href='
def get_module_system(self, descriptor):
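        """Build a test runtime with a mocked S3 interface and an always-successful xqueue."""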
test_system = get_test_system()
test_system.open_ended_grading_interface = None
test_system.s3_interface = test_util_open_ended.S3_INTERFACE
test_system.xqueue['interface'] = Mock(
send_to_queue=Mock(return_value=(0, "Queued"))
)
return test_system
def setUp(self):
super(OpenEndedModuleXmlImageUploadTest, self).setUp()
self.setup_modulestore(COURSE)
def test_file_upload_fail(self):
"""
Test to see if a student submission without a file attached fails.
"""
module = self.get_module_from_location(self.problem_location)
# Simulate a student saving an answer
response = module.handle_ajax("save_answer", {"student_answer": self.answer_text})
response = json.loads(response)
self.assertFalse(response['success'])
self.assertIn('error', response)
@patch(
'xmodule.open_ended_grading_classes.openendedchild.S3Connection',
test_util_open_ended.MockS3Connection
)
@patch(
'xmodule.open_ended_grading_classes.openendedchild.Key',
test_util_open_ended.MockS3Key
)
def test_file_upload_success(self):
"""
Test to see if a student submission with a file is handled properly.
"""
module = self.get_module_from_location(self.problem_location)
# Simulate a student saving an answer with a file
response = module.handle_ajax("save_answer", {
"student_answer": self.answer_text,
"valid_files_attached": True,
"student_file": [MockUploadedFile(self.file_name, self.file_text)],
})
response = json.loads(response)
self.assertTrue(response['success'])
self.assertIn(self.file_name, response['student_response'])
self.assertTrue(self.autolink_tag in response['student_response'] or
self.autolink_tag_swapped in response['student_response'])
def test_link_submission_success(self):
"""
Students can submit links instead of files. Check that the link is properly handled.
"""
module = self.get_module_from_location(self.problem_location)
# Simulate a student saving an answer with a link.
response = module.handle_ajax("save_answer", {
"student_answer": "{0} {1}".format(self.answer_text, self.answer_link)
})
response = json.loads(response)
self.assertTrue(response['success'])
self.assertIn(self.answer_link, response['student_response'])
self.assertTrue(self.autolink_tag in response['student_response'] or
self.autolink_tag_swapped in response['student_response'])
class OpenEndedModuleUtilTest(unittest.TestCase):
"""
Tests for the util functions of OpenEndedModule. Currently just for the html_sanitizer and <br/> inserter
"""
script_dirty = u'<script>alert("xss!")</script>'
script_clean = u'alert("xss!")'
img_dirty = u'<img alt="cats" height="200" onclick="eval()" src="http://example.com/lolcats.jpg" width="200">'
img_clean = u'<img width="200" alt="cats" height="200" src="http://example.com/lolcats.jpg">'
embed_dirty = u'<embed height="200" id="cats" onhover="eval()" src="http://example.com/lolcats.swf" width="200"/>'
embed_clean = u'<embed width="200" height="200" id="cats" src="http://example.com/lolcats.swf">'
iframe_dirty = u'<iframe class="cats" height="200" onerror="eval()" src="http://example.com/lolcats" width="200"/>'
iframe_clean = ur'<iframe (height="200" ?|class="cats" ?|width="200" ?|src="http://example.com/lolcats" ?)+></iframe>'
text = u'I am a \u201c\xfcber student\u201d'
text_lessthan_noencd = u'This used to be broken < by the other parser. 3>5'
text_lessthan_encode = u'This used to be broken < by the other parser. 3>5'
text_linebreaks = u"St\xfcdent submission:\nI like lamp."
text_brs = u"St\xfcdent submission:<br/>I like lamp."
link_text = u'I love going to www.lolcatz.com'
link_atag = u'I love going to <a target="_blank" href="http://www.lolcatz.com">www.lolcatz.com</a>'
def assertHtmlEqual(self, actual, expected):
"""
Assert that two strings represent the same html.
"""
return self._assertHtmlEqual(
fragment_fromstring(actual, create_parent='div'),
fragment_fromstring(expected, create_parent='div')
)
def _assertHtmlEqual(self, actual, expected):
"""
Assert that two HTML ElementTree elements are equal.
"""
self.assertEqual(actual.tag, expected.tag)
self.assertEqual(actual.attrib, expected.attrib)
self.assertEqual(actual.text, expected.text)
self.assertEqual(actual.tail, expected.tail)
self.assertEqual(len(actual), len(expected))
for actual_child, expected_child in zip(actual, expected):
self._assertHtmlEqual(actual_child, expected_child)
def test_script(self):
"""
Basic test for stripping <script>
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.script_dirty), self.script_clean)
def test_img(self):
"""
Basic test for passing through img, but stripping bad attr
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.img_dirty), self.img_clean)
def test_embed(self):
"""
Basic test for passing through embed, but stripping bad attr
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.embed_dirty), self.embed_clean)
def test_iframe(self):
"""
Basic test for passing through iframe, but stripping bad attr
"""
self.assertRegexpMatches(OpenEndedChild.sanitize_html(self.iframe_dirty), self.iframe_clean)
def test_text(self):
"""
Test for passing through text unchanged, including unicode
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.text), self.text)
def test_lessthan(self):
"""
Tests that `<` in text context is handled properly
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.text_lessthan_noencd), self.text_lessthan_encode)
def test_linebreaks(self):
"""
tests the replace_newlines function
"""
self.assertHtmlEqual(OpenEndedChild.replace_newlines(self.text_linebreaks), self.text_brs)
def test_linkify(self):
"""
        Tests that sanitize_html converts bare URLs into anchor tags.
"""
self.assertHtmlEqual(OpenEndedChild.sanitize_html(self.link_text), self.link_atag)
def test_combined(self):
"""
tests a combination of inputs
"""
test_input = u"{}\n{}\n{}\n\n{}{}\n{}".format(self.link_text,
self.text,
self.script_dirty,
self.embed_dirty,
self.text_lessthan_noencd,
self.img_dirty)
test_output = u"{}<br/>{}<br/>{}<br/><br/>{}{}<br/>{}".format(self.link_atag,
self.text,
self.script_clean,
self.embed_clean,
self.text_lessthan_encode,
self.img_clean)
self.assertHtmlEqual(OpenEndedChild.sanitize_html(test_input), test_output)
|
jose36/jmdl5
|
refs/heads/master
|
servers/vidxden.py
|
35
|
# -*- coding: iso-8859-1 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for vidxden
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
from core import unpackerjs
def test_video_exists( page_url ):
logger.info("[vidxden.py] test_video_exists(page_url='%s')" % page_url)
data = scrapertools.cache_page( page_url )
if "This server is in maintenance mode, please try again later." in data:
return False,"El servidor de vidxden no está disponible<br/>por tareas de mantenimiento"
elif "No such file or the file has been removed due to copyright infringement issues." in data:
return False,"Ese video ha sido borrado de vidxden"
else:
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[vidxden.py] url="+page_url)
    # Request the page once
headers = []
headers.append(['User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14'])
data = scrapertools.cache_page( page_url , headers=headers )
fname = scrapertools.get_match(data,'<input name="fname" type="hidden" value="([^"]+)">')
codigo = scrapertools.get_match(page_url,'vidxden.com/(\w+)')
    # Request it a second time, as if the user had clicked on the banner
#op=download1&usr_login=&id=qtrv0ufkz3e4&fname=El_cazador_de_sue_os-dvd.avi&referer=&method_free=Continue+to+Video
headers.append(['Referer',page_url])
post = "op=download1&usr_login=&id="+codigo+"&fname="+fname+"&referer=&method_free=Continue+to+Video"
data = scrapertools.cache_page( page_url , post=post, headers=headers )
logger.info("data="+data)
    # Extract the packed (obfuscated) block
#<div id="embedcontmvshre" style="position: absolute; top: 0; left: 0; visibility: hidden;"><script type='text/javascript'>eval(function(p,a,c,k,e,d){while(c--)if(k[c])p=p.replace(new RegExp('\\b'+c.toString(a)+'\\b','g'),k[c]);return p}('1i.1h(\'<8 10="1g"1f="1e:1d-1c-1b-1a-19"q="p"o="n"18="3://b.7.4/a/17.16"><2 1="u"0="t"/><2 1="s"0="r"/><2 1="m"0="3://i/l/6.k"/><2 1="f"0="5"><2 1="g"0="5"/><2 1="e"0="c"/><2 1="j"0="h"/><2 1="z"0="3://y.x.4:w/d/v/6"/><9 10="15"14="13/7"z="3://y.x.4:w/d/v/6"u="t"s="r"q="p"o="n"m="3://i/l/6.k"j="h"g="5"f="5"e="c"12="3://b.7.4/a/11/"></9></8>\');',36,55,'value|name|param|http|com|false|qtrv0ufkz3e4|divx|object|embed|plugin|go|Play||previewMessage|allowContextMenu|bannerEnabled|true||autoPlay|jpg|00249|previewImage|318|height|640|width|transparent|wmode|Stage6|custommode|opujxvaorizu2mdg6fst2fjdzlrn4p437h3lsbz5fjkxs|364|divxden|s31|src|id|download|pluginspage|video|type|np_vid|cab|DivXBrowserPlugin|codebase|CC0F21721616|9C46|41fa|D0AB|67DABFBF|clsid|classid|ie_vid|write|document'.split('|')))</script></div>
patron = "(<script type='text/javascript'>eval\(function.*?</script>)"
matches = re.compile(patron,re.DOTALL).findall(data)
#scrapertools.printMatches(matches)
data = ""
if len(matches)>0:
data = matches[0]
logger.info("[vidxden.py] bloque packed="+data)
else:
logger.info("[vidxden.py] no encuentra bloque packed="+data)
return ""
    # Unpack it
descifrado = unpackerjs.unpackjs(data)
    # Extract the video URL
logger.info("descifrado="+descifrado)
    # Extract the URL
patron = '<param name="src"value="([^"]+)"/>'
matches = re.compile(patron,re.DOTALL).findall(descifrado)
scrapertools.printMatches(matches)
if len(matches)==0:
descifrado = descifrado.replace("\\","")
patron = "file','([^']+)'"
matches = re.compile(patron,re.DOTALL).findall(descifrado)
scrapertools.printMatches(matches)
video_urls = []
if len(matches)>0:
video_urls.append( ["[vidxden]",matches[0]+"|Referer="+urllib.quote(page_url)+"&User-Agent="+urllib.quote('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14')])
for video_url in video_urls:
logger.info("[vidxden.py] %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Find videos from this server in the given text
def find_videos(text):
encontrados = set()
devuelve = []
# http://www.vidxden.com/embed-3e1cwjigcicj-width-770-height-385.html
patronvideos = 'vidxden.com/embed-([a-z0-9]+)'
logger.info("[vidxden.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(text)
for match in matches:
titulo = "[vidxden]"
url = "http://www.vidxden.com/"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'vidxden' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
# http://www.vidxden.com/qya0qmf3k502
patronvideos = 'vidxden.com/([a-z0-9]+)'
logger.info("[vidxden.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(text)
for match in matches:
titulo = "[vidxden]"
url = "http://www.vidxden.com/"+match
if url!="http://www.vidxden.com/embed" and url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'vidxden' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
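# Illustrative result (a sketch, not part of the original connector):
# find_videos("watch http://www.vidxden.com/qya0qmf3k502") returns
# [['[vidxden]', 'http://www.vidxden.com/qya0qmf3k502', 'vidxden']]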
|
synicalsyntax/zulip
|
refs/heads/master
|
zerver/tests/test_settings.py
|
2
|
import time
from typing import Any, Dict
from unittest import mock
import ujson
from django.http import HttpResponse
from django.test import override_settings
from zerver.lib.initial_password import initial_password
from zerver.lib.rate_limiter import add_ratelimit_rule, remove_ratelimit_rule
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import get_test_image_file
from zerver.lib.users import get_all_api_keys
from zerver.models import UserProfile, get_user_profile_by_api_key
class ChangeSettingsTest(ZulipTestCase):
def check_well_formed_change_settings_response(self, result: Dict[str, Any]) -> None:
self.assertIn("full_name", result)
# DEPRECATED, to be deleted after all uses of check_for_toggle_param
# are converted into check_for_toggle_param_patch.
def check_for_toggle_param(self, pattern: str, param: str) -> None:
self.login('hamlet')
user_profile = self.example_user('hamlet')
json_result = self.client_post(pattern,
{param: ujson.dumps(True)})
self.assert_json_success(json_result)
# refetch user_profile object to correctly handle caching
user_profile = self.example_user('hamlet')
self.assertEqual(getattr(user_profile, param), True)
json_result = self.client_post(pattern,
{param: ujson.dumps(False)})
self.assert_json_success(json_result)
# refetch user_profile object to correctly handle caching
user_profile = self.example_user('hamlet')
self.assertEqual(getattr(user_profile, param), False)
# TODO: requires method consolidation, right now, there's no alternative
# for check_for_toggle_param for PATCH.
def check_for_toggle_param_patch(self, pattern: str, param: str) -> None:
self.login('hamlet')
user_profile = self.example_user('hamlet')
json_result = self.client_patch(pattern,
{param: ujson.dumps(True)})
self.assert_json_success(json_result)
# refetch user_profile object to correctly handle caching
user_profile = self.example_user('hamlet')
self.assertEqual(getattr(user_profile, param), True)
json_result = self.client_patch(pattern,
{param: ujson.dumps(False)})
self.assert_json_success(json_result)
# refetch user_profile object to correctly handle caching
user_profile = self.example_user('hamlet')
self.assertEqual(getattr(user_profile, param), False)
def test_successful_change_settings(self) -> None:
"""
A call to /json/settings with valid parameters changes the user's
settings correctly and returns correct values.
"""
user = self.example_user('hamlet')
self.login_user(user)
json_result = self.client_patch(
"/json/settings",
dict(
full_name='Foo Bar',
old_password=initial_password(user.delivery_email),
new_password='foobar1',
))
self.assert_json_success(json_result)
result = ujson.loads(json_result.content)
self.check_well_formed_change_settings_response(result)
user.refresh_from_db()
self.assertEqual(user.full_name, "Foo Bar")
self.logout()
# This is one of the few places we log in directly
# with Django's client (to test the password change
# with as few moving parts as possible).
self.assertTrue(
self.client.login(
username=user.delivery_email,
password='foobar1',
realm=user.realm,
),
)
self.assert_logged_in_user_id(user.id)
def test_password_change_check_strength(self) -> None:
self.login('hamlet')
with self.settings(PASSWORD_MIN_LENGTH=3, PASSWORD_MIN_GUESSES=1000):
json_result = self.client_patch(
"/json/settings",
dict(
full_name='Foo Bar',
old_password=initial_password(self.example_email("hamlet")),
new_password='easy',
))
self.assert_json_error(json_result, "New password is too weak!")
json_result = self.client_patch(
"/json/settings",
dict(
full_name='Foo Bar',
old_password=initial_password(self.example_email("hamlet")),
new_password='f657gdGGk9',
))
self.assert_json_success(json_result)
def test_illegal_name_changes(self) -> None:
user = self.example_user('hamlet')
self.login_user(user)
full_name = user.full_name
with self.settings(NAME_CHANGES_DISABLED=True):
json_result = self.client_patch("/json/settings",
dict(full_name='Foo Bar'))
# We actually fail silently here, since this only happens if
# somebody is trying to game our API, and there's no reason to
# give them the courtesy of an error reason.
self.assert_json_success(json_result)
user = self.example_user('hamlet')
self.assertEqual(user.full_name, full_name)
# Now try a too-long name
json_result = self.client_patch("/json/settings",
dict(full_name='x' * 1000))
self.assert_json_error(json_result, 'Name too long!')
# Now try a too-short name
json_result = self.client_patch("/json/settings",
dict(full_name='x'))
self.assert_json_error(json_result, 'Name too short!')
def test_illegal_characters_in_name_changes(self) -> None:
self.login('hamlet')
# Now try a name with invalid characters
json_result = self.client_patch("/json/settings",
dict(full_name='Opheli*'))
self.assert_json_error(json_result, 'Invalid characters in name!')
def test_change_email_to_disposable_email(self) -> None:
hamlet = self.example_user("hamlet")
self.login_user(hamlet)
realm = hamlet.realm
realm.disallow_disposable_email_addresses = True
realm.emails_restricted_to_domains = False
realm.save()
json_result = self.client_patch("/json/settings",
dict(email='hamlet@mailnator.com'))
self.assert_json_error(json_result, 'Please use your real email address.')
# This is basically a don't-explode test.
def test_notify_settings(self) -> None:
for notification_setting in UserProfile.notification_setting_types:
# `notification_sound` is a string not a boolean, so this test
# doesn't work for it.
#
# TODO: Make this work more like do_test_realm_update_api
if notification_setting != 'notification_sound':
self.check_for_toggle_param_patch("/json/settings/notifications",
notification_setting)
def test_change_notification_sound(self) -> None:
pattern = "/json/settings/notifications"
param = "notification_sound"
user_profile = self.example_user('hamlet')
self.login_user(user_profile)
json_result = self.client_patch(pattern,
{param: ujson.dumps("invalid")})
self.assert_json_error(json_result, "Invalid notification sound 'invalid'")
json_result = self.client_patch(pattern,
{param: ujson.dumps("ding")})
self.assert_json_success(json_result)
# refetch user_profile object to correctly handle caching
user_profile = self.example_user('hamlet')
self.assertEqual(getattr(user_profile, param), "ding")
json_result = self.client_patch(pattern,
{param: ujson.dumps('zulip')})
self.assert_json_success(json_result)
# refetch user_profile object to correctly handle caching
user_profile = self.example_user('hamlet')
self.assertEqual(getattr(user_profile, param), 'zulip')
def test_toggling_boolean_user_display_settings(self) -> None:
"""Test updating each boolean setting in UserProfile property_types"""
boolean_settings = (s for s in UserProfile.property_types if UserProfile.property_types[s] is bool)
for display_setting in boolean_settings:
self.check_for_toggle_param_patch("/json/settings/display", display_setting)
def test_enter_sends_setting(self) -> None:
self.check_for_toggle_param('/json/users/me/enter-sends', "enter_sends")
def test_wrong_old_password(self) -> None:
self.login('hamlet')
result = self.client_patch(
"/json/settings",
dict(
old_password='bad_password',
new_password="ignored",
))
self.assert_json_error(result, "Wrong password!")
def test_wrong_old_password_rate_limiter(self) -> None:
self.login('hamlet')
with self.settings(RATE_LIMITING_AUTHENTICATE=True):
add_ratelimit_rule(10, 2, domain='authenticate_by_username')
start_time = time.time()
with mock.patch('time.time', return_value=start_time):
result = self.client_patch(
"/json/settings",
dict(
old_password='bad_password',
new_password="ignored",
))
self.assert_json_error(result, "Wrong password!")
result = self.client_patch(
"/json/settings",
dict(
old_password='bad_password',
new_password="ignored",
))
self.assert_json_error(result, "Wrong password!")
# We're over the limit, so we'll get blocked even with the correct password.
result = self.client_patch(
"/json/settings",
dict(
old_password=initial_password(self.example_email("hamlet")),
new_password="ignored",
))
self.assert_json_error(result, "You're making too many attempts! Try again in 10 seconds.")
# After time passes, we should be able to succeed if we give the correct password.
with mock.patch('time.time', return_value=start_time + 11):
json_result = self.client_patch(
"/json/settings",
dict(
old_password=initial_password(self.example_email("hamlet")),
new_password='foobar1',
))
self.assert_json_success(json_result)
remove_ratelimit_rule(10, 2, domain='authenticate_by_username')
@override_settings(AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',
'zproject.backends.EmailAuthBackend',
'zproject.backends.ZulipDummyBackend'))
def test_change_password_ldap_backend(self) -> None:
self.init_default_ldap_database()
ldap_user_attr_map = {'full_name': 'cn', 'short_name': 'sn'}
self.login('hamlet')
with self.settings(LDAP_APPEND_DOMAIN="zulip.com",
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
result = self.client_patch(
"/json/settings",
dict(
old_password=initial_password(self.example_email("hamlet")),
new_password="ignored",
))
self.assert_json_error(result, "Your Zulip password is managed in LDAP")
result = self.client_patch(
"/json/settings",
dict(
old_password=self.ldap_password("hamlet"), # hamlet's password in ldap
new_password="ignored",
))
self.assert_json_error(result, "Your Zulip password is managed in LDAP")
with self.settings(LDAP_APPEND_DOMAIN="example.com",
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
result = self.client_patch(
"/json/settings",
dict(
old_password=initial_password(self.example_email("hamlet")),
new_password="ignored",
))
self.assert_json_success(result)
with self.settings(LDAP_APPEND_DOMAIN=None,
AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map):
result = self.client_patch(
"/json/settings",
dict(
old_password=initial_password(self.example_email("hamlet")),
new_password="ignored",
))
self.assert_json_error(result, "Your Zulip password is managed in LDAP")
def test_changing_nothing_returns_error(self) -> None:
"""
We need to supply at least one non-empty parameter
to this API, or it should fail. (Eventually, we should
probably use a patch interface for these changes.)
"""
self.login('hamlet')
result = self.client_patch("/json/settings",
dict(old_password='ignored'))
self.assert_json_error(result, "Please fill out all fields.")
def do_test_change_user_display_setting(self, setting_name: str) -> None:
test_changes: Dict[str, Any] = dict(
default_language = 'de',
emojiset = 'google',
timezone = 'US/Mountain',
demote_inactive_streams = 2,
color_scheme = 2,
)
self.login('hamlet')
test_value = test_changes.get(setting_name)
# Error if a setting in UserProfile.property_types does not have test values
if test_value is None:
raise AssertionError(f'No test created for {setting_name}')
if isinstance(test_value, int):
invalid_value: Any = 100
else:
invalid_value = 'invalid_' + setting_name
data = {setting_name: ujson.dumps(test_value)}
result = self.client_patch("/json/settings/display", data)
self.assert_json_success(result)
user_profile = self.example_user('hamlet')
self.assertEqual(getattr(user_profile, setting_name), test_value)
# Test to make sure invalid settings are not accepted
# and saved in the db.
data = {setting_name: ujson.dumps(invalid_value)}
result = self.client_patch("/json/settings/display", data)
        # The server rejects an invalid value with an error of the form
        # "Invalid <setting_name>" (e.g. "Invalid default_language").
self.assert_json_error(result, f"Invalid {setting_name}")
user_profile = self.example_user('hamlet')
self.assertNotEqual(getattr(user_profile, setting_name), invalid_value)
def test_change_user_display_setting(self) -> None:
"""Test updating each non-boolean setting in UserProfile property_types"""
user_settings = (s for s in UserProfile.property_types if UserProfile.property_types[s] is not bool)
for setting in user_settings:
self.do_test_change_user_display_setting(setting)
def do_change_emojiset(self, emojiset: str) -> HttpResponse:
self.login('hamlet')
data = {'emojiset': ujson.dumps(emojiset)}
result = self.client_patch("/json/settings/display", data)
return result
def test_emojiset(self) -> None:
"""Test banned emojisets are not accepted."""
banned_emojisets = ['apple', 'emojione']
valid_emojisets = ['google', 'google-blob', 'text', 'twitter']
for emojiset in banned_emojisets:
result = self.do_change_emojiset(emojiset)
self.assert_json_error(result, "Invalid emojiset")
for emojiset in valid_emojisets:
result = self.do_change_emojiset(emojiset)
self.assert_json_success(result)
def test_avatar_changes_disabled(self) -> None:
self.login('hamlet')
with self.settings(AVATAR_CHANGES_DISABLED=True):
result = self.client_delete("/json/users/me/avatar")
self.assert_json_error(result, "Avatar changes are disabled in this organization.", 400)
with self.settings(AVATAR_CHANGES_DISABLED=True):
with get_test_image_file('img.png') as fp1:
result = self.client_post("/json/users/me/avatar", {'f1': fp1})
self.assert_json_error(result, "Avatar changes are disabled in this organization.", 400)
class UserChangesTest(ZulipTestCase):
def test_update_api_key(self) -> None:
user = self.example_user('hamlet')
email = user.email
self.login_user(user)
old_api_keys = get_all_api_keys(user)
# Ensure the old API keys are in the authentication cache, so
# that the below logic can test whether we have a cache-flushing bug.
for api_key in old_api_keys:
self.assertEqual(get_user_profile_by_api_key(api_key).email, email)
result = self.client_post('/json/users/me/api_key/regenerate')
self.assert_json_success(result)
new_api_key = result.json()['api_key']
self.assertNotIn(new_api_key, old_api_keys)
user = self.example_user('hamlet')
current_api_keys = get_all_api_keys(user)
self.assertIn(new_api_key, current_api_keys)
for api_key in old_api_keys:
with self.assertRaises(UserProfile.DoesNotExist):
get_user_profile_by_api_key(api_key)
for api_key in current_api_keys:
self.assertEqual(get_user_profile_by_api_key(api_key).email, email)
|
Metaswitch/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/firewalls/views.py
|
29
|
# Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tabs
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.firewalls \
import forms as fw_forms
from openstack_dashboard.dashboards.project.firewalls \
import tabs as fw_tabs
from openstack_dashboard.dashboards.project.firewalls \
import workflows as fw_workflows
AddRouterToFirewall = fw_forms.AddRouterToFirewall
InsertRuleToPolicy = fw_forms.InsertRuleToPolicy
RemoveRouterFromFirewall = fw_forms.RemoveRouterFromFirewall
RemoveRuleFromPolicy = fw_forms.RemoveRuleFromPolicy
UpdateFirewall = fw_forms.UpdateFirewall
UpdatePolicy = fw_forms.UpdatePolicy
UpdateRule = fw_forms.UpdateRule
FirewallDetailsTabs = fw_tabs.FirewallDetailsTabs
FirewallTabs = fw_tabs.FirewallTabs
PolicyDetailsTabs = fw_tabs.PolicyDetailsTabs
RuleDetailsTabs = fw_tabs.RuleDetailsTabs
AddFirewall = fw_workflows.AddFirewall
AddPolicy = fw_workflows.AddPolicy
AddRule = fw_workflows.AddRule
class IndexView(tabs.TabView):
tab_group_class = (FirewallTabs)
template_name = 'project/firewalls/details_tabs.html'
page_title = _("Firewalls")
def post(self, request, *args, **kwargs):
obj_ids = request.POST.getlist('object_ids')
action = request.POST['action']
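        # The action string is expected to look like
        # '<table>__delete<obj_type>__<obj_id>' (an assumption inferred from
        # the two regexes below; the exact format is not documented here).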
obj_type = re.search('.delete([a-z]+)', action).group(1)
if not obj_ids:
obj_ids.append(re.search('([0-9a-z-]+)$', action).group(1))
if obj_type == 'rule':
for obj_id in obj_ids:
try:
api.fwaas.rule_delete(request, obj_id)
messages.success(request, _('Deleted rule %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete rule. %s') % e)
if obj_type == 'policy':
for obj_id in obj_ids:
try:
api.fwaas.policy_delete(request, obj_id)
messages.success(request, _('Deleted policy %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete policy. %s') % e)
if obj_type == 'firewall':
for obj_id in obj_ids:
try:
api.fwaas.firewall_delete(request, obj_id)
messages.success(request,
_('Deleted firewall %s') % obj_id)
except Exception as e:
exceptions.handle(request,
_('Unable to delete firewall. %s') % e)
return self.get(request, *args, **kwargs)
class AddRuleView(workflows.WorkflowView):
workflow_class = AddRule
template_name = "project/firewalls/addrule.html"
page_title = _("Add New Rule")
class AddPolicyView(workflows.WorkflowView):
workflow_class = AddPolicy
template_name = "project/firewalls/addpolicy.html"
page_title = _("Add New Policy")
class AddFirewallView(workflows.WorkflowView):
workflow_class = AddFirewall
template_name = "project/firewalls/addfirewall.html"
page_title = _("Add New Firewall")
def get_workflow(self):
if api.neutron.is_extension_supported(self.request,
'fwaasrouterinsertion'):
AddFirewall.register(fw_workflows.SelectRoutersStep)
workflow = super(AddFirewallView, self).get_workflow()
return workflow
class FireWallDetailTabs(tabs.TabView):
template_name = 'project/firewalls/details_tabs.html'
class RuleDetailsView(FireWallDetailTabs):
tab_group_class = (RuleDetailsTabs)
page_title = _("Firewall Rule Details")
class PolicyDetailsView(FireWallDetailTabs):
tab_group_class = (PolicyDetailsTabs)
page_title = _("Firewall Policy Details")
class FirewallDetailsView(FireWallDetailTabs):
tab_group_class = (FirewallDetailsTabs)
page_title = _("Firewall Details")
class UpdateRuleView(forms.ModalFormView):
form_class = UpdateRule
form_id = "update_rule_form"
template_name = "project/firewalls/updaterule.html"
context_object_name = 'rule'
modal_header = _("Edit Rule")
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:updaterule"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Edit Rule {{ name }}")
def get_context_data(self, **kwargs):
context = super(UpdateRuleView, self).get_context_data(**kwargs)
context['rule_id'] = self.kwargs['rule_id']
args = (self.kwargs['rule_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
rule_id = self.kwargs['rule_id']
try:
rule = api.fwaas.rule_get(self.request, rule_id)
return rule
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve rule details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
rule = self._get_object()
initial = rule.get_dict()
protocol = initial['protocol']
initial['protocol'] = protocol.upper() if protocol else 'ANY'
initial['action'] = initial['action'].upper()
return initial
class UpdatePolicyView(forms.ModalFormView):
form_class = UpdatePolicy
form_id = "update_policy_form"
template_name = "project/firewalls/updatepolicy.html"
context_object_name = 'policy'
modal_header = _("Edit Policy")
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:updatepolicy"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Edit Policy {{ name }}")
def get_context_data(self, **kwargs):
context = super(UpdatePolicyView, self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
args = (self.kwargs['policy_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
policy_id = self.kwargs['policy_id']
try:
policy = api.fwaas.policy_get(self.request, policy_id)
return policy
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
return initial
class UpdateFirewallView(forms.ModalFormView):
form_class = UpdateFirewall
form_id = "update_firewall_form"
template_name = "project/firewalls/updatefirewall.html"
context_object_name = 'firewall'
modal_header = _("Edit Firewall")
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:updatefirewall"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Edit Firewall {{ name }}")
def get_context_data(self, **kwargs):
context = super(UpdateFirewallView, self).get_context_data(**kwargs)
context["firewall_id"] = self.kwargs['firewall_id']
args = (self.kwargs['firewall_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
firewall_id = self.kwargs['firewall_id']
try:
firewall = api.fwaas.firewall_get(self.request,
firewall_id)
return firewall
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve firewall details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
firewall = self._get_object()
initial = firewall.get_dict()
return initial
class InsertRuleToPolicyView(forms.ModalFormView):
form_class = InsertRuleToPolicy
form_id = "update_policy_form"
modal_header = _("Insert Rule into Policy")
template_name = "project/firewalls/insert_rule_to_policy.html"
context_object_name = 'policy'
submit_url = "horizon:project:firewalls:insertrule"
submit_label = _("Save Changes")
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Insert Rule to Policy")
def get_context_data(self, **kwargs):
context = super(InsertRuleToPolicyView,
self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
args = (self.kwargs['policy_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
policy_id = self.kwargs['policy_id']
try:
policy = api.fwaas.policy_get(self.request, policy_id)
return policy
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
initial['policy_id'] = initial['id']
return initial
class RemoveRuleFromPolicyView(forms.ModalFormView):
form_class = RemoveRuleFromPolicy
form_id = "update_policy_form"
modal_header = _("Remove Rule from Policy")
template_name = "project/firewalls/remove_rule_from_policy.html"
context_object_name = 'policy'
submit_label = _("Save Changes")
submit_url = "horizon:project:firewalls:removerule"
success_url = reverse_lazy("horizon:project:firewalls:index")
page_title = _("Remove Rule from Policy")
def get_context_data(self, **kwargs):
context = super(RemoveRuleFromPolicyView,
self).get_context_data(**kwargs)
context["policy_id"] = self.kwargs['policy_id']
args = (self.kwargs['policy_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
policy_id = self.kwargs['policy_id']
try:
policy = api.fwaas.policy_get(self.request, policy_id)
return policy
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve policy details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
policy = self._get_object()
initial = policy.get_dict()
initial['policy_id'] = initial['id']
return initial
class RouterCommonView(forms.ModalFormView):
form_id = "update_firewall_form"
context_object_name = 'firewall'
submit_label = _("Save Changes")
success_url = reverse_lazy("horizon:project:firewalls:index")
def get_context_data(self, **kwargs):
context = super(RouterCommonView,
self).get_context_data(**kwargs)
context["firewall_id"] = self.kwargs['firewall_id']
args = (self.kwargs['firewall_id'],)
context['submit_url'] = reverse(self.submit_url, args=args)
obj = self._get_object()
if obj:
context['name'] = obj.name_or_id
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
firewall_id = self.kwargs['firewall_id']
try:
firewall = api.fwaas.firewall_get(self.request, firewall_id)
return firewall
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve firewall details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
firewall = self._get_object()
initial = firewall.get_dict()
return initial
class AddRouterToFirewallView(RouterCommonView):
form_class = AddRouterToFirewall
modal_header = _("Add Router to Firewall")
template_name = "project/firewalls/add_router_to_firewall.html"
submit_url = "horizon:project:firewalls:addrouter"
page_title = _("Add Router to Firewall")
class RemoveRouterFromFirewallView(RouterCommonView):
form_class = RemoveRouterFromFirewall
modal_header = _("Remove Router from Firewall")
template_name = "project/firewalls/remove_router_from_firewall.html"
submit_url = "horizon:project:firewalls:removerouter"
page_title = _("Remove Router from Firewall")
|
frewsxcv/keyczar
|
refs/heads/master
|
cpp/src/tools/scons/scons-local-1.2.0.d20090223/SCons/Tool/zip.py
|
19
|
"""SCons.Tool.zip
Tool-specific initialization for zip.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/zip.py 4043 2009/02/23 09:06:45 scons"
import os.path
import SCons.Action
import SCons.Builder
import SCons.Defaults
import SCons.Node.FS
import SCons.Util
try:
import zipfile
internal_zip = 1
except ImportError:
internal_zip = 0
if internal_zip:
zipcompression = zipfile.ZIP_DEFLATED
def zip(target, source, env):
def visit(arg, dirname, names):
for name in names:
path = os.path.join(dirname, name)
if os.path.isfile(path):
arg.write(path)
compression = env.get('ZIPCOMPRESSION', 0)
zf = zipfile.ZipFile(str(target[0]), 'w', compression)
for s in source:
if s.isdir():
os.path.walk(str(s), visit, zf)
else:
zf.write(str(s))
zf.close()
else:
zipcompression = 0
zip = "$ZIP $ZIPFLAGS ${TARGET.abspath} $SOURCES"
zipAction = SCons.Action.Action(zip, varlist=['ZIPCOMPRESSION'])
ZipBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'),
source_factory = SCons.Node.FS.Entry,
source_scanner = SCons.Defaults.DirScanner,
suffix = '$ZIPSUFFIX',
multi = 1)
def generate(env):
"""Add Builders and construction variables for zip to an Environment."""
try:
bld = env['BUILDERS']['Zip']
except KeyError:
bld = ZipBuilder
env['BUILDERS']['Zip'] = bld
env['ZIP'] = 'zip'
env['ZIPFLAGS'] = SCons.Util.CLVar('')
env['ZIPCOM'] = zipAction
env['ZIPCOMPRESSION'] = zipcompression
env['ZIPSUFFIX'] = '.zip'
def exists(env):
return internal_zip or env.Detect('zip')
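# Illustrative usage from an SConstruct (a sketch, not part of this module):
#
#   env = Environment(tools=['zip'])
#   env.Zip('archive.zip', ['file1.txt', 'docs'])
#
# When the zipfile module is importable, the Python zip() action above is
# used; otherwise the external 'zip' command is invoked.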
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
gsanchez1117/test2
|
refs/heads/gabby-branch
|
lib/flask/testsuite/test_apps/main_app.py
|
614
|
import flask
# Test Flask initialization with main module.
app = flask.Flask('__main__')
|
Chicknstwe/Text-encryption
|
refs/heads/master
|
encryption v0.2.py
|
1
|
"""
MIT License
Copyright (c) 2017 Demetrio Carmona Derqui
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from random import randint
class Encryption(object):
def __init__(self, words='&', key='&'):
'''
Initialize a new en/decryption.
        words: string. '&' by default. Words to encrypt/decrypt.
        key: string. '&' by default.
char_dic and val_dic: dictionaries used in en/decryption.
'''
self.words = str(words).lower()
self.key = str(key)
self.char_dic = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7,
'i': 8, 'j': 9, 'k': 10, 'l': 11, 'm': 12, 'n': 13, 'o': 14,
'p': 15, 'q': 16, 'r': 17, 's': 18, 't': 19, 'u': 20, 'v': 21,
'w': 22, 'x': 23, 'y': 24, 'z': 25, 'ñ': 26, 'ç': 27, ' ': 28,
'0': 29, '1': 30, '2': 31, '3': 32, '4': 33, '5': 34, '6': 35,
'7': 36, '8': 37, '9': 38, '!': 39, '¡': 40, '¿': 41, '?': 42,
'º':43, 'ª': 44, '%': 45, '@': 46, '€': 47, '$': 48, '(': 49, ')': 50}
self.val_dic = {'0': 'a', '1': 'b', '2': 'c', '3': 'd', '4': 'e', '5': 'f',
'6': 'g', '7': 'h', '8': 'i', '9': 'j', '10': 'k', '11': 'l',
'12': 'm', '13': 'n', '14': 'o', '15': 'p', '16': 'q', '17': 'r',
'18': 's', '19': 't', '20': 'u', '21': 'v', '22': 'w', '23': 'x',
'24': 'y', '25': 'z', '26': 'ñ', '27': 'ç', '28': ' ', '29': '0',
'30': '1', '31': '2', '32': '3', '33': '4', '34': '5', '35': '6',
'36': '7', '37': '8', '38': '9', '39': '!', '40': '¡', '41': '¿',
'42': '?', '43': 'º', '44': 'ª', '45': '%', '46': '@', '47': '€',
'48': '$', '49': '(', '50': ')'}
self.enc_msg = ''
self.dec_msg = ''
self.method = 'none'
def __str__(self):
return 'Input Words: ' + str(self.words) + '\nInput key: ' + str(self.key)
def getKey(self):
return self.key
def getWords(self):
return self.words
def getEncMesg(self):
return self.enc_msg
def getDecMsg(self):
return self.dec_msg
def getMethod(self):
return self.method
def swap(self):
if self.dec_msg != '':
self.words = str(self.dec_msg)
return True
else:
print('You must decrypt a message before using swap().')
def setKey(self, newKey):
'''
Setter for self.key.
newKey: string.
'''
allowed = True
for char in newKey:
if char not in self.char_dic.keys():
allowed = False
print('Error: the string contains one or more unallowed characters.')
break
if allowed:
self.key = str(newKey)
print('Key has been changed.')
def length(self):
print('Words length: ' + str(len(str(self.words))) + '\nKey length: ' + str(len(str(self.key))) + '\nDics length: ' + str(len(self.char_dic)) + ' | ' + str(len(self.val_dic)))
def setWords(self, newWords):
'''
Setter for self.words.
newWords: string.
'''
allowed = True
for char in newWords:
if char not in self.char_dic.keys():
allowed = False
print('Error: the string contains one or more unallowed characters.')
break
if allowed:
self.words = str(newWords)
print('Words have been changed.')
def charsAllowed(self):
'''
Return a string with all characters allowed to be en/decrypted.
'''
output = ''
for i in self.char_dic.keys():
output = output + str(i)
print('Allowed characters: ' + output)
def caesar_enc(self):
'''
Caesar encryption adds the 'index' of each input's character in the
dictionary (char_dic) to the 'index' of the key (that must be a string
with length 1) to get each encrypted character.
        Prints the encrypted string, or an error if the key's length is
        different from 1.
'''
if self.key not in self.char_dic.keys():
# The program can generate a random key if user doesn't provide it
# or it is not valid.
self.key = self.val_dic[str(randint(0, 50))]
print('Random key generated: ' + self.key)
if len(self.key) == 1:
output = ''
for char in self.words.lower():
newKey = self.char_dic[char] + self.char_dic[self.key]
if newKey >= len(self.val_dic):
newKey = newKey - len(self.val_dic)
output = output + self.val_dic[str(newKey)]
self.enc_msg = output
print('Encrypted output:' + output)
self.method = 'Caesar'
else:
            print('Error: wrong key length.\nInfo: Caesar encryption\'s key must be a single character.')
def caesar_dec(self):
'''
        Caesar decryption subtracts the 'index' of the key from each input
        character's 'index' to get each decrypted character.
        Prints a string with decrypted characters, or an error if the key's
        length differs from 1 or the key isn't in the dictionary.
'''
if self.method != 'Caesar' and self.method != 'none':
print('Error: the words must be encrypted with Caesar method before.')
else:
            if len(self.enc_msg) > 0:
                rec = input('There\'s an encrypted message stored in this object. Do you want to use it for decryption? y/n')
                if rec == 'y':
                    msg_input = self.enc_msg
                else:
                    msg_input = self.words
            else:
                # No stored encrypted message: decrypt the input words.
                msg_input = self.words
if len(self.key) == 1 and self.key in self.char_dic.keys():
output = ''
for char in msg_input.lower():
newKey = self.char_dic[char] - self.char_dic[self.key]
if newKey < 0:
newKey = len(self.char_dic) + newKey
output = output + self.val_dic[str(newKey)]
                self.dec_msg = output
                print('Decrypted output: ' + output)
else:
                print('Error: wrong key.\nInfo: Caesar decryption\'s key must be a single character.\nUse charsAllowed method to get all allowed characters.')
def oneTimePad_enc(self):
'''
        One-Time Pad encryption adds the 'index' of each input character in the
        dictionary (char_dic) to the 'index' of the corresponding key character
        (the key must be a string of the same length as the input words) to get
        each encrypted character.
        Prints the encrypted string, offering to generate a random key when the
        lengths differ.
'''
if len(self.key) != len(self.words):
# The program can generate a random key if user doesn't provide it
# or it is not valid.
cont = False
while not cont:
genKey = input('Key and input words lengths are different.\nDo you want to generate a random key? (y/n)')
if genKey == 'y':
self.key = ''
cont = True
while len(self.key) < len(self.words):
self.key = self.key + self.val_dic[str(randint(0, len(self.val_dic) - 1))]
print('Random key generated: ' + self.key)
elif genKey == 'n':
cont = True
print('Encryption was cancelled.')
if len(self.key) == len(self.words):
output = ''
i = 0
while i < len(self.words):
newKey = self.char_dic[self.words[i]] + self.char_dic[self.key[i]]
if newKey >= len(self.val_dic):
newKey = newKey - len(self.val_dic)
output = output + self.val_dic[str(newKey)]
i += 1
self.enc_msg = output
print('Encrypted output: ' + output)
self.method = 'One Time Pad'
def oneTimePad_dec(self):
'''
        One-Time Pad decryption subtracts the 'index' of each key character
        from the 'index' of the corresponding input character in the
        dictionary (char_dic); the key must be a string of the same length
        as the input words.
        Prints a string with decrypted characters, or an error if the key
        and input lengths differ.
'''
if self.method != 'One Time Pad' and self.method != 'none':
print('Error: the words must be encrypted with One Time Pad method before.')
else:
            if len(self.enc_msg) > 0:
                rec = input('There\'s an encrypted message stored in this object. Do you want to use it for decryption? y/n')
                if rec == 'y':
                    msg_input = self.enc_msg
                else:
                    msg_input = self.words
            else:
                # No stored encrypted message: decrypt the input words.
                msg_input = self.words
if len(self.key) == len(msg_input):
output = ''
i = 0
while i < len(msg_input):
newKey = self.char_dic[msg_input[i]] - self.char_dic[self.key[i]]
if newKey < 0:
newKey = len(self.val_dic) + newKey
output = output + self.val_dic[str(newKey)]
i += 1
                self.dec_msg = output
print('Decrypted output: ' + output)
else:
print('Error: words and key must have the same length.')
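# Illustrative usage (a sketch; assumes only the Encryption class above):
if __name__ == '__main__':
    caesar = Encryption('hello world', 'd')
    caesar.caesar_enc()           # prints the Caesar-shifted message
    otp = Encryption('hello world')
    otp.setKey('qwertyzxcvb')     # one key character per message character
    otp.oneTimePad_enc()          # prints the one-time-pad output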
|
HarryRybacki/osf.io
|
refs/heads/develop
|
website/addons/dropbox/settings/defaults.py
|
75
|
# OAuth app keys
DROPBOX_KEY = None
DROPBOX_SECRET = None
DROPBOX_AUTH_CSRF_TOKEN = 'dropbox-auth-csrf-token'
|
mayankcu/Django-social
|
refs/heads/master
|
venv/Lib/site-packages/django/test/utils.py
|
37
|
from __future__ import with_statement
import warnings
from django.conf import settings, UserSettingsHolder
from django.core import mail
from django.test.signals import template_rendered, setting_changed
from django.template import Template, loader, TemplateDoesNotExist
from django.template.loaders import cached
from django.utils.translation import deactivate
from django.utils.functional import wraps
__all__ = (
'Approximate', 'ContextList', 'get_runner', 'override_settings',
'setup_test_environment', 'teardown_test_environment',
)
RESTORE_LOADERS_ATTR = '_original_template_source_loaders'
class Approximate(object):
def __init__(self, val, places=7):
self.val = val
self.places = places
def __repr__(self):
return repr(self.val)
def __eq__(self, other):
if self.val == other:
return True
return round(abs(self.val-other), self.places) == 0
class ContextList(list):
"""A wrapper that provides direct key access to context items contained
in a list of context objects.
"""
def __getitem__(self, key):
if isinstance(key, basestring):
for subcontext in self:
if key in subcontext:
return subcontext[key]
raise KeyError(key)
else:
return super(ContextList, self).__getitem__(key)
def __contains__(self, key):
try:
value = self[key]
except KeyError:
return False
return True
def instrumented_test_render(self, context):
"""
An instrumented Template render method, providing a signal
that can be intercepted by the test system Client
"""
template_rendered.send(sender=self, template=self, context=context)
return self.nodelist.render(context)
def setup_test_environment():
"""Perform any global pre-test setup. This involves:
- Installing the instrumented test renderer
- Set the email backend to the locmem email backend.
- Setting the active locale to match the LANGUAGE_CODE setting.
"""
Template.original_render = Template._render
Template._render = instrumented_test_render
mail.original_email_backend = settings.EMAIL_BACKEND
settings.EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
mail.outbox = []
deactivate()
def teardown_test_environment():
"""Perform any global post-test teardown. This involves:
- Restoring the original test renderer
- Restoring the email sending functions
"""
Template._render = Template.original_render
del Template.original_render
settings.EMAIL_BACKEND = mail.original_email_backend
del mail.original_email_backend
del mail.outbox
def get_warnings_state():
"""
Returns an object containing the state of the warnings module
"""
# There is no public interface for doing this, but this implementation of
# get_warnings_state and restore_warnings_state appears to work on Python
# 2.4 to 2.7.
return warnings.filters[:]
def restore_warnings_state(state):
"""
Restores the state of the warnings module when passed an object that was
returned by get_warnings_state()
"""
warnings.filters = state[:]
def get_runner(settings, test_runner_class=None):
if not test_runner_class:
test_runner_class = settings.TEST_RUNNER
test_path = test_runner_class.split('.')
# Allow for Python 2.5 relative paths
if len(test_path) > 1:
test_module_name = '.'.join(test_path[:-1])
else:
test_module_name = '.'
test_module = __import__(test_module_name, {}, {}, test_path[-1])
test_runner = getattr(test_module, test_path[-1])
return test_runner
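# Illustrative usage (a sketch): with TEST_RUNNER set to
# 'django.test.simple.DjangoTestSuiteRunner', get_runner(settings) imports
# django.test.simple and returns the DjangoTestSuiteRunner class.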
def setup_test_template_loader(templates_dict, use_cached_loader=False):
"""
Changes Django to only find templates from within a dictionary (where each
key is the template name and each value is the corresponding template
content to return).
Use meth:`restore_template_loaders` to restore the original loaders.
"""
if hasattr(loader, RESTORE_LOADERS_ATTR):
raise Exception("loader.%s already exists" % RESTORE_LOADERS_ATTR)
def test_template_loader(template_name, template_dirs=None):
"A custom template loader that loads templates from a dictionary."
try:
return (templates_dict[template_name], "test:%s" % template_name)
except KeyError:
raise TemplateDoesNotExist(template_name)
if use_cached_loader:
template_loader = cached.Loader(('test_template_loader',))
template_loader._cached_loaders = (test_template_loader,)
else:
template_loader = test_template_loader
setattr(loader, RESTORE_LOADERS_ATTR, loader.template_source_loaders)
loader.template_source_loaders = (template_loader,)
return template_loader
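# Illustrative usage (a sketch):
#
#   setup_test_template_loader({'index.html': 'Hello {{ name }}!'})
#   try:
#       ...  # rendering 'index.html' now uses the dictionary entry
#   finally:
#       restore_template_loaders()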
def restore_template_loaders():
"""
Restores the original template loaders after
:meth:`setup_test_template_loader` has been run.
"""
loader.template_source_loaders = getattr(loader, RESTORE_LOADERS_ATTR)
delattr(loader, RESTORE_LOADERS_ATTR)
class override_settings(object):
"""
Acts as either a decorator, or a context manager. If it's a decorator it
takes a function and returns a wrapped function. If it's a contextmanager
it's used with the ``with`` statement. In either event entering/exiting
are called before and after, respectively, the function/block is executed.
"""
def __init__(self, **kwargs):
self.options = kwargs
self.wrapped = settings._wrapped
def __enter__(self):
self.enable()
def __exit__(self, exc_type, exc_value, traceback):
self.disable()
def __call__(self, test_func):
from django.test import TransactionTestCase
if isinstance(test_func, type) and issubclass(test_func, TransactionTestCase):
original_pre_setup = test_func._pre_setup
original_post_teardown = test_func._post_teardown
def _pre_setup(innerself):
self.enable()
original_pre_setup(innerself)
def _post_teardown(innerself):
original_post_teardown(innerself)
self.disable()
test_func._pre_setup = _pre_setup
test_func._post_teardown = _post_teardown
return test_func
else:
@wraps(test_func)
def inner(*args, **kwargs):
with self:
return test_func(*args, **kwargs)
return inner
def enable(self):
override = UserSettingsHolder(settings._wrapped)
for key, new_value in self.options.items():
setattr(override, key, new_value)
settings._wrapped = override
for key, new_value in self.options.items():
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value)
def disable(self):
settings._wrapped = self.wrapped
for key in self.options:
new_value = getattr(settings, key, None)
setting_changed.send(sender=settings._wrapped.__class__,
setting=key, value=new_value)
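# Illustrative usage (a sketch):
#
#   @override_settings(LOGIN_URL='/other/login/')
#   class LoginTests(TransactionTestCase):
#       ...
#
#   with override_settings(USE_TZ=True):
#       ...  # code in this block sees the overridden setting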
|
GregorCH/ipet
|
refs/heads/master
|
ipet/concepts/Observer.py
|
1
|
"""
The MIT License (MIT)
Copyright (c) 2018 Zuse Institute Berlin, www.zib.de
Permissions are granted as stated in the license file you have obtained
with this software. If you find the library useful for your purpose,
please refer to README.md for how to cite IPET.
@author: Gregor Hendel
"""
class Observable:
"""
    Observables represent the subject in the Observer pattern. They can notify their
    observers and invoke an update every time they change their state.
"""
observermap = {} # : map which has observables as keys, and observers as values
def addObserver(self, newobserver):
"""
add an observer to the list of observers
"""
theset = Observable.observermap.setdefault(self, set())
theset.add(newobserver)
def removeObserver(self, observer):
"""
removes an observer from the list of observers
"""
theset = Observable.observermap.get(self)
if theset is not None:
theset.remove(observer)
def notify(self, *args):
"""
notify all observers about changes in the state
"""
theset = Observable.observermap.setdefault(self, set())
for observer in theset:
observer.update(*args)
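# Illustrative usage (a sketch; an observer only needs an update(*args)
# method, which is assumed here rather than defined by this module):
if __name__ == '__main__':
    class PrintingObserver:
        def update(self, *args):
            print("observable changed:", args)

    subject = Observable()
    subject.addObserver(PrintingObserver())
    subject.notify("new state")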
|
naziris/HomeSecPi
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/html5lib/__init__.py
|
196
|
"""
HTML parsing library based on the WHATWG "HTML5"
specification. The parser is designed to be compatible with existing
HTML found in the wild and implements well-defined error recovery that
is largely compatible with modern desktop web browsers.
Example usage:
import html5lib
f = open("my_document.html")
tree = html5lib.parse(f)
"""
from __future__ import absolute_import, division, unicode_literals
from .html5parser import HTMLParser, parse, parseFragment
from .treebuilders import getTreeBuilder
from .treewalkers import getTreeWalker
from .serializer import serialize
__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder",
"getTreeWalker", "serialize"]
__version__ = "1.0b3"
|
pentestfail/TA-FireEye_TAP
|
refs/heads/master
|
bin/ta_fireeye_tap/requests/packages/chardet/sbcharsetprober.py
|
2926
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
SYMBOL_CAT_ORDER = 250
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
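# Each model dict is expected to provide (as used below): 'charsetName',
# 'keepEnglishLetter', 'charToOrderMap' (byte value -> frequency order),
# 'precedenceMatrix' (sequence category lookup) and 'mTypicalPositiveRatio'.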
class SingleByteCharSetProber(CharSetProber):
def __init__(self, model, reversed=False, nameProber=None):
CharSetProber.__init__(self)
self._mModel = model
# TRUE if we need to reverse every pair in the model lookup
self._mReversed = reversed
# Optional auxiliary prober for name decision
self._mNameProber = nameProber
self.reset()
def reset(self):
CharSetProber.reset(self)
# char order of last character
self._mLastOrder = 255
self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
self._mTotalSeqs = 0
self._mTotalChar = 0
# characters that fall in our sampling range
self._mFreqChar = 0
def get_charset_name(self):
if self._mNameProber:
return self._mNameProber.get_charset_name()
else:
return self._mModel['charsetName']
def feed(self, aBuf):
if not self._mModel['keepEnglishLetter']:
aBuf = self.filter_without_english_letters(aBuf)
aLen = len(aBuf)
if not aLen:
return self.get_state()
for c in aBuf:
order = self._mModel['charToOrderMap'][wrap_ord(c)]
if order < SYMBOL_CAT_ORDER:
self._mTotalChar += 1
if order < SAMPLE_SIZE:
self._mFreqChar += 1
if self._mLastOrder < SAMPLE_SIZE:
self._mTotalSeqs += 1
if not self._mReversed:
i = (self._mLastOrder * SAMPLE_SIZE) + order
model = self._mModel['precedenceMatrix'][i]
else: # reverse the order of the letters in the lookup
i = (order * SAMPLE_SIZE) + self._mLastOrder
model = self._mModel['precedenceMatrix'][i]
self._mSeqCounters[model] += 1
self._mLastOrder = order
if self.get_state() == constants.eDetecting:
if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
cf = self.get_confidence()
if cf > POSITIVE_SHORTCUT_THRESHOLD:
if constants._debug:
                        sys.stderr.write('%s confidence = %s, we have a '
                                         'winner\n' %
                                         (self._mModel['charsetName'], cf))
self._mState = constants.eFoundIt
elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
if constants._debug:
                        sys.stderr.write('%s confidence = %s, below negative '
                                         'shortcut threshold %s\n' %
(self._mModel['charsetName'], cf,
NEGATIVE_SHORTCUT_THRESHOLD))
self._mState = constants.eNotMe
return self.get_state()
def get_confidence(self):
r = 0.01
if self._mTotalSeqs > 0:
r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
/ self._mModel['mTypicalPositiveRatio'])
r = r * self._mFreqChar / self._mTotalChar
if r >= 1.0:
r = 0.99
return r
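# --- Hedged usage sketch (not part of the original module) ---
# This prober is normally driven by SBCSGroupProber together with prebuilt
# language models. A minimal manual run, assuming the bundled
# Win1251CyrillicModel is importable from chardet.langcyrillicmodel:
#
#     from chardet.langcyrillicmodel import Win1251CyrillicModel
#     prober = SingleByteCharSetProber(Win1251CyrillicModel)
#     prober.feed(some_windows_1251_bytes)
#     print(prober.get_charset_name(), prober.get_confidence())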
|
shastikk/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/krasview.py
|
85
|
# encoding: utf-8
from __future__ import unicode_literals
import json
from .common import InfoExtractor
from ..utils import (
int_or_none,
js_to_json,
)
class KrasViewIE(InfoExtractor):
IE_DESC = 'Красвью'
_VALID_URL = r'https?://krasview\.ru/(?:video|embed)/(?P<id>\d+)'
_TEST = {
'url': 'http://krasview.ru/video/512228',
'md5': '3b91003cf85fc5db277870c8ebd98eae',
'info_dict': {
'id': '512228',
'ext': 'mp4',
'title': 'Снег, лёд, заносы',
'description': 'Снято в городе Нягань, в Ханты-Мансийском автономном округе.',
'duration': 27,
'thumbnail': 're:^https?://.*\.jpg',
},
'params': {
'skip_download': 'Not accessible from Travis CI server',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
flashvars = json.loads(js_to_json(self._search_regex(
r'video_Init\(({.+?})', webpage, 'flashvars')))
video_url = flashvars['url']
title = self._og_search_title(webpage)
description = self._og_search_description(webpage, default=None)
thumbnail = flashvars.get('image') or self._og_search_thumbnail(webpage)
duration = int_or_none(flashvars.get('duration'))
width = int_or_none(self._og_search_property(
'video:width', webpage, 'video width', default=None))
height = int_or_none(self._og_search_property(
'video:height', webpage, 'video height', default=None))
return {
'id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'width': width,
'height': height,
}
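# --- Hedged usage note (not part of the extractor) ---
# youtube-dl dispatches to this class via _VALID_URL, so the extractor is
# exercised end to end with e.g.:
#     youtube-dl http://krasview.ru/video/512228
# (the same URL used in _TEST above).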
|
dlazz/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/panos/panos_sag.py
|
18
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_sag
short_description: Create a static address group.
description:
- Create a static address group object in the firewall used for policy rules.
author: "Vinay Venkataraghavan (@vinayvenkat)"
version_added: "2.4"
requirements:
- pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/)
- pandevice can be obtained from PyPI U(https://pypi.org/project/pandevice/)
- xmltodict can be obtained from PyPI U(https://pypi.org/project/xmltodict/)
options:
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
sag_name:
description:
        - Name of the static address group.
required: true
static_match_filter:
description:
        - Static match filter used by the address group.
required: true
devicegroup:
description: >
- The name of the Panorama device group. The group must exist on Panorama. If device group is not defined
it is assumed that we are contacting a firewall.
description:
description:
- The purpose / objective of the static Address Group
tags:
description:
- Tags to be associated with the address group
commit:
description:
- commit if changed
type: bool
default: 'yes'
operation:
description:
        - The operation to perform. Supported values are I(add)/I(list)/I(delete).
required: true
extends_documentation_fragment: panos
'''
EXAMPLES = '''
- name: sag
panos_sag:
ip_address: "192.168.1.1"
password: "admin"
sag_name: "sag-1"
    sag_match_filter: ['test-addresses', ]
    operation: 'add'
description: "A description for the static address group"
tags: ["tags to be associated with the group", ]
'''
RETURN = '''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
try:
from pandevice import base
from pandevice import firewall
from pandevice import panorama
from pandevice import objects
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_devicegroup(device, devicegroup):
dg_list = device.refresh_devices()
for group in dg_list:
if isinstance(group, panorama.DeviceGroup):
if group.name == devicegroup:
return group
return False
def find_object(device, dev_group, obj_name, obj_type):
# Get the firewall objects
obj_type.refreshall(device)
if isinstance(device, firewall.Firewall):
addr = device.find(obj_name, obj_type)
return addr
elif isinstance(device, panorama.Panorama):
addr = device.find(obj_name, obj_type)
if addr is None:
if dev_group:
device.add(dev_group)
obj_type.refreshall(dev_group)
addr = dev_group.find(obj_name, obj_type)
return addr
else:
return False
def create_address_group_object(**kwargs):
"""
Create an Address object
:param kwargs: key word arguments to instantiate AddressGroup object
@return False or ```objects.AddressObject```
"""
ad_object = objects.AddressGroup(
name=kwargs['address_gp_name'],
static_value=kwargs['sag_match_filter'],
description=kwargs['description'],
tag=kwargs['tag_name']
)
if ad_object.static_value or ad_object.dynamic_value:
return ad_object
else:
return None
def add_address_group(device, dev_group, ag_object):
"""
Create a new dynamic address group object on the
PAN FW.
:param device: Firewall Handle
:param dev_group: Panorama device group
:param ag_object: Address group object
"""
if dev_group:
dev_group.add(ag_object)
else:
device.add(ag_object)
exc = None
try:
ag_object.create()
except Exception as exc:
return False, exc
return True, exc
def delete_address_group(device, dev_group, obj_name):
"""
:param device:
:param dev_group:
:param obj_name:
:return:
"""
static_obj = find_object(device, dev_group, obj_name, objects.AddressGroup)
# If found, delete it
if static_obj:
try:
static_obj.delete()
except Exception as exc:
return False, exc
return True, None
else:
return False, None
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True),
username=dict(default='admin'),
api_key=dict(no_log=True),
sag_match_filter=dict(type='list', required=False),
sag_name=dict(required=True),
commit=dict(type='bool', default=True),
devicegroup=dict(default=None),
description=dict(default=None),
tags=dict(type='list', default=[]),
operation=dict(type='str', required=True, choices=['add', 'list', 'delete'])
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
required_one_of=[['api_key', 'password']])
if not HAS_LIB:
        module.fail_json(msg='pandevice is required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
api_key = module.params['api_key']
operation = module.params['operation']
ag_object = create_address_group_object(address_gp_name=module.params.get('sag_name', None),
sag_match_filter=module.params.get('sag_match_filter', None),
description=module.params.get('description', None),
tag_name=module.params.get('tags', None)
)
commit = module.params['commit']
devicegroup = module.params['devicegroup']
# Create the device with the appropriate pandevice type
device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
# If Panorama, validate the devicegroup
dev_group = None
if devicegroup and isinstance(device, panorama.Panorama):
dev_group = get_devicegroup(device, devicegroup)
if dev_group:
device.add(dev_group)
else:
module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup)
if operation == 'add':
result, exc = add_address_group(device, dev_group, ag_object)
if result and commit:
try:
device.commit(sync=True)
except Exception as exc:
module.fail_json(msg=to_native(exc))
elif operation == 'delete':
obj_name = module.params.get('sag_name', None)
result, exc = delete_address_group(device, dev_group, obj_name)
if not result and exc:
            module.fail_json(msg=to_native(exc))
elif not result:
module.fail_json(msg="Specified object not found.")
else:
module.fail_json(changed=False, msg="Unsupported option.")
module.exit_json(changed=True, msg="Address Group Operation Completed.")
if __name__ == '__main__':
main()
|
kressi/erpnext
|
refs/heads/develop
|
erpnext/schools/doctype/fee_category/test_fee_category.py
|
54
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Fee Category')
class TestFeeCategory(unittest.TestCase):
pass
|
takeshineshiro/django
|
refs/heads/master
|
tests/migrations/test_migrations_squashed_complex/3_squashed_5.py
|
770
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
replaces = [
("migrations", "3_auto"),
("migrations", "4_auto"),
("migrations", "5_auto"),
]
dependencies = [("migrations", "2_auto")]
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
yangming85/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/tests/regressiontests/special_headers/models.py
|
103
|
from django.db import models
class Article(models.Model):
text = models.TextField()
|
hvnsweeting/Diamond
|
refs/heads/master
|
src/collectors/slabinfo/slabinfo.py
|
54
|
# coding=utf-8
"""
The SlabInfoCollector collects metrics on process stats from
/proc/slabinfo
#### Dependencies
* /proc/slabinfo
"""
import platform
import os
import diamond.collector
# Detect the architecture of the system
# and set the counters for MAX_VALUES
# appropriately. Otherwise, rolling over
# counters will cause incorrect or
# negative values.
if platform.architecture()[0] == '64bit':
counter = (2 ** 64) - 1
else:
counter = (2 ** 32) - 1
class SlabInfoCollector(diamond.collector.Collector):
PROC = '/proc/slabinfo'
def get_default_config_help(self):
config_help = super(SlabInfoCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(SlabInfoCollector, self).get_default_config()
config.update({
'path': 'slabinfo'
})
return config
def collect(self):
"""
Collect process stat data
"""
if not os.access(self.PROC, os.R_OK):
return False
# Open PROC file
file = open(self.PROC, 'r')
# Get data
for line in file:
if line.startswith('slabinfo'):
continue
if line.startswith('#'):
keys = line.split()[1:]
continue
data = line.split()
for key in ['<active_objs>', '<num_objs>', '<objsize>',
'<objperslab>', '<pagesperslab>']:
i = keys.index(key)
metric_name = data[0] + '.' + key.replace(
'<', '').replace('>', '')
metric_value = int(data[i])
self.publish(metric_name, metric_value)
for key in ['<limit>', '<batchcount>', '<sharedfactor>']:
i = keys.index(key)
metric_name = data[0] + '.tunables.' + key.replace(
'<', '').replace('>', '')
metric_value = int(data[i])
self.publish(metric_name, metric_value)
for key in ['<active_slabs>', '<num_slabs>', '<sharedavail>']:
i = keys.index(key)
metric_name = data[0] + '.slabdata.' + key.replace(
'<', '').replace('>', '')
metric_value = int(data[i])
self.publish(metric_name, metric_value)
# Close file
file.close()
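# --- Hedged illustration (not part of the collector) ---
# A /proc/slabinfo (version 2.1) entry looks roughly like:
#   # name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> : tunables <limit> <batchcount> <sharedfactor> : slabdata <active_slabs> <num_slabs> <sharedavail>
#   dentry 250890 266154 192 21 1 : tunables 0 0 0 : slabdata 12674 12674 0
# which collect() above would publish as metrics such as 'dentry.active_objs',
# 'dentry.tunables.limit' and 'dentry.slabdata.num_slabs'.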
|
sambyers/o365_fmc
|
refs/heads/master
|
.venv/lib/python3.6/site-packages/setuptools/namespaces.py
|
196
|
import os
from distutils import log
import itertools
from setuptools.extern.six.moves import map
flatten = itertools.chain.from_iterable
class Installer:
nspkg_ext = '-nspkg.pth'
def install_namespaces(self):
nsp = self._get_all_ns_packages()
if not nsp:
return
filename, ext = os.path.splitext(self._get_target())
filename += self.nspkg_ext
self.outputs.append(filename)
log.info("Installing %s", filename)
lines = map(self._gen_nspkg_line, nsp)
if self.dry_run:
# always generate the lines, even in dry run
list(lines)
return
with open(filename, 'wt') as f:
f.writelines(lines)
def uninstall_namespaces(self):
filename, ext = os.path.splitext(self._get_target())
filename += self.nspkg_ext
if not os.path.exists(filename):
return
log.info("Removing %s", filename)
os.remove(filename)
def _get_target(self):
return self.target
_nspkg_tmpl = (
"import sys, types, os",
"has_mfs = sys.version_info > (3, 5)",
"p = os.path.join(%(root)s, *%(pth)r)",
"importlib = has_mfs and __import__('importlib.util')",
"has_mfs and __import__('importlib.machinery')",
"m = has_mfs and "
"sys.modules.setdefault(%(pkg)r, "
"importlib.util.module_from_spec("
"importlib.machinery.PathFinder.find_spec(%(pkg)r, "
"[os.path.dirname(p)])))",
"m = m or "
"sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))",
"mp = (m or []) and m.__dict__.setdefault('__path__',[])",
"(p not in mp) and mp.append(p)",
)
"lines for the namespace installer"
_nspkg_tmpl_multi = (
'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
)
"additional line(s) when a parent package is indicated"
def _get_root(self):
return "sys._getframe(1).f_locals['sitedir']"
def _gen_nspkg_line(self, pkg):
# ensure pkg is not a unicode string under Python 2.7
pkg = str(pkg)
pth = tuple(pkg.split('.'))
root = self._get_root()
tmpl_lines = self._nspkg_tmpl
parent, sep, child = pkg.rpartition('.')
if parent:
tmpl_lines += self._nspkg_tmpl_multi
return ';'.join(tmpl_lines) % locals() + '\n'
def _get_all_ns_packages(self):
"""Return sorted list of all package namespaces"""
pkgs = self.distribution.namespace_packages or []
return sorted(flatten(map(self._pkg_names, pkgs)))
@staticmethod
def _pkg_names(pkg):
"""
Given a namespace package, yield the components of that
package.
>>> names = Installer._pkg_names('a.b.c')
>>> set(names) == set(['a', 'a.b', 'a.b.c'])
True
"""
parts = pkg.split('.')
while parts:
yield '.'.join(parts)
parts.pop()
class DevelopInstaller(Installer):
def _get_root(self):
return repr(str(self.egg_path))
def _get_target(self):
return self.egg_link
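# --- Hedged usage note (not part of this module) ---
# Installer and DevelopInstaller are mixins; setuptools' install_egg_info and
# develop commands are understood to inherit from them, which is where
# self.target / self.egg_link, self.outputs and self.distribution come from.
# One small behaviour that can be checked directly:
#
#     list(Installer._pkg_names('a.b.c'))   # -> ['a.b.c', 'a.b', 'a']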
|
runekaagaard/django-contrib-locking
|
refs/heads/master
|
django/contrib/gis/management/commands/ogrinspect.py
|
111
|
import argparse
import inspect
from django.contrib.gis import gdal
from django.core.management.base import BaseCommand, CommandError
class LayerOptionAction(argparse.Action):
"""
Custom argparse action for the `ogrinspect` `layer_key` keyword option
which may be an integer or a string.
"""
def __call__(self, parser, namespace, value, option_string=None):
try:
setattr(namespace, self.dest, int(value))
except ValueError:
setattr(namespace, self.dest, value)
class ListOptionAction(argparse.Action):
"""
Custom argparse action for `ogrinspect` keywords that require
a string list. If the string is 'True'/'true' then the option
value will be a boolean instead.
"""
def __call__(self, parser, namespace, value, option_string=None):
if value.lower() == 'true':
setattr(namespace, self.dest, True)
else:
setattr(namespace, self.dest, value.split(','))
class Command(BaseCommand):
help = ('Inspects the given OGR-compatible data source (e.g., a shapefile) and outputs\n'
'a GeoDjango model with the given model name. For example:\n'
' ./manage.py ogrinspect zipcode.shp Zipcode')
requires_system_checks = False
def add_arguments(self, parser):
parser.add_argument('data_source', help='Path to the data source.')
parser.add_argument('model_name', help='Name of the model to create.')
parser.add_argument('--blank', dest='blank',
action=ListOptionAction, default=False,
help='Use a comma separated list of OGR field names to add '
'the `blank=True` option to the field definition. Set to `true` '
'to apply to all applicable fields.')
parser.add_argument('--decimal', dest='decimal',
action=ListOptionAction, default=False,
help='Use a comma separated list of OGR float fields to '
'generate `DecimalField` instead of the default '
'`FloatField`. Set to `true` to apply to all OGR float fields.')
parser.add_argument('--geom-name', dest='geom_name', default='geom',
help='Specifies the model name for the Geometry Field '
'(defaults to `geom`)')
parser.add_argument('--layer', dest='layer_key',
action=LayerOptionAction, default=0,
help='The key for specifying which layer in the OGR data '
'source to use. Defaults to 0 (the first layer). May be '
'an integer or a string identifier for the layer.')
parser.add_argument('--multi-geom', action='store_true',
dest='multi_geom', default=False,
help='Treat the geometry in the data source as a geometry collection.')
parser.add_argument('--name-field', dest='name_field',
help='Specifies a field name to return for the `__unicode__`/`__str__` function.')
parser.add_argument('--no-imports', action='store_false', dest='imports', default=True,
help='Do not include `from django.contrib.gis.db import models` statement.')
parser.add_argument('--null', dest='null', action=ListOptionAction, default=False,
help='Use a comma separated list of OGR field names to add '
'the `null=True` option to the field definition. Set to `true` '
'to apply to all applicable fields.')
parser.add_argument('--srid', dest='srid',
help='The SRID to use for the Geometry Field. If it can be '
'determined, the SRID of the data source is used.')
parser.add_argument('--mapping', action='store_true', dest='mapping',
help='Generate mapping dictionary for use with `LayerMapping`.')
def handle(self, *args, **options):
data_source, model_name = options.pop('data_source'), options.pop('model_name')
if not gdal.HAS_GDAL:
raise CommandError('GDAL is required to inspect geospatial data sources.')
# Getting the OGR DataSource from the string parameter.
try:
ds = gdal.DataSource(data_source)
except gdal.GDALException as msg:
raise CommandError(msg)
# Returning the output of ogrinspect with the given arguments
# and options.
from django.contrib.gis.utils.ogrinspect import _ogrinspect, mapping
# Filter options to params accepted by `_ogrinspect`
ogr_options = {k: v for k, v in options.items()
if k in inspect.getargspec(_ogrinspect).args and v is not None}
output = [s for s in _ogrinspect(ds, model_name, **ogr_options)]
if options['mapping']:
# Constructing the keyword arguments for `mapping`, and
# calling it on the data source.
kwargs = {'geom_name': options['geom_name'],
'layer_key': options['layer_key'],
'multi_geom': options['multi_geom'],
}
mapping_dict = mapping(ds, **kwargs)
# This extra legwork is so that the dictionary definition comes
# out in the same order as the fields in the model definition.
rev_mapping = {v: k for k, v in mapping_dict.items()}
output.extend(['', '# Auto-generated `LayerMapping` dictionary for %s model' % model_name,
'%s_mapping = {' % model_name.lower()])
output.extend(" '%s' : '%s'," % (
rev_mapping[ogr_fld], ogr_fld) for ogr_fld in ds[options['layer_key']].fields
)
output.extend([" '%s' : '%s'," % (options['geom_name'], mapping_dict[options['geom_name']]), '}'])
return '\n'.join(output) + '\n'
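# --- Hedged usage sketch (not part of the command) ---
# Typical invocations, assuming a shapefile named zipcode.shp (paths and SRID
# below are illustrative only):
#     ./manage.py ogrinspect zipcode.shp Zipcode
#     ./manage.py ogrinspect zipcode.shp Zipcode --srid=4326 --mapping --multi-geom
# The second form also emits the LayerMapping dictionary produced by the
# options['mapping'] branch in handle() above.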
|
40223102/w110519
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/locale.py
|
624
|
def getdefaultlocale():
return __BRYTHON__.language,None
def localeconv():
""" localeconv() -> dict.
Returns numeric and monetary locale-specific parameters.
"""
# 'C' locale default values
return {'grouping': [127],
'currency_symbol': '',
'n_sign_posn': 127,
'p_cs_precedes': 127,
'n_cs_precedes': 127,
'mon_grouping': [],
'n_sep_by_space': 127,
'decimal_point': '.',
'negative_sign': '',
'positive_sign': '',
'p_sep_by_space': 127,
'int_curr_symbol': '',
'p_sign_posn': 127,
'thousands_sep': '',
'mon_thousands_sep': '',
'frac_digits': 127,
'mon_decimal_point': '',
'int_frac_digits': 127}
def setlocale(category, value=None):
""" setlocale(integer,string=None) -> string.
Activates/queries locale processing.
"""
if value not in (None, '', 'C'):
raise Error('_locale emulation only supports "C" locale')
return 'C'
CHAR_MAX = 127
LC_ALL = 6
LC_COLLATE = 3
LC_CTYPE = 0
LC_MESSAGES = 5
LC_MONETARY = 4
LC_NUMERIC = 1
LC_TIME = 2
Error = ValueError
def getlocale(category=LC_CTYPE):
""" Returns the current setting for the given locale category as
tuple (language code, encoding).
category may be one of the LC_* value except LC_ALL. It
defaults to LC_CTYPE.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
return None, None
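# --- Hedged usage sketch (not part of the module) ---
# The shim mirrors just enough of CPython's locale API for Brython:
#     import locale
#     locale.setlocale(locale.LC_ALL, 'C')      # -> 'C'
#     locale.localeconv()['decimal_point']      # -> '.'
#     locale.getlocale()                        # -> (None, None)
# Any locale other than None, '' or 'C' makes setlocale() raise Error.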
|
webu/django-cms
|
refs/heads/develop
|
cms/management/commands/subcommands/base.py
|
48
|
# -*- coding: utf-8 -*-
import sys
from django.core.management.base import BaseCommand, CommandError
class SubcommandsCommand(BaseCommand):
subcommands = {}
command_name = ''
def __init__(self):
super(SubcommandsCommand, self).__init__()
for name, subcommand in self.subcommands.items():
subcommand.command_name = '%s %s' % (self.command_name, name)
def handle(self, *args, **options):
stderr = getattr(self, 'stderr', sys.stderr)
stdout = getattr(self, 'stdout', sys.stdout)
if len(args) > 0:
if args[0] in self.subcommands.keys():
handle_command = self.subcommands.get(args[0])()
handle_command.stdout = stdout
handle_command.stderr = stderr
handle_command.handle(*args[1:], **options)
else:
stderr.write("%r is not a valid subcommand for %r\n" % (args[0], self.command_name))
stderr.write("Available subcommands are:\n")
for subcommand in sorted(self.subcommands.keys()):
stderr.write(" %r\n" % subcommand)
raise CommandError('Invalid subcommand %r for %r' % (args[0], self.command_name))
else:
stderr.write("%r must be called with at least one argument, it's subcommand.\n" % self.command_name)
stderr.write("Available subcommands are:\n")
for subcommand in sorted(self.subcommands.keys()):
stderr.write(" %r\n" % subcommand)
raise CommandError('No subcommand given for %r' % self.command_name)
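# --- Hedged usage sketch (not part of the module) ---
# A concrete command wires subcommands in as a dict of names to command
# classes; the names below are illustrative only:
#     class Command(SubcommandsCommand):
#         command_name = 'cms'
#         subcommands = {'list': ListCommand, 'copy': CopyCommand}
# `manage.py cms list ...` then instantiates ListCommand and forwards the
# remaining arguments and options to its handle().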
|
rafaelwerneck/kuaa
|
refs/heads/master
|
descriptors/bic/plugin_bic.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
###############################################################################
# This file is part of Kuaa.
#
# Kuaa is a framework for the automation of machine learning experiments.
#
# It provides a workflow-based standardized environment for easy evaluation of
# feature descriptors, normalization techniques, classifiers and fusion
# approaches.
#
# Techniques of each kind can be easily plugged into the framework as they can
# be implemented as plugins, with standardized inputs and outputs.
# The framework also provides a recommendation module in order to help
# inexperienced researchers in choosing adequate or alternative techniques for
# experiments.
#
# Copyright (C) 2016 under the GNU General Public License Version 3.
#
# This framework was developed during the research collaboration of Institute
# of Computing (University of Campinas, Brazil) and Samsung Eletrônica da
# Amazônia Ltda. entitled "Pattern recognition and classification by feature
# engineering, *-fusion, open-set recognition, and meta-recognition", which was
# sponsored by Samsung.
#
# This framework is provided "as is" without any guarantees or warranty. The
# authors make no warranties, express or implied, that they are free of error,
# or they will meet your requirements for any particular application.
#
# The framework was developed to be used for educational and research purposes.
# It is expressly prohibited to use for any commercial purposes.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
###############################################################################
#Python imports
import os
import sys
import timeit
import platform
#Imports from the framework
import util
def extract(img_path, img_classes, param):
"""
Function that performs the extraction of an image using the BIC descriptor.
This function transforms the image being extracted to the desired image
format of the descriptor, performs the extraction of the image, and last,
transforms the output feature vector of the descriptor into the standard
output of the framework, a list of floats.
"""
print "Descriptor: BIC"
#CONSTANTS
#Number of executions of the extraction
NUM_EXEC = 1
#PATHS
#Path for the folder containing the descriptor executable
##Not necessary when the extraction is implemented directly in the plugin code
descriptor_path = os.path.dirname(__file__)
#Path for the temporary folder, where the file with the feature vector
#extracted from the image will be saved
##Consider the path with the origin on the directory of the plugin
##(os.path.dirname(__file__)) to avoid problems with the directory where
##the framework is being executed
path_temp = os.path.join(os.path.dirname(__file__), "..", "..", "temp")
if not os.path.isdir(path_temp):
os.makedirs(path_temp)
print img_path, "being extract in the process ", os.getpid()
#Temporary name of the image in the desired image format
list_classes = ""
for name_class in img_classes:
list_classes += str(name_class) + "_"
list_names = img_path.split(os.sep)
img_name = list_classes + list_names[-2] + "_" + list_names[-1]
#Convert the image to the desired format of the descriptor
temp_img_path, converted = util.convert_desired_format(img_path, \
img_name, "PPM")
if converted:
print "\tImage converted to PPM"
#Path of the file with the feature vector of an image
fv_path = os.path.join(path_temp, img_name + ".fv")
# Extraction of the feature vector
if not os.path.exists(fv_path):
system_platform = [platform.system(), platform.architecture()[0]]
if system_platform[0] == 'Linux':
if system_platform[1] == '32bit':
plugin_name = 'bic_32l.so'
else:
plugin_name = 'bic_64l.so'
else:
plugin_name = 'bic_64l.so'
#Extraction of the feature vector
if not os.path.exists(fv_path):
setup = """
ctypes = __import__('ctypes')
plugin = "%s"
lib = ctypes.CDLL("%s%s" + plugin)
img_path = "%s"
fv_path = "%s"
"""%(plugin_name, descriptor_path, os.sep, temp_img_path, fv_path)
cmd = """
lib.Extraction(img_path, fv_path)
"""
t = timeit.Timer(stmt=cmd, setup=setup)
try:
t.timeit(number=NUM_EXEC)
print "\tFeature vector extracted"
except:
t.print_exc()
#Remove the temporary image
if converted:
os.remove(temp_img_path)
#Transforms the feature vector of the descriptor into the standard output
#of the framework
fv = fv_transform(fv_path)
return img_path, len(img_classes), img_classes, fv
def fv_transform(fv_path):
"""
Receive the path with the feature vector in the descriptor output and
return the feature vector in the framework standard output, a list of
floats.
"""
list_fv = []
#Open the file created by the descriptor, save the feature vector in the
#standard output, remove the file and return the new feature vector
try:
file_fv = open(fv_path, "rb")
except IOError:
print "ERROR"
sys.exit(1)
file_fv.readline()
fv = file_fv.readline()
#Performs the necessary operations to transform the feature vector into
#the standard output
for carac in fv:
if not carac == "\n":
list_fv.append(float(eval(carac)))
file_fv.close()
os.remove(fv_path)
print "\tFeature vector transformed in the standard output"
return list_fv
def distance(fv1, fv2):
"""
Performs the calculation of distance between the two feature vectors,
according to the Distance function of the executable.
Inputs:
- fv1 (list of floats): First feature vector
- fv2 (list of floats): Second feature vector
Output:
- distance (double): Distance between the two feature vectors
"""
#Imports
import ctypes
len_fv = len(fv1)
descriptor_path = os.path.dirname(__file__)
system_platform = [platform.system(), platform.architecture()[0]]
if system_platform[0] == 'Linux':
if system_platform[1] == '32bit':
plugin_name = 'bic_32l.so'
else:
plugin_name = 'bic_64l.so'
else:
plugin_name = 'bic_64l.so'
plugin_path = os.path.join(descriptor_path, plugin_name)
plugin = ctypes.CDLL(plugin_path)
#Descriptor exclusive
#-------------------------------------------------------------------------
#Creating class requested by the Distance function
class Histogram(ctypes.Structure):
_fields_ = [("v", ctypes.POINTER(ctypes.c_ubyte * len_fv)),
("n", ctypes.c_int)]
#All Histograms
c_len_fv = ctypes.c_int(len_fv)
#First Histogram
fv_int = map(int, fv1)
p_fv1 = ctypes.pointer((ctypes.c_ubyte * len_fv)(*fv_int))
p_hist_fv1 = ctypes.pointer(Histogram(p_fv1, c_len_fv))
#Second Histogram
fv_int = map(int, fv2)
p_fv2 = ctypes.pointer((ctypes.c_ubyte * len_fv)(*fv_int))
p_hist_fv2 = ctypes.pointer(Histogram(p_fv2, c_len_fv))
#Parameters of the Distance function
plugin.Distance.restype = ctypes.c_double
#-------------------------------------------------------------------------
#Execution
distance = plugin.Distance(p_hist_fv1, p_hist_fv2)
return distance
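# --- Hedged usage sketch (not part of the plugin) ---
# Both entry points assume the compiled BIC library (bic_32l.so / bic_64l.so)
# sits next to this file; the paths and class list below are illustrative:
#     import plugin_bic
#     path, n, classes, fv = plugin_bic.extract("/data/img/cat/001.ppm", ["cat"], {})
#     d = plugin_bic.distance(fv, fv)   # distance of a vector to itself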
|
MechCoder/sympy
|
refs/heads/master
|
sympy/categories/baseclasses.py
|
47
|
from __future__ import print_function, division
from sympy.core import S, Basic, Dict, Symbol, Tuple
from sympy.core.compatibility import range, iterable
from sympy.sets import Set, FiniteSet, EmptySet
class Class(Set):
r"""
The base class for any kind of class in the set-theoretic sense.
In axiomatic set theories, everything is a class. A class which
can be a member of another class is a set. A class which is not a
member of another class is a proper class. The class `\{1, 2\}`
is a set; the class of all sets is a proper class.
This class is essentially a synonym for :class:`sympy.core.Set`.
    The goal of this class is to ensure an easier migration to the
eventual proper implementation of set theory.
"""
is_proper = False
class Object(Symbol):
"""
The base class for any kind of object in an abstract category.
While technically any instance of :class:`Basic` will do, this
class is the recommended way to create abstract objects in
abstract categories.
"""
class Morphism(Basic):
"""
The base class for any morphism in an abstract category.
In abstract categories, a morphism is an arrow between two
category objects. The object where the arrow starts is called the
domain, while the object where the arrow ends is called the
codomain.
Two morphisms between the same pair of objects are considered to
be the same morphisms. To distinguish between morphisms between
the same objects use :class:`NamedMorphism`.
It is prohibited to instantiate this class. Use one of the
derived classes instead.
See Also
========
IdentityMorphism, NamedMorphism, CompositeMorphism
"""
def __new__(cls, domain, codomain):
        raise NotImplementedError(
            "Cannot instantiate Morphism. Use derived classes instead.")
@property
def domain(self):
"""
Returns the domain of the morphism.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> f.domain
Object("A")
"""
return self.args[0]
@property
def codomain(self):
"""
Returns the codomain of the morphism.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> f.codomain
Object("B")
"""
return self.args[1]
def compose(self, other):
r"""
Composes self with the supplied morphism.
The order of elements in the composition is the usual order,
i.e., to construct `g\circ f` use ``g.compose(f)``.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> g * f
CompositeMorphism((NamedMorphism(Object("A"), Object("B"), "f"),
NamedMorphism(Object("B"), Object("C"), "g")))
>>> (g * f).domain
Object("A")
>>> (g * f).codomain
Object("C")
"""
return CompositeMorphism(other, self)
def __mul__(self, other):
r"""
Composes self with the supplied morphism.
The semantics of this operation is given by the following
equation: ``g * f == g.compose(f)`` for composable morphisms
``g`` and ``f``.
See Also
========
compose
"""
return self.compose(other)
class IdentityMorphism(Morphism):
"""
Represents an identity morphism.
An identity morphism is a morphism with equal domain and codomain,
which acts as an identity with respect to composition.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, IdentityMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> id_A = IdentityMorphism(A)
>>> id_B = IdentityMorphism(B)
>>> f * id_A == f
True
>>> id_B * f == f
True
See Also
========
Morphism
"""
def __new__(cls, domain):
return Basic.__new__(cls, domain, domain)
class NamedMorphism(Morphism):
"""
Represents a morphism which has a name.
Names are used to distinguish between morphisms which have the
same domain and codomain: two named morphisms are equal if they
have the same domains, codomains, and names.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> f
NamedMorphism(Object("A"), Object("B"), "f")
>>> f.name
'f'
See Also
========
Morphism
"""
def __new__(cls, domain, codomain, name):
if not name:
raise ValueError("Empty morphism names not allowed.")
return Basic.__new__(cls, domain, codomain, Symbol(name))
@property
def name(self):
"""
Returns the name of the morphism.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> f.name
'f'
"""
return self.args[2].name
class CompositeMorphism(Morphism):
r"""
Represents a morphism which is a composition of other morphisms.
Two composite morphisms are equal if the morphisms they were
obtained from (components) are the same and were listed in the
same order.
The arguments to the constructor for this class should be listed
in diagram order: to obtain the composition `g\circ f` from the
instances of :class:`Morphism` ``g`` and ``f`` use
``CompositeMorphism(f, g)``.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, CompositeMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> g * f
CompositeMorphism((NamedMorphism(Object("A"), Object("B"), "f"),
NamedMorphism(Object("B"), Object("C"), "g")))
>>> CompositeMorphism(f, g) == g * f
True
"""
@staticmethod
def _add_morphism(t, morphism):
"""
Intelligently adds ``morphism`` to tuple ``t``.
If ``morphism`` is a composite morphism, its components are
added to the tuple. If ``morphism`` is an identity, nothing
is added to the tuple.
No composability checks are performed.
"""
if isinstance(morphism, CompositeMorphism):
# ``morphism`` is a composite morphism; we have to
# denest its components.
return t + morphism.components
elif isinstance(morphism, IdentityMorphism):
# ``morphism`` is an identity. Nothing happens.
return t
else:
return t + Tuple(morphism)
def __new__(cls, *components):
if components and not isinstance(components[0], Morphism):
# Maybe the user has explicitly supplied a list of
# morphisms.
return CompositeMorphism.__new__(cls, *components[0])
normalised_components = Tuple()
# TODO: Fix the unpythonicity.
for i in range(len(components) - 1):
current = components[i]
following = components[i + 1]
if not isinstance(current, Morphism) or \
not isinstance(following, Morphism):
raise TypeError("All components must be morphisms.")
if current.codomain != following.domain:
raise ValueError("Uncomposable morphisms.")
normalised_components = CompositeMorphism._add_morphism(
normalised_components, current)
# We haven't added the last morphism to the list of normalised
# components. Add it now.
normalised_components = CompositeMorphism._add_morphism(
normalised_components, components[-1])
if not normalised_components:
# If ``normalised_components`` is empty, only identities
# were supplied. Since they all were composable, they are
# all the same identities.
return components[0]
elif len(normalised_components) == 1:
# No sense to construct a whole CompositeMorphism.
return normalised_components[0]
return Basic.__new__(cls, normalised_components)
@property
def components(self):
"""
Returns the components of this composite morphism.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> (g * f).components
(NamedMorphism(Object("A"), Object("B"), "f"),
NamedMorphism(Object("B"), Object("C"), "g"))
"""
return self.args[0]
@property
def domain(self):
"""
Returns the domain of this composite morphism.
The domain of the composite morphism is the domain of its
first component.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> (g * f).domain
Object("A")
"""
return self.components[0].domain
@property
def codomain(self):
"""
Returns the codomain of this composite morphism.
The codomain of the composite morphism is the codomain of its
last component.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> (g * f).codomain
Object("C")
"""
return self.components[-1].codomain
def flatten(self, new_name):
"""
Forgets the composite structure of this morphism.
If ``new_name`` is not empty, returns a :class:`NamedMorphism`
with the supplied name, otherwise returns a :class:`Morphism`.
In both cases the domain of the new morphism is the domain of
this composite morphism and the codomain of the new morphism
is the codomain of this composite morphism.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> (g * f).flatten("h")
NamedMorphism(Object("A"), Object("C"), "h")
"""
return NamedMorphism(self.domain, self.codomain, new_name)
class Category(Basic):
r"""
An (abstract) category.
A category [JoyOfCats] is a quadruple `\mbox{K} = (O, \hom, id,
\circ)` consisting of
* a (set-theoretical) class `O`, whose members are called
`K`-objects,
* for each pair `(A, B)` of `K`-objects, a set `\hom(A, B)` whose
members are called `K`-morphisms from `A` to `B`,
* for a each `K`-object `A`, a morphism `id:A\rightarrow A`,
called the `K`-identity of `A`,
* a composition law `\circ` associating with every `K`-morphisms
`f:A\rightarrow B` and `g:B\rightarrow C` a `K`-morphism `g\circ
f:A\rightarrow C`, called the composite of `f` and `g`.
Composition is associative, `K`-identities are identities with
respect to composition, and the sets `\hom(A, B)` are pairwise
disjoint.
This class knows nothing about its objects and morphisms.
Concrete cases of (abstract) categories should be implemented as
classes derived from this one.
Certain instances of :class:`Diagram` can be asserted to be
commutative in a :class:`Category` by supplying the argument
``commutative_diagrams`` in the constructor.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram, Category
>>> from sympy import FiniteSet
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g])
>>> K = Category("K", commutative_diagrams=[d])
>>> K.commutative_diagrams == FiniteSet(d)
True
See Also
========
Diagram
"""
def __new__(cls, name, objects=EmptySet(), commutative_diagrams=EmptySet()):
if not name:
raise ValueError("A Category cannot have an empty name.")
new_category = Basic.__new__(cls, Symbol(name), Class(objects),
FiniteSet(*commutative_diagrams))
return new_category
@property
def name(self):
"""
Returns the name of this category.
Examples
========
>>> from sympy.categories import Category
>>> K = Category("K")
>>> K.name
'K'
"""
return self.args[0].name
@property
def objects(self):
"""
Returns the class of objects of this category.
Examples
========
>>> from sympy.categories import Object, Category
>>> from sympy import FiniteSet
>>> A = Object("A")
>>> B = Object("B")
>>> K = Category("K", FiniteSet(A, B))
>>> K.objects
Class({Object("A"), Object("B")})
"""
return self.args[1]
@property
def commutative_diagrams(self):
"""
Returns the :class:`FiniteSet` of diagrams which are known to
be commutative in this category.
>>> from sympy.categories import Object, NamedMorphism, Diagram, Category
>>> from sympy import FiniteSet
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g])
>>> K = Category("K", commutative_diagrams=[d])
>>> K.commutative_diagrams == FiniteSet(d)
True
"""
return self.args[2]
def hom(self, A, B):
raise NotImplementedError(
"hom-sets are not implemented in Category.")
def all_morphisms(self):
raise NotImplementedError(
"Obtaining the class of morphisms is not implemented in Category.")
class Diagram(Basic):
r"""
Represents a diagram in a certain category.
Informally, a diagram is a collection of objects of a category and
certain morphisms between them. A diagram is still a monoid with
respect to morphism composition; i.e., identity morphisms, as well
as all composites of morphisms included in the diagram belong to
the diagram. For a more formal approach to this notion see
[Pare1970].
The components of composite morphisms are also added to the
diagram. No properties are assigned to such morphisms by default.
A commutative diagram is often accompanied by a statement of the
following kind: "if such morphisms with such properties exist,
    then such morphisms with such properties exist and the diagram is
commutative". To represent this, an instance of :class:`Diagram`
includes a collection of morphisms which are the premises and
another collection of conclusions. ``premises`` and
``conclusions`` associate morphisms belonging to the corresponding
categories with the :class:`FiniteSet`'s of their properties.
The set of properties of a composite morphism is the intersection
of the sets of properties of its components. The domain and
codomain of a conclusion morphism should be among the domains and
codomains of the morphisms listed as the premises of a diagram.
No checks are carried out of whether the supplied object and
morphisms do belong to one and the same category.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> from sympy import FiniteSet, pprint, default_sort_key
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g])
>>> premises_keys = sorted(d.premises.keys(), key=default_sort_key)
>>> pprint(premises_keys, use_unicode=False)
[g*f:A-->C, id:A-->A, id:B-->B, id:C-->C, f:A-->B, g:B-->C]
>>> pprint(d.premises, use_unicode=False)
{g*f:A-->C: EmptySet(), id:A-->A: EmptySet(), id:B-->B: EmptySet(), id:C-->C:
EmptySet(), f:A-->B: EmptySet(), g:B-->C: EmptySet()}
>>> d = Diagram([f, g], {g * f: "unique"})
>>> pprint(d.conclusions)
{g*f:A-->C: {unique}}
References
==========
[Pare1970] B. Pareigis: Categories and functors. Academic Press,
1970.
"""
@staticmethod
def _set_dict_union(dictionary, key, value):
"""
If ``key`` is in ``dictionary``, set the new value of ``key``
to be the union between the old value and ``value``.
        Otherwise, set the value of ``key`` to ``value``.
Returns ``True`` if the key already was in the dictionary and
``False`` otherwise.
"""
if key in dictionary:
dictionary[key] = dictionary[key] | value
return True
else:
dictionary[key] = value
return False
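    # Hedged illustration (assumed values): with d = {f: FiniteSet("unique")},
    # Diagram._set_dict_union(d, f, FiniteSet("epi")) returns True and leaves
    # d[f] == FiniteSet("unique", "epi"); an unseen key is simply inserted and
    # False is returned.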
@staticmethod
def _add_morphism_closure(morphisms, morphism, props, add_identities=True,
recurse_composites=True):
"""
Adds a morphism and its attributes to the supplied dictionary
``morphisms``. If ``add_identities`` is True, also adds the
identity morphisms for the domain and the codomain of
``morphism``.
"""
if not Diagram._set_dict_union(morphisms, morphism, props):
# We have just added a new morphism.
if isinstance(morphism, IdentityMorphism):
if props:
# Properties for identity morphisms don't really
# make sense, because very much is known about
# identity morphisms already, so much that they
# are trivial. Having properties for identity
# morphisms would only be confusing.
raise ValueError(
"Instances of IdentityMorphism cannot have properties.")
return
if add_identities:
empty = EmptySet()
id_dom = IdentityMorphism(morphism.domain)
id_cod = IdentityMorphism(morphism.codomain)
Diagram._set_dict_union(morphisms, id_dom, empty)
Diagram._set_dict_union(morphisms, id_cod, empty)
for existing_morphism, existing_props in list(morphisms.items()):
new_props = existing_props & props
if morphism.domain == existing_morphism.codomain:
left = morphism * existing_morphism
Diagram._set_dict_union(morphisms, left, new_props)
if morphism.codomain == existing_morphism.domain:
right = existing_morphism * morphism
Diagram._set_dict_union(morphisms, right, new_props)
if isinstance(morphism, CompositeMorphism) and recurse_composites:
# This is a composite morphism, add its components as
# well.
empty = EmptySet()
for component in morphism.components:
Diagram._add_morphism_closure(morphisms, component, empty,
add_identities)
def __new__(cls, *args):
"""
Construct a new instance of Diagram.
If no arguments are supplied, an empty diagram is created.
If at least an argument is supplied, ``args[0]`` is
interpreted as the premises of the diagram. If ``args[0]`` is
a list, it is interpreted as a list of :class:`Morphism`'s, in
which each :class:`Morphism` has an empty set of properties.
If ``args[0]`` is a Python dictionary or a :class:`Dict`, it
is interpreted as a dictionary associating to some
:class:`Morphism`'s some properties.
If at least two arguments are supplied ``args[1]`` is
interpreted as the conclusions of the diagram. The type of
``args[1]`` is interpreted in exactly the same way as the type
of ``args[0]``. If only one argument is supplied, the diagram
has no conclusions.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> from sympy.categories import IdentityMorphism, Diagram
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g])
>>> IdentityMorphism(A) in d.premises.keys()
True
>>> g * f in d.premises.keys()
True
>>> d = Diagram([f, g], {g * f: "unique"})
>>> d.conclusions[g * f]
{unique}
"""
premises = {}
conclusions = {}
# Here we will keep track of the objects which appear in the
# premises.
objects = EmptySet()
if len(args) >= 1:
# We've got some premises in the arguments.
premises_arg = args[0]
if isinstance(premises_arg, list):
# The user has supplied a list of morphisms, none of
# which have any attributes.
empty = EmptySet()
for morphism in premises_arg:
objects |= FiniteSet(morphism.domain, morphism.codomain)
Diagram._add_morphism_closure(premises, morphism, empty)
elif isinstance(premises_arg, dict) or isinstance(premises_arg, Dict):
# The user has supplied a dictionary of morphisms and
# their properties.
for morphism, props in premises_arg.items():
objects |= FiniteSet(morphism.domain, morphism.codomain)
Diagram._add_morphism_closure(
premises, morphism, FiniteSet(*props) if iterable(props) else FiniteSet(props))
if len(args) >= 2:
# We also have some conclusions.
conclusions_arg = args[1]
if isinstance(conclusions_arg, list):
# The user has supplied a list of morphisms, none of
# which have any attributes.
empty = EmptySet()
for morphism in conclusions_arg:
# Check that no new objects appear in conclusions.
if ((objects.contains(morphism.domain) == S.true) and
(objects.contains(morphism.codomain) == S.true)):
# No need to add identities and recurse
# composites this time.
Diagram._add_morphism_closure(
conclusions, morphism, empty, add_identities=False,
recurse_composites=False)
elif isinstance(conclusions_arg, dict) or \
isinstance(conclusions_arg, Dict):
# The user has supplied a dictionary of morphisms and
# their properties.
for morphism, props in conclusions_arg.items():
# Check that no new objects appear in conclusions.
if (morphism.domain in objects) and \
(morphism.codomain in objects):
# No need to add identities and recurse
# composites this time.
Diagram._add_morphism_closure(
conclusions, morphism, FiniteSet(*props) if iterable(props) else FiniteSet(props),
add_identities=False, recurse_composites=False)
return Basic.__new__(cls, Dict(premises), Dict(conclusions), objects)
@property
def premises(self):
"""
Returns the premises of this diagram.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> from sympy.categories import IdentityMorphism, Diagram
>>> from sympy import pretty
>>> A = Object("A")
>>> B = Object("B")
>>> f = NamedMorphism(A, B, "f")
>>> id_A = IdentityMorphism(A)
>>> id_B = IdentityMorphism(B)
>>> d = Diagram([f])
>>> print(pretty(d.premises, use_unicode=False))
{id:A-->A: EmptySet(), id:B-->B: EmptySet(), f:A-->B: EmptySet()}
"""
return self.args[0]
@property
def conclusions(self):
"""
Returns the conclusions of this diagram.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> from sympy.categories import IdentityMorphism, Diagram
>>> from sympy import FiniteSet
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g])
>>> IdentityMorphism(A) in d.premises.keys()
True
>>> g * f in d.premises.keys()
True
>>> d = Diagram([f, g], {g * f: "unique"})
>>> d.conclusions[g * f] == FiniteSet("unique")
True
"""
return self.args[1]
@property
def objects(self):
"""
Returns the :class:`FiniteSet` of objects that appear in this
diagram.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g])
>>> d.objects
{Object("A"), Object("B"), Object("C")}
"""
return self.args[2]
def hom(self, A, B):
"""
Returns a 2-tuple of sets of morphisms between objects A and
B: one set of morphisms listed as premises, and the other set
of morphisms listed as conclusions.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> from sympy import pretty
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g], {g * f: "unique"})
>>> print(pretty(d.hom(A, C), use_unicode=False))
({g*f:A-->C}, {g*f:A-->C})
See Also
========
Object, Morphism
"""
premises = EmptySet()
conclusions = EmptySet()
for morphism in self.premises.keys():
if (morphism.domain == A) and (morphism.codomain == B):
premises |= FiniteSet(morphism)
for morphism in self.conclusions.keys():
if (morphism.domain == A) and (morphism.codomain == B):
conclusions |= FiniteSet(morphism)
return (premises, conclusions)
def is_subdiagram(self, diagram):
"""
Checks whether ``diagram`` is a subdiagram of ``self``.
Diagram `D'` is a subdiagram of `D` if all premises
(conclusions) of `D'` are contained in the premises
(conclusions) of `D`. The morphisms contained
both in `D'` and `D` should have the same properties for `D'`
to be a subdiagram of `D`.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g], {g * f: "unique"})
>>> d1 = Diagram([f])
>>> d.is_subdiagram(d1)
True
>>> d1.is_subdiagram(d)
False
"""
premises = all([(m in self.premises) and
(diagram.premises[m] == self.premises[m])
for m in diagram.premises])
if not premises:
return False
conclusions = all([(m in self.conclusions) and
(diagram.conclusions[m] == self.conclusions[m])
for m in diagram.conclusions])
# Premises is surely ``True`` here.
return conclusions
def subdiagram_from_objects(self, objects):
"""
If ``objects`` is a subset of the objects of ``self``, returns
a diagram which has as premises all those premises of ``self``
        which have domains and codomains in ``objects``, likewise
for conclusions. Properties are preserved.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, Diagram
>>> from sympy import FiniteSet
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> d = Diagram([f, g], {f: "unique", g*f: "veryunique"})
>>> d1 = d.subdiagram_from_objects(FiniteSet(A, B))
>>> d1 == Diagram([f], {f: "unique"})
True
"""
if not objects.is_subset(self.objects):
raise ValueError(
"Supplied objects should all belong to the diagram.")
new_premises = {}
for morphism, props in self.premises.items():
if ((objects.contains(morphism.domain) == S.true) and
(objects.contains(morphism.codomain) == S.true)):
new_premises[morphism] = props
new_conclusions = {}
for morphism, props in self.conclusions.items():
if ((objects.contains(morphism.domain) == S.true) and
(objects.contains(morphism.codomain) == S.true)):
new_conclusions[morphism] = props
return Diagram(new_premises, new_conclusions)
|
SysTheron/adhocracy
|
refs/heads/develop
|
src/adhocracy/lib/event/rss.py
|
1
|
from pylons import response
from webhelpers.feedgenerator import Rss201rev2Feed as Feed
from adhocracy.lib import pager
from adhocracy.lib.event import formatting
def rss_feed(events, name, link, description):
rss = Feed(name, link.encode('utf-8'), description)
def event_item(event):
try:
item_link = event.event.link_path(event)
        except Exception:
item_link = link
rss.add_item(title=u"%s %s" % (event.user.name,
formatting.as_unicode(event)),
link=item_link.encode('utf-8'),
pubdate=event.time,
description=event.text(),
author_name=event.user.name,
unique_id=item_link)
response.content_type = 'application/rss+xml'
pager.NamedPager('rss', events, event_item, size=50).here()
return rss.writeString('utf-8')
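# --- Hedged usage sketch (not part of the module) ---
# A controller action would typically return the feed body directly; the names
# below are illustrative only:
#     def activity_rss(self):
#         events = ...  # iterable of adhocracy event instances
#         return rss_feed(events, u"Recent activity",
#                         u"http://example.com/event", u"Site activity feed")
# rss_feed() also sets response.content_type to 'application/rss+xml'.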
|
nikhil93uf/Qemu
|
refs/heads/master
|
scripts/analyse-9p-simpletrace.py
|
333
|
#!/usr/bin/env python
# Pretty print 9p simpletrace log
# Usage: ./analyse-9p-simpletrace <trace-events> <trace-pid>
#
# Author: Harsh Prateek Bora
import os
import simpletrace
symbol_9p = {
6 : 'TLERROR',
7 : 'RLERROR',
8 : 'TSTATFS',
9 : 'RSTATFS',
12 : 'TLOPEN',
13 : 'RLOPEN',
14 : 'TLCREATE',
15 : 'RLCREATE',
16 : 'TSYMLINK',
17 : 'RSYMLINK',
18 : 'TMKNOD',
19 : 'RMKNOD',
20 : 'TRENAME',
21 : 'RRENAME',
22 : 'TREADLINK',
23 : 'RREADLINK',
24 : 'TGETATTR',
25 : 'RGETATTR',
26 : 'TSETATTR',
27 : 'RSETATTR',
30 : 'TXATTRWALK',
31 : 'RXATTRWALK',
32 : 'TXATTRCREATE',
33 : 'RXATTRCREATE',
40 : 'TREADDIR',
41 : 'RREADDIR',
50 : 'TFSYNC',
51 : 'RFSYNC',
52 : 'TLOCK',
53 : 'RLOCK',
54 : 'TGETLOCK',
55 : 'RGETLOCK',
70 : 'TLINK',
71 : 'RLINK',
72 : 'TMKDIR',
73 : 'RMKDIR',
74 : 'TRENAMEAT',
75 : 'RRENAMEAT',
76 : 'TUNLINKAT',
77 : 'RUNLINKAT',
100 : 'TVERSION',
101 : 'RVERSION',
102 : 'TAUTH',
103 : 'RAUTH',
104 : 'TATTACH',
105 : 'RATTACH',
106 : 'TERROR',
107 : 'RERROR',
108 : 'TFLUSH',
109 : 'RFLUSH',
110 : 'TWALK',
111 : 'RWALK',
112 : 'TOPEN',
113 : 'ROPEN',
114 : 'TCREATE',
115 : 'RCREATE',
116 : 'TREAD',
117 : 'RREAD',
118 : 'TWRITE',
119 : 'RWRITE',
120 : 'TCLUNK',
121 : 'RCLUNK',
122 : 'TREMOVE',
123 : 'RREMOVE',
124 : 'TSTAT',
125 : 'RSTAT',
126 : 'TWSTAT',
127 : 'RWSTAT'
}
class VirtFSRequestTracker(simpletrace.Analyzer):
def begin(self):
print "Pretty printing 9p simpletrace log ..."
def v9fs_rerror(self, tag, id, err):
print "RERROR (tag =", tag, ", id =", symbol_9p[id], ", err = \"", os.strerror(err), "\")"
def v9fs_version(self, tag, id, msize, version):
print "TVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")"
def v9fs_version_return(self, tag, id, msize, version):
print "RVERSION (tag =", tag, ", msize =", msize, ", version =", version, ")"
def v9fs_attach(self, tag, id, fid, afid, uname, aname):
print "TATTACH (tag =", tag, ", fid =", fid, ", afid =", afid, ", uname =", uname, ", aname =", aname, ")"
def v9fs_attach_return(self, tag, id, type, version, path):
print "RATTACH (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})"
def v9fs_stat(self, tag, id, fid):
print "TSTAT (tag =", tag, ", fid =", fid, ")"
def v9fs_stat_return(self, tag, id, mode, atime, mtime, length):
print "RSTAT (tag =", tag, ", mode =", mode, ", atime =", atime, ", mtime =", mtime, ", length =", length, ")"
def v9fs_getattr(self, tag, id, fid, request_mask):
print "TGETATTR (tag =", tag, ", fid =", fid, ", request_mask =", hex(request_mask), ")"
def v9fs_getattr_return(self, tag, id, result_mask, mode, uid, gid):
print "RGETATTR (tag =", tag, ", result_mask =", hex(result_mask), ", mode =", oct(mode), ", uid =", uid, ", gid =", gid, ")"
def v9fs_walk(self, tag, id, fid, newfid, nwnames):
print "TWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", nwnames =", nwnames, ")"
def v9fs_walk_return(self, tag, id, nwnames, qids):
print "RWALK (tag =", tag, ", nwnames =", nwnames, ", qids =", hex(qids), ")"
def v9fs_open(self, tag, id, fid, mode):
print "TOPEN (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ")"
def v9fs_open_return(self, tag, id, type, version, path, iounit):
print "ROPEN (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"
def v9fs_lcreate(self, tag, id, dfid, flags, mode, gid):
print "TLCREATE (tag =", tag, ", dfid =", dfid, ", flags =", oct(flags), ", mode =", oct(mode), ", gid =", gid, ")"
def v9fs_lcreate_return(self, tag, id, type, version, path, iounit):
print "RLCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"
def v9fs_fsync(self, tag, id, fid, datasync):
print "TFSYNC (tag =", tag, ", fid =", fid, ", datasync =", datasync, ")"
def v9fs_clunk(self, tag, id, fid):
print "TCLUNK (tag =", tag, ", fid =", fid, ")"
def v9fs_read(self, tag, id, fid, off, max_count):
print "TREAD (tag =", tag, ", fid =", fid, ", off =", off, ", max_count =", max_count, ")"
def v9fs_read_return(self, tag, id, count, err):
print "RREAD (tag =", tag, ", count =", count, ", err =", err, ")"
def v9fs_readdir(self, tag, id, fid, offset, max_count):
print "TREADDIR (tag =", tag, ", fid =", fid, ", offset =", offset, ", max_count =", max_count, ")"
def v9fs_readdir_return(self, tag, id, count, retval):
print "RREADDIR (tag =", tag, ", count =", count, ", retval =", retval, ")"
def v9fs_write(self, tag, id, fid, off, count, cnt):
print "TWRITE (tag =", tag, ", fid =", fid, ", off =", off, ", count =", count, ", cnt =", cnt, ")"
def v9fs_write_return(self, tag, id, total, err):
print "RWRITE (tag =", tag, ", total =", total, ", err =", err, ")"
def v9fs_create(self, tag, id, fid, name, perm, mode):
print "TCREATE (tag =", tag, ", fid =", fid, ", perm =", oct(perm), ", name =", name, ", mode =", oct(mode), ")"
def v9fs_create_return(self, tag, id, type, version, path, iounit):
print "RCREATE (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, iounit =", iounit, ")"
def v9fs_symlink(self, tag, id, fid, name, symname, gid):
print "TSYMLINK (tag =", tag, ", fid =", fid, ", name =", name, ", symname =", symname, ", gid =", gid, ")"
def v9fs_symlink_return(self, tag, id, type, version, path):
print "RSYMLINK (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "})"
def v9fs_flush(self, tag, id, flush_tag):
print "TFLUSH (tag =", tag, ", flush_tag =", flush_tag, ")"
def v9fs_link(self, tag, id, dfid, oldfid, name):
print "TLINK (tag =", tag, ", dfid =", dfid, ", oldfid =", oldfid, ", name =", name, ")"
def v9fs_remove(self, tag, id, fid):
print "TREMOVE (tag =", tag, ", fid =", fid, ")"
def v9fs_wstat(self, tag, id, fid, mode, atime, mtime):
        print "TWSTAT (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", atime =", atime, ", mtime =", mtime, ")"
def v9fs_mknod(self, tag, id, fid, mode, major, minor):
print "TMKNOD (tag =", tag, ", fid =", fid, ", mode =", oct(mode), ", major =", major, ", minor =", minor, ")"
def v9fs_lock(self, tag, id, fid, type, start, length):
        print "TLOCK (tag =", tag, ", fid =", fid, ", type =", type, ", start =", start, ", length =", length, ")"
def v9fs_lock_return(self, tag, id, status):
print "RLOCK (tag =", tag, ", status =", status, ")"
def v9fs_getlock(self, tag, id, fid, type, start, length):
        print "TGETLOCK (tag =", tag, ", fid =", fid, ", type =", type, ", start =", start, ", length =", length, ")"
def v9fs_getlock_return(self, tag, id, type, start, length, proc_id):
        print "RGETLOCK (tag =", tag, ", type =", type, ", start =", start, ", length =", length, ", proc_id =", proc_id, ")"
def v9fs_mkdir(self, tag, id, fid, name, mode, gid):
print "TMKDIR (tag =", tag, ", fid =", fid, ", name =", name, ", mode =", mode, ", gid =", gid, ")"
def v9fs_mkdir_return(self, tag, id, type, version, path, err):
print "RMKDIR (tag =", tag, ", qid={type =", type, ", version =", version, ", path =", path, "}, err =", err, ")"
def v9fs_xattrwalk(self, tag, id, fid, newfid, name):
print "TXATTRWALK (tag =", tag, ", fid =", fid, ", newfid =", newfid, ", xattr name =", name, ")"
def v9fs_xattrwalk_return(self, tag, id, size):
print "RXATTRWALK (tag =", tag, ", xattrsize =", size, ")"
def v9fs_xattrcreate(self, tag, id, fid, name, size, flags):
print "TXATTRCREATE (tag =", tag, ", fid =", fid, ", name =", name, ", xattrsize =", size, ", flags =", flags, ")"
def v9fs_readlink(self, tag, id, fid):
print "TREADLINK (tag =", tag, ", fid =", fid, ")"
def v9fs_readlink_return(self, tag, id, target):
print "RREADLINK (tag =", tag, ", target =", target, ")"
simpletrace.run(VirtFSRequestTracker())
|
liuquansheng47/ansible
|
refs/heads/devel
|
lib/ansible/utils/unicode.py
|
1
|
# (c) 2012-2014, Toshio Kuratomi <a.badger@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six import string_types, text_type, binary_type, PY3
# to_bytes and to_unicode were written by Toshio Kuratomi for the
# python-kitchen library https://pypi.python.org/pypi/kitchen
# They are licensed in kitchen under the terms of the GPLv2+
# They were copied and modified for use in ansible by Toshio in Jan 2015
# (simply removing the deprecated features)
#: Aliases for the utf-8 codec
_UTF8_ALIASES = frozenset(('utf-8', 'UTF-8', 'utf8', 'UTF8', 'utf_8', 'UTF_8',
'utf', 'UTF', 'u8', 'U8'))
#: Aliases for the latin-1 codec
_LATIN1_ALIASES = frozenset(('latin-1', 'LATIN-1', 'latin1', 'LATIN1',
'latin', 'LATIN', 'l1', 'L1', 'cp819', 'CP819', '8859', 'iso8859-1',
'ISO8859-1', 'iso-8859-1', 'ISO-8859-1'))
# EXCEPTION_CONVERTERS is defined below due to using to_unicode
if PY3:
basestring = (str, bytes)
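    # (Python 3 has no basestring builtin; the tuple above lets the
    # isinstance checks below accept both text and byte strings.)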
def to_unicode(obj, encoding='utf-8', errors='replace', nonstring=None):
'''Convert an object into a :class:`unicode` string
:arg obj: Object to convert to a :class:`unicode` string. This should
normally be a byte :class:`str`
:kwarg encoding: What encoding to try converting the byte :class:`str` as.
Defaults to :term:`utf-8`
:kwarg errors: If errors are found while decoding, perform this action.
Defaults to ``replace`` which replaces the invalid bytes with
a character that means the bytes were unable to be decoded. Other
values are the same as the error handling schemes in the `codec base
classes
<http://docs.python.org/library/codecs.html#codec-base-classes>`_.
For instance ``strict`` which raises an exception and ``ignore`` which
simply omits the non-decodable characters.
:kwarg nonstring: How to treat nonstring values. Possible values are:
:simplerepr: Attempt to call the object's "simple representation"
method and return that value. Python-2.3+ has two methods that
try to return a simple representation: :meth:`object.__unicode__`
and :meth:`object.__str__`. We first try to get a usable value
from :meth:`object.__unicode__`. If that fails we try the same
with :meth:`object.__str__`.
:empty: Return an empty :class:`unicode` string
:strict: Raise a :exc:`TypeError`
:passthru: Return the object unchanged
:repr: Attempt to return a :class:`unicode` string of the repr of the
object
Default is ``simplerepr``
:raises TypeError: if :attr:`nonstring` is ``strict`` and
a non-:class:`basestring` object is passed in or if :attr:`nonstring`
is set to an unknown value
:raises UnicodeDecodeError: if :attr:`errors` is ``strict`` and
:attr:`obj` is not decodable using the given encoding
:returns: :class:`unicode` string or the original object depending on the
value of :attr:`nonstring`.
Usually this should be used on a byte :class:`str` but it can take both
byte :class:`str` and :class:`unicode` strings intelligently. Nonstring
objects are handled in different ways depending on the setting of the
:attr:`nonstring` parameter.
The default values of this function are set so as to always return
a :class:`unicode` string and never raise an error when converting from
a byte :class:`str` to a :class:`unicode` string. However, when you do
not pass validly encoded text (or a nonstring object), you may end up with
output that you don't expect. Be sure you understand the requirements of
your data, not just ignore errors by passing it through this function.
'''
# Could use isbasestring/isunicode here but we want this code to be as
# fast as possible
if isinstance(obj, basestring):
if isinstance(obj, text_type):
return obj
if encoding in _UTF8_ALIASES:
return text_type(obj, 'utf-8', errors)
if encoding in _LATIN1_ALIASES:
return text_type(obj, 'latin-1', errors)
return obj.decode(encoding, errors)
if not nonstring:
nonstring = 'simplerepr'
if nonstring == 'empty':
return u''
elif nonstring == 'passthru':
return obj
elif nonstring == 'simplerepr':
try:
simple = obj.__unicode__()
except (AttributeError, UnicodeError):
simple = None
if not simple:
try:
simple = text_type(obj)
except UnicodeError:
try:
simple = obj.__str__()
except (UnicodeError, AttributeError):
simple = u''
if isinstance(simple, binary_type):
return text_type(simple, encoding, errors)
return simple
elif nonstring in ('repr', 'strict'):
obj_repr = repr(obj)
if isinstance(obj_repr, binary_type):
obj_repr = text_type(obj_repr, encoding, errors)
if nonstring == 'repr':
return obj_repr
raise TypeError('to_unicode was given "%(obj)s" which is neither'
                    ' a byte string (str) nor a unicode string' %
{'obj': obj_repr.encode(encoding, 'replace')})
raise TypeError('nonstring value, %(param)s, is not set to a valid'
' action' % {'param': nonstring})
def to_bytes(obj, encoding='utf-8', errors='replace', nonstring=None):
'''Convert an object into a byte :class:`str`
:arg obj: Object to convert to a byte :class:`str`. This should normally
be a :class:`unicode` string.
:kwarg encoding: Encoding to use to convert the :class:`unicode` string
into a byte :class:`str`. Defaults to :term:`utf-8`.
:kwarg errors: If errors are found while encoding, perform this action.
Defaults to ``replace`` which replaces the invalid bytes with
a character that means the bytes were unable to be encoded. Other
values are the same as the error handling schemes in the `codec base
classes
<http://docs.python.org/library/codecs.html#codec-base-classes>`_.
For instance ``strict`` which raises an exception and ``ignore`` which
simply omits the non-encodable characters.
:kwarg nonstring: How to treat nonstring values. Possible values are:
:simplerepr: Attempt to call the object's "simple representation"
method and return that value. Python-2.3+ has two methods that
try to return a simple representation: :meth:`object.__unicode__`
and :meth:`object.__str__`. We first try to get a usable value
from :meth:`object.__str__`. If that fails we try the same
with :meth:`object.__unicode__`.
:empty: Return an empty byte :class:`str`
:strict: Raise a :exc:`TypeError`
:passthru: Return the object unchanged
:repr: Attempt to return a byte :class:`str` of the :func:`repr` of the
object
Default is ``simplerepr``.
:raises TypeError: if :attr:`nonstring` is ``strict`` and
a non-:class:`basestring` object is passed in or if :attr:`nonstring`
is set to an unknown value.
:raises UnicodeEncodeError: if :attr:`errors` is ``strict`` and all of the
bytes of :attr:`obj` are unable to be encoded using :attr:`encoding`.
:returns: byte :class:`str` or the original object depending on the value
of :attr:`nonstring`.
.. warning::
If you pass a byte :class:`str` into this function the byte
:class:`str` is returned unmodified. It is **not** re-encoded with
the specified :attr:`encoding`. The easiest way to achieve that is::
to_bytes(to_unicode(text), encoding='utf-8')
The initial :func:`to_unicode` call will ensure text is
a :class:`unicode` string. Then, :func:`to_bytes` will turn that into
a byte :class:`str` with the specified encoding.
Usually, this should be used on a :class:`unicode` string but it can take
either a byte :class:`str` or a :class:`unicode` string intelligently.
Nonstring objects are handled in different ways depending on the setting
of the :attr:`nonstring` parameter.
The default values of this function are set so as to always return a byte
:class:`str` and never raise an error when converting from unicode to
bytes. However, when you do not pass an encoding that can validly encode
the object (or a non-string object), you may end up with output that you
don't expect. Be sure you understand the requirements of your data, not
just ignore errors by passing it through this function.
'''
# Could use isbasestring, isbytestring here but we want this to be as fast
# as possible
if isinstance(obj, basestring):
if isinstance(obj, binary_type):
return obj
return obj.encode(encoding, errors)
if not nonstring:
nonstring = 'simplerepr'
if nonstring == 'empty':
return b''
elif nonstring == 'passthru':
return obj
elif nonstring == 'simplerepr':
try:
simple = str(obj)
except UnicodeError:
try:
simple = obj.__str__()
except (AttributeError, UnicodeError):
simple = None
if not simple:
try:
simple = obj.__unicode__()
except (AttributeError, UnicodeError):
simple = b''
if isinstance(simple, text_type):
simple = simple.encode(encoding, 'replace')
return simple
elif nonstring in ('repr', 'strict'):
try:
obj_repr = obj.__repr__()
except (AttributeError, UnicodeError):
obj_repr = b''
if isinstance(obj_repr, text_type):
obj_repr = obj_repr.encode(encoding, errors)
else:
obj_repr = binary_type(obj_repr)
if nonstring == 'repr':
return obj_repr
raise TypeError('to_bytes was given "%(obj)s" which is neither'
                    ' a unicode string nor a byte string (str)' % {'obj': obj_repr})
raise TypeError('nonstring value, %(param)s, is not set to a valid'
' action' % {'param': nonstring})
# force the return value of a function to be unicode. Use with partial to
# ensure that a filter will return unicode values.
def unicode_wrap(func, *args, **kwargs):
return to_unicode(func(*args, **kwargs), nonstring='passthru')
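# A minimal usage sketch (illustrative only): wrap an existing function so
# its return value is always text, e.g.
#   from functools import partial
#   unicode_basename = partial(unicode_wrap, os.path.basename)
#   unicode_basename(b'/tmp/data')  # -> u'data'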
# Alias for converting to native strings.
if PY3:
to_str = to_unicode
else:
to_str = to_bytes
|
foliverkodi/repository.foliver
|
refs/heads/master
|
plugin.video.brfilmes/resources/lib/chardet/compat.py
|
2942
|
######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
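# Usage note: on Python 2, iterating a str yields one-character strings, so
# wrap_ord('A') returns 65; on Python 3, iterating bytes already yields
# ints, which wrap_ord passes through unchanged.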
|
ceph/samba
|
refs/heads/ceph
|
source4/dsdb/tests/python/dirsync.py
|
33
|
#!/usr/bin/env python
#
# Unit tests for dirsync control
# Copyright (C) Matthieu Patou <mat@matws.net> 2011
# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2014
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import optparse
import sys
sys.path.insert(0, "bin/python")
import samba
from samba.tests.subunitrun import TestProgram, SubunitOptions
import samba.getopt as options
import base64
from ldb import LdbError, SCOPE_BASE
from ldb import Message, MessageElement, Dn
from ldb import FLAG_MOD_ADD, FLAG_MOD_DELETE
from samba.dcerpc import security, misc, drsblobs
from samba.ndr import ndr_unpack, ndr_pack
from samba.auth import system_session
from samba import gensec, sd_utils
from samba.samdb import SamDB
from samba.credentials import Credentials, DONT_USE_KERBEROS
import samba.tests
from samba.tests import delete_force
parser = optparse.OptionParser("dirsync.py [options] <host>")
sambaopts = options.SambaOptions(parser)
parser.add_option_group(sambaopts)
parser.add_option_group(options.VersionOptions(parser))
# use command line creds if available
credopts = options.CredentialsOptions(parser)
parser.add_option_group(credopts)
subunitopts = SubunitOptions(parser)
parser.add_option_group(subunitopts)
opts, args = parser.parse_args()
if len(args) < 1:
parser.print_usage()
sys.exit(1)
host = args.pop()
if "://" not in host:
ldaphost = "ldap://%s" % host
ldapshost = "ldaps://%s" % host
else:
ldaphost = host
start = host.rindex("://")
    host = host[start+3:]
lp = sambaopts.get_loadparm()
creds = credopts.get_credentials(lp)
#
# Tests start here
#
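# Note on the dirsync control strings used throughout these tests: they
# appear to follow the ldb convention
#   dirsync:<criticality>:<flags>:<max return size>[:<base64 cookie>]
# and the tests rewrite the colon-separated fields of the control returned
# in res.controls[0] to issue incremental (cookie-based) searches.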
class DirsyncBaseTests(samba.tests.TestCase):
def setUp(self):
super(DirsyncBaseTests, self).setUp()
self.ldb_admin = SamDB(ldapshost, credentials=creds, session_info=system_session(lp), lp=lp)
self.base_dn = self.ldb_admin.domain_dn()
self.domain_sid = security.dom_sid(self.ldb_admin.get_domain_sid())
self.user_pass = "samba123@AAA"
self.configuration_dn = self.ldb_admin.get_config_basedn().get_linearized()
self.sd_utils = sd_utils.SDUtils(self.ldb_admin)
#used for anonymous login
print "baseDN: %s" % self.base_dn
def get_user_dn(self, name):
return "CN=%s,CN=Users,%s" % (name, self.base_dn)
def get_ldb_connection(self, target_username, target_password):
creds_tmp = Credentials()
creds_tmp.set_username(target_username)
creds_tmp.set_password(target_password)
creds_tmp.set_domain(creds.get_domain())
creds_tmp.set_realm(creds.get_realm())
creds_tmp.set_workstation(creds.get_workstation())
creds_tmp.set_gensec_features(creds_tmp.get_gensec_features()
| gensec.FEATURE_SEAL)
creds_tmp.set_kerberos_state(DONT_USE_KERBEROS) # kinit is too expensive to use in a tight loop
ldb_target = SamDB(url=ldaphost, credentials=creds_tmp, lp=lp)
return ldb_target
#tests on ldap add operations
class SimpleDirsyncTests(DirsyncBaseTests):
def setUp(self):
super(SimpleDirsyncTests, self).setUp()
# Regular user
self.dirsync_user = "test_dirsync_user"
self.simple_user = "test_simple_user"
self.admin_user = "test_admin_user"
self.ouname = None
self.ldb_admin.newuser(self.dirsync_user, self.user_pass)
self.ldb_admin.newuser(self.simple_user, self.user_pass)
self.ldb_admin.newuser(self.admin_user, self.user_pass)
self.desc_sddl = self.sd_utils.get_sd_as_sddl(self.base_dn)
user_sid = self.sd_utils.get_object_sid(self.get_user_dn(self.dirsync_user))
mod = "(A;;CR;1131f6aa-9c07-11d1-f79f-00c04fc2dcd2;;%s)" % str(user_sid)
self.sd_utils.dacl_add_ace(self.base_dn, mod)
# add admins to the Domain Admins group
self.ldb_admin.add_remove_group_members("Domain Admins", [self.admin_user],
add_members_operation=True)
def tearDown(self):
super(SimpleDirsyncTests, self).tearDown()
delete_force(self.ldb_admin, self.get_user_dn(self.dirsync_user))
delete_force(self.ldb_admin, self.get_user_dn(self.simple_user))
delete_force(self.ldb_admin, self.get_user_dn(self.admin_user))
if self.ouname:
delete_force(self.ldb_admin, self.ouname)
self.sd_utils.modify_sd_on_dn(self.base_dn, self.desc_sddl)
try:
self.ldb_admin.deletegroup("testgroup")
except Exception:
pass
#def test_dirsync_errors(self):
    def test_dirsync_supported(self):
        """Test that the basics of dirsync are supported"""
self.ldb_dirsync = self.get_ldb_connection(self.dirsync_user, self.user_pass)
self.ldb_simple = self.get_ldb_connection(self.simple_user, self.user_pass)
res = self.ldb_admin.search(self.base_dn, expression="samaccountname=*", controls=["dirsync:1:0:1"])
res = self.ldb_dirsync.search(self.base_dn, expression="samaccountname=*", controls=["dirsync:1:0:1"])
try:
self.ldb_simple.search(self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:0:1"])
except LdbError,l:
self.assertTrue(str(l).find("LDAP_INSUFFICIENT_ACCESS_RIGHTS") != -1)
def test_parentGUID_referrals(self):
res2 = self.ldb_admin.search(self.base_dn, scope=SCOPE_BASE, attrs=["objectGUID"])
res = self.ldb_admin.search(self.base_dn,
expression="name=Configuration",
controls=["dirsync:1:0:1"])
self.assertEqual(res2[0].get("objectGUID"), res[0].get("parentGUID"))
def test_ok_not_rootdc(self):
"""Test if it's ok to do dirsync on another NC that is not the root DC"""
self.ldb_admin.search(self.ldb_admin.get_config_basedn(),
expression="samaccountname=*",
controls=["dirsync:1:0:1"])
    def test_dirsync_errors(self):
        """Test that dirsync returns the correct LDAP errors in case of problems"""
self.ldb_simple = self.get_ldb_connection(self.simple_user, self.user_pass)
self.ldb_dirsync = self.get_ldb_connection(self.dirsync_user, self.user_pass)
try:
self.ldb_simple.search(self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:0:1"])
except LdbError,l:
print l
self.assertTrue(str(l).find("LDAP_INSUFFICIENT_ACCESS_RIGHTS") != -1)
try:
self.ldb_simple.search("CN=Users,%s" % self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:0:1"])
except LdbError,l:
print l
self.assertTrue(str(l).find("LDAP_INSUFFICIENT_ACCESS_RIGHTS") != -1)
try:
self.ldb_simple.search("CN=Users,%s" % self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:1:1"])
except LdbError,l:
print l
self.assertTrue(str(l).find("LDAP_UNWILLING_TO_PERFORM") != -1)
try:
self.ldb_dirsync.search("CN=Users,%s" % self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:0:1"])
except LdbError,l:
print l
self.assertTrue(str(l).find("LDAP_INSUFFICIENT_ACCESS_RIGHTS") != -1)
try:
self.ldb_admin.search("CN=Users,%s" % self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:0:1"])
except LdbError,l:
print l
self.assertTrue(str(l).find("LDAP_INSUFFICIENT_ACCESS_RIGHTS") != -1)
try:
self.ldb_admin.search("CN=Users,%s" % self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:1:1"])
except LdbError,l:
print l
self.assertTrue(str(l).find("LDAP_UNWILLING_TO_PERFORM") != -1)
def test_dirsync_attributes(self):
"""Check behavior with some attributes """
res = self.ldb_admin.search(self.base_dn,
expression="samaccountname=*",
controls=["dirsync:1:0:1"])
        # Check that nTSecurityDescriptor is returned, as is the case when doing dirsync
        self.assertTrue(res.msgs[0].get("ntsecuritydescriptor") != None)
        # Check that non-replicated attributes are not returned
        self.assertTrue(res.msgs[0].get("badPwdCount") == None)
        # Check that non-forward-link attributes are not returned
        self.assertTrue(res.msgs[0].get("memberof") == None)
        # Asking for instanceType will also return objectGUID
res = self.ldb_admin.search(self.base_dn,
expression="samaccountname=Administrator",
attrs=["instanceType"],
controls=["dirsync:1:0:1"])
self.assertTrue(res.msgs[0].get("objectGUID") != None)
self.assertTrue(res.msgs[0].get("instanceType") != None)
        # No entry is returned when asking only for objectGUID on the NC root
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % str(self.base_dn),
attrs=["objectGUID"],
controls=["dirsync:1:0:1"])
self.assertEquals(len(res.msgs), 0)
        # A request on the root of an NC doesn't return parentGUID
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % str(self.base_dn),
attrs=["name"],
controls=["dirsync:1:0:1"])
self.assertTrue(res.msgs[0].get("objectGUID") != None)
self.assertTrue(res.msgs[0].get("name") != None)
self.assertTrue(res.msgs[0].get("parentGUID") == None)
self.assertTrue(res.msgs[0].get("instanceType") != None)
        # Asking for name will also return objectGUID, parentGUID,
        # instanceType and, of course, name
res = self.ldb_admin.search(self.base_dn,
expression="samaccountname=Administrator",
attrs=["name"],
controls=["dirsync:1:0:1"])
self.assertTrue(res.msgs[0].get("objectGUID") != None)
self.assertTrue(res.msgs[0].get("name") != None)
self.assertTrue(res.msgs[0].get("parentGUID") != None)
self.assertTrue(res.msgs[0].get("instanceType") != None)
        # Asking for dn will return not only the DN but everything else,
        # as if attrs=*; parentGUID should be returned
res = self.ldb_admin.search(self.base_dn,
expression="samaccountname=Administrator",
attrs=["dn"],
controls=["dirsync:1:0:1"])
count = len(res.msgs[0])
res2 = self.ldb_admin.search(self.base_dn,
expression="samaccountname=Administrator",
controls=["dirsync:1:0:1"])
count2 = len(res2.msgs[0])
self.assertEqual(count, count2)
# Asking for cn will return nothing on objects that have CN as RDN
res = self.ldb_admin.search(self.base_dn,
expression="samaccountname=Administrator",
attrs=["cn"],
controls=["dirsync:1:0:1"])
self.assertEqual(len(res.msgs), 0)
# Asking for parentGUID will return nothing too
res = self.ldb_admin.search(self.base_dn,
expression="samaccountname=Administrator",
attrs=["parentGUID"],
controls=["dirsync:1:0:1"])
self.assertEqual(len(res.msgs), 0)
ouname="OU=testou,%s" % self.base_dn
self.ouname = ouname
self.ldb_admin.create_ou(ouname)
delta = Message()
delta.dn = Dn(self.ldb_admin, str(ouname))
delta["cn"] = MessageElement("test ou",
FLAG_MOD_ADD,
"cn" )
self.ldb_admin.modify(delta)
res = self.ldb_admin.search(self.base_dn,
expression="name=testou",
attrs=["cn"],
controls=["dirsync:1:0:1"])
self.assertEqual(len(res.msgs), 1)
self.assertEqual(len(res.msgs[0]), 3)
delete_force(self.ldb_admin, ouname)
    def test_dirsync_with_controls(self):
        """Check that dirsync works correctly when combined with other controls"""
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % str(self.base_dn),
attrs=["name"],
controls=["dirsync:1:0:10000", "extended_dn:1", "show_deleted:1"])
    def test_dirsync_basenc(self):
        """Check that dirsync returns correct information when dealing with the base NC"""
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % str(self.base_dn),
attrs=["name"],
controls=["dirsync:1:0:10000"])
self.assertEqual(len(res.msgs), 1)
self.assertEqual(len(res.msgs[0]), 3)
res = self.ldb_admin.search(self.base_dn,
expression="(distinguishedName=%s)" % str(self.base_dn),
attrs=["ntSecurityDescriptor"],
controls=["dirsync:1:0:10000"])
self.assertEqual(len(res.msgs), 1)
self.assertEqual(len(res.msgs[0]), 3)
    def test_dirsync_othernc(self):
        """Check that dirsync returns information for entries that are normally referrals (i.e. other NCs)"""
res = self.ldb_admin.search(self.base_dn,
expression="(objectclass=configuration)",
attrs=["name"],
controls=["dirsync:1:0:10000"])
self.assertEqual(len(res.msgs), 1)
self.assertEqual(len(res.msgs[0]), 4)
res = self.ldb_admin.search(self.base_dn,
expression="(objectclass=configuration)",
attrs=["ntSecurityDescriptor"],
controls=["dirsync:1:0:10000"])
self.assertEqual(len(res.msgs), 1)
self.assertEqual(len(res.msgs[0]), 3)
res = self.ldb_admin.search(self.base_dn,
expression="(objectclass=domaindns)",
attrs=["ntSecurityDescriptor"],
controls=["dirsync:1:0:10000"])
nb = len(res.msgs)
        # Only the sub-NCs return a result when asked for objectGUID
res = self.ldb_admin.search(self.base_dn,
expression="(objectclass=domaindns)",
attrs=["objectGUID"],
controls=["dirsync:1:0:0"])
self.assertEqual(len(res.msgs), nb - 1)
if nb > 1:
self.assertTrue(res.msgs[0].get("objectGUID") != None)
else:
res = self.ldb_admin.search(self.base_dn,
expression="(objectclass=configuration)",
attrs=["objectGUID"],
controls=["dirsync:1:0:0"])
    def test_dirsync_send_delta(self):
        """Check that dirsync returns the correct delta when sending the last cookie"""
res = self.ldb_admin.search(self.base_dn,
expression="(&(samaccountname=test*)(!(isDeleted=*)))",
controls=["dirsync:1:0:10000"])
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "0"
ctl[3] = "10000"
control = str(":".join(ctl))
res = self.ldb_admin.search(self.base_dn,
expression="(&(samaccountname=test*)(!(isDeleted=*)))",
controls=[control])
self.assertEqual(len(res), 0)
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=["dirsync:1:0:100000"])
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "0"
ctl[3] = "10000"
control2 = str(":".join(ctl))
# Let's create an OU
ouname="OU=testou2,%s" % self.base_dn
self.ouname = ouname
self.ldb_admin.create_ou(ouname)
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=[control2])
self.assertEqual(len(res), 1)
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "0"
ctl[3] = "10000"
control3 = str(":".join(ctl))
delta = Message()
delta.dn = Dn(self.ldb_admin, str(ouname))
delta["cn"] = MessageElement("test ou",
FLAG_MOD_ADD,
"cn" )
self.ldb_admin.modify(delta)
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=[control3])
self.assertEqual(len(res.msgs), 1)
# 3 attributes: instanceType, cn and objectGUID
self.assertEqual(len(res.msgs[0]), 3)
delta = Message()
delta.dn = Dn(self.ldb_admin, str(ouname))
delta["cn"] = MessageElement([],
FLAG_MOD_DELETE,
"cn" )
self.ldb_admin.modify(delta)
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=[control3])
self.assertEqual(len(res.msgs), 1)
        # Few attributes are returned, but instanceType and the GUID are.
        # 3 attributes: instanceType, objectGUID, and cn (empty)
self.assertEqual(len(res.msgs[0]), 3)
ouname = "OU=newouname,%s" % self.base_dn
self.ldb_admin.rename(str(res[0].dn), str(Dn(self.ldb_admin, ouname)))
self.ouname = ouname
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "0"
ctl[3] = "10000"
control4 = str(":".join(ctl))
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=[control3])
self.assertTrue(res[0].get("parentGUID") != None)
self.assertTrue(res[0].get("name") != None)
delete_force(self.ldb_admin, ouname)
    def test_dirsync_linkedattributes(self):
        """Check that dirsync handles linked attributes (group membership) correctly"""
# Let's search for members
self.ldb_simple = self.get_ldb_connection(self.simple_user, self.user_pass)
res = self.ldb_simple.search(self.base_dn,
expression="(name=Administrators)",
controls=["dirsync:1:1:1"])
self.assertTrue(len(res[0].get("member")) > 0)
size = len(res[0].get("member"))
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "1"
ctl[3] = "10000"
control1 = str(":".join(ctl))
self.ldb_admin.add_remove_group_members("Administrators", [self.simple_user],
add_members_operation=True)
res = self.ldb_simple.search(self.base_dn,
expression="(name=Administrators)",
controls=[control1])
self.assertEqual(len(res[0].get("member")), size + 1)
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "1"
ctl[3] = "10000"
control1 = str(":".join(ctl))
# remove the user from the group
self.ldb_admin.add_remove_group_members("Administrators", [self.simple_user],
add_members_operation=False)
res = self.ldb_simple.search(self.base_dn,
expression="(name=Administrators)",
controls=[control1])
self.assertEqual(len(res[0].get("member")), size )
self.ldb_admin.newgroup("testgroup")
self.ldb_admin.add_remove_group_members("testgroup", [self.simple_user],
add_members_operation=True)
res = self.ldb_admin.search(self.base_dn,
expression="(name=testgroup)",
controls=["dirsync:1:0:1"])
self.assertEqual(len(res[0].get("member")), 1)
self.assertTrue(res[0].get("member") != "" )
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "0"
ctl[3] = "1"
control1 = str(":".join(ctl))
        # Check that re-asking the same question with an updated cookie
        # doesn't return any results.
print control1
res = self.ldb_admin.search(self.base_dn,
expression="(name=testgroup)",
controls=[control1])
self.assertEqual(len(res), 0)
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "1"
ctl[3] = "10000"
control1 = str(":".join(ctl))
self.ldb_admin.add_remove_group_members("testgroup", [self.simple_user],
add_members_operation=False)
res = self.ldb_admin.search(self.base_dn,
expression="(name=testgroup)",
attrs=["member"],
controls=[control1])
self.ldb_admin.deletegroup("testgroup")
self.assertEqual(len(res[0].get("member")), 0)
    def test_dirsync_deleted_items(self):
        """Check that dirsync returns deleted objects too"""
# Let's create an OU
ouname="OU=testou3,%s" % self.base_dn
self.ouname = ouname
self.ldb_admin.create_ou(ouname)
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=["dirsync:1:0:1"])
guid = None
for e in res:
if str(e["name"]) == "testou3":
guid = str(ndr_unpack(misc.GUID,e.get("objectGUID")[0]))
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "0"
ctl[3] = "10000"
control1 = str(":".join(ctl))
        # So now delete the object and check that, as admin, we can still
        # see the object, but marked as deleted
delete_force(self.ldb_admin, ouname)
res = self.ldb_admin.search(self.base_dn,
expression="(objectClass=organizationalUnit)",
controls=[control1])
self.assertEqual(len(res), 1)
guid2 = str(ndr_unpack(misc.GUID,res[0].get("objectGUID")[0]))
self.assertEqual(guid2, guid)
self.assertTrue(res[0].get("isDeleted"))
self.assertTrue(res[0].get("name") != None)
def test_cookie_from_others(self):
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=["dirsync:1:0:1"])
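        # Unpack the cookie returned by the server, overwrite its source
        # GUID so it looks like a cookie issued by a different server,
        # then replay it.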
ctl = str(res.controls[0]).split(":")
cookie = ndr_unpack(drsblobs.ldapControlDirSyncCookie, base64.b64decode(str(ctl[4])))
cookie.blob.guid1 = misc.GUID("128a99bf-abcd-1234-abcd-1fb625e530db")
controls=["dirsync:1:0:0:%s" % base64.b64encode(ndr_pack(cookie))]
res = self.ldb_admin.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=controls)
class ExtendedDirsyncTests(SimpleDirsyncTests):
def test_dirsync_linkedattributes(self):
flag_incr_linked = 2147483648
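        # 2147483648 == 0x80000000, i.e. LDAP_DIRSYNC_INCREMENTAL_VALUES:
        # only the changed values of linked attributes are returned.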
self.ldb_simple = self.get_ldb_connection(self.simple_user, self.user_pass)
res = self.ldb_admin.search(self.base_dn,
attrs=["member"],
expression="(name=Administrators)",
controls=["dirsync:1:%d:1" % flag_incr_linked])
self.assertTrue(res[0].get("member;range=1-1") != None )
self.assertTrue(len(res[0].get("member;range=1-1")) > 0)
size = len(res[0].get("member;range=1-1"))
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "%d" % flag_incr_linked
ctl[3] = "10000"
control1 = str(":".join(ctl))
self.ldb_admin.add_remove_group_members("Administrators", [self.simple_user],
add_members_operation=True)
self.ldb_admin.add_remove_group_members("Administrators", [self.dirsync_user],
add_members_operation=True)
res = self.ldb_admin.search(self.base_dn,
expression="(name=Administrators)",
controls=[control1])
self.assertEqual(len(res[0].get("member;range=1-1")), 2)
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "%d" % flag_incr_linked
ctl[3] = "10000"
control1 = str(":".join(ctl))
# remove the user from the group
self.ldb_admin.add_remove_group_members("Administrators", [self.simple_user],
add_members_operation=False)
res = self.ldb_admin.search(self.base_dn,
expression="(name=Administrators)",
controls=[control1])
self.assertEqual(res[0].get("member;range=1-1"), None )
self.assertEqual(len(res[0].get("member;range=0-0")), 1)
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "%d" % flag_incr_linked
ctl[3] = "10000"
control2 = str(":".join(ctl))
self.ldb_admin.add_remove_group_members("Administrators", [self.dirsync_user],
add_members_operation=False)
res = self.ldb_admin.search(self.base_dn,
expression="(name=Administrators)",
controls=[control2])
self.assertEqual(res[0].get("member;range=1-1"), None )
self.assertEqual(len(res[0].get("member;range=0-0")), 1)
res = self.ldb_admin.search(self.base_dn,
expression="(name=Administrators)",
controls=[control1])
self.assertEqual(res[0].get("member;range=1-1"), None )
self.assertEqual(len(res[0].get("member;range=0-0")), 2)
    def test_dirsync_deleted_items(self):
        """Check that dirsync returns deleted objects too"""
# Let's create an OU
self.ldb_simple = self.get_ldb_connection(self.simple_user, self.user_pass)
ouname="OU=testou3,%s" % self.base_dn
self.ouname = ouname
self.ldb_admin.create_ou(ouname)
# Specify LDAP_DIRSYNC_OBJECT_SECURITY
res = self.ldb_simple.search(self.base_dn,
expression="(&(objectClass=organizationalUnit)(!(isDeleted=*)))",
controls=["dirsync:1:1:1"])
guid = None
for e in res:
if str(e["name"]) == "testou3":
guid = str(ndr_unpack(misc.GUID,e.get("objectGUID")[0]))
self.assertTrue(guid != None)
ctl = str(res.controls[0]).split(":")
ctl[1] = "1"
ctl[2] = "1"
ctl[3] = "10000"
control1 = str(":".join(ctl))
        # So now delete the object and check that, as admin, we can still
        # see the object marked as deleted, while the simple user only
        # sees the objectGUID
delete_force(self.ldb_admin, ouname)
res = self.ldb_simple.search(self.base_dn,
expression="(objectClass=organizationalUnit)",
controls=[control1])
self.assertEqual(len(res), 1)
guid2 = str(ndr_unpack(misc.GUID,res[0].get("objectGUID")[0]))
self.assertEqual(guid2, guid)
self.assertEqual(str(res[0].dn), "")
if not getattr(opts, "listtests", False):
lp = sambaopts.get_loadparm()
samba.tests.cmdline_credentials = credopts.get_credentials(lp)
TestProgram(module=__name__, opts=subunitopts)
|
apixandru/intellij-community
|
refs/heads/master
|
python/testData/quickFixes/PyMakeMethodStaticQuickFixTest/noSelf.py
|
83
|
__author__ = 'ktisha'
class Child(Base):
def __init__(self):
super(Child, self).__init__()
def <caret>f():
test = 1
|
Hybrid-Cloud/conveyor
|
refs/heads/master
|
conveyor/conveyorheat/engine/notification/__init__.py
|
1
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# from conveyor.conveyorheat.common import messaging
SERVICE = 'orchestration'
INFO = 'INFO'
ERROR = 'ERROR'
notifier_opts = [
cfg.StrOpt('default_notification_level',
default=INFO,
help='Default notification level for outgoing notifications.'),
cfg.StrOpt('default_publisher_id',
help='Default publisher_id for outgoing notifications.'),
]
CONF = cfg.CONF
CONF.register_opts(notifier_opts)
def _get_default_publisher():
publisher_id = CONF.default_publisher_id
if publisher_id is None:
publisher_id = "%s.%s" % (SERVICE, CONF.host)
return publisher_id
def get_default_level():
return CONF.default_notification_level.upper()
def notify(context, event_type, level, body):
# client = messaging.get_notifier(_get_default_publisher())
client = None
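    # NOTE: with the messaging notifier commented out above, client is None,
    # so the getattr call below will raise AttributeError if notify() is
    # actually invoked.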
method = getattr(client, level.lower())
method(context, "%s.%s" % (SERVICE, event_type), body)
def list_opts():
yield None, notifier_opts
|
samitnuk/talks_keeper
|
refs/heads/master
|
talks_keeper/models.py
|
1
|
from django.db import models
from django.urls import reverse
from colorfield.fields import ColorField
class Country(models.Model):
name = models.CharField(
max_length=256, db_index=True, verbose_name="Назва країни")
class Meta:
verbose_name = "Країна"
verbose_name_plural = "Країни"
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("country_detail", kwargs={"pk": self.pk})
class Company(models.Model):
short_name = models.CharField(
max_length=256, db_index=True, verbose_name="Коротка назва")
full_name = models.CharField(
max_length=256, verbose_name="Повна назва", blank=True)
country = models.ForeignKey(Country)
class Meta:
verbose_name = "Підприємство"
verbose_name_plural = "Підприємства"
def __str__(self):
return self.short_name
def get_absolute_url(self):
return reverse("company_detail", kwargs={"pk": self.pk})
@property
def talks(self):
return self.talk.all()
class Talk(models.Model):
date = models.DateTimeField(verbose_name="Дата")
company = models.ForeignKey(Company, related_name="talk")
source_info = models.TextField(verbose_name="Інфо про джерело")
talk_details = models.TextField(verbose_name="Інфо про розмову")
is_our_talk = models.BooleanField(default=False,
verbose_name="Ми звернулися")
class Meta:
verbose_name = "Розмова"
verbose_name_plural = "Розмови"
ordering = ['-date']
def __str__(self):
return "{} / {}".format(self.date, self.company)
def get_absolute_url(self):
return reverse("talk_detail", kwargs={"pk": self.pk})
def get_labels(self):
return self.label_set.all()
class Label(models.Model):
name = models.CharField(
max_length=100, db_index=True,
unique=True, verbose_name="Назва мітки")
color = ColorField(verbose_name="Колір")
talks = models.ManyToManyField(Talk, blank=True)
class Meta:
verbose_name = "Мітка"
verbose_name_plural = "Мітки"
ordering = ['name']
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse("label_detail", kwargs={"pk": self.pk})
|
avadacatavra/servo
|
refs/heads/master
|
components/script/dom/bindings/codegen/parser/tests/test_callback_interface.py
|
142
|
import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
callback interface TestCallbackInterface {
attribute boolean bool;
};
""")
results = parser.finish()
iface = results[0]
harness.ok(iface.isCallback(), "Interface should be a callback")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestInterface {
};
callback interface TestCallbackInterface : TestInterface {
attribute boolean bool;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should not allow non-callback parent of callback interface")
parser = parser.reset()
threw = False
try:
parser.parse("""
interface TestInterface : TestCallbackInterface {
};
callback interface TestCallbackInterface {
attribute boolean bool;
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should not allow callback parent of non-callback interface")
parser = parser.reset()
parser.parse("""
callback interface TestCallbackInterface1 {
void foo();
};
callback interface TestCallbackInterface2 {
void foo(DOMString arg);
void foo(TestCallbackInterface1 arg);
};
callback interface TestCallbackInterface3 {
void foo(DOMString arg);
void foo(TestCallbackInterface1 arg);
static void bar();
};
callback interface TestCallbackInterface4 {
void foo(DOMString arg);
void foo(TestCallbackInterface1 arg);
static void bar();
const long baz = 5;
};
callback interface TestCallbackInterface5 {
static attribute boolean bool;
void foo();
};
callback interface TestCallbackInterface6 {
void foo(DOMString arg);
void foo(TestCallbackInterface1 arg);
void bar();
};
callback interface TestCallbackInterface7 {
static attribute boolean bool;
};
callback interface TestCallbackInterface8 {
attribute boolean bool;
};
callback interface TestCallbackInterface9 : TestCallbackInterface1 {
void foo();
};
callback interface TestCallbackInterface10 : TestCallbackInterface1 {
void bar();
};
""")
results = parser.finish()
for (i, iface) in enumerate(results):
harness.check(iface.isSingleOperationInterface(), i < 4,
"Interface %s should be a single operation interface" %
iface.identifier.name)
|
unasuke/AtCoder
|
refs/heads/master
|
Beginner/13/13-A.py
|
1
|
#AtCoder Beginner 13 A
str = raw_input()
if str == 'A':
print "1"
elif str == 'B':
print "2"
elif str == 'C':
print "3"
elif str == 'D':
print "4"
elif str == 'E':
print "5"
|
lixun910/pysal
|
refs/heads/master
|
pysal/lib/weights/tests/test_spintW.py
|
3
|
import unittest
import numpy as np
from ..spintW import ODW, netW, mat2L, vecW
from ..util import lat2W
class TestODWeights(unittest.TestCase):
def setUp(self):
self.O = lat2W(2,2)
self.D = lat2W(2,2)
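        # Expected full OD weight matrix: the Kronecker product of the
        # origin and destination weights, row-standardized (each row has
        # four neighbour pairs, hence the 0.25 entries).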
self.ODW = np.array(
[[ 0. , 0. , 0. , 0. , 0. , 0.25, 0.25, 0. , 0. ,
0.25, 0.25, 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0.25, 0. , 0. , 0.25, 0.25,
0. , 0. , 0.25, 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0.25, 0. , 0. , 0.25, 0.25,
0. , 0. , 0.25, 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0.25, 0.25, 0. , 0. ,
0.25, 0.25, 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0.25, 0.25, 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0.25, 0.25, 0. ],
[0.25, 0. , 0. , 0.25, 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25, 0. , 0. , 0.25],
[ 0.25, 0. , 0. , 0.25, 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25, 0. , 0. , 0.25],
[ 0. , 0.25, 0.25, 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0.25, 0.25, 0. ],
[ 0. , 0.25, 0.25, 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0.25, 0.25, 0. ],
[ 0.25, 0. , 0. , 0.25, 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25, 0. , 0. , 0.25],
[ 0.25, 0. , 0. , 0.25, 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0.25, 0. , 0. , 0.25],
[ 0. , 0.25, 0.25, 0. , 0. , 0. , 0. , 0. , 0. ,
0. , 0. , 0. , 0. , 0.25, 0.25, 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0.25, 0.25, 0. , 0. ,
0.25, 0.25, 0. , 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0.25, 0. , 0. , 0.25, 0.25,
0. , 0. , 0.25, 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0.25, 0. , 0. , 0.25, 0.25,
0. , 0. , 0.25, 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. , 0. , 0.25, 0.25, 0. , 0. ,
0.25, 0.25, 0. , 0. , 0. , 0. , 0. ]])
def test_ODW_full(self):
W = ODW(self.O, self.D)
np.testing.assert_allclose(self.ODW, W.full()[0])
class TestNetW(unittest.TestCase):
def setUp(self):
self.link_list = [('a', 'b'), ('a', 'c'), ('a', 'd'),
('b', 'a'), ('b', 'c'), ('b', 'd'),
('c', 'a'), ('c', 'b'), ('c', 'd'),
('d', 'a'), ('d', 'b'), ('d', 'c')]
self._all = np.array([
[ 0., 1., 1., 1., 1., 1., 1., 1., 0., 1., 1., 0.],
[ 1., 0., 1., 1., 1., 0., 1., 1., 1., 1., 0., 1.],
[ 1., 1., 0., 1., 0., 1., 1., 0., 1., 1., 1., 1.],
[ 1., 1., 1., 0., 1., 1., 1., 1., 0., 1., 1., 0.],
[ 1., 1., 0., 1., 0., 1., 1., 1., 1., 0., 1., 1.],
[ 1., 0., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1.],
[ 1., 1., 1., 1., 1., 0., 0., 1., 1., 1., 0., 1.],
[ 1., 1., 0., 1., 1., 1., 1., 0., 1., 0., 1., 1.],
[ 0., 1., 1., 0., 1., 1., 1., 1., 0., 1., 1., 1.],
[ 1., 1., 1., 1., 0., 1., 1., 0., 1., 0., 1., 1.],
[ 1., 0., 1., 1., 1., 1., 0., 1., 1., 1., 0., 1.],
[ 0., 1., 1., 0., 1., 1., 1., 1., 1., 1., 1., 0.]])
self.O = np.array([
[ 0., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 1., 0., 1., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 1.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0.]])
self.D = np.array([
[ 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 1., 0.],
[ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 1.],
[ 0., 0., 0., 0., 0., 1., 0., 0., 1., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 1., 0., 0., 1., 0., 0.],
[ 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.],
[ 0., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0., 0.],
[ 0., 0., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.],
[ 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0.],
[ 0., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
[ 1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0.],
[ 0., 1., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.]])
self.OD = np.array([
[ 0., 1., 1., 0., 0., 0., 0., 1., 0., 0., 1., 0.],
[ 1., 0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 1.],
[ 1., 1., 0., 0., 0., 1., 0., 0., 1., 0., 0., 0.],
[ 0., 0., 0., 0., 1., 1., 1., 0., 0., 1., 0., 0.],
[ 0., 1., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1.],
[ 0., 0., 1., 1., 1., 0., 0., 0., 1., 0., 0., 0.],
[ 0., 0., 0., 1., 0., 0., 0., 1., 1., 1., 0., 0.],
[ 1., 0., 0., 0., 0., 0., 1., 0., 1., 0., 1., 0.],
[ 0., 0., 1., 0., 0., 1., 1., 1., 0., 0., 0., 0.],
[ 0., 0., 0., 1., 0., 0., 1., 0., 0., 0., 1., 1.],
[ 1., 0., 0., 0., 0., 0., 0., 1., 0., 1., 0., 1.],
[ 0., 1., 0., 0., 1., 0., 0., 0., 0., 1., 1., 0.]])
self.C = np.array([
[ 0., 0., 0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 1., 1., 1., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1.],
[ 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 1., 1., 1., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1.],
[ 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 1.],
[ 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
[ 0., 0., 0., 0., 0., 0., 1., 1., 1., 0., 0., 0.]])
self.edge_list = [(0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
def test_netOD(self):
netW_OD = netW(self.link_list, share='OD')
np.testing.assert_allclose(netW_OD.full()[0], self.OD)
def test_netO(self):
netW_O = netW(self.link_list, share='O')
np.testing.assert_allclose(netW_O.full()[0], self.O)
def test_netD(self):
netW_D = netW(self.link_list, share='D')
np.testing.assert_allclose(netW_D.full()[0], self.D)
def test_netC(self):
netW_C = netW(self.link_list, share='C')
np.testing.assert_allclose(netW_C.full()[0], self.C)
def test_net_all(self):
netW_all = netW(self.link_list, share='A')
np.testing.assert_allclose(netW_all.full()[0], self._all)
def test_mat2L(self):
mat = np.array([[0,1,1],[1,0,1],[1,1,0]])
edge_list = mat2L(mat)
self.assertEqual(edge_list, self.edge_list)
class TestVecW(unittest.TestCase):
def setUp(self):
self.origin_x = np.array([2,6,9,2])
self.origin_y = np.array([4,8,2,5])
self.dest_x = np.array([9,1,6,3])
self.dest_y = np.array([3,6,2,7])
self.continuous = np.array([
[ 0. , 0.09759001, 0.12598816, 0.13736056],
[ 0.09759001, 0. , 0.10783277, 0.18257419],
[ 0.12598816, 0.10783277, 0. , 0.10425721],
[ 0.13736056, 0.18257419, 0.10425721, 0. ]])
def test_vecW(self):
W = vecW(self.origin_x, self.origin_y, self.dest_x, self.dest_y,
threshold=np.inf, binary=False)
np.testing.assert_allclose(self.continuous, W.full()[0])
|
VitalPet/odoo
|
refs/heads/7.0
|
addons/document_page/wizard/document_page_show_diff.py
|
59
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import base64
class showdiff(osv.osv_memory):
    """ Display Difference for History """
_name = 'wizard.document.page.history.show_diff'
def get_diff(self, cr, uid, context=None):
if context is None:
context = {}
history = self.pool.get('document.page.history')
ids = context.get('active_ids', [])
diff = ""
if len(ids) == 2:
if ids[0] > ids[1]:
diff = history.getDiff(cr, uid, ids[1], ids[0])
else:
diff = history.getDiff(cr, uid, ids[0], ids[1])
elif len(ids) == 1:
old = history.browse(cr, uid, ids[0])
nids = history.search(cr, uid, [('page_id', '=', old.page_id.id)])
nids.sort()
diff = history.getDiff(cr, uid, ids[0], nids[-1])
else:
            raise osv.except_osv(_('Warning!'), _('You need to select at least one and at most two history revisions!'))
return diff
_columns = {
'diff': fields.text('Diff', readonly=True),
}
_defaults = {
'diff': get_diff
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
suyashphadtare/test
|
refs/heads/develop
|
erpnext/hr/report/monthly_attendance_sheet/monthly_attendance_sheet.py
|
25
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import cstr, cint
from frappe import msgprint, _
def execute(filters=None):
if not filters: filters = {}
conditions, filters = get_conditions(filters)
columns = get_columns(filters)
att_map = get_attendance_list(conditions, filters)
emp_map = get_employee_details()
data = []
for emp in sorted(att_map):
emp_det = emp_map.get(emp)
if not emp_det:
continue
row = [emp, emp_det.employee_name, emp_det.branch, emp_det.department, emp_det.designation,
emp_det.company]
total_p = total_a = 0.0
for day in range(filters["total_days_in_month"]):
status = att_map.get(emp).get(day + 1, "Absent")
status_map = {"Present": "P", "Absent": "A", "Half Day": "HD"}
row.append(status_map[status])
if status == "Present":
total_p += 1
elif status == "Absent":
total_a += 1
elif status == "Half Day":
total_p += 0.5
total_a += 0.5
row += [total_p, total_a]
data.append(row)
return columns, data
def get_columns(filters):
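    # Frappe report column strings follow the "Label:Fieldtype/Options:Width"
    # convention, e.g. _("Employee") + ":Link/Employee:120".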
columns = [
_("Employee") + ":Link/Employee:120", _("Employee Name") + "::140", _("Branch")+ ":Link/Branch:120",
_("Department") + ":Link/Department:120", _("Designation") + ":Link/Designation:120",
_("Company") + ":Link/Company:120"
]
for day in range(filters["total_days_in_month"]):
columns.append(cstr(day+1) +"::20")
columns += [_("Total Present") + ":Float:80", _("Total Absent") + ":Float:80"]
return columns
def get_attendance_list(conditions, filters):
attendance_list = frappe.db.sql("""select employee, day(att_date) as day_of_month,
status from tabAttendance where docstatus = 1 %s order by employee, att_date""" %
conditions, filters, as_dict=1)
att_map = {}
for d in attendance_list:
att_map.setdefault(d.employee, frappe._dict()).setdefault(d.day_of_month, "")
att_map[d.employee][d.day_of_month] = d.status
return att_map
def get_conditions(filters):
if not (filters.get("month") and filters.get("fiscal_year")):
msgprint(_("Please select month and year"), raise_exception=1)
filters["month"] = ["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov",
"Dec"].index(filters["month"]) + 1
from calendar import monthrange
filters["total_days_in_month"] = monthrange(cint(filters["fiscal_year"].split("-")[-1]),
filters["month"])[1]
conditions = " and month(att_date) = %(month)s and fiscal_year = %(fiscal_year)s"
if filters.get("company"): conditions += " and company = %(company)s"
if filters.get("employee"): conditions += " and employee = %(employee)s"
return conditions, filters
def get_employee_details():
emp_map = frappe._dict()
for d in frappe.db.sql("""select name, employee_name, designation,
department, branch, company
from tabEmployee where docstatus < 2
and status = 'Active'""", as_dict=1):
emp_map.setdefault(d.name, d)
return emp_map
|
jose36/plugin.video.Jmdl2
|
refs/heads/master
|
servers/movreel.py
|
42
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Connector for movreel
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os
from core import scrapertools
from core import logger
from core import config
from core import jsunpack
def test_video_exists( page_url ):
return True,""
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
logger.info("[movreel.py] get_video_url(page_url='%s')" % page_url)
video_urls = []
data = scrapertools.cache_page(page_url)
op = scrapertools.get_match(data,'<input type="hidden" name="op" value="([^"]+)">')
file_code = scrapertools.get_match(data,'<input type="hidden" name="file_code" value="([^"]+)">')
w = scrapertools.get_match(data,'<input type="hidden" name="w" value="([^"]+)">')
h = scrapertools.get_match(data,'<input type="hidden" name="h" value="([^"]+)">')
method_free = scrapertools.get_match(data,'<input type="submit" name="method_free" value="([^"]+)">')
#op=video_embed&file_code=yrwo5dotp1xy&w=600&h=400&method_free=Close+Ad+and+Watch+as+Free+User
post = urllib.urlencode( {"op":op,"file_code":file_code,"w":w,"h":h,"method_free":method_free} )
data = scrapertools.cache_page(page_url,post=post)
data = jsunpack.unpack(data)
logger.info("data="+data)
media_url = scrapertools.get_match(data,'file\:"([^"]+)"')
video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" [movreel]",media_url])
for video_url in video_urls:
logger.info("[movreel.py] %s - %s" % (video_url[0],video_url[1]))
return video_urls
# Finds this server's videos in the given text
def find_videos(data):
encontrados = set()
devuelve = []
#http://movreel.com/embed/l8ondvel8ynb
data = urllib.unquote(data)
patronvideos = 'movreel.com/embed/([a-z0-9]+)'
logger.info("[movreel.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[movreel]"
url = "http://movreel.com/embed/"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'movreel' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
#http%3A%2F%2Fwww.movreel.com%2Fuau47ktbg4dx
data = urllib.unquote(data)
patronvideos = 'movreel.com/([a-z0-9]+)'
logger.info("[movreel.py] find_videos #"+patronvideos+"#")
matches = re.compile(patronvideos,re.DOTALL).findall(data)
for match in matches:
titulo = "[movreel]"
url = "http://movreel.com/embed/"+match
if url not in encontrados:
logger.info(" url="+url)
devuelve.append( [ titulo , url , 'movreel' ] )
encontrados.add(url)
else:
logger.info(" url duplicada="+url)
return devuelve
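# Illustrative sketch (hypothetical input): given the quoted form from the
# comment above,
#   data = 'http%3A%2F%2Fwww.movreel.com%2Fuau47ktbg4dx'
# the first (embed) pattern finds nothing, the second pattern captures
# 'uau47ktbg4dx', and find_videos(data) returns
#   [['[movreel]', 'http://movreel.com/embed/uau47ktbg4dx', 'movreel']]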
|
mchughmk/react-django
|
refs/heads/master
|
backend/ReactDjango/wsgi.py
|
1
|
"""
WSGI config for ReactDjango project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ReactDjango.settings")
application = get_wsgi_application()
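# Illustrative usage: a WSGI server imports the callable above, e.g.
#   gunicorn ReactDjango.wsgi:application
# while Django's manage.py runserver picks up the same settings module
# automatically.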
|
EricCline/CEM_inc
|
refs/heads/master
|
env/lib/python2.7/site-packages/django/contrib/auth/forms.py
|
59
|
from __future__ import unicode_literals
from django import forms
from django.forms.util import flatatt
from django.template import loader
from django.utils.datastructures import SortedDict
from django.utils.html import format_html, format_html_join
from django.utils.http import int_to_base36
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth import authenticate, get_user_model
from django.contrib.auth.models import User
from django.contrib.auth.hashers import UNUSABLE_PASSWORD, identify_hasher
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.models import get_current_site
UNMASKED_DIGITS_TO_SHOW = 6
mask_password = lambda p: "%s%s" % (p[:UNMASKED_DIGITS_TO_SHOW], "*" * max(len(p) - UNMASKED_DIGITS_TO_SHOW, 0))
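# Illustrative example: with UNMASKED_DIGITS_TO_SHOW = 6,
#   mask_password("abcdefghij") == "abcdef****"
# i.e. the first six characters are kept and the remainder is starred out.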
class ReadOnlyPasswordHashWidget(forms.Widget):
def render(self, name, value, attrs):
encoded = value
final_attrs = self.build_attrs(attrs)
if not encoded or encoded == UNUSABLE_PASSWORD:
summary = mark_safe("<strong>%s</strong>" % ugettext("No password set."))
else:
try:
hasher = identify_hasher(encoded)
except ValueError:
summary = mark_safe("<strong>%s</strong>" % ugettext(
"Invalid password format or unknown hashing algorithm."))
else:
summary = format_html_join('',
"<strong>{0}</strong>: {1} ",
((ugettext(key), value)
for key, value in hasher.safe_summary(encoded).items())
)
return format_html("<div{0}>{1}</div>", flatatt(final_attrs), summary)
class ReadOnlyPasswordHashField(forms.Field):
widget = ReadOnlyPasswordHashWidget
def __init__(self, *args, **kwargs):
kwargs.setdefault("required", False)
super(ReadOnlyPasswordHashField, self).__init__(*args, **kwargs)
def bound_data(self, data, initial):
# Always return initial because the widget doesn't
# render an input field.
return initial
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'duplicate_username': _("A user with that username already exists."),
'password_mismatch': _("The two password fields didn't match."),
}
username = forms.RegexField(label=_("Username"), max_length=30,
regex=r'^[\w.@+-]+$',
help_text=_("Required. 30 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as above, for verification."))
class Meta:
model = User
fields = ("username",)
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
User._default_manager.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(self.error_messages['duplicate_username'])
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserChangeForm(forms.ModelForm):
username = forms.RegexField(
label=_("Username"), max_length=30, regex=r"^[\w.@+-]+$",
help_text=_("Required. 30 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
password = ReadOnlyPasswordHashField(label=_("Password"),
help_text=_("Raw passwords are not stored, so there is no way to see "
"this user's password, but you can change the password "
"using <a href=\"password/\">this form</a>."))
class Meta:
model = User
def __init__(self, *args, **kwargs):
super(UserChangeForm, self).__init__(*args, **kwargs)
f = self.fields.get('user_permissions', None)
if f is not None:
f.queryset = f.queryset.select_related('content_type')
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial["password"]
class AuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = forms.CharField(max_length=254)
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)
error_messages = {
'invalid_login': _("Please enter a correct %(username)s and password. "
"Note that both fields may be case-sensitive."),
'no_cookies': _("Your Web browser doesn't appear to have cookies "
"enabled. Cookies are required for logging in."),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
If request is passed in, the form will validate that cookies are
enabled. Note that the request (a HttpRequest object) must have set a
cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
running this validation.
"""
self.request = request
self.user_cache = None
super(AuthenticationForm, self).__init__(*args, **kwargs)
# Set the label for the "username" field.
UserModel = get_user_model()
self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
if not self.fields['username'].label:
self.fields['username'].label = capfirst(self.username_field.verbose_name)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username and password:
self.user_cache = authenticate(username=username,
password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'] % {
'username': self.username_field.verbose_name
})
elif not self.user_cache.is_active:
raise forms.ValidationError(self.error_messages['inactive'])
self.check_for_test_cookie()
return self.cleaned_data
def check_for_test_cookie(self):
if self.request and not self.request.session.test_cookie_worked():
raise forms.ValidationError(self.error_messages['no_cookies'])
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
class PasswordResetForm(forms.Form):
error_messages = {
'unknown': _("That email address doesn't have an associated "
"user account. Are you sure you've registered?"),
'unusable': _("The user account associated with this email "
"address cannot reset the password."),
}
email = forms.EmailField(label=_("Email"), max_length=254)
def clean_email(self):
"""
Validates that an active user exists with the given email address.
"""
UserModel = get_user_model()
email = self.cleaned_data["email"]
self.users_cache = UserModel._default_manager.filter(email__iexact=email)
if not len(self.users_cache):
raise forms.ValidationError(self.error_messages['unknown'])
if not any(user.is_active for user in self.users_cache):
# none of the filtered users are active
raise forms.ValidationError(self.error_messages['unknown'])
if any((user.password == UNUSABLE_PASSWORD)
for user in self.users_cache):
raise forms.ValidationError(self.error_messages['unusable'])
return email
def save(self, domain_override=None,
subject_template_name='registration/password_reset_subject.txt',
email_template_name='registration/password_reset_email.html',
use_https=False, token_generator=default_token_generator,
from_email=None, request=None):
"""
        Generates a one-use only link for resetting the password and sends it
        to the user.
"""
from django.core.mail import send_mail
for user in self.users_cache:
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
c = {
'email': user.email,
'domain': domain,
'site_name': site_name,
'uid': int_to_base36(user.pk),
'user': user,
'token': token_generator.make_token(user),
'protocol': use_https and 'https' or 'http',
}
subject = loader.render_to_string(subject_template_name, c)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
email = loader.render_to_string(email_template_name, c)
send_mail(subject, email, from_email, [user.email])
class SetPasswordForm(forms.Form):
"""
    A form that lets a user set his/her password without entering the
    old password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
new_password1 = forms.CharField(label=_("New password"),
widget=forms.PasswordInput)
new_password2 = forms.CharField(label=_("New password confirmation"),
widget=forms.PasswordInput)
def __init__(self, user, *args, **kwargs):
self.user = user
super(SetPasswordForm, self).__init__(*args, **kwargs)
def clean_new_password2(self):
password1 = self.cleaned_data.get('new_password1')
password2 = self.cleaned_data.get('new_password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
self.user.set_password(self.cleaned_data['new_password1'])
if commit:
self.user.save()
return self.user
class PasswordChangeForm(SetPasswordForm):
"""
    A form that lets a user change their password by entering
    their old password.
"""
error_messages = dict(SetPasswordForm.error_messages, **{
'password_incorrect': _("Your old password was entered incorrectly. "
"Please enter it again."),
})
old_password = forms.CharField(label=_("Old password"),
widget=forms.PasswordInput)
def clean_old_password(self):
"""
Validates that the old_password field is correct.
"""
old_password = self.cleaned_data["old_password"]
if not self.user.check_password(old_password):
raise forms.ValidationError(
self.error_messages['password_incorrect'])
return old_password
PasswordChangeForm.base_fields = SortedDict([
(k, PasswordChangeForm.base_fields[k])
for k in ['old_password', 'new_password1', 'new_password2']
])
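# Illustrative note: the SortedDict rebuild above only reorders the inherited
# base_fields so that 'old_password' renders before the two new-password
# fields; the field objects themselves are unchanged.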
class AdminPasswordChangeForm(forms.Form):
"""
A form used to change the password of a user in the admin interface.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password (again)"),
widget=forms.PasswordInput)
def __init__(self, user, *args, **kwargs):
self.user = user
super(AdminPasswordChangeForm, self).__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'])
return password2
def save(self, commit=True):
"""
Saves the new password.
"""
self.user.set_password(self.cleaned_data["password1"])
if commit:
self.user.save()
return self.user
|
okanasik/JdeRobot
|
refs/heads/master
|
src/tools/visualStates_py/gui/state.py
|
2
|
'''
Copyright (C) 1997-2017 JDERobot Developers Team
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Library General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
Authors : Okan Asik (asik.okan@gmail.com)
'''
from gui.guistate import StateGraphicsItem
from gui.transition import Transition
from xml.dom.minidom import Node
class State:
def __init__(self, id, name, initial, parent=None):
self.parent = parent
self.id = id
self.name = name
self.code = ''
self.timeStepDuration = 100
self.x = 0
self.y = 0
self.initial = initial
self.children = []
self.originTransitions = []
self.destTransitions = []
self.graphicsItem = None
self.isRunning = False
def setPos(self, x, y):
self.x = x
self.y = y
def addChild(self, child):
if child not in self.children:
self.children.append(child)
def removeChild(self, child):
if child in self.children:
self.children.remove(child)
def getChildren(self):
return self.children
def getOriginTransitions(self):
return self.originTransitions
def addOriginTransition(self, tran):
if tran not in self.originTransitions:
self.originTransitions.append(tran)
def removeOriginTransition(self, tran):
if tran in self.originTransitions:
self.originTransitions.remove(tran)
def getDestTransitions(self):
return self.destTransitions
def addDestTransition(self, tran):
if tran not in self.destTransitions:
self.destTransitions.append(tran)
def removeDestTransition(self, tran):
if tran in self.destTransitions:
self.destTransitions.remove(tran)
def getGraphicsItem(self):
        if self.graphicsItem is None:
self.graphicsItem = StateGraphicsItem(self)
self.graphicsItem.posChanged.connect(self.posChanged)
self.graphicsItem.setRunning(self.isRunning)
return self.graphicsItem
def resetGraphicsItem(self):
self.graphicsItem = None
def posChanged(self, stateItem):
scenePos = stateItem.scenePos()
self.x = scenePos.x()
self.y = scenePos.y()
# creates a new copy of the state without children and transitions
def getNewCopy(self):
copy = State(self.id, self.name, False, self.parent)
copy.code = self.code
copy.x = self.x
copy.y = self.y
return copy
def parseElement(self, elementName, parentElement):
elements = parentElement.getElementsByTagName(elementName)
if len(elements) > 0:
if len(elements[0].childNodes) > 0:
return elements[0].childNodes[0].nodeValue
return ''
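    # Illustrative note: for a parent element containing
    #   <code>print('hi')</code>
    # parseElement('code', parent) returns "print('hi')"; it returns ''
    # when the tag is absent or has no text child.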
def parse(self, stateElement):
# parse attributes of the state
for (name, value) in stateElement.attributes.items():
if name == 'id':
self.id = int(value)
elif name == 'initial':
self.initial = (value == 'True')
self.name = stateElement.getElementsByTagName('name')[0].childNodes[0].nodeValue
# print('read name:' + self.name)
self.x = float(stateElement.getElementsByTagName('posx')[0].childNodes[0].nodeValue)
self.y = float(stateElement.getElementsByTagName('posy')[0].childNodes[0].nodeValue)
self.code = self.parseElement('code', stateElement)
self.timeStepDuration = int((self.parseElement('timestep', stateElement)))
# recursive child state parsing
allChildTransitions = []
statesById = {}
stateTransitions = []
for childNode in stateElement.childNodes:
if childNode.nodeType == Node.ELEMENT_NODE:
if childNode.tagName == 'state':
childState = State(0, 'state', False, self)
transitionNodes = childState.parse(childNode)
# print('add child:' + childState.name + ' to parent:' + self.name)
self.addChild(childState)
statesById[childState.id] = childState
allChildTransitions = allChildTransitions + transitionNodes
elif childNode.tagName == 'transition':
stateTransitions.append(childNode)
# wire transitions with the states after all the child states are parsed
for tranNode in allChildTransitions:
transition = Transition(0, 'transition')
transition.parse(tranNode, statesById)
# return transitions of the state to be able to wire after all states are created
return stateTransitions
def createElement(self, doc, parentElement=None):
stateElement = doc.createElement('state')
stateElement.setAttribute('initial', str(self.initial))
stateElement.setAttribute('id', str(self.id))
posxElement = doc.createElement('posx')
posxElement.appendChild(doc.createTextNode(str(self.x)))
posyElement = doc.createElement('posy')
posyElement.appendChild(doc.createTextNode(str(self.y)))
stateElement.appendChild(posxElement)
stateElement.appendChild(posyElement)
nameElement = doc.createElement('name')
nameElement.appendChild(doc.createTextNode(self.name))
stateElement.appendChild(nameElement)
codeElement = doc.createElement('code')
codeElement.appendChild(doc.createTextNode(self.code))
stateElement.appendChild(codeElement)
timeElement = doc.createElement('timestep')
timeElement.appendChild(doc.createTextNode(str(self.timeStepDuration)))
stateElement.appendChild(timeElement)
# create transition elements
for tran in self.getOriginTransitions():
tranElement = tran.createElement(doc)
stateElement.appendChild(tranElement)
for child in self.getChildren():
child.createElement(doc, stateElement)
if parentElement is not None:
parentElement.appendChild(stateElement)
return stateElement
def getCode(self):
return self.code
def setCode(self, code):
self.code = code
def getTimeStep(self):
return self.timeStepDuration
def setTimeStep(self, timestep):
self.timeStepDuration = timestep
def getInitialChild(self):
for child in self.getChildren():
if child.initial:
return child
return None
def setInitial(self, initial):
self.initial = initial
def getChildrenTransitions(self):
childTransitions = []
for child in self.getChildren():
childTransitions += child.getOriginTransitions()
return childTransitions
def setRunning(self, status):
self.isRunning = status
if self.graphicsItem is not None:
self.graphicsItem.setRunning(self.isRunning)
if not self.isRunning:
for child in self.getChildren():
child.setRunning(self.isRunning)
|
fabioz/PyDev.Debugger
|
refs/heads/master
|
third_party/pep8/lib2to3/lib2to3/pgen2/pgen.py
|
321
|
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
# Pgen imports
from . import grammar, token, tokenize
class PgenGrammar(grammar.Grammar):
pass
class ParserGenerator(object):
def __init__(self, filename, stream=None):
close_stream = None
if stream is None:
stream = open(filename)
close_stream = stream.close
self.filename = filename
self.stream = stream
self.generator = tokenize.generate_tokens(stream.readline)
self.gettoken() # Initialize lookahead
self.dfas, self.startsymbol = self.parse()
if close_stream is not None:
close_stream()
self.first = {} # map from symbol name to set of tokens
self.addfirstsets()
def make_grammar(self):
c = PgenGrammar()
names = self.dfas.keys()
names.sort()
names.remove(self.startsymbol)
names.insert(0, self.startsymbol)
for name in names:
i = 256 + len(c.symbol2number)
c.symbol2number[name] = i
c.number2symbol[i] = name
for name in names:
dfa = self.dfas[name]
states = []
for state in dfa:
arcs = []
for label, next in state.arcs.iteritems():
arcs.append((self.make_label(c, label), dfa.index(next)))
if state.isfinal:
arcs.append((0, dfa.index(state)))
states.append(arcs)
c.states.append(states)
c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
c.start = c.symbol2number[self.startsymbol]
return c
def make_first(self, c, name):
rawfirst = self.first[name]
first = {}
for label in rawfirst:
ilabel = self.make_label(c, label)
##assert ilabel not in first # XXX failed on <> ... !=
first[ilabel] = 1
return first
def make_label(self, c, label):
# XXX Maybe this should be a method on a subclass of converter?
ilabel = len(c.labels)
if label[0].isalpha():
# Either a symbol name or a named token
if label in c.symbol2number:
# A symbol name (a non-terminal)
if label in c.symbol2label:
return c.symbol2label[label]
else:
c.labels.append((c.symbol2number[label], None))
c.symbol2label[label] = ilabel
return ilabel
else:
# A named token (NAME, NUMBER, STRING)
itoken = getattr(token, label, None)
assert isinstance(itoken, int), label
assert itoken in token.tok_name, label
if itoken in c.tokens:
return c.tokens[itoken]
else:
c.labels.append((itoken, None))
c.tokens[itoken] = ilabel
return ilabel
else:
# Either a keyword or an operator
assert label[0] in ('"', "'"), label
value = eval(label)
if value[0].isalpha():
# A keyword
if value in c.keywords:
return c.keywords[value]
else:
c.labels.append((token.NAME, value))
c.keywords[value] = ilabel
return ilabel
else:
# An operator (any non-numeric token)
itoken = grammar.opmap[value] # Fails if unknown token
if itoken in c.tokens:
return c.tokens[itoken]
else:
c.labels.append((itoken, None))
c.tokens[itoken] = ilabel
return ilabel
def addfirstsets(self):
names = self.dfas.keys()
names.sort()
for name in names:
if name not in self.first:
self.calcfirst(name)
#print name, self.first[name].keys()
def calcfirst(self, name):
dfa = self.dfas[name]
self.first[name] = None # dummy to detect left recursion
state = dfa[0]
totalset = {}
overlapcheck = {}
for label, next in state.arcs.iteritems():
if label in self.dfas:
if label in self.first:
fset = self.first[label]
if fset is None:
raise ValueError("recursion for rule %r" % name)
else:
self.calcfirst(label)
fset = self.first[label]
totalset.update(fset)
overlapcheck[label] = fset
else:
totalset[label] = 1
overlapcheck[label] = {label: 1}
inverse = {}
for label, itsfirst in overlapcheck.iteritems():
for symbol in itsfirst:
if symbol in inverse:
raise ValueError("rule %s is ambiguous; %s is in the"
" first sets of %s as well as %s" %
(name, symbol, label, inverse[symbol]))
inverse[symbol] = label
self.first[name] = totalset
def parse(self):
dfas = {}
startsymbol = None
# MSTART: (NEWLINE | RULE)* ENDMARKER
while self.type != token.ENDMARKER:
while self.type == token.NEWLINE:
self.gettoken()
# RULE: NAME ':' RHS NEWLINE
name = self.expect(token.NAME)
self.expect(token.OP, ":")
a, z = self.parse_rhs()
self.expect(token.NEWLINE)
#self.dump_nfa(name, a, z)
dfa = self.make_dfa(a, z)
#self.dump_dfa(name, dfa)
oldlen = len(dfa)
self.simplify_dfa(dfa)
newlen = len(dfa)
dfas[name] = dfa
#print name, oldlen, newlen
if startsymbol is None:
startsymbol = name
return dfas, startsymbol
def make_dfa(self, start, finish):
# To turn an NFA into a DFA, we define the states of the DFA
# to correspond to *sets* of states of the NFA. Then do some
# state reduction. Let's represent sets as dicts with 1 for
# values.
assert isinstance(start, NFAState)
assert isinstance(finish, NFAState)
def closure(state):
base = {}
addclosure(state, base)
return base
def addclosure(state, base):
assert isinstance(state, NFAState)
if state in base:
return
base[state] = 1
for label, next in state.arcs:
if label is None:
addclosure(next, base)
states = [DFAState(closure(start), finish)]
for state in states: # NB states grows while we're iterating
arcs = {}
for nfastate in state.nfaset:
for label, next in nfastate.arcs:
if label is not None:
addclosure(next, arcs.setdefault(label, {}))
for label, nfaset in arcs.iteritems():
for st in states:
if st.nfaset == nfaset:
break
else:
st = DFAState(nfaset, finish)
states.append(st)
state.addarc(st, label)
return states # List of DFAState instances; first one is start
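    # Illustrative note: e.g. an NFA fragment  A -eps-> B -'x'-> C  gives the
    # DFA start state {A, B} (the epsilon closure of A); following 'x' yields
    # the state {C}, and any DFA state whose nfaset contains `finish` is
    # marked final.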
def dump_nfa(self, name, start, finish):
print "Dump of NFA for", name
todo = [start]
for i, state in enumerate(todo):
print " State", i, state is finish and "(final)" or ""
for label, next in state.arcs:
if next in todo:
j = todo.index(next)
else:
j = len(todo)
todo.append(next)
if label is None:
print " -> %d" % j
else:
print " %s -> %d" % (label, j)
def dump_dfa(self, name, dfa):
print "Dump of DFA for", name
for i, state in enumerate(dfa):
print " State", i, state.isfinal and "(final)" or ""
for label, next in state.arcs.iteritems():
print " %s -> %d" % (label, dfa.index(next))
def simplify_dfa(self, dfa):
# This is not theoretically optimal, but works well enough.
# Algorithm: repeatedly look for two states that have the same
# set of arcs (same labels pointing to the same nodes) and
# unify them, until things stop changing.
# dfa is a list of DFAState instances
changes = True
while changes:
changes = False
for i, state_i in enumerate(dfa):
for j in range(i+1, len(dfa)):
state_j = dfa[j]
if state_i == state_j:
#print " unify", i, j
del dfa[j]
for state in dfa:
state.unifystate(state_j, state_i)
changes = True
break
def parse_rhs(self):
# RHS: ALT ('|' ALT)*
a, z = self.parse_alt()
if self.value != "|":
return a, z
else:
aa = NFAState()
zz = NFAState()
aa.addarc(a)
z.addarc(zz)
while self.value == "|":
self.gettoken()
a, z = self.parse_alt()
aa.addarc(a)
z.addarc(zz)
return aa, zz
def parse_alt(self):
# ALT: ITEM+
a, b = self.parse_item()
while (self.value in ("(", "[") or
self.type in (token.NAME, token.STRING)):
c, d = self.parse_item()
b.addarc(c)
b = d
return a, b
def parse_item(self):
# ITEM: '[' RHS ']' | ATOM ['+' | '*']
if self.value == "[":
self.gettoken()
a, z = self.parse_rhs()
self.expect(token.OP, "]")
a.addarc(z)
return a, z
else:
a, z = self.parse_atom()
value = self.value
if value not in ("+", "*"):
return a, z
self.gettoken()
z.addarc(a)
if value == "+":
return a, z
else:
return a, a
def parse_atom(self):
# ATOM: '(' RHS ')' | NAME | STRING
if self.value == "(":
self.gettoken()
a, z = self.parse_rhs()
self.expect(token.OP, ")")
return a, z
elif self.type in (token.NAME, token.STRING):
a = NFAState()
z = NFAState()
a.addarc(z, self.value)
self.gettoken()
return a, z
else:
self.raise_error("expected (...) or NAME or STRING, got %s/%s",
self.type, self.value)
def expect(self, type, value=None):
if self.type != type or (value is not None and self.value != value):
self.raise_error("expected %s/%s, got %s/%s",
type, value, self.type, self.value)
value = self.value
self.gettoken()
return value
def gettoken(self):
tup = self.generator.next()
while tup[0] in (tokenize.COMMENT, tokenize.NL):
tup = self.generator.next()
self.type, self.value, self.begin, self.end, self.line = tup
#print token.tok_name[self.type], repr(self.value)
def raise_error(self, msg, *args):
if args:
try:
msg = msg % args
except:
msg = " ".join([msg] + map(str, args))
raise SyntaxError(msg, (self.filename, self.end[0],
self.end[1], self.line))
class NFAState(object):
def __init__(self):
self.arcs = [] # list of (label, NFAState) pairs
def addarc(self, next, label=None):
assert label is None or isinstance(label, str)
assert isinstance(next, NFAState)
self.arcs.append((label, next))
class DFAState(object):
def __init__(self, nfaset, final):
assert isinstance(nfaset, dict)
assert isinstance(iter(nfaset).next(), NFAState)
assert isinstance(final, NFAState)
self.nfaset = nfaset
self.isfinal = final in nfaset
self.arcs = {} # map from label to DFAState
def addarc(self, next, label):
assert isinstance(label, str)
assert label not in self.arcs
assert isinstance(next, DFAState)
self.arcs[label] = next
def unifystate(self, old, new):
for label, next in self.arcs.iteritems():
if next is old:
self.arcs[label] = new
def __eq__(self, other):
# Equality test -- ignore the nfaset instance variable
assert isinstance(other, DFAState)
if self.isfinal != other.isfinal:
return False
# Can't just return self.arcs == other.arcs, because that
# would invoke this method recursively, with cycles...
if len(self.arcs) != len(other.arcs):
return False
for label, next in self.arcs.iteritems():
if next is not other.arcs.get(label):
return False
return True
__hash__ = None # For Py3 compatibility.
def generate_grammar(filename="Grammar.txt"):
p = ParserGenerator(filename)
return p.make_grammar()
|
slarosa/QGIS
|
refs/heads/master
|
python/plugins/sextante/algs/ftools/BasicStatisticsNumbers.py
|
1
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
BasicStatisticsNumbers.py
---------------------
Date : September 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'September 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import math
from PyQt4.QtCore import *
from qgis.core import *
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.core.QGisLayers import QGisLayers
from sextante.parameters.ParameterVector import ParameterVector
from sextante.parameters.ParameterTableField import ParameterTableField
from sextante.outputs.OutputHTML import OutputHTML
from sextante.outputs.OutputNumber import OutputNumber
from sextante.algs.ftools import FToolsUtils as utils
class BasicStatisticsNumbers(GeoAlgorithm):
INPUT_LAYER = "INPUT_LAYER"
FIELD_NAME = "FIELD_NAME"
OUTPUT_HTML_FILE = "OUTPUT_HTML_FILE"
CV = "CV"
MIN = "MIN"
MAX = "MAX"
SUM = "SUM"
MEAN = "MEAN"
COUNT = "COUNT"
STD_DEV = "STD_DEV"
RANGE = "RANGE"
MEDIAN = "MEDIAN"
UNIQUE = "UNIQUE"
#===========================================================================
# def getIcon(self):
# return QtGui.QIcon(os.path.dirname(__file__) + "/icons/basic_statistics.png")
#===========================================================================
def defineCharacteristics(self):
self.name = "Basic statistics for numeric fields"
self.group = "Vector table tools"
self.addParameter(ParameterVector(self.INPUT_LAYER, "Input vector layer", ParameterVector.VECTOR_TYPE_ANY, False))
self.addParameter(ParameterTableField(self.FIELD_NAME, "Field to calculate statistics on",
self.INPUT_LAYER, ParameterTableField.DATA_TYPE_NUMBER))
self.addOutput(OutputHTML(self.OUTPUT_HTML_FILE, "Statistics for numeric field"))
self.addOutput(OutputNumber(self.CV, "Coefficient of Variation"))
self.addOutput(OutputNumber(self.MIN, "Minimum value"))
self.addOutput(OutputNumber(self.MAX, "Maximum value"))
self.addOutput(OutputNumber(self.SUM, "Sum"))
self.addOutput(OutputNumber(self.MEAN, "Mean value"))
self.addOutput(OutputNumber(self.COUNT, "Count"))
self.addOutput(OutputNumber(self.RANGE, "Range"))
self.addOutput(OutputNumber(self.MEDIAN, "Median"))
self.addOutput(OutputNumber(self.UNIQUE, "Number of unique values"))
self.addOutput(OutputNumber(self.STD_DEV, "Standard deviation"))
def processAlgorithm(self, progress):
layer = QGisLayers.getObjectFromUri(self.getParameterValue(self.INPUT_LAYER))
fieldName = self.getParameterValue(self.FIELD_NAME)
outputFile = self.getOutputValue(self.OUTPUT_HTML_FILE)
index = layer.fieldNameIndex(fieldName)
cvValue = 0
minValue = 0
maxValue = 0
sumValue = 0
meanValue = 0
medianValue = 0
stdDevValue = 0
isFirst = True
values = []
features = QGisLayers.features(layer)
count = len(features)
        # guard against empty layers to avoid a ZeroDivisionError
        total = 100.0 / float(count) if count else 0
current = 0
for ft in features:
value = float(ft.attributes()[index].toDouble()[0])
if isFirst:
minValue = value
maxValue = value
isFirst = False
else:
if value < minValue:
minValue = value
if value > maxValue:
maxValue = value
values.append( value )
sumValue += value
current += 1
progress.setPercentage(int(current * total))
# calculate additional values
rValue = maxValue - minValue
uniqueValue = utils.getUniqueValuesCount(layer, index)
if count > 0:
meanValue = sumValue / count
if meanValue != 0.00:
for v in values:
stdDevValue += ((v - meanValue) * (v - meanValue))
stdDevValue = math.sqrt(stdDevValue / count)
cvValue = stdDevValue / meanValue
if count > 1:
tmp = values
tmp.sort()
# calculate median
if (count % 2) == 0:
medianValue = 0.5 * (tmp[(count - 1) / 2] + tmp[count / 2])
else:
medianValue = tmp[(count + 1) / 2 - 1]
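        # Illustrative note: for even counts the median is the mean of the
        # two middle values, e.g. [1, 2, 3, 4] -> 2.5; for odd counts it is
        # the middle value, e.g. [1, 2, 3] -> 2.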
data = []
data.append("Count: " + unicode(count))
data.append("Unique values: " + unicode(uniqueValue))
data.append("Minimum value: " + unicode(minValue))
data.append("Maximum value: " + unicode(maxValue))
data.append("Range: " + unicode(rValue))
data.append("Sum: " + unicode(sumValue))
data.append("Mean value: " + unicode(meanValue))
data.append("Median value: " + unicode(medianValue))
data.append("Standard deviation: " + unicode(stdDevValue))
data.append("Coefficient of Variation: " + unicode(cvValue))
self.createHTML(outputFile, data)
self.setOutputValue(self.COUNT, count)
self.setOutputValue(self.UNIQUE, uniqueValue)
self.setOutputValue(self.MIN, minValue)
self.setOutputValue(self.MAX, maxValue)
self.setOutputValue(self.RANGE, rValue)
self.setOutputValue(self.SUM, sumValue)
self.setOutputValue(self.MEAN, meanValue)
self.setOutputValue(self.MEDIAN, medianValue)
self.setOutputValue(self.STD_DEV, stdDevValue)
self.setOutputValue(self.CV, cvValue)
def createHTML(self, outputFile, algData):
f = open(outputFile, "w")
for s in algData:
f.write("<p>" + str(s) + "</p>")
f.close()
|
AloneRoad/Inforlearn
|
refs/heads/1.0-rc3
|
.google_appengine/lib/django/django/contrib/contenttypes/models.py
|
33
|
from django.db import models
from django.utils.translation import gettext_lazy as _
CONTENT_TYPE_CACHE = {}
class ContentTypeManager(models.Manager):
def get_for_model(self, model):
"""
Returns the ContentType object for the given model, creating the
ContentType if necessary.
"""
opts = model._meta
key = (opts.app_label, opts.object_name.lower())
try:
ct = CONTENT_TYPE_CACHE[key]
except KeyError:
# The str() is needed around opts.verbose_name because it's a
# django.utils.functional.__proxy__ object.
ct, created = self.model._default_manager.get_or_create(app_label=key[0],
model=key[1], defaults={'name': str(opts.verbose_name)})
CONTENT_TYPE_CACHE[key] = ct
return ct
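    # Illustrative note: CONTENT_TYPE_CACHE maps (app_label, model_name)
    # tuples to ContentType rows, e.g. ('auth', 'user') -> <ContentType: user>,
    # so repeated get_for_model() calls hit the dict instead of the database.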
def clear_cache(self):
"""
Clear out the content-type cache. This needs to happen during database
flushes to prevent caching of "stale" content type IDs (see
django.contrib.contenttypes.management.create_contenttypes for where
this gets called).
"""
global CONTENT_TYPE_CACHE
CONTENT_TYPE_CACHE = {}
class ContentType(models.Model):
name = models.CharField(maxlength=100)
app_label = models.CharField(maxlength=100)
model = models.CharField(_('python model class name'), maxlength=100)
objects = ContentTypeManager()
class Meta:
verbose_name = _('content type')
verbose_name_plural = _('content types')
db_table = 'django_content_type'
ordering = ('name',)
unique_together = (('app_label', 'model'),)
def __str__(self):
return self.name
def model_class(self):
"Returns the Python model class for this type of content."
from django.db import models
return models.get_model(self.app_label, self.model)
def get_object_for_this_type(self, **kwargs):
"""
Returns an object of this type for the keyword arguments given.
Basically, this is a proxy around this object_type's get_object() model
method. The ObjectNotExist exception, if thrown, will not be caught,
so code that calls this method should catch it.
"""
return self.model_class()._default_manager.get(**kwargs)
|
samstav/satori
|
refs/heads/master
|
tools/install_venv_common.py
|
31
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides methods needed by installation script for OpenStack development
virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python
environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
from __future__ import print_function
import optparse
import os
import subprocess
import sys
class InstallVenv(object):
def __init__(self, root, venv, requirements,
test_requirements, py_version,
project):
self.root = root
self.venv = venv
self.requirements = requirements
self.test_requirements = test_requirements
self.py_version = py_version
self.project = project
def die(self, message, *args):
print(message % args, file=sys.stderr)
sys.exit(1)
def check_python_version(self):
if sys.version_info < (2, 6):
self.die("Need Python Version >= 2.6")
def run_command_with_code(self, cmd, redirect_output=True,
check_exit_code=True):
"""Runs a command in an out-of-process shell.
Returns the output of that command. Working directory is self.root.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return (output, proc.returncode)
def run_command(self, cmd, redirect_output=True, check_exit_code=True):
return self.run_command_with_code(cmd, redirect_output,
check_exit_code)[0]
def get_distro(self):
if (os.path.exists('/etc/fedora-release') or
os.path.exists('/etc/redhat-release')):
return Fedora(
self.root, self.venv, self.requirements,
self.test_requirements, self.py_version, self.project)
else:
return Distro(
self.root, self.venv, self.requirements,
self.test_requirements, self.py_version, self.project)
def check_dependencies(self):
self.get_distro().install_virtualenv()
def create_virtualenv(self, no_site_packages=True):
"""Creates the virtual environment and installs PIP.
Creates the virtual environment and installs PIP only into the
virtual environment.
"""
if not os.path.isdir(self.venv):
print('Creating venv...', end=' ')
if no_site_packages:
self.run_command(['virtualenv', '-q', '--no-site-packages',
self.venv])
else:
self.run_command(['virtualenv', '-q', self.venv])
print('done.')
else:
print("venv already exists...")
def pip_install(self, *args):
self.run_command(['tools/with_venv.sh',
'pip', 'install', '--upgrade'] + list(args),
redirect_output=False)
def install_dependencies(self):
print('Installing dependencies with pip (this can take a while)...')
# First things first, make sure our venv has the latest pip and
# setuptools and pbr
self.pip_install('pip>=1.4')
self.pip_install('setuptools')
self.pip_install('pbr')
self.pip_install('-r', self.requirements, '-r', self.test_requirements)
def parse_args(self, argv):
"""Parses command-line arguments."""
parser = optparse.OptionParser()
parser.add_option('-n', '--no-site-packages',
action='store_true',
help="Do not inherit packages from global Python "
"install")
return parser.parse_args(argv[1:])[0]
class Distro(InstallVenv):
def check_cmd(self, cmd):
return bool(self.run_command(['which', cmd],
check_exit_code=False).strip())
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if self.check_cmd('easy_install'):
print('Installing virtualenv via easy_install...', end=' ')
if self.run_command(['easy_install', 'virtualenv']):
print('Succeeded')
return
else:
print('Failed')
self.die('ERROR: virtualenv not found.\n\n%s development'
' requires virtualenv, please install it using your'
' favorite package management tool' % self.project)
class Fedora(Distro):
"""This covers all Fedora-based distributions.
Includes: Fedora, RHEL, CentOS, Scientific Linux
"""
def check_pkg(self, pkg):
return self.run_command_with_code(['rpm', '-q', pkg],
check_exit_code=False)[1] == 0
def install_virtualenv(self):
if self.check_cmd('virtualenv'):
return
if not self.check_pkg('python-virtualenv'):
self.die("Please install 'python-virtualenv'.")
super(Fedora, self).install_virtualenv()
|
jim-minter/aws-ose3
|
refs/heads/master
|
target/reip.py
|
1
|
#!/usr/bin/python
import OpenSSL.crypto
import argparse
import base64
import glob
import k8s
import os
import shutil
import yaml
def sn():
sn = int(open("/etc/origin/master/ca.serial.txt").read(), 16)
sntext = "%X" % (sn + 1)
if len(sntext) % 2:
sntext = "0" + sntext
open("/etc/origin/master/ca.serial.txt", "w").write(sntext)
return sn
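# Illustrative note: sn() treats ca.serial.txt as a hex counter and keeps its
# length even, e.g. a stored "0F" is returned as 15 while "10" (decimal 16)
# is written back for the next certificate.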
def make_cert(fn, o, cn, san, eku):
ca_cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
open("/etc/origin/master/ca.crt").read())
ca_key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,
open("/etc/origin/master/ca.key").read())
key = OpenSSL.crypto.PKey()
key.generate_key(OpenSSL.crypto.TYPE_RSA, 2048)
cert = OpenSSL.crypto.X509()
cert.set_version(2)
cert.set_serial_number(sn())
if o:
cert.get_subject().O = o
cert.get_subject().CN = cn
cert.gmtime_adj_notBefore(-60 * 60)
cert.gmtime_adj_notAfter((2 * 365 * 24 - 1) * 60 * 60)
cert.set_issuer(ca_cert.get_subject())
cert.set_pubkey(key)
cert.add_extensions([
OpenSSL.crypto.X509Extension("keyUsage", True, "digitalSignature, keyEncipherment"),
OpenSSL.crypto.X509Extension("extendedKeyUsage", False, eku),
OpenSSL.crypto.X509Extension("basicConstraints", True, "CA:FALSE")
])
if san:
cert.add_extensions([
OpenSSL.crypto.X509Extension("subjectAltName", False, san)
])
cert.sign(ca_key, "sha256")
with os.fdopen(os.open("%s.key" % fn, os.O_WRONLY | os.O_CREAT, 0600),
"w") as f:
f.write(OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,
key))
with open("%s.crt" % fn, "w") as f:
f.write(OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
cert))
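# Illustrative usage (hypothetical arguments): rebuilding a server cert/key
# pair signed by the cluster CA, e.g.
#   make_cert("/etc/origin/master/example.server", None, "172.30.0.1",
#             "DNS:example.local, IP:172.30.0.1", "serverAuth")
# writes example.server.key (mode 0600) and example.server.crt.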
def do_master_config():
# update master-config.yaml
f = "/etc/origin/master/master-config.yaml"
y = yaml.load(open(f, "r").read())
y["assetConfig"]["loggingPublicURL"] = "https://kibana." + args.subdomain + "/"
y["assetConfig"]["masterPublicURL"] = "https://" + args.public_hostname + ":8443"
y["assetConfig"]["metricsPublicURL"] = "https://hawkular-metrics." + args.subdomain + "/hawkular/metrics"
y["assetConfig"]["publicURL"] = "https://" + args.public_hostname + ":8443/console/"
y["corsAllowedOrigins"] = ["127.0.0.1",
"localhost",
"172.30.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster.local",
"openshift",
"openshift.default",
"openshift.default.svc",
"openshift.default.svc.cluster.local",
args.private_ip,
args.private_hostname,
args.public_ip,
args.public_hostname
]
y["etcdClientInfo"]["urls"] = ["https://" + args.private_hostname + ":4001"]
y["etcdConfig"]["address"] = args.private_hostname + ":4001"
y["etcdConfig"]["peerAddress"] = args.private_hostname + ":7001"
y["kubernetesMasterConfig"]["masterIP"] = args.private_ip
y["masterPublicURL"] = "https://" + args.public_hostname + ":8443"
y["oauthConfig"]["assetPublicURL"] = "https://" + args.public_hostname + ":8443/console/"
y["oauthConfig"]["masterPublicURL"] = "https://" + args.public_hostname + ":8443"
y["oauthConfig"]["masterURL"] = "https://" + args.private_hostname + ":8443"
y["routingConfig"]["subdomain"] = "apps." + args.subdomain
open(f, "w").write(yaml.dump(y, default_flow_style=False))
# rebuild SSL certs
for cert in ["etcd.server", "master.server"]:
make_cert("/etc/origin/master/" + cert, None, "172.30.0.1",
", ".join(["DNS:kubernetes",
"DNS:kubernetes.default",
"DNS:kubernetes.default.svc",
"DNS:kubernetes.default.svc.cluster.local",
"DNS:openshift",
"DNS:openshift.default",
"DNS:openshift.default.svc",
"DNS:openshift.default.svc.cluster.local",
"DNS:" + args.public_hostname,
"DNS:" + args.private_hostname,
"DNS:172.30.0.1",
"DNS:" + args.public_ip,
"DNS:" + args.private_ip,
"IP:172.30.0.1",
"IP:" + args.public_ip,
"IP:" + args.private_ip]), "serverAuth")
# rebuild service kubeconfig files
ca = base64.b64encode(open("/etc/origin/master/ca.crt").read())
private_hostname_ = args.private_hostname.replace(".", "-")
public_hostname_ = args.public_hostname.replace(".", "-")
for kc in ["admin", "openshift-master", "openshift-registry", "openshift-router"]:
y = {"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": public_hostname_ + ":8443",
"cluster": {"certificate-authority-data": ca,
"server": "https://" + args.public_hostname + ":8443"}},
{"name": private_hostname_ + ":8443",
"cluster": {"certificate-authority-data": ca,
"server": "https://" + args.private_hostname + ":8443"}}],
"users": [{"name": "system:" + kc + "/" + private_hostname_ + ":8443",
"user": {"client-certificate-data": base64.b64encode(open("/etc/origin/master/" + kc + ".crt").read()),
"client-key-data": base64.b64encode(open("/etc/origin/master/" + kc + ".key").read())}}],
"contexts": [{"name": "default/" + public_hostname_ + ":8443/system:" + kc,
"context": {"cluster": public_hostname_ + ":8443",
"namespace": "default",
"user": "system:" + kc + "/" + private_hostname_ + ":8443"}},
{"name": "default/" + private_hostname_ + ":8443/system:" + kc,
"context": {"cluster": private_hostname_ + ":8443",
"namespace": "default",
"user": "system:" + kc + "/" + private_hostname_ + ":8443"}}],
"current-context": "default/" + private_hostname_ + ":8443/system:" + kc}
open("/etc/origin/master/" + kc + ".kubeconfig", "w").write(yaml.dump(y, default_flow_style=False))
# rebuild root's kubeconfig file
shutil.copy("/etc/origin/master/admin.kubeconfig", "/root/.kube/config")
def do_node_config():
# update node-config.yaml
f = "/etc/origin/node/node-config.yaml"
y = yaml.load(open(f, "r").read())
y["masterKubeConfig"] = "system:node:" + args.private_hostname + ".kubeconfig"
y["nodeIP"] = args.private_ip
y["nodeName"] = args.private_hostname
open(f, "w").write(yaml.dump(y, default_flow_style=False))
# remove old node SSL certs and kubeconfig files
for f in glob.glob("/etc/origin/node/system:node:*"):
os.unlink(f)
# rebuild node SSL certs
make_cert("/etc/origin/node/server", None, "172.30.0.1",
", ".join(["DNS:kubernetes",
"DNS:kubernetes.default",
"DNS:kubernetes.default.svc",
"DNS:kubernetes.default.svc.cluster.local",
"DNS:openshift",
"DNS:openshift.default",
"DNS:openshift.default.svc",
"DNS:openshift.default.svc.cluster.local",
"DNS:" + args.public_hostname,
"DNS:" + args.private_hostname,
"DNS:172.30.0.1",
"DNS:" + args.public_ip,
"DNS:" + args.private_ip,
"IP:172.30.0.1",
"IP:" + args.public_ip,
"IP:" + args.private_ip]), "serverAuth")
make_cert("/etc/origin/node/system:node:" + args.private_hostname, "system:nodes", "system:node:" + args.private_hostname, None, "clientAuth")
# rebuild node kubeconfig file
private_hostname_ = args.private_hostname.replace(".", "-")
y = {"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": private_hostname_ + ":8443",
"cluster": {"certificate-authority-data": base64.b64encode(open("/etc/origin/node/ca.crt").read()),
"server": "https://" + args.private_hostname + ":8443"}}],
"users": [{"name": "system:node:" + args.private_hostname + "/" + private_hostname_ + ":8443",
"user": {"client-certificate-data": base64.b64encode(open("/etc/origin/node/system:node:" + args.private_hostname + ".crt").read()),
"client-key-data": base64.b64encode(open("/etc/origin/node/system:node:" + args.private_hostname + ".key").read())}}],
"contexts": [{"name": "default/" + private_hostname_ + ":8443/system:node:" + args.private_hostname,
"context": {"cluster": private_hostname_ + ":8443",
"namespace": "default",
"user": "system:node:" + args.private_hostname + "/" + private_hostname_ + ":8443"}}],
"current-context": "default/" + private_hostname_ + ":8443/system:node:" + args.private_hostname}
open("/etc/origin/node/system:node:" + args.private_hostname + ".kubeconfig", "w").write(yaml.dump(y, default_flow_style=False))
def do_restart_services():
svcs = ["atomic-openshift-master", "atomic-openshift-node"]
for svc in svcs[::-1]:
os.system("systemctl stop " + svc)
# clear out finished Docker containers
os.system("docker ps -aq | xargs docker rm -f")
# trigger complete reconfiguration of OVS
os.unlink("/run/openshift-sdn/docker-network")
os.system("ovs-ofctl -O OpenFlow13 del-flows br0")
for svc in svcs:
os.system("systemctl start " + svc)
def do_cleanup(api):
for i in api.get("/api/v1/nodes")._items:
if i.metadata.name != args.private_hostname:
api.delete(i.metadata.selfLink)
for i in api.get("/oapi/v1/hostsubnets")._items:
if i.metadata.name != args.private_hostname:
api.delete(i.metadata.selfLink)
for i in api.get("/oapi/v1/oauthclients")._items:
i.redirectURIs = [uri for uri in i.redirectURIs if not ("8443" in uri and args.public_hostname not in uri)]
api.put(i.metadata.selfLink, i)
for i in api.get("/api/v1/events")._items:
api.delete(i.metadata.selfLink)
for i in api.get("/api/v1/pods")._items:
api.delete(i.metadata.selfLink)
def do_services_config_post(api):
# replace DCs (we replace so that latestVersion is reset)
dc = api.get("/oapi/v1/namespaces/default/deploymentconfigs/docker-registry")
delete_dc(api, dc)
set_env(dc.spec.template.spec.containers[0], "OPENSHIFT_MASTER", "https://" + args.public_hostname + ":8443")
dc.metadata = {k: dc.metadata[k] for k in dc.metadata if k in ["labels", "name"]}
del dc.status
api.post("/oapi/v1/namespaces/default/deploymentconfigs", dc)
dc = api.get("/oapi/v1/namespaces/default/deploymentconfigs/router")
delete_dc(api, dc)
set_env(dc.spec.template.spec.containers[0], "OPENSHIFT_MASTER", "https://" + args.public_hostname + ":8443")
dc.metadata = {k: dc.metadata[k] for k in dc.metadata if k in ["labels", "name"]}
del dc.status
api.post("/oapi/v1/namespaces/default/deploymentconfigs", dc)
def do_kibana_config_pre(api):
# rebuild SSL cert
make_cert("kibana", None, "kibana",
", ".join(["DNS:kibana",
"DNS:kibana." + args.subdomain,
"DNS:kibana-ops." + args.subdomain]), "serverAuth")
sec = api.get("/api/v1/namespaces/logging/secrets/logging-kibana-proxy")
sec.data["server-cert"] = base64.b64encode(open("kibana.crt").read())
sec.data["server-key"] = base64.b64encode(open("kibana.key").read())
api.put(sec.metadata.selfLink, sec)
def do_kibana_config_post(api):
# replace logging-kibana DC (we replace so that latestVersion is reset)
dc = api.get("/oapi/v1/namespaces/logging/deploymentconfigs/logging-kibana")
delete_dc(api, dc)
set_env(dc.spec.template.spec.containers[1], "OAP_PUBLIC_MASTER_URL", "https://" + args.public_hostname + ":8443")
dc.metadata = {k: dc.metadata[k] for k in dc.metadata if k in ["labels", "name"]}
del dc.status
api.post("/oapi/v1/namespaces/logging/deploymentconfigs", dc)
# fix route hostnames
r = api.get("/oapi/v1/namespaces/logging/routes/kibana")
r.spec.host = "kibana." + args.subdomain
api.put(r.metadata.selfLink, r)
r = api.get("/oapi/v1/namespaces/logging/routes/kibana-ops")
r.spec.host = "kibana-ops." + args.subdomain
api.put(r.metadata.selfLink, r)
def do_hawkular_config_pre(api):
# rebuild SSL cert
make_cert("hawkular-metrics", None, "hawkular-metrics",
", ".join(["DNS:hawkular-metrics",
"DNS:hawkular-metrics." + args.subdomain]), "serverAuth")
open("hawkular-metrics.crt", "a").write(open("/etc/origin/master/ca.crt").read())
# key and cert go into hawkular-metrics-secrets keystore
sec = api.get("/api/v1/namespaces/openshift-infra/secrets/hawkular-metrics-secrets")
pw = sec.data["hawkular-metrics.keystore.password"].decode("base64").strip()
os.system("openssl pkcs12 -export -in hawkular-metrics.crt -inkey hawkular-metrics.key -out hawkular-metrics.pkcs12 -name hawkular-metrics -password pass:" + pw)
os.system("keytool -importkeystore -srckeystore hawkular-metrics.pkcs12 -srcstoretype pkcs12 -destkeystore keystore -deststorepass " + pw + " -srcstorepass " + pw)
sec.data["hawkular-metrics.keystore"] = base64.b64encode(open("keystore").read())
api.put(sec.metadata.selfLink, sec)
# cert goes into hawkular-metrics-certificate
sec = api.get("/api/v1/namespaces/openshift-infra/secrets/hawkular-metrics-certificate")
os.system("openssl x509 -in hawkular-metrics.crt -out hawkular-metrics.crt.der -outform der")
sec.data["hawkular-metrics.certificate"] = base64.b64encode(open("hawkular-metrics.crt.der").read())
api.put(sec.metadata.selfLink, sec)
# cert also goes into hawkular-cassandra-secrets truststore
sec = api.get("/api/v1/namespaces/openshift-infra/secrets/hawkular-cassandra-secrets")
pw = sec.data["cassandra.truststore.password"].decode("base64").strip()
open("truststore", "w").write(sec.data["cassandra.truststore"].decode("base64"))
os.system("keytool -delete -alias hawkular-metrics -keystore truststore -storepass " + pw)
os.system("keytool -import -trustcacerts -alias hawkular-metrics -file hawkular-metrics.crt.der -keystore truststore -storepass " + pw)
sec.data["cassandra.truststore"] = base64.b64encode(open("truststore").read())
api.put(sec.metadata.selfLink, sec)
def do_hawkular_config_post(api):
# fix route hostname
r = api.get("/oapi/v1/namespaces/openshift-infra/routes/hawkular-metrics")
r.spec.host = "hawkular-metrics." + args.subdomain
api.put(r.metadata.selfLink, r)
def connect_api():
return k8s.API("https://" + args.private_hostname + ":8443",
("/etc/origin/master/openshift-master.crt",
"/etc/origin/master/openshift-master.key"))
def delete_dc(api, dc):
# delete DC and cascade to appropriate RCs
api.delete(dc.metadata.selfLink)
for rc in api.get("/api/v1/namespaces/" + dc.metadata.namespace + "/replicationcontrollers")._items:
if rc.metadata.name.startswith(dc.metadata.name + "-"):
delete_rc(api, rc)
def delete_rc(api, rc):
# delete RC and cascade to appropriate pods
api.delete(rc.metadata.selfLink)
for pod in api.get("/api/v1/namespaces/" + rc.metadata.namespace + "/pods")._items:
if pod.metadata.name.startswith(rc.metadata.name + "-"):
api.delete(pod.metadata.selfLink)
def set_env(c, k, v):
c.env = [e for e in c.env if e.name != k]
c.env.append(k8s.AttrDict({"name": k, "value": v}))
def parse_args():
ap = argparse.ArgumentParser()
ap.add_argument("private_hostname")
ap.add_argument("public_hostname")
ap.add_argument("private_ip")
ap.add_argument("public_ip")
ap.add_argument("subdomain")
return ap.parse_args()
def main():
# 1. Update daemon configs and certs
do_master_config()
do_node_config()
do_restart_services()
api = connect_api()
# 2. Make necessary changes via API before bulk object delete
do_kibana_config_pre(api)
do_hawkular_config_pre(api)
# 3. Bulk object delete
do_cleanup(api)
# 4. Post bulk object delete changes
do_services_config_post(api)
do_kibana_config_post(api)
do_hawkular_config_post(api)
if __name__ == "__main__":
args = parse_args()
main()
|
weimingtom/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/turtledemo/colormixer.py
|
65
|
# colormixer
from turtle import Screen, Turtle, mainloop
import sys
sys.setrecursionlimit(20000) # overcomes, for now, an instability of Python 3.0
class ColorTurtle(Turtle):
def __init__(self, x, y):
Turtle.__init__(self)
self.shape("turtle")
self.resizemode("user")
self.shapesize(3,3,5)
self.pensize(10)
self._color = [0,0,0]
self.x = x
self._color[x] = y
self.color(self._color)
self.speed(0)
self.left(90)
self.pu()
self.goto(x,0)
self.pd()
self.sety(1)
self.pu()
self.sety(y)
self.pencolor("gray25")
self.ondrag(self.shift)
def shift(self, x, y):
self.sety(max(0,min(y,1)))
self._color[self.x] = self.ycor()
self.fillcolor(self._color)
setbgcolor()
def setbgcolor():
screen.bgcolor(red.ycor(), green.ycor(), blue.ycor())
def main():
global screen, red, green, blue
screen = Screen()
screen.delay(0)
screen.setworldcoordinates(-1, -0.3, 3, 1.3)
red = ColorTurtle(0, .5)
green = ColorTurtle(1, .5)
blue = ColorTurtle(2, .5)
setbgcolor()
writer = Turtle()
writer.ht()
writer.pu()
writer.goto(1,1.15)
writer.write("DRAG!",align="center",font=("Arial",30,("bold","italic")))
return "EVENTLOOP"
if __name__ == "__main__":
msg = main()
print(msg)
mainloop()
|
liavkoren/djangoDev
|
refs/heads/master
|
django/contrib/gis/geos/libgeos.py
|
90
|
"""
This module houses the ctypes initialization procedures, as well
as the notice and error handler function callbacks (called
when an error occurs in GEOS).
This module also houses GEOS Pointer utilities, including
get_pointer_arr() and GEOM_PTR.
"""
import logging
import os
import re
from ctypes import c_char_p, Structure, CDLL, CFUNCTYPE, POINTER
from ctypes.util import find_library
from django.contrib.gis.geos.error import GEOSException
from django.core.exceptions import ImproperlyConfigured
logger = logging.getLogger('django.contrib.gis')
# Custom library path set?
try:
from django.conf import settings
lib_path = settings.GEOS_LIBRARY_PATH
except (AttributeError, EnvironmentError,
ImportError, ImproperlyConfigured):
lib_path = None
# Setting the appropriate names for the GEOS-C library.
if lib_path:
lib_names = None
elif os.name == 'nt':
# Windows NT libraries
lib_names = ['geos_c', 'libgeos_c-1']
elif os.name == 'posix':
# *NIX libraries
lib_names = ['geos_c', 'GEOS']
else:
raise ImportError('Unsupported OS "%s"' % os.name)
# Using the ctypes `find_library` utility to find the path to the GEOS
# shared library. This is better than manually specifying each library name
# and extension (e.g., libgeos_c.[so|so.1|dylib].).
if lib_names:
for lib_name in lib_names:
lib_path = find_library(lib_name)
if lib_path is not None:
break
# No GEOS library could be found.
if lib_path is None:
raise ImportError(
'Could not find the GEOS library (tried "%s"). '
'Try setting GEOS_LIBRARY_PATH in your settings.' %
'", "'.join(lib_names)
)
# Getting the GEOS C library. The C interface (CDLL) is used for
# both *NIX and Windows.
# See the GEOS C API source code for more details on the library function calls:
# http://geos.refractions.net/ro/doxygen_docs/html/geos__c_8h-source.html
lgeos = CDLL(lib_path)
# The notice and error handler C function callback definitions.
# Supposed to mimic the GEOS message handler (C below):
# typedef void (*GEOSMessageHandler)(const char *fmt, ...);
NOTICEFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def notice_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
warn_msg = fmt % lst
except TypeError:
warn_msg = fmt
logger.warning('GEOS_NOTICE: %s\n' % warn_msg)
notice_h = NOTICEFUNC(notice_h)
ERRORFUNC = CFUNCTYPE(None, c_char_p, c_char_p)
def error_h(fmt, lst):
fmt, lst = fmt.decode(), lst.decode()
try:
err_msg = fmt % lst
except TypeError:
err_msg = fmt
logger.error('GEOS_ERROR: %s\n' % err_msg)
error_h = ERRORFUNC(error_h)
#### GEOS Geometry C data structures, and utility functions. ####
# Opaque GEOS geometry structures, used for GEOM_PTR and CS_PTR
class GEOSGeom_t(Structure):
pass
class GEOSPrepGeom_t(Structure):
pass
class GEOSCoordSeq_t(Structure):
pass
class GEOSContextHandle_t(Structure):
pass
# Pointers to opaque GEOS geometry structures.
GEOM_PTR = POINTER(GEOSGeom_t)
PREPGEOM_PTR = POINTER(GEOSPrepGeom_t)
CS_PTR = POINTER(GEOSCoordSeq_t)
CONTEXT_PTR = POINTER(GEOSContextHandle_t)
# Used specifically by the GEOSGeom_createPolygon and GEOSGeom_createCollection
# GEOS routines
def get_pointer_arr(n):
"Gets a ctypes pointer array (of length `n`) for GEOSGeom_t opaque pointer."
GeomArr = GEOM_PTR * n
return GeomArr()
# Returns the string version of the GEOS library. Have to set the restype
# explicitly to c_char_p to ensure compatibility across 32 and 64-bit platforms.
geos_version = lgeos.GEOSversion
geos_version.argtypes = None
geos_version.restype = c_char_p
# Regular expression should be able to parse version strings such as
# '3.0.0rc4-CAPI-1.3.3', '3.0.0-CAPI-1.4.1', '3.4.0dev-CAPI-1.8.0' or '3.4.0dev-CAPI-1.8.0 r0'
version_regex = re.compile(
r'^(?P<version>(?P<major>\d+)\.(?P<minor>\d+)\.(?P<subminor>\d+))'
r'((rc(?P<release_candidate>\d+))|dev)?-CAPI-(?P<capi_version>\d+\.\d+\.\d+)( r\d+)?$'
)
def geos_version_info():
"""
Returns a dictionary containing the various version metadata parsed from
the GEOS version string, including the version number, whether the version
is a release candidate (and what number release candidate), and the C API
version.
"""
ver = geos_version().decode()
m = version_regex.match(ver)
if not m:
raise GEOSException('Could not parse version info string "%s"' % ver)
return dict((key, m.group(key)) for key in (
'version', 'release_candidate', 'capi_version', 'major', 'minor', 'subminor'))
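# Illustrative example: for a version string such as '3.4.0dev-CAPI-1.8.0 r0',
# geos_version_info() returns
# {'version': '3.4.0', 'major': '3', 'minor': '4', 'subminor': '0',
#  'release_candidate': None, 'capi_version': '1.8.0'}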
# Version numbers and whether or not prepared geometry support is available.
_verinfo = geos_version_info()
GEOS_MAJOR_VERSION = int(_verinfo['major'])
GEOS_MINOR_VERSION = int(_verinfo['minor'])
GEOS_SUBMINOR_VERSION = int(_verinfo['subminor'])
del _verinfo
GEOS_VERSION = (GEOS_MAJOR_VERSION, GEOS_MINOR_VERSION, GEOS_SUBMINOR_VERSION)
# Here we set up the prototypes for the initGEOS_r and finishGEOS_r
# routines. These functions aren't actually called until they are
# attached to a GEOS context handle -- this actually occurs in
# geos/prototypes/threadsafe.py.
lgeos.initGEOS_r.restype = CONTEXT_PTR
lgeos.finishGEOS_r.argtypes = [CONTEXT_PTR]
|
tempesta-tech/linux-4.8.15-tfw
|
refs/heads/master
|
Documentation/conf.py
|
37
|
# -*- coding: utf-8 -*-
#
# The Linux Kernel documentation build configuration file, created by
# sphinx-quickstart on Fri Feb 12 13:51:46 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('sphinx'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['kernel-doc', 'rstFlatTable', 'kernel_include']
# Gracefully handle missing rst2pdf.
try:
import rst2pdf
extensions += ['rst2pdf.pdfbuilder']
except ImportError:
pass
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'The Linux Kernel'
copyright = '2016, The kernel development community'
author = 'The kernel development community'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# In a normal build, version and release are set to KERNELVERSION and
# KERNELRELEASE, respectively, from the Makefile via Sphinx command line
# arguments.
#
# The following code tries to extract the information by reading the Makefile,
# when Sphinx is run directly (e.g. by Read the Docs).
try:
makefile_version = None
makefile_patchlevel = None
for line in open('../Makefile'):
key, val = [x.strip() for x in line.split('=', 2)]
if key == 'VERSION':
makefile_version = val
elif key == 'PATCHLEVEL':
makefile_patchlevel = val
if makefile_version and makefile_patchlevel:
break
except:
pass
finally:
if makefile_version and makefile_patchlevel:
version = release = makefile_version + '.' + makefile_patchlevel
else:
sys.stderr.write('Warning: Could not extract kernel version\n')
version = release = "unknown version"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['output']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
primary_domain = 'C'
highlight_language = 'guess'
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# The Read the Docs theme is available from
# - https://github.com/snide/sphinx_rtd_theme
# - https://pypi.python.org/pypi/sphinx_rtd_theme
# - python-sphinx-rtd-theme package (on Debian)
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
except ImportError:
sys.stderr.write('Warning: The Sphinx \'sphinx_rtd_theme\' HTML theme was not found. Make sure you have the theme installed to produce pretty HTML output. Falling back to the default theme.\n')
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['sphinx-static']
html_context = {
'css_files': [
'_static/theme_overrides.css',
],
}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TheLinuxKerneldoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TheLinuxKernel.tex', 'The Linux Kernel Documentation',
'The kernel development community', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'thelinuxkernel', 'The Linux Kernel Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TheLinuxKernel', 'The Linux Kernel Documentation',
author, 'TheLinuxKernel', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
#=======
# rst2pdf
#
# Grouping the document tree into PDF files. List of tuples
# (source start file, target name, title, author, options).
#
# See the Sphinx chapter of http://ralsina.me/static/manual.pdf
#
# FIXME: Do not add the index file here; the result will be too big. Adding
# multiple PDF files here actually tries to get the cross-referencing right
# *between* PDF files.
pdf_documents = [
('kernel-documentation', u'Kernel', u'Kernel', u'J. Random Bozo'),
]
# kernel-doc extension configuration for running Sphinx directly (e.g. by Read
# the Docs). In a normal build, these are supplied from the Makefile via command
# line arguments.
kerneldoc_bin = '../scripts/kernel-doc'
kerneldoc_srctree = '..'
|
UManPychron/pychron
|
refs/heads/develop
|
pychron/core/fits/measurement_fits_selector.py
|
2
|
# ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
import ast
import os
import yaml
from traits.api import Str, Button, List
from traitsui.api import HGroup, UItem, VGroup, Item
from traitsui.extras.checkbox_column import CheckboxColumn
from traitsui.handler import Controller
from traitsui.table_column import ObjectColumn
from pychron.core.fits.filter_fit_selector import FilterFitSelector
from pychron.core.fits.fit import FilterFit
from pychron.core.helpers.filetools import add_extension, glob_list_directory
from pychron.core.helpers.iterfuncs import partition
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
from pychron.core.ui.enum_editor import myEnumEditor
from pychron.core.ui.table_editor import myTableEditor
from pychron.core.yaml import yload
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.paths import paths
class MeasurementFit(FilterFit):
is_baseline = False
ATTRS = ['fit', 'error_type', 'name', 'filter_outliers', 'filter_iterations', 'filter_std_devs']
class MeasurementFitsSelector(FilterFitSelector):
fit_klass = MeasurementFit
name = Str(auto_set=False, enter_set=True)
available_names = List
def __init__(self, *args, **kw):
super(MeasurementFitsSelector, self).__init__(*args, **kw)
self._load_available_names()
def _name_changed(self, new):
if new:
self._load_name(new)
def _load_name(self, name):
self.load(os.path.join(paths.fits_dir, add_extension(name, '.yaml')))
def duplicate(self):
self.save()
self._load_available_names()
self._load_name(self.name)
def open(self, script_path):
dfp = self._extract_default_fits_file(script_path)
if dfp:
self.load(os.path.join(paths.fits_dir, add_extension(dfp, '.yaml')))
def save(self, name=None):
if name is None:
name = self.name
bfs, sfs = partition(self.fits, lambda x: x.is_baseline)
yd = {'signal': self._dump(sfs),
'baseline': self._dump(bfs)}
p = os.path.join(paths.fits_dir, '{}.yaml'.format(name))
with open(p, 'w') as wfile:
yaml.dump(yd, wfile, default_flow_style=False)
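# The dump above yields YAML of roughly this shape, one mapping per fit
# built from the keys in ATTRS (field values illustrative):
#
# signal:
# - name: Ar40
#   fit: linear
#   error_type: SEM
#   filter_outliers: false
#   filter_iterations: 1
#   filter_std_devs: 2
# baseline:
# - name: H1
#   ...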
def load(self, p):
if not os.path.isfile(p):
return
yd = yload(p)
fits = self._load_fits(yd['signal'])
fits.extend(self._load_fits(yd['baseline'], is_baseline=True))
self.fits = fits
h, _ = os.path.splitext(os.path.basename(p))
self.name = h
def _load_available_names(self):
ps = glob_list_directory(paths.fits_dir, extension='.yaml', remove_extension=True)
self.available_names = ps
def _extract_default_fits_file(self, path):
with open(path, 'r') as rfile:
m = ast.parse(rfile.read())
docstr = ast.get_docstring(m)
yd = yload(docstr)
if yd:
return yd.get('default_fits', None)
def _dump(self, fs):
ys = []
for fi in fs:
d = {ai: getattr(fi, ai) for ai in ATTRS}
ys.append(d)
return ys
def _load_fits(self, fs, is_baseline=False):
fits = []
for fi in fs:
d = {ai: fi[ai] for ai in ATTRS}
f = MeasurementFit(is_baseline=is_baseline, **d)
fits.append(f)
return fits
class MeasurementFitsSelectorView(Controller):
duplicate_button = Button
def _duplicate_button_fired(self):
info = self.model.edit_traits(view=okcancel_view(Item('name'),
title='Enter a new name',
width=300,
kind='modal'))
if info.result:
self.model.duplicate()
def closed(self, info, is_ok):
if is_ok:
self.model.save()
def _get_toggle_group(self):
g = HGroup(
UItem('filter_all_button'), )
return g
def _get_auto_group(self):
return HGroup(UItem('global_fit', editor=myEnumEditor(name='fit_types')),
UItem('global_error_type', editor=myEnumEditor(name='error_types')))
def _get_fit_group(self):
cols = [ObjectColumn(name='name', editable=False,
tooltip='If name is an isotope, e.g. Ar40, the '
'fit is for a signal; if name is a detector, e.g. H1, the fit is for a baseline'),
ObjectColumn(name='fit',
editor=myEnumEditor(name='fit_types'),
width=75),
ObjectColumn(name='error_type',
editor=myEnumEditor(name='error_types'),
label='Error',
width=75),
CheckboxColumn(name='filter_outliers', label='Out.'),
ObjectColumn(name='filter_iterations', label='Iter.'),
ObjectColumn(name='filter_std_devs', label='NSigma'),
CheckboxColumn(name='use_standard_deviation_filtering', label='Use SD'),
CheckboxColumn(name='use_iqr_filtering', label='Use IQR')
]
editor = myTableEditor(columns=cols,
selected='selected',
selection_mode='rows',
sortable=False,
edit_on_first_click=False,
clear_selection_on_dclicked=True,
on_command_key=self._update_command_key, )
grp = UItem('fits',
style='custom',
editor=editor)
return grp
def traits_view(self):
name_grp = HGroup(
UItem('name', editor=myEnumEditor(name='available_names')),
icon_button_editor('controller.duplicate_button', 'duplicate'))
v = okcancel_view(VGroup(name_grp,
self._get_toggle_group(),
self._get_auto_group(),
self._get_fit_group()),
height=400,
title='Edit Default Fits')
return v
if __name__ == '__main__':
# build_directories(paths)
m = MeasurementFitsSelector()
# keys = ['Ar40', 'Ar39']
# detectors=['H1','AX']
# fits = [('linear', 'SEM', {}),
# ('linear', 'SEM', {})]
t = os.path.join(paths.fits_dir, 'test.yaml')
m.load(t)
a = MeasurementFitsSelectorView(model=m)
a.configure_traits()
# ============= EOF =============================================
|
overtherain/scriptfile
|
refs/heads/master
|
software/googleAppEngine/lib/django_1_4/tests/regressiontests/string_lookup/models.py
|
43
|
# -*- coding: utf-8 -*-
from django.db import models
class Foo(models.Model):
name = models.CharField(max_length=50)
friend = models.CharField(max_length=50, blank=True)
def __unicode__(self):
return "Foo %s" % self.name
class Bar(models.Model):
name = models.CharField(max_length=50)
normal = models.ForeignKey(Foo, related_name='normal_foo')
fwd = models.ForeignKey("Whiz")
back = models.ForeignKey("Foo")
def __unicode__(self):
return "Bar %s" % self.place.name
class Whiz(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return "Whiz %s" % self.name
class Child(models.Model):
parent = models.OneToOneField('Base')
name = models.CharField(max_length=50)
def __unicode__(self):
return "Child %s" % self.name
class Base(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return "Base %s" % self.name
class Article(models.Model):
name = models.CharField(max_length=50)
text = models.TextField()
submitted_from = models.IPAddressField(blank=True, null=True)
def __str__(self):
return "Article %s" % self.name
|
hwu25/AppPkg
|
refs/heads/trunk
|
Applications/Python/Python-2.7.2/Lib/encodings/cp037.py
|
93
|
""" Python Character Mapping Codec cp037 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP037.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp037',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'\xa2' # 0x4A -> CENT SIGN
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'|' # 0x4F -> VERTICAL LINE
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'!' # 0x5A -> EXCLAMATION MARK
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'\xac' # 0x5F -> NOT SIGN
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
u'\xae' # 0xAF -> REGISTERED SIGN
u'^' # 0xB0 -> CIRCUMFLEX ACCENT
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'[' # 0xBA -> LEFT SQUARE BRACKET
u']' # 0xBB -> RIGHT SQUARE BRACKET
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
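# Usage sketch (Python 2; byte values verifiable against the tables above):
# once the encodings package registers this codec,
#   u'Hello'.encode('cp037') -> '\xc8\x85\x93\x93\x96'
#   '\xc8\x85\x93\x93\x96'.decode('cp037') -> u'Hello'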
|
rafaelvieiras/plugin.program.advanced.launcher
|
refs/heads/master
|
resources/scrapers/datas/ComicVine/__init__.py
|
201
|
# Dummy file to make this directory a package.
|
CPFL/gxen
|
refs/heads/master
|
tools/python/xen/remus/vm.py
|
43
|
#!/usr/bin/env python
import xmlrpclib
from xen.xend.XendClient import server
from xen.xend import sxp, osdep
from xen.lowlevel.xc import xc
import vif
import blkdev
# need a nicer way to load disk drivers
import vbd
class VMException(Exception): pass
class VM(object):
"Representation of a virtual machine"
def __init__(self, domid=None, dominfo=None):
self.dominfo = dominfo
self.domid = -1
self.name = 'unknown'
self.dom = {}
self.disks = []
self.vifs = []
if domid:
try:
self.dominfo = server.xend.domain(domid, 'all')
except xmlrpclib.Fault:
raise VMException('error looking up domain %s' % str(domid))
if self.dominfo:
self.loaddominfo()
def loaddominfo(self):
self.dom = parsedominfo(self.dominfo)
self.domid = self.dom['domid']
self.name = self.dom['name']
self.disks = getdisks(self.dom)
self.vifs = getvifs(self.dom)
def __str__(self):
return 'VM %d (%s), vifs: [%s], disks: [%s]' % \
(self.domid, self.name,
', '.join([str(v) for v in self.vifs]),
', '.join([str(d) for d in self.disks]))
def parsedominfo(dominfo):
"parses a dominfo sexpression in the form of python lists of lists"
def s2d(s):
r = {}
for elem in s:
if len(elem) == 0:
continue
name = elem[0]
if len(elem) == 1:
val = None
else:
val = elem[1]
if isinstance(val, list):
val = s2d(elem[1:])
if isinstance(name, list):
# hack for ['cpus', [[1]]]
return s2d(elem)
if name in r:
for k, v in val.iteritems():
if k in r[name]:
if not isinstance(r[name][k], list):
r[name][k] = [r[name][k]]
r[name][k].append(v)
else:
r[name][k] = v
else:
r[name] = val
return r
return s2d(dominfo[1:])
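# Illustrative sketch (field names hypothetical):
#   parsedominfo(['domain', ['domid', 3], ['name', 'vm1'],
#                 ['device', ['vif', ['mac', 'aa:00']]]])
#   -> {'domid': 3, 'name': 'vm1', 'device': {'vif': {'mac': 'aa:00'}}}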
def domtosxpr(dom):
"convert a dominfo into a python sxpr"
def d2s(d):
r = []
for k, v in d.iteritems():
elem = [k]
if isinstance(v, dict):
elem.extend(d2s(v))
else:
if v is None:
v = ''
elem.append(v)
r.append(elem)
return r
sxpr = ['domain']
sxpr.extend(d2s(dom))
return sxpr
def strtosxpr(s):
"convert a string to a python sxpr"
p = sxp.Parser()
p.input(s)
return p.get_val()
def sxprtostr(sxpr):
"convert an sxpr to string"
return sxp.to_string(sxpr)
def getvifs(dom):
"return vif objects for devices in dom"
vifs = dom['device'].get('vif', [])
if type(vifs) != list:
vifs = [vifs]
vifno = 0
parsed = []
for v in vifs:
parsed.append(vif.parse(v, dom['domid'], vifno))
vifno += 1
return parsed
def getdisks(dom):
"return block device objects for devices in dom"
disks = dom['device'].get('vbd', [])
if type(disks) != list:
disks = [disks]
# tapdisk1 devices
tap1s = dom['device'].get('tap', [])
if type(tap1s) != list:
disks.append(tap1s)
else:
disks.extend(tap1s)
# tapdisk2 devices
tap2s = dom['device'].get('tap2', [])
if type(tap2s) != list:
disks.append(tap2s)
else:
disks.extend(tap2s)
return [blkdev.parse(disk) for disk in disks]
def fromxend(domid):
"create a VM object from xend information"
return VM(domid)
def getshadowmem(vm):
"Balloon down domain0 to create free memory for shadow paging."
maxmem = int(vm.dom['maxmem'])
shadow = int(vm.dom['shadow_memory'])
vcpus = int(vm.dom['vcpus'])
# from XendDomainInfo.checkLiveMigrateMemory:
# 1MB per vcpu plus 4Kib/Mib of RAM. This is higher than
# the minimum that Xen would allocate if no value were given.
shadowneeded = vcpus * 1024 + maxmem * 4 - shadow * 1024
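# Worked example (illustrative numbers): maxmem=1024 MB, vcpus=2 and
# shadow=0 give shadowneeded = 2*1024 + 1024*4 - 0*1024 = 6144 kB.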
physinfo = xc().physinfo()
freemem = int(physinfo['free_memory'])
needed = shadowneeded - freemem
if needed > 0:
print "Freeing %d kB for shadow mode" % needed
dom0cur = osdep.lookup_balloon_stat('current')
# target is in MB, not KB
target = (dom0cur - needed) / 1024
server.xend.domain.setMemoryTarget(0, target)
|
microhh/microhh2
|
refs/heads/master
|
cases_old_setup/strongscaling/strongscalingprof.py
|
5
|
import numpy
#from pylab import *
# set the height
kmax = 1024
dn = 1./kmax
n = numpy.linspace(dn, 1.-dn, kmax)
nloc1 = 160.*dn
nbuf1 = 32.*dn
nloc2 = 1024.*dn
nbuf2 = 144.*dn
dz1 = 0.0005
dz2 = 0.001
dz3 = 0.01
dzdn1 = dz1/dn
dzdn2 = dz2/dn
dzdn3 = dz3/dn
dzdn = dzdn1 + 0.5*(dzdn2-dzdn1)*(1. + numpy.tanh((n-nloc1)/nbuf1)) + 0.5*(dzdn3-dzdn2)*(1. + numpy.tanh((n-nloc2)/nbuf2))
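# dzdn blends smoothly between the three spacings: dz1 near n=0, dz2 beyond
# n=nloc1 and dz3 beyond n=nloc2, with tanh transition widths nbuf1 and nbuf2.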
dz = dzdn*dn
z = numpy.zeros(numpy.size(dz))
stretch = numpy.zeros(numpy.size(dz))
z [0] = 0.5*dz[0]
stretch[0] = 1.
for k in range(1,kmax):
z [k] = z[k-1] + 0.5*(dz[k-1]+dz[k])
stretch[k] = dz[k]/dz[k-1]
zsize = z[kmax-1] + 0.5*dz[kmax-1]
#print('zsize = ', zsize)
b0 = 1.
delta = 4.407731e-3
N2 = 3.
b = numpy.zeros(numpy.size(z))
for k in range(kmax):
b[k] = N2*z[k]
# write the data to a file
proffile = open('strongscaling.prof','w')
proffile.write('{0:^20s} {1:^20s}\n'.format('z','b'))
for k in range(kmax):
proffile.write('{0:1.14E} {1:1.14E}\n'.format(z[k], b[k]))
proffile.close()
"""
#plot the grid
figure()
subplot(131)
plot(n,z)
subplot(132)
plot(n,dz)
subplot(133)
plot(n,stretch)
"""
|
chrrrles/ansible
|
refs/heads/devel
|
test/units/plugins/lookup/__init__.py
|
7690
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
|
roadmapper/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/ec2_vpc_nacl_info.py
|
5
|
#!/usr/bin/python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_vpc_nacl_info
short_description: Gather information about Network ACLs in an AWS VPC
description:
- Gather information about Network ACLs in an AWS VPC
- This module was called C(ec2_vpc_nacl_facts) before Ansible 2.9. The usage did not change.
version_added: "2.2"
author: "Brad Davidson (@brandond)"
requirements: [ boto3 ]
options:
nacl_ids:
description:
- A list of Network ACL IDs to retrieve information about.
required: false
default: []
aliases: [nacl_id]
type: list
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value. See
U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeNetworkAcls.html) for possible filters. Filter
names and values are case sensitive.
required: false
default: {}
type: dict
notes:
- By default, the module will return all Network ACLs.
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
# Gather information about all Network ACLs:
- name: Get All NACLs
register: all_nacls
ec2_vpc_nacl_info:
region: us-west-2
# Retrieve default Network ACLs:
- name: Get Default NACLs
register: default_nacls
ec2_vpc_nacl_info:
region: us-west-2
filters:
'default': 'true'
'''
RETURN = '''
nacls:
description: Returns an array of complex objects as described below.
returned: success
type: complex
contains:
nacl_id:
description: The ID of the Network Access Control List.
returned: always
type: str
vpc_id:
description: The ID of the VPC that the NACL is attached to.
returned: always
type: str
is_default:
description: True if the NACL is the default for its VPC.
returned: always
type: bool
tags:
description: A dict of tags associated with the NACL.
returned: always
type: dict
subnets:
description: A list of subnet IDs that are associated with the NACL.
returned: always
type: list
elements: str
ingress:
description:
- A list of NACL ingress rules with the following format.
- "C([rule no, protocol, allow/deny, v4 or v6 cidr, icmp_type, icmp_code, port from, port to])"
returned: always
type: list
elements: list
sample: [[100, 'tcp', 'allow', '0.0.0.0/0', null, null, 22, 22]]
egress:
description:
- A list of NACL egress rules with the following format.
- "C([rule no, protocol, allow/deny, v4 or v6 cidr, icmp_type, icmp_code, port from, port to])"
returned: always
type: list
elements: list
sample: [[100, 'all', 'allow', '0.0.0.0/0', null, null, 0, 65535]]
'''
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils._text import to_native
from ansible.module_utils.ec2 import (AWSRetry, ansible_dict_to_boto3_filter_list,
camel_dict_to_snake_dict, boto3_tag_list_to_ansible_dict)
# VPC-supported IANA protocol numbers
# http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml
PROTOCOL_NAMES = {'-1': 'all', '1': 'icmp', '6': 'tcp', '17': 'udp'}
def list_ec2_vpc_nacls(connection, module):
nacl_ids = module.params.get("nacl_ids")
filters = ansible_dict_to_boto3_filter_list(module.params.get("filters"))
if nacl_ids is None:
nacl_ids = []
try:
nacls = connection.describe_network_acls(aws_retry=True, NetworkAclIds=nacl_ids, Filters=filters)
except ClientError as e:
if e.response['Error']['Code'] == 'InvalidNetworkAclID.NotFound':
module.fail_json(msg='Unable to describe ACL. NetworkAcl does not exist')
module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids))
except BotoCoreError as e:
module.fail_json_aws(e, msg="Unable to describe network ACLs {0}".format(nacl_ids))
# Turn the boto3 result in to ansible_friendly_snaked_names
snaked_nacls = []
for nacl in nacls['NetworkAcls']:
snaked_nacls.append(camel_dict_to_snake_dict(nacl))
# Turn the boto3 result in to ansible friendly tag dictionary
for nacl in snaked_nacls:
if 'tags' in nacl:
nacl['tags'] = boto3_tag_list_to_ansible_dict(nacl['tags'], 'key', 'value')
if 'entries' in nacl:
nacl['egress'] = [nacl_entry_to_list(entry) for entry in nacl['entries']
if entry['rule_number'] < 32767 and entry['egress']]
nacl['ingress'] = [nacl_entry_to_list(entry) for entry in nacl['entries']
if entry['rule_number'] < 32767 and not entry['egress']]
del nacl['entries']
if 'associations' in nacl:
nacl['subnets'] = [a['subnet_id'] for a in nacl['associations']]
del nacl['associations']
if 'network_acl_id' in nacl:
nacl['nacl_id'] = nacl['network_acl_id']
del nacl['network_acl_id']
module.exit_json(nacls=snaked_nacls)
def nacl_entry_to_list(entry):
# entry list format
# [ rule_num, protocol name or number, allow or deny, ipv4/6 cidr, icmp type, icmp code, port from, port to]
elist = []
elist.append(entry['rule_number'])
if entry.get('protocol') in PROTOCOL_NAMES:
elist.append(PROTOCOL_NAMES[entry['protocol']])
else:
elist.append(entry.get('protocol'))
elist.append(entry['rule_action'])
if entry.get('cidr_block'):
elist.append(entry['cidr_block'])
elif entry.get('ipv6_cidr_block'):
elist.append(entry['ipv6_cidr_block'])
else:
elist.append(None)
elist = elist + [None, None, None, None]
if entry['protocol'] in ('1', '58'):
elist[4] = entry.get('icmp_type_code', {}).get('type')
elist[5] = entry.get('icmp_type_code', {}).get('code')
if entry['protocol'] not in ('1', '6', '17', '58'):
elist[6] = 0
elist[7] = 65535
elif 'port_range' in entry:
elist[6] = entry['port_range']['from']
elist[7] = entry['port_range']['to']
return elist
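# Illustrative example: an SSH allow rule (rule_number 100, protocol '6',
# rule_action 'allow', cidr_block '0.0.0.0/0', port_range 22-22) becomes
# [100, 'tcp', 'allow', '0.0.0.0/0', None, None, 22, 22]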
def main():
argument_spec = dict(
nacl_ids=dict(default=[], type='list', aliases=['nacl_id']),
filters=dict(default={}, type='dict'))
module = AnsibleAWSModule(argument_spec=argument_spec, supports_check_mode=True)
if module._name == 'ec2_vpc_nacl_facts':
module.deprecate("The 'ec2_vpc_nacl_facts' module has been renamed to 'ec2_vpc_nacl_info'", version='2.13')
connection = module.client('ec2', retry_decorator=AWSRetry.jittered_backoff())
list_ec2_vpc_nacls(connection, module)
if __name__ == '__main__':
main()
|
koying/xbmc
|
refs/heads/master-krypton
|
tools/Fake Episode Maker/openAnything.py
|
169
|
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2013 Team XBMC
# http://xbmc.org
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, see
# <http://www.gnu.org/licenses/>.
#
import sys, urllib2, urlparse, gzip
from StringIO import StringIO
USER_AGENT = 'OpenAnything/1.0 +http://diveintopython.org/http_web_services/'
class SmartRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_301(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_301(
self, req, fp, code, msg, headers)
result.status = code
return result
def http_error_302(self, req, fp, code, msg, headers):
result = urllib2.HTTPRedirectHandler.http_error_302(
self, req, fp, code, msg, headers)
result.status = code
return result
class DefaultErrorHandler(urllib2.HTTPDefaultErrorHandler):
def http_error_default(self, req, fp, code, msg, headers):
result = urllib2.HTTPError(
req.get_full_url(), code, msg, headers, fp)
result.status = code
return result
def openAnything(source, etag=None, lastmodified=None, agent=USER_AGENT):
'''URL, filename, or string --> stream
This function lets you define parsers that take any input source
(URL, pathname to local or network file, or actual data as a string)
and deal with it in a uniform manner. Returned object is guaranteed
to have all the basic stdio read methods (read, readline, readlines).
Just .close() the object when you're done with it.
If the etag argument is supplied, it will be used as the value of an
If-None-Match request header.
If the lastmodified argument is supplied, it must be a formatted
date/time string in GMT (as returned in the Last-Modified header of
a previous request). The formatted date/time will be used
as the value of an If-Modified-Since request header.
If the agent argument is supplied, it will be used as the value of a
User-Agent request header.
'''
if hasattr(source, 'read'):
return source
if source == '-':
return sys.stdin
if urlparse.urlparse(source)[0] == 'http':
# open URL with urllib2
request = urllib2.Request(source)
request.add_header('User-Agent', agent)
if etag:
request.add_header('If-None-Match', etag)
if lastmodified:
request.add_header('If-Modified-Since', lastmodified)
request.add_header('Accept-encoding', 'gzip')
opener = urllib2.build_opener(SmartRedirectHandler(), DefaultErrorHandler())
return opener.open(request)
# try to open with native open function (if source is a filename)
try:
return open(source)
except (IOError, OSError):
pass
# treat source as string
return StringIO(str(source))
def fetch(source, etag=None, last_modified=None, agent=USER_AGENT):
'''Fetch data and metadata from a URL, file, stream, or string'''
result = {}
f = openAnything(source, etag, last_modified, agent)
result['data'] = f.read()
if hasattr(f, 'headers'):
# save ETag, if the server sent one
result['etag'] = f.headers.get('ETag')
# save Last-Modified header, if the server sent one
result['lastmodified'] = f.headers.get('Last-Modified')
if f.headers.get('content-encoding', '') == 'gzip':
# data came back gzip-compressed, decompress it
result['data'] = gzip.GzipFile(fileobj=StringIO(result['data'])).read()
if hasattr(f, 'url'):
result['url'] = f.url
result['status'] = 200
if hasattr(f, 'status'):
result['status'] = f.status
f.close()
return result
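# Minimal usage sketch (URL illustrative):
#   result = fetch('http://example.com/feed.xml')
#   data, status = result['data'], result['status']
#   # re-fetch conditionally with whatever validators the server sent:
#   result = fetch('http://example.com/feed.xml',
#                  etag=result.get('etag'),
#                  last_modified=result.get('lastmodified'))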
|
rudimeier/numpy
|
refs/heads/master
|
numpy/polynomial/legendre.py
|
75
|
"""
Legendre Series (:mod:`numpy.polynomial.legendre`)
===================================================
.. currentmodule:: numpy.polynomial.legendre
This module provides a number of objects (mostly functions) useful for
dealing with Legendre series, including a `Legendre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
.. autosummary::
:toctree: generated/
legdomain Legendre series default domain, [-1,1].
legzero Legendre series that evaluates identically to 0.
legone Legendre series that evaluates identically to 1.
legx Legendre series for the identity map, ``f(x) = x``.
Arithmetic
----------
.. autosummary::
:toctree: generated/
legmulx multiply a Legendre series in P_i(x) by x.
legadd add two Legendre series.
legsub subtract one Legendre series from another.
legmul multiply two Legendre series.
legdiv divide one Legendre series by another.
legpow raise a Legendre series to a positive integer power.
legval evaluate a Legendre series at given points.
legval2d evaluate a 2D Legendre series at given points.
legval3d evaluate a 3D Legendre series at given points.
leggrid2d evaluate a 2D Legendre series on a Cartesian product.
leggrid3d evaluate a 3D Legendre series on a Cartesian product.
Calculus
--------
.. autosummary::
:toctree: generated/
legder differentiate a Legendre series.
legint integrate a Legendre series.
Misc Functions
--------------
.. autosummary::
:toctree: generated/
legfromroots create a Legendre series with specified roots.
legroots find the roots of a Legendre series.
legvander Vandermonde-like matrix for Legendre polynomials.
legvander2d Vandermonde-like matrix for 2D power series.
legvander3d Vandermonde-like matrix for 3D power series.
leggauss Gauss-Legendre quadrature, points and weights.
legweight Legendre weight function.
legcompanion symmetrized companion matrix in Legendre form.
legfit least-squares fit returning a Legendre series.
legtrim trim leading coefficients from a Legendre series.
legline Legendre series representing given straight line.
leg2poly convert a Legendre series to a polynomial.
poly2leg convert a polynomial to a Legendre series.
Classes
-------
Legendre A Legendre series class.
See also
--------
numpy.polynomial.polynomial
numpy.polynomial.chebyshev
numpy.polynomial.laguerre
numpy.polynomial.hermite
numpy.polynomial.hermite_e
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd',
'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder',
'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander',
'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d',
'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion',
'leggauss', 'legweight']
legtrim = pu.trimcoef
def poly2leg(pol):
"""
Convert a polynomial to a Legendre series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Legendre series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Legendre
series.
See Also
--------
leg2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
Polynomial([ 0., 1., 2., 3.], [-1., 1.])
>>> c = P.Legendre(P.poly2leg(p.coef))
>>> c
Legendre([ 1. , 3.25, 1. , 0.75], [-1., 1.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = legadd(legmulx(res), pol[i])
return res
def leg2poly(c):
"""
Convert a Legendre series to a polynomial.
Convert an array representing the coefficients of a Legendre series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Legendre series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2leg
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> c = P.Legendre(range(4))
>>> c
Legendre([ 0., 1., 2., 3.], [-1., 1.])
>>> p = c.convert(kind=P.Polynomial)
>>> p
Polynomial([-1. , -3.5, 3. , 7.5], [-1., 1.])
>>> P.leg2poly(range(4))
array([-1. , -3.5, 3. , 7.5])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n < 3:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], (c1*(i - 1))/i)
c1 = polyadd(tmp, (polymulx(c1)*(2*i - 1))/i)
return polyadd(c0, polymulx(c1))
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Legendre
legdomain = np.array([-1, 1])
# Legendre coefficients representing zero.
legzero = np.array([0])
# Legendre coefficients representing one.
legone = np.array([1])
# Legendre coefficients representing the identity x.
legx = np.array([0, 1])
def legline(off, scl):
"""
Legendre series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Legendre series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legline(3,2)
array([3, 2])
>>> L.legval(-3, L.legline(3,2)) # should be -3
-3.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def legfromroots(roots):
"""
Generate a Legendre series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in Legendre form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in Legendre form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, chebfromroots, lagfromroots, hermfromroots,
hermefromroots.
Examples
--------
>>> import numpy.polynomial.legendre as L
>>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis
array([ 0. , -0.4, 0. , 0.4])
>>> j = complex(0,1)
>>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis
array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [legline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [legmul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = legmul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def legadd(c1, c2):
"""
Add one Legendre series to another.
Returns the sum of two Legendre series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Legendre series of their sum.
See Also
--------
legsub, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the sum of two Legendre series
is a Legendre series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legadd(c1,c2)
array([ 4., 4., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legsub(c1, c2):
"""
Subtract one Legendre series from another.
Returns the difference of two Legendre series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their difference.
See Also
--------
legadd, legmul, legdiv, legpow
Notes
-----
Unlike multiplication, division, etc., the difference of two Legendre
series is a Legendre series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legsub(c1,c2)
array([-2., 0., 2.])
>>> L.legsub(c2,c1) # -C.legsub(c1,c2)
array([ 2., 0., -2.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def legmulx(c):
"""Multiply a Legendre series by x.
Multiply the Legendre series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Legendre
polynomials in the form
.. math::
xP_i(x) = ((i + 1)*P_{i + 1}(x) + i*P_{i - 1}(x))/(2i + 1)
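Examples
--------
A small hand-checkable case, added here for illustration; the values
follow from the recursion relation above:
>>> from numpy.polynomial import legendre as L
>>> L.legmulx([1, 2, 3])
array([ 0.66666667, 2.2, 1.33333333, 1.8])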
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
for i in range(1, len(c)):
j = i + 1
k = i - 1
s = i + j
prd[j] = (c[i]*j)/s
prd[k] += (c[i]*i)/s
return prd
def legmul(c1, c2):
"""
Multiply one Legendre series by another.
Returns the product of two Legendre series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Legendre series coefficients representing their product.
See Also
--------
legadd, legsub, legdiv, legpow
Notes
-----
In general, the (polynomial) product of two C-series results in terms
that are not in the Legendre polynomial basis set. Thus, to express
the product as a Legendre series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2)
>>> L.legmul(c1,c2) # multiplication requires "reprojection"
array([ 4.33333333, 10.4 , 11.66666667, 3.6 ])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = legsub(c[-i]*xs, (c1*(nd - 1))/nd)
c1 = legadd(tmp, (legmulx(c1)*(2*nd - 1))/nd)
return legadd(c0, legmulx(c1))
def legdiv(c1, c2):
"""
Divide one Legendre series by another.
Returns the quotient-with-remainder of two Legendre series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Legendre series coefficients ordered from low to
high.
Returns
-------
quo, rem : ndarrays
Of Legendre series coefficients representing the quotient and
remainder.
See Also
--------
legadd, legsub, legmul, legpow
Notes
-----
In general, the (polynomial) division of one Legendre series by another
results in quotient and remainder terms that are not in the Legendre
polynomial basis set. Thus, to express these results as a Legendre
series, it is necessary to "reproject" the results onto the Legendre
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legdiv(c1,c2) # quotient "intuitive," remainder not
(array([ 3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> L.legdiv(c2,c1) # neither "intuitive"
(array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = legmul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
def legpow(c, pow, maxpower=16):
"""Raise a Legendre series to a power.
Returns the Legendre series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high,
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Legendre series of power.
See Also
--------
legadd, legsub, legmul, legdiv
Examples
--------
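A small sketch, added for illustration: squaring ``P_0 + 2*P_1`` gives
``7/3*P_0 + 4*P_1 + 8/3*P_2``, since ``P_1**2 = x**2 = (2*P_2 + P_0)/3``:
>>> from numpy.polynomial import legendre as L
>>> L.legpow([1, 2], 2)
array([ 2.33333333, 4., 2.66666667])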
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = legmul(prd, c)
return prd
def legder(c, m=1, scl=1, axis=0):
"""
Differentiate a Legendre series.
Returns the Legendre series coefficients `c` differentiated `m` times
along `axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*L_0 + 2*L_1 + 3*L_2``
while [[1,2],[1,2]] represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) +
2*L_0(x)*L_1(y) + 2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is
``y``.
Parameters
----------
c : array_like
Array of Legendre series coefficients. If c is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Legendre series of the derivative.
See Also
--------
legint
Notes
-----
In general, the result of differentiating a Legendre series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c = (1,2,3,4)
>>> L.legder(c)
array([ 6., 9., 20.])
>>> L.legder(c, 3)
array([ 60.])
>>> L.legder(c, scl=-1)
array([ -6., -9., -20.])
>>> L.legder(c, 2,-1)
array([ 9., 60.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
c = c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 2, -1):
der[j - 1] = (2*j - 1)*c[j]
c[j - 2] += c[j]
if n > 1:
der[1] = 3*c[2]
der[0] = c[1]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Legendre series.
Returns the Legendre series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``L_0 + 2*L_1 + 3*L_2`` while [[1,2],[1,2]]
represents ``1*L_0(x)*L_0(y) + 1*L_1(x)*L_0(y) + 2*L_0(x)*L_1(y) +
2*L_1(x)*L_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Legendre series coefficients. If c is multidimensional the
different axis correspond to different variables with the degree in
each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Legendre series coefficient array of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
legder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
:math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial import legendre as L
>>> c = (1,2,3)
>>> L.legint(c)
array([ 0.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, 3)
array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02,
-1.73472348e-18, 1.90476190e-02, 9.52380952e-03])
>>> L.legint(c, k=3)
array([ 3.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, lbnd=-2)
array([ 7.33333333, 0.4 , 0.66666667, 0.6 ])
>>> L.legint(c, scl=2)
array([ 0.66666667, 0.8 , 1.33333333, 1.2 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
if n > 1:
tmp[2] = c[1]/3
for j in range(2, n):
t = c[j]/(2*j + 1)
tmp[j + 1] = t
tmp[j - 1] -= t
tmp[0] += k[i] - legval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
def legval(x, c, tensor=True):
"""
Evaluate a Legendre series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape ().
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
legval2d, leggrid2d, legval3d, leggrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
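A small illustration, added here; it uses the fact that ``P_n(1) = 1``
for all n, so the value at ``x = 1`` is just the sum of the coefficients:
>>> from numpy.polynomial.legendre import legval
>>> legval(1, [1, 2, 3])
6.0
>>> legval([-1, 0, 1], [1, 2, 3])
array([ 2. , -0.5, 6. ])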
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - (c1*(nd - 1))/nd
c1 = tmp + (c1*x*(2*nd - 1))/nd
return c0 + c1*x
def legval2d(x, y, c):
"""
Evaluate a 2-D Legendre series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * L_i(x) * L_j(y)
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Legendre series at points formed
from pairs of corresponding values from `x` and `y`.
See Also
--------
legval, leggrid2d, legval3d, leggrid3d
Notes
-----
.. versionadded:: 1.7.0
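Examples
--------
A small sketch, added for illustration: with ``c = [[1, 0], [0, 1]]`` the
series is ``1 + x*y``, so evaluating at ``(0.5, 2)`` gives ``2.0``:
>>> from numpy.polynomial.legendre import legval2d
>>> legval2d(0.5, 2., [[1, 0], [0, 1]])
2.0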
"""
try:
x, y = np.array((x, y), copy=0)
except:
raise ValueError('x, y are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
return c
def leggrid2d(x, y, c):
"""
Evaluate a 2-D Legendre series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
its shape to make it 2-D. The shape of the result will be c.shape[2:] +
x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j is contained in `c[i,j]`. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional Legendre series at points in the
Cartesian product of `x` and `y`.
See Also
--------
legval, legval2d, legval3d, leggrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
c = legval(x, c)
c = legval(y, c)
return c
def legval3d(x, y, z, c):
"""
Evaluate a 3-D Legendre series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * L_i(x) * L_j(y) * L_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
legval, legval2d, leggrid2d, leggrid3d
Notes
-----
.. versionadded:: 1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except:
raise ValueError('x, y, z are incompatible')
c = legval(x, c)
c = legval(y, c, tensor=False)
c = legval(z, c, tensor=False)
return c
def leggrid3d(x, y, z, c):
"""
Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the three dimensional series at points in the Cartesian
product of `x`, `y`, and `z`.
See Also
--------
legval, legval2d, leggrid2d, legval3d
Notes
-----
.. versionadded:: 1.7.0
"""
c = legval(x, c)
c = legval(y, c)
c = legval(z, c)
return c
def legvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = L_i(x)
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Legendre polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and
``legval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Legendre series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding Legendre polynomial. The dtype will be the same as
the converted `x`.
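Examples
--------
A short sketch, added for illustration, of the ``np.dot(V, c)`` /
``legval(x, c)`` equivalence described above:
>>> import numpy as np
>>> from numpy.polynomial.legendre import legvander, legval
>>> x = np.array([-1., 0., 1.])
>>> np.allclose(np.dot(legvander(x, 2), [1, 2, 3]), legval(x, [1, 2, 3]))
True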
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
# Use forward recursion to generate the entries. This is not as accurate
# as reverse recursion in this application but it is more efficient.
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i
return np.rollaxis(v, 0, v.ndim)
def legvander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., deg[1]*i + j] = L_i(x) * L_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the Legendre polynomials.
If ``V = legvander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``legval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D Legendre
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
legvander, legvander3d, legval2d, legval3d
Notes
-----
.. versionadded:: 1.7.0
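Examples
--------
A short sketch, added for illustration, of the ``np.dot(V, c.flat)`` /
``legval2d(x, y, c)`` equivalence described above:
>>> import numpy as np
>>> from numpy.polynomial.legendre import legvander2d, legval2d
>>> x = np.array([-1., 0., 1.])
>>> y = np.array([0.5, 0.5, 0.5])
>>> c = np.array([[1., 2.], [3., 4.]])
>>> V = legvander2d(x, y, [1, 1])
>>> np.allclose(np.dot(V, c.flat), legval2d(x, y, c))
True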
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def legvander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),
where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the Legendre polynomials.
If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D Legendre
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
:math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
legvander, legvander2d, legval2d, legval3d
Notes
-----
.. versionadded:: 1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = legvander(x, degx)
vy = legvander(y, degy)
vz = legvander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def legfit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Legendre series to data.
Return the coefficients of a Legendre series of degree `deg` that is the
least squares fit to the data values `y` given at points `x`. If `y` is
1-D the returned coefficients will also be 1-D. If `y` is 2-D multiple
fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
.. versionadded:: 1.5.0
Returns
-------
coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Legendre coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
chebfit, polyfit, lagfit, hermfit, hermefit
legval : Evaluates a Legendre series.
legvander : Vandermonde matrix of Legendre series.
legweight : Legendre weight function (= 1).
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the Legendre series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where :math:`w_j` are the weights. This problem is solved by setting up
as the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the weighted pseudo Vandermonde matrix of `x`, `c` are the
coefficients to be solved for, `w` are the weights, and `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using Legendre series are usually better conditioned than fits
using power series, but much can depend on the distribution of the
sample points and the smoothness of the data. If the quality of the fit
is inadequate splines may be a good alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
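A small sketch, added for illustration: exactly linear data is recovered
exactly, since ``1 + 2*x`` is ``P_0 + 2*P_1``:
>>> import numpy as np
>>> from numpy.polynomial import legendre as L
>>> x = np.array([-1., 0., 1.])
>>> L.legfit(x, 1 + 2*x, 1)
array([ 1., 2.])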
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = legvander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def legcompanion(c):
"""Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is a Legendre basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of Legendre series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded:: 1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = 1./np.sqrt(2*np.arange(n) + 1)
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.arange(1, n)*scl[:n-1]*scl[1:n]
bot[...] = top
mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1))
return mat
def legroots(c):
"""
Compute the roots of a Legendre series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * L_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, chebroots, lagroots, hermroots, hermeroots
Notes
-----
The root estimates are obtained as the eigenvalues of the companion
matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such values.
Roots with multiplicity greater than 1 will also show larger errors as
the value of the series near such points is relatively insensitive to
errors in the roots. Isolated roots near the origin can be improved by
a few iterations of Newton's method.
The Legendre series basis polynomials aren't powers of ``x`` so the
results of this function may seem unintuitive.
Examples
--------
>>> import numpy.polynomial.legendre as leg
>>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots
array([-0.85099543, -0.11407192, 0.51506735])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = legcompanion(c)
r = la.eigvals(m)
r.sort()
return r
def leggauss(deg):
"""
Gauss-Legendre quadrature.
Computes the sample points and weights for Gauss-Legendre quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded:: 1.7.0
The results have only been tested up to degree 100; higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`L_n`, and then scaling the results to get
the right value when integrating 1.
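Examples
--------
A short check, added for illustration: a 3-point rule integrates
polynomials up to degree 5 exactly, e.g. ``x**2`` over ``[-1, 1]``:
>>> import numpy as np
>>> from numpy.polynomial.legendre import leggauss
>>> x, w = leggauss(3)
>>> np.allclose(np.sum(w * x**2), 2.0/3.0)
True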
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
raise ValueError("deg must be a positive integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = legcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = legval(x, c)
df = legval(x, legder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = legval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# for Legendre we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= 2. / w.sum()
return x, w
def legweight(x):
"""
Weight function of the Legendre polynomials.
The weight function is :math:`1` and the interval of integration is
:math:`[-1, 1]`. The Legendre polynomials are orthogonal, but not
normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded:: 1.7.0
"""
w = x*0.0 + 1.0
return w
#
# Legendre series class
#
class Legendre(ABCPolyBase):
"""A Legendre series class.
The Legendre class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
Legendre coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(legadd)
_sub = staticmethod(legsub)
_mul = staticmethod(legmul)
_div = staticmethod(legdiv)
_pow = staticmethod(legpow)
_val = staticmethod(legval)
_int = staticmethod(legint)
_der = staticmethod(legder)
_fit = staticmethod(legfit)
_line = staticmethod(legline)
_roots = staticmethod(legroots)
_fromroots = staticmethod(legfromroots)
# Virtual properties
nickname = 'leg'
domain = np.array(legdomain)
window = np.array(legdomain)
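# A minimal usage sketch, added for illustration (not part of the original
# module): the class wraps the functions defined above.
#
# >>> p = Legendre([1, 2, 3]) # P_0 + 2*P_1 + 3*P_2
# >>> p(1.0) # P_n(1) == 1, so the value is the coefficient sum
# 6.0
# >>> p.deriv().coef
# array([ 2., 9.])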
|
gcallah/Indra
|
refs/heads/master
|
registry/tests/test_registry.py
|
1
|
"""
This is the test suite for registry.py.
"""
from unittest import TestCase, main
from registry.registry import Registry, REGISTRY, get_prop
from registry.registry import set_env, get_env, add_group, get_group
TEST_VAL_STR = "test_val"
TEST_VAL = 1
class RegistryTestCase(TestCase):
def setUp(self):
self.reg = Registry()
self.reg[TEST_VAL_STR] = TEST_VAL
def tearDown(self):
self.reg = None
def test_get_item(self):
self.assertEqual(TEST_VAL, self.reg[TEST_VAL_STR])
def test_get_prop_no_pa(self):
"""
No prop args set up... can we still get prop ok?
"""
prop_val = get_prop("key", "default")
self.assertEqual(prop_val, "default")
def test_get_env(self):
"""
We don't need to use "real" Env: just make sure get returns
what set put in!
"""
set_env("Test")
env = get_env()
self.assertEqual(env, "Test")
def test_get_group(self):
"""
Don't worry about using a "real" group: just make sure we
get what was added!
"""
add_group("Group name", "Test")
grp = get_group("Group name")
self.assertEqual(grp, "Test")
|
BT-jmichaud/sale-workflow
|
refs/heads/8.0
|
sale_quotation_sourcing/tests/test_consistent_route.py
|
35
|
# Author: Leonardo Pistone
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from openerp.tests.common import TransactionCase
class TestConsistentRoute(TransactionCase):
def test_dropshipping_route_purchase_to_customer_passes(self):
self.sale_line.route_id = self.dropship
self.po.location_id.usage = 'customer'
self.assertIs(True, self.sale_line.has_consistent_route())
def test_dropshipping_route_purchase_to_internal_fails(self):
self.sale_line.route_id = self.dropship
self.po.location_id.usage = 'internal'
self.assertIs(False, self.sale_line.has_consistent_route())
def test_mto_route_purchase_to_customer_fails(self):
self.sale_line.route_id = self.mto
self.po.location_id.usage = 'customer'
self.assertIs(False, self.sale_line.has_consistent_route())
def test_mto_route_purchase_to_internal_passes(self):
self.sale_line.route_id = self.mto
self.po.location_id.usage = 'internal'
self.assertIs(True, self.sale_line.has_consistent_route())
def test_no_route_passes(self):
self.assertIs(True, self.sale_line.has_consistent_route())
def setUp(self):
super(TestConsistentRoute, self).setUp()
self.dropship = self.env.ref('stock_dropshipping.route_drop_shipping')
self.mto = self.env.ref('stock.route_warehouse0_mto')
self.po = self.env['purchase.order'].new({
'location_id': self.env['stock.location'].new(),
})
self.purchase_line = self.env['purchase.order.line'].new({
'order_id': self.po,
})
self.sale_line = self.env['sale.order.line'].new({
'sourced_by': self.purchase_line,
})
|
prabhucomo/symfonytask
|
refs/heads/master
|
vendor/doctrine/orm/docs/en/conf.py
|
2448
|
# -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
|
defionscode/ansible
|
refs/heads/devel
|
lib/ansible/plugins/connection/paramiko_ssh.py
|
19
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
author: Ansible Core Team
connection: paramiko
short_description: Run tasks via python ssh (paramiko)
description:
- Use the python ssh implementation (Paramiko) to connect to targets
- The paramiko transport is provided because many distributions, in particular EL6 and before, do not support ControlPersist
in their SSH implementations.
- This is needed on the Ansible control machine to be reasonably efficient with connections.
Thus paramiko is faster for most users on these platforms.
Users with ControlPersist capability can consider using -c ssh or configuring the transport in the configuration file.
- This plugin also borrows a lot of settings from the ssh plugin as they both cover the same protocol.
version_added: "0.1"
options:
remote_addr:
description:
- Address of the remote target
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_ssh_host
- name: ansible_paramiko_host
remote_user:
description:
- User to login/authenticate as
- Can be set from the CLI via the C(--user) or C(-u) options.
vars:
- name: ansible_user
- name: ansible_ssh_user
- name: ansible_paramiko_user
env:
- name: ANSIBLE_REMOTE_USER
- name: ANSIBLE_PARAMIKO_REMOTE_USER
version_added: '2.5'
ini:
- section: defaults
key: remote_user
- section: paramiko_connection
key: remote_user
version_added: '2.5'
password:
description:
          - Secret used either to log in to the ssh server or as a passphrase for ssh keys that require it
          - Can be set from the CLI via the C(--ask-pass) option.
vars:
- name: ansible_password
- name: ansible_ssh_pass
- name: ansible_paramiko_pass
version_added: '2.5'
host_key_auto_add:
        description: 'Automatically add host keys to the known hosts file without prompting for confirmation'
env: [{name: ANSIBLE_PARAMIKO_HOST_KEY_AUTO_ADD}]
ini:
- {key: host_key_auto_add, section: paramiko_connection}
type: boolean
look_for_keys:
default: True
        description: 'Toggles the use of private key files discovered in the default locations (e.g. ~/.ssh) for authentication'
env: [{name: ANSIBLE_PARAMIKO_LOOK_FOR_KEYS}]
ini:
- {key: look_for_keys, section: paramiko_connection}
type: boolean
proxy_command:
default: ''
description:
          - Proxy information for running the connection via a jumphost
          - This plugin will also scan 'ssh_args', 'ssh_extra_args' and 'ssh_common_args' from the 'ssh' plugin settings for proxy information, if set.
env: [{name: ANSIBLE_PARAMIKO_PROXY_COMMAND}]
ini:
- {key: proxy_command, section: paramiko_connection}
pty:
default: True
        description: 'Toggles requesting a pseudo terminal for the session; sudo usually requires a pty'
env:
- name: ANSIBLE_PARAMIKO_PTY
ini:
- section: paramiko_connection
key: pty
type: boolean
record_host_keys:
default: True
        description: 'Toggles saving newly accepted host keys to the known hosts file when the connection closes'
env: [{name: ANSIBLE_PARAMIKO_RECORD_HOST_KEYS}]
ini:
- section: paramiko_connection
key: record_host_keys
type: boolean
host_key_checking:
description: 'Set this to "False" if you want to avoid host key checking by the underlying tools Ansible uses to connect to the host'
type: boolean
default: True
env:
- name: ANSIBLE_HOST_KEY_CHECKING
- name: ANSIBLE_SSH_HOST_KEY_CHECKING
version_added: '2.5'
- name: ANSIBLE_PARAMIKO_HOST_KEY_CHECKING
version_added: '2.5'
ini:
- section: defaults
key: host_key_checking
- section: paramiko_connection
key: host_key_checking
version_added: '2.5'
vars:
- name: ansible_host_key_checking
version_added: '2.5'
- name: ansible_ssh_host_key_checking
version_added: '2.5'
- name: ansible_paramiko_host_key_checking
version_added: '2.5'
use_persistent_connections:
description: 'Toggles the use of persistence for connections'
type: boolean
default: False
env:
- name: ANSIBLE_USE_PERSISTENT_CONNECTIONS
ini:
- section: defaults
key: use_persistent_connections
# TODO:
#timeout=self._play_context.timeout,
"""
import warnings
import os
import socket
import tempfile
import traceback
import fcntl
import sys
import re
from termios import tcflush, TCIFLUSH
from binascii import hexlify
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.module_utils.six import iteritems
from ansible.module_utils.six.moves import input
from ansible.plugins.connection import ConnectionBase
from ansible.utils.path import makedirs_safe
from ansible.module_utils._text import to_bytes, to_native
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
AUTHENTICITY_MSG = """
paramiko: The authenticity of host '%s' can't be established.
The %s key fingerprint is %s.
Are you sure you want to continue connecting (yes/no)?
"""
# SSH Options Regex
SETTINGS_REGEX = re.compile(r'(\w+)(?:\s*=\s*|\s+)(.+)')
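# Illustrative matches for SETTINGS_REGEX (sample strings are hypothetical):
#   'ProxyCommand=ssh -W %h:%p jump' -> ('ProxyCommand', 'ssh -W %h:%p jump')
#   'ProxyCommand ssh -W %h:%p jump' -> ('ProxyCommand', 'ssh -W %h:%p jump')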
# prevent paramiko warning noise -- see http://stackoverflow.com/questions/3920502/
HAVE_PARAMIKO = False
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
import paramiko
HAVE_PARAMIKO = True
except ImportError:
pass
class MyAddPolicy(object):
"""
Based on AutoAddPolicy in paramiko so we can determine when keys are added
and also prompt for input.
Policy for automatically adding the hostname and new host key to the
local L{HostKeys} object, and saving it. This is used by L{SSHClient}.
"""
def __init__(self, new_stdin, connection):
self._new_stdin = new_stdin
self.connection = connection
self._options = connection._options
def missing_host_key(self, client, hostname, key):
if all((self._options['host_key_checking'], not self._options['host_key_auto_add'])):
fingerprint = hexlify(key.get_fingerprint())
ktype = key.get_name()
if self.connection.get_option('use_persistent_connections') or self.connection.force_persistence:
# don't print the prompt string since the user cannot respond
# to the question anyway
raise AnsibleError(AUTHENTICITY_MSG[1:92] % (hostname, ktype, fingerprint))
self.connection.connection_lock()
old_stdin = sys.stdin
sys.stdin = self._new_stdin
# clear out any premature input on sys.stdin
tcflush(sys.stdin, TCIFLUSH)
inp = input(AUTHENTICITY_MSG % (hostname, ktype, fingerprint))
sys.stdin = old_stdin
self.connection.connection_unlock()
if inp not in ['yes', 'y', '']:
raise AnsibleError("host connection rejected by user")
key._added_by_ansible_this_time = True
# existing implementation below:
client._host_keys.add(hostname, key.get_name(), key)
# host keys are actually saved in close() function below
# in order to control ordering.
# keep connection objects on a per host basis to avoid repeated attempts to reconnect
SSH_CONNECTION_CACHE = {}
SFTP_CONNECTION_CACHE = {}
class Connection(ConnectionBase):
''' SSH based connections with Paramiko '''
transport = 'paramiko'
_log_channel = None
def _cache_key(self):
return "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user)
def _connect(self):
cache_key = self._cache_key()
if cache_key in SSH_CONNECTION_CACHE:
self.ssh = SSH_CONNECTION_CACHE[cache_key]
else:
self.ssh = SSH_CONNECTION_CACHE[cache_key] = self._connect_uncached()
return self
def _set_log_channel(self, name):
'''Mimic paramiko.SSHClient.set_log_channel'''
self._log_channel = name
def _parse_proxy_command(self, port=22):
proxy_command = None
# Parse ansible_ssh_common_args, specifically looking for ProxyCommand
ssh_args = [
getattr(self._play_context, 'ssh_extra_args', '') or '',
getattr(self._play_context, 'ssh_common_args', '') or '',
getattr(self._play_context, 'ssh_args', '') or '',
]
        if ssh_args:
args = self._split_ssh_args(' '.join(ssh_args))
for i, arg in enumerate(args):
if arg.lower() == 'proxycommand':
# _split_ssh_args split ProxyCommand from the command itself
proxy_command = args[i + 1]
else:
# ProxyCommand and the command itself are a single string
match = SETTINGS_REGEX.match(arg)
if match:
if match.group(1).lower() == 'proxycommand':
proxy_command = match.group(2)
if proxy_command:
break
proxy_command = proxy_command or self.get_option('proxy_command')
sock_kwarg = {}
if proxy_command:
replacers = {
'%h': self._play_context.remote_addr,
'%p': port,
'%r': self._play_context.remote_user
}
for find, replace in replacers.items():
proxy_command = proxy_command.replace(find, str(replace))
try:
sock_kwarg = {'sock': paramiko.ProxyCommand(proxy_command)}
display.vvv("CONFIGURE PROXY COMMAND FOR CONNECTION: %s" % proxy_command, host=self._play_context.remote_addr)
except AttributeError:
display.warning('Paramiko ProxyCommand support unavailable. '
'Please upgrade to Paramiko 1.9.0 or newer. '
'Not using configured ProxyCommand')
return sock_kwarg
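    # Illustrative substitution performed by _parse_proxy_command above
    # (values are hypothetical): with proxy_command='ssh -W %h:%p jumphost',
    # remote_addr='203.0.113.10' and port=22, the command handed to
    # paramiko.ProxyCommand becomes 'ssh -W 203.0.113.10:22 jumphost'.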
def _connect_uncached(self):
''' activates the connection object '''
if not HAVE_PARAMIKO:
raise AnsibleError("paramiko is not installed")
port = self._play_context.port or 22
display.vvv("ESTABLISH CONNECTION FOR USER: %s on PORT %s TO %s" % (self._play_context.remote_user, port, self._play_context.remote_addr),
host=self._play_context.remote_addr)
ssh = paramiko.SSHClient()
# override paramiko's default logger name
if self._log_channel is not None:
ssh.set_log_channel(self._log_channel)
self.keyfile = os.path.expanduser("~/.ssh/known_hosts")
if self.get_option('host_key_checking'):
for ssh_known_hosts in ("/etc/ssh/ssh_known_hosts", "/etc/openssh/ssh_known_hosts"):
try:
# TODO: check if we need to look at several possible locations, possible for loop
ssh.load_system_host_keys(ssh_known_hosts)
break
except IOError:
pass # file was not found, but not required to function
ssh.load_system_host_keys()
sock_kwarg = self._parse_proxy_command(port)
ssh.set_missing_host_key_policy(MyAddPolicy(self._new_stdin, self))
allow_agent = True
if self._play_context.password is not None:
allow_agent = False
try:
key_filename = None
if self._play_context.private_key_file:
key_filename = os.path.expanduser(self._play_context.private_key_file)
ssh.connect(
self._play_context.remote_addr.lower(),
username=self._play_context.remote_user,
allow_agent=allow_agent,
look_for_keys=self.get_option('look_for_keys'),
key_filename=key_filename,
password=self._play_context.password,
timeout=self._play_context.timeout,
port=port,
**sock_kwarg
)
except paramiko.ssh_exception.BadHostKeyException as e:
raise AnsibleConnectionFailure('host key mismatch for %s' % e.hostname)
except Exception as e:
msg = str(e)
if "PID check failed" in msg:
raise AnsibleError("paramiko version issue, please upgrade paramiko on the machine running ansible")
elif "Private key file is encrypted" in msg:
msg = 'ssh %s@%s:%s : %s\nTo connect as a different user, use -u <username>.' % (
self._play_context.remote_user, self._play_context.remote_addr, port, msg)
raise AnsibleConnectionFailure(msg)
else:
raise AnsibleConnectionFailure(msg)
return ssh
def exec_command(self, cmd, in_data=None, sudoable=True):
''' run a command on the remote host '''
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
if in_data:
raise AnsibleError("Internal Error: this module does not support optimized module pipelining")
bufsize = 4096
try:
self.ssh.get_transport().set_keepalive(5)
chan = self.ssh.get_transport().open_session()
except Exception as e:
msg = "Failed to open session"
if len(str(e)) > 0:
msg += ": %s" % str(e)
raise AnsibleConnectionFailure(msg)
# sudo usually requires a PTY (cf. requiretty option), therefore
# we give it one by default (pty=True in ansible.cfg), and we try
# to initialise from the calling environment when sudoable is enabled
if self.get_option('pty') and sudoable:
chan.get_pty(term=os.getenv('TERM', 'vt100'), width=int(os.getenv('COLUMNS', 0)), height=int(os.getenv('LINES', 0)))
display.vvv("EXEC %s" % cmd, host=self._play_context.remote_addr)
cmd = to_bytes(cmd, errors='surrogate_or_strict')
no_prompt_out = b''
no_prompt_err = b''
become_output = b''
try:
chan.exec_command(cmd)
if self._play_context.prompt:
passprompt = False
                become_success = False
                while not (become_success or passprompt):
display.debug('Waiting for Privilege Escalation input')
chunk = chan.recv(bufsize)
display.debug("chunk is: %s" % chunk)
if not chunk:
if b'unknown user' in become_output:
raise AnsibleError('user %s does not exist' % self._play_context.become_user)
else:
break
# raise AnsibleError('ssh connection closed waiting for password prompt')
become_output += chunk
# need to check every line because we might get lectured
# and we might get the middle of a line in a chunk
for l in become_output.splitlines(True):
if self.check_become_success(l):
                            become_success = True
break
elif self.check_password_prompt(l):
passprompt = True
break
if passprompt:
if self._play_context.become and self._play_context.become_pass:
chan.sendall(to_bytes(self._play_context.become_pass) + b'\n')
else:
raise AnsibleError("A password is required but none was supplied")
else:
no_prompt_out += become_output
no_prompt_err += become_output
except socket.timeout:
            raise AnsibleError('ssh timed out waiting for privilege escalation.\n' + to_native(become_output))
stdout = b''.join(chan.makefile('rb', bufsize))
stderr = b''.join(chan.makefile_stderr('rb', bufsize))
        return (chan.recv_exit_status(), no_prompt_out + stdout, no_prompt_err + stderr)
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
super(Connection, self).put_file(in_path, out_path)
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
raise AnsibleFileNotFound("file or module does not exist: %s" % in_path)
try:
self.sftp = self.ssh.open_sftp()
except Exception as e:
raise AnsibleError("failed to open a SFTP connection (%s)" % e)
try:
self.sftp.put(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
except IOError:
raise AnsibleError("failed to transfer file to %s" % out_path)
def _connect_sftp(self):
cache_key = "%s__%s__" % (self._play_context.remote_addr, self._play_context.remote_user)
if cache_key in SFTP_CONNECTION_CACHE:
return SFTP_CONNECTION_CACHE[cache_key]
else:
result = SFTP_CONNECTION_CACHE[cache_key] = self._connect().ssh.open_sftp()
return result
def fetch_file(self, in_path, out_path):
''' save a remote file to the specified path '''
super(Connection, self).fetch_file(in_path, out_path)
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
try:
self.sftp = self._connect_sftp()
except Exception as e:
raise AnsibleError("failed to open a SFTP connection (%s)" % to_native(e))
try:
self.sftp.get(to_bytes(in_path, errors='surrogate_or_strict'), to_bytes(out_path, errors='surrogate_or_strict'))
except IOError:
raise AnsibleError("failed to transfer file from %s" % in_path)
def _any_keys_added(self):
for hostname, keys in iteritems(self.ssh._host_keys):
for keytype, key in iteritems(keys):
added_this_time = getattr(key, '_added_by_ansible_this_time', False)
if added_this_time:
return True
return False
def _save_ssh_host_keys(self, filename):
'''
not using the paramiko save_ssh_host_keys function as we want to add new SSH keys at the bottom so folks
don't complain about it :)
'''
if not self._any_keys_added():
return False
path = os.path.expanduser("~/.ssh")
makedirs_safe(path)
f = open(filename, 'w')
for hostname, keys in iteritems(self.ssh._host_keys):
for keytype, key in iteritems(keys):
# was f.write
added_this_time = getattr(key, '_added_by_ansible_this_time', False)
if not added_this_time:
f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
for hostname, keys in iteritems(self.ssh._host_keys):
for keytype, key in iteritems(keys):
added_this_time = getattr(key, '_added_by_ansible_this_time', False)
if added_this_time:
f.write("%s %s %s\n" % (hostname, keytype, key.get_base64()))
f.close()
def close(self):
''' terminate the connection '''
cache_key = self._cache_key()
SSH_CONNECTION_CACHE.pop(cache_key, None)
SFTP_CONNECTION_CACHE.pop(cache_key, None)
if hasattr(self, 'sftp'):
if self.sftp is not None:
self.sftp.close()
if self.get_option('host_key_checking') and self.get_option('record_host_keys') and self._any_keys_added():
# add any new SSH host keys -- warning -- this could be slow
# (This doesn't acquire the connection lock because it needs
# to exclude only other known_hosts writers, not connections
# that are starting up.)
lockfile = self.keyfile.replace("known_hosts", ".known_hosts.lock")
dirname = os.path.dirname(self.keyfile)
makedirs_safe(dirname)
KEY_LOCK = open(lockfile, 'w')
fcntl.lockf(KEY_LOCK, fcntl.LOCK_EX)
try:
# just in case any were added recently
self.ssh.load_system_host_keys()
self.ssh._host_keys.update(self.ssh._system_host_keys)
# gather information about the current key file, so
# we can ensure the new file has the correct mode/owner
key_dir = os.path.dirname(self.keyfile)
if os.path.exists(self.keyfile):
key_stat = os.stat(self.keyfile)
mode = key_stat.st_mode
uid = key_stat.st_uid
gid = key_stat.st_gid
else:
mode = 33188
uid = os.getuid()
gid = os.getgid()
# Save the new keys to a temporary file and move it into place
# rather than rewriting the file. We set delete=False because
# the file will be moved into place rather than cleaned up.
tmp_keyfile = tempfile.NamedTemporaryFile(dir=key_dir, delete=False)
os.chmod(tmp_keyfile.name, mode & 0o7777)
os.chown(tmp_keyfile.name, uid, gid)
self._save_ssh_host_keys(tmp_keyfile.name)
tmp_keyfile.close()
os.rename(tmp_keyfile.name, self.keyfile)
            except Exception:
# unable to save keys, including scenario when key was invalid
# and caught earlier
traceback.print_exc()
fcntl.lockf(KEY_LOCK, fcntl.LOCK_UN)
self.ssh.close()
|
mbedmicro/pyOCD
|
refs/heads/master
|
test/unit/test_memory_map.py
|
3
|
# pyOCD debugger
# Copyright (c) 2016-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import logging
import copy
from pyocd.core.memory_map import (
MemoryType,
check_range,
MemoryRange,
MemoryRegion,
MemoryMap,
FlashRegion,
RomRegion,
RamRegion,
DeviceRegion,
DefaultFlashWeights
)
@pytest.fixture(scope='function')
def flash():
return FlashRegion(start=0, length=1*1024, blocksize=0x100, name='flash', is_boot_memory=True)
@pytest.fixture(scope='function')
def rom():
return RomRegion(start=0x1c000000, length=16*1024, name='rom')
@pytest.fixture(scope='function')
def ram1():
return RamRegion(start=0x20000000, length=1*1024, name='ram', is_default=False)
@pytest.fixture(scope='function')
def ram2():
return RamRegion(start=0x20000400, length=1*1024, name='ram2', is_cacheable=False)
@pytest.fixture(scope='function')
def ram_alias():
return RamRegion(start=0x30000400, length=1*1024, name='ram2_alias', alias='ram2', is_cacheable=False)
@pytest.fixture(scope='function')
def memmap(flash, rom, ram1, ram2):
return MemoryMap(flash, rom, ram1, ram2)
@pytest.fixture(scope='function')
def memmap2(flash, rom, ram1, ram2, ram_alias):
return MemoryMap(flash, rom, ram1, ram2, ram_alias)
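# Overall address layout established by the fixtures above (ends inclusive):
#   flash: 0x00000000-0x000003ff  boot memory, 0x100 byte sectors
#   rom:   0x1c000000-0x1c003fff
#   ram:   0x20000000-0x200003ff  is_default=False
#   ram2:  0x20000400-0x200007ff  non-cacheable
#   ram2_alias (memmap2 only): 0x30000400-0x300007ff, aliased to ram2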
class TestCheckRange:
def test_1(self):
assert check_range(0, end=0x1ff) == (0, 0x1ff)
def test_2(self):
assert check_range(0, length=0x200) == (0, 0x1ff)
def test_3(self):
with pytest.raises(AssertionError):
check_range(None, end=100)
def test_4(self):
with pytest.raises(AssertionError):
check_range(0x100, end=None)
def test_5(self):
with pytest.raises(AssertionError):
check_range(0x100, length=None)
class TestRange:
def test_empty_range_1(self):
range = MemoryRange(start=0x1000, length=0)
assert range.start == 0x1000
assert range.end == 0xfff
assert range.length == 0
def test_empty_range_2(self):
range = MemoryRange(start=0x1000, end=0xfff)
assert range.start == 0x1000
assert range.end == 0xfff
assert range.length == 0
def test_eq(self):
assert MemoryRange(0, length=1000) == MemoryRange(0, length=1000)
def test_lt(self):
assert MemoryRange(0, length=1000) < MemoryRange(1000, length=1000)
def test_gt(self):
assert MemoryRange(1000, length=1000) > MemoryRange(0, length=1000)
def test_sort(self, ram1, ram2, flash, rom):
regionList = [ram2, rom, flash, ram1]
sortedRegionList = sorted(regionList)
assert sortedRegionList == [flash, rom, ram1, ram2]
def test_inplace_sort(self, ram1, ram2, flash, rom):
regionList = [ram2, rom, flash, ram1]
regionList.sort()
assert regionList == [flash, rom, ram1, ram2]
class TestHash:
def test_range_neq(self):
a = MemoryRange(0, 0x1000)
b = MemoryRange(10, 20)
assert hash(a) != hash(b)
def test_range_eq(self):
a = MemoryRange(0, 0x1000)
b = MemoryRange(0, 0x1000)
assert hash(a) == hash(b)
def test_range_w_region_neq(self, ram1, rom):
a = MemoryRange(0, 0x1000, region=ram1)
b = MemoryRange(0x5000, length=200, region=rom)
assert hash(a) != hash(b)
a = MemoryRange(0, 0x1000, region=ram1)
b = MemoryRange(0, 0x1000, region=rom)
assert hash(a) != hash(b)
    def test_range_w_region_eq(self, ram1, rom):
a = MemoryRange(0, 0x1000, region=ram1)
b = MemoryRange(0, 0x1000, region=ram1)
assert hash(a) == hash(b)
# MemoryRegion test cases.
class TestMemoryRegion:
def test_empty_region_1(self):
with pytest.raises(AssertionError):
rgn = MemoryRegion(start=0x1000, length=0)
def test_empty_region_2(self):
with pytest.raises(AssertionError):
rgn = MemoryRegion(start=0x1000, end=0xfff)
def test_default_name(self):
rgn = RamRegion(start=0x1000, end=0x1fff)
assert rgn.name == 'ram'
rgn = RomRegion(start=0x1000, end=0x1fff)
assert rgn.name == 'rom'
rgn = FlashRegion(start=0x1000, end=0x1fff, blocksize=256)
assert rgn.name == 'flash'
rgn = DeviceRegion(start=0x1000, end=0x1fff)
assert rgn.name == 'device'
def test_block_sector_size(self):
rgn = FlashRegion(start=0x1000, end=0x1fff, blocksize=256)
assert rgn.blocksize == 256
assert rgn.sector_size == 256
rgn = FlashRegion(start=0x1000, end=0x1fff, sector_size=256)
assert rgn.blocksize == 256
assert rgn.sector_size == 256
def test_flash_attrs(self, flash):
assert flash.type == MemoryType.FLASH
assert flash.start == 0
assert flash.end == 0x3ff
assert flash.length == 0x400
assert flash.blocksize == 0x100
assert flash.page_size == 0x100
assert flash.phrase_size == 0x100
assert flash.erase_all_weight == DefaultFlashWeights.ERASE_ALL_WEIGHT
assert flash.erase_sector_weight == DefaultFlashWeights.ERASE_SECTOR_WEIGHT
assert flash.program_page_weight == DefaultFlashWeights.PROGRAM_PAGE_WEIGHT
assert flash.name == 'flash'
assert flash.is_flash
assert not flash.is_ram
assert not flash.is_rom
assert flash.is_boot_memory
assert flash.is_cacheable
assert flash.is_powered_on_boot
assert flash.is_readable
assert not flash.is_writable
assert flash.is_executable
def test_custom_flash_attrs(self):
flash = FlashRegion(start=0x01000000, length=4*1024, blocksize=0x800, name='myflash',
is_boot_memory=False, page_size=256, phrase_size=4,
erase_all_weight=1.2, erase_sector_weight=0.1, program_page_weight=0.25)
assert flash.type == MemoryType.FLASH
assert flash.start == 0x01000000
assert flash.end == 0x01000fff
assert flash.length == 0x1000
assert flash.blocksize == 0x800
assert flash.page_size == 256
assert flash.phrase_size == 4
assert flash.erase_all_weight == 1.2
assert flash.erase_sector_weight == 0.1
assert flash.program_page_weight == 0.25
assert flash.name == 'myflash'
assert flash.is_flash
assert not flash.is_ram
assert not flash.is_rom
assert not flash.is_boot_memory
assert flash.is_cacheable
assert flash.is_powered_on_boot
assert flash.is_readable
assert not flash.is_writable
assert flash.is_executable
def test_rom_attrs(self, rom):
assert rom.type == MemoryType.ROM
assert rom.start == 0x1c000000
assert rom.end == 0x1c003fff
assert rom.length == 0x4000
with pytest.raises(AttributeError):
rom.blocksize
assert rom.name == 'rom'
assert not rom.is_flash
assert not rom.is_ram
assert rom.is_rom
assert not rom.is_boot_memory
assert rom.is_cacheable
assert rom.is_powered_on_boot
assert rom.is_readable
assert not rom.is_writable
assert rom.is_executable
def test_ram1_attrs(self, ram1):
assert ram1.type == MemoryType.RAM
assert ram1.start == 0x20000000
assert ram1.end == 0x200003ff
assert ram1.length == 0x400
with pytest.raises(AttributeError):
ram1.blocksize
assert ram1.name == 'ram'
assert not ram1.is_flash
assert ram1.is_ram
assert not ram1.is_rom
assert not ram1.is_boot_memory
assert ram1.is_cacheable
assert ram1.is_powered_on_boot
assert ram1.is_readable
assert ram1.is_writable
assert ram1.is_executable
def test_ram2_attrs(self, ram2):
assert ram2.type == MemoryType.RAM
assert ram2.start == 0x20000400
assert ram2.end == 0x200007ff
assert ram2.length == 0x400
with pytest.raises(AttributeError):
ram2.blocksize
assert ram2.name == 'ram2'
assert not ram2.is_flash
assert ram2.is_ram
assert not ram2.is_rom
assert not ram2.is_boot_memory
assert not ram2.is_cacheable
assert ram2.is_powered_on_boot
assert ram2.is_readable
assert ram2.is_writable
assert ram2.is_executable
def test_flash_range(self, flash):
assert flash.contains_address(0)
assert flash.contains_address(0x3ff)
assert not flash.contains_address(0x400)
assert flash.contains_range(0, length=0x400)
assert flash.contains_range(0, end=0x3ff)
assert flash.contains_range(0x100, length=0x100)
assert not flash.contains_range(0x300, end=0x720)
assert flash.intersects_range(0, length=0x100)
assert flash.intersects_range(0x300, end=0x720)
def test_intersects(self, ram1):
assert not ram1.intersects_range(0, length=10)
assert not ram1.intersects_range(0xf0000000, end=0xffffffff)
assert ram1.intersects_range(0x100000, end=0x20000010)
assert ram1.intersects_range(0x20000010, end=0x30000000)
assert ram1.intersects_range(0x20000040, length=0x1000)
assert ram1.intersects_range(0x20000020, end=0x20000030)
assert ram1.intersects_range(0x20000020, length=0x10)
assert ram1.intersects_range(0x1fff0000, end=0x20001000)
assert ram1.intersects_range(0x1ffff000, length=0x40000)
def test_copy_ram(self, ram1):
ramcpy = copy.copy(ram1)
assert ramcpy.type == ram1.type
assert ramcpy.start == ram1.start
assert ramcpy.end == ram1.end
assert ramcpy.length == ram1.length
assert ramcpy.name == ram1.name
assert ramcpy == ram1
def test_copy_flash(self, flash):
flashcpy = copy.copy(flash)
assert flashcpy == flash
def test_eq(self, flash, ram1):
assert flash != ram1
a = RamRegion(name='a', start=0x1000, length=0x2000)
b = RamRegion(name='a', start=0x1000, length=0x2000)
assert a == b
# MemoryMap test cases.
class TestMemoryMap:
def test_empty_map(self):
memmap = MemoryMap()
assert memmap.region_count == 0
assert memmap.regions == []
assert memmap.get_boot_memory() is None
assert memmap.get_region_for_address(0x1000) is None
assert not memmap.is_valid_address(0x2000)
assert memmap.get_contained_regions(0, end=0xffffffff) == []
assert memmap.get_intersecting_regions(0, end=0xffffffff) == []
def test_regions(self, memmap):
rgns = memmap.regions
# Count
assert len(rgns) == 4
assert memmap.region_count == 4
# Sorted order
        assert rgns[0].start < rgns[1].start < rgns[2].start < rgns[3].start
def test_boot_mem(self, memmap):
bootmem = memmap.get_boot_memory()
assert bootmem is not None
assert bootmem.name == 'flash'
assert bootmem.start == 0
assert bootmem.end == 0x3ff
        assert bootmem.is_boot_memory
def test_rgn_for_addr(self, memmap):
assert memmap.get_region_for_address(0).name == 'flash'
assert memmap.get_region_for_address(0x20000000).name == 'ram'
assert memmap.get_region_for_address(0x20000500).name == 'ram2'
def test_valid(self, memmap):
assert memmap.is_valid_address(0)
assert memmap.is_valid_address(0x200)
assert memmap.is_valid_address(0x3ff)
assert not memmap.is_valid_address(0x400)
assert not memmap.is_valid_address(0x1bffffff)
assert memmap.is_valid_address(0x1c000000)
assert not memmap.is_valid_address(0x1fffffff)
assert memmap.is_valid_address(0x20000000)
assert memmap.is_valid_address(0x20000001)
assert memmap.is_valid_address(0x200003ff)
assert memmap.is_valid_address(0x20000400)
assert memmap.is_valid_address(0x200007ff)
assert not memmap.is_valid_address(0x20000800)
def test_contained_1(self, memmap):
rgns = memmap.get_contained_regions(0, 0x100)
assert len(rgns) == 0
def test_contained_2(self, memmap):
rgns = memmap.get_contained_regions(0x20000000, 0x20000600)
assert len(rgns) == 1
def test_intersect_1(self, memmap):
rgns = memmap.get_intersecting_regions(0, 0x100)
assert len(rgns) == 1
def test_intersect_2(self, memmap):
rgns = memmap.get_intersecting_regions(0x20000200, end=0x20000700)
assert len(rgns) == 2
def test_x(self):
ramrgn = RamRegion(name='core0 ram', start=0x1fffa000, length=0x18000)
assert ramrgn.contains_range(0x1fffc9f8, end=0x1fffc9fc)
assert ramrgn.intersects_range(0x1fffc9f8, end=0x1fffc9fc)
dualMap = MemoryMap(
FlashRegion(name='flash', start=0, length=0x80000, blocksize=0x800, is_boot_memory=True),
RomRegion(name='core1 imem alias', start=0x1d200000, length=0x40000),
ramrgn,
RomRegion(name='core1 imem', start=0x2d200000, length=0x40000),
RamRegion(name='core1 dmem', start=0x2d300000, length=0x8000),
RamRegion(name='usb ram', start=0x40100000, length=0x800)
)
rgns = dualMap.get_intersecting_regions(0x1fffc9f8, end=0x1fffc9fc)
assert len(rgns) > 0
def test_get_type_iter(self, memmap, flash, rom, ram1, ram2):
assert list(memmap.iter_matching_regions(type=MemoryType.FLASH)) == [flash]
assert list(memmap.iter_matching_regions(type=MemoryType.ROM)) == [rom]
assert list(memmap.iter_matching_regions(type=MemoryType.RAM)) == [ram1, ram2]
def test_match_iter(self, memmap, flash, ram1, ram2, ram_alias):
assert list(memmap.iter_matching_regions(blocksize=0x100)) == [flash]
assert list(memmap.iter_matching_regions(start=0x20000000)) == [ram1]
def test_first_match(self, memmap, flash, ram2):
assert memmap.get_first_matching_region(blocksize=0x100) == flash
assert memmap.get_first_matching_region(length=1024, is_cacheable=False) == ram2
def test_alias(self, memmap2, ram2, ram_alias):
assert ram_alias.alias is ram2
def test_get_default(self, memmap, flash, ram2):
assert memmap.get_default_region_of_type(MemoryType.FLASH) == flash
assert memmap.get_default_region_of_type(MemoryType.RAM) == ram2
def test_index_by_num(self, memmap, flash, ram2):
assert memmap[0] == flash
assert memmap[3] == ram2
def test_index_by_name(self, memmap, rom, ram2):
assert memmap['rom'] == rom
assert memmap['ram2'] == ram2
def test_clone(self, memmap):
mapcpy = memmap.clone()
assert id(mapcpy) != id(memmap)
assert id(mapcpy.get_first_matching_region(type=MemoryType.RAM)) != \
id(memmap.get_first_matching_region(type=MemoryType.RAM))
assert mapcpy == memmap
|
Addepar/buck
|
refs/heads/master
|
python-dsl/buck_parser/buck_test.py
|
5
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
import shutil
import tempfile
import unittest
from pathlib import Path, PurePosixPath, PureWindowsPath
from .buck import (
BuildFileContext,
LazyBuildEnvPartial,
flatten_dicts,
get_mismatched_args,
host_info,
subdir_glob,
)
from .glob_internal import glob_internal, path_component_starts_with_dot
from .glob_watchman import format_watchman_query_params
class FakePathMixin(object):
def glob(self, pattern):
# Python glob supports unix paths on windows out of the box
norm_pattern = pattern.replace("\\", "/")
return self.glob_results.get(norm_pattern)
def is_file(self):
return True
class FakePosixPath(FakePathMixin, PurePosixPath):
pass
class FakeWindowsPath(FakePathMixin, PureWindowsPath):
pass
def fake_path(fake_path_class, path, glob_results={}):
# Path does magic in __new__ with its args; it's hard to add more without
# changing that class. So we use a wrapper function to diddle with
# FakePath's members.
result = fake_path_class(path)
result.glob_results = {}
for pattern, paths in glob_results.items():
result.glob_results[pattern] = [result / fake_path_class(p) for p in paths]
return result
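# Illustrative use of fake_path (paths are hypothetical):
#   p = fake_path(FakePosixPath, 'foo', glob_results={'*.java': ['A.java']})
#   p.glob('*.java') -> [FakePosixPath('foo/A.java')]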
class TestBuckPlatform(unittest.TestCase):
def test_lazy_build_env_partial(self):
def cobol_binary(name, deps=[], build_env=None):
return (name, deps, build_env)
testLazy = LazyBuildEnvPartial(cobol_binary)
testLazy.build_env = {}
self.assertEqual(
("HAL", [1, 2, 3], {}), testLazy.invoke(name="HAL", deps=[1, 2, 3])
)
testLazy.build_env = {"abc": 789}
self.assertEqual(
("HAL", [1, 2, 3], {"abc": 789}),
testLazy.invoke(name="HAL", deps=[1, 2, 3]),
)
class TestBuckGlobMixin(object):
def do_glob(self, *args, **kwargs):
# subclasses can override this to test a different glob implementation
return glob_internal(*args, **kwargs)
def test_glob_includes_simple(self):
search_base = self.fake_path(
"foo", glob_results={"*.java": ["A.java", "B.java"]}
)
self.assertGlobMatches(
["A.java", "B.java"],
self.do_glob(
includes=["*.java"],
excludes=[],
project_root_relative_excludes=[],
include_dotfiles=False,
search_base=search_base,
project_root=".",
),
)
def test_glob_includes_sort(self):
search_base = self.fake_path(
"foo",
glob_results={"*.java": ["A.java", "E.java", "D.java", "C.java", "B.java"]},
)
self.assertGlobMatches(
["A.java", "B.java", "C.java", "D.java", "E.java"],
self.do_glob(
includes=["*.java"],
excludes=[],
project_root_relative_excludes=[],
include_dotfiles=False,
search_base=search_base,
project_root=".",
),
)
def test_glob_includes_multi(self):
search_base = self.fake_path(
"foo",
glob_results={
"bar/*.java": ["bar/A.java", "bar/B.java"],
"baz/*.java": ["baz/C.java", "baz/D.java"],
},
)
self.assertGlobMatches(
["bar/A.java", "bar/B.java", "baz/C.java", "baz/D.java"],
self.do_glob(
includes=["bar/*.java", "baz/*.java"],
excludes=[],
project_root_relative_excludes=[],
include_dotfiles=False,
search_base=search_base,
project_root=".",
),
)
def test_glob_excludes_double_star(self):
search_base = self.fake_path(
"foo", glob_results={"**/*.java": ["A.java", "B.java", "Test.java"]}
)
self.assertGlobMatches(
["A.java", "B.java"],
self.do_glob(
includes=["**/*.java"],
excludes=["**/*Test.java"],
project_root_relative_excludes=[],
include_dotfiles=False,
search_base=search_base,
project_root=".",
),
)
def test_glob_excludes_multi(self):
search_base = self.fake_path(
"foo",
glob_results={
"bar/*.java": ["bar/A.java", "bar/B.java"],
"baz/*.java": ["baz/C.java", "baz/D.java"],
},
)
self.assertGlobMatches(
["bar/B.java", "baz/D.java"],
self.do_glob(
includes=["bar/*.java", "baz/*.java"],
excludes=["*/[AC].java"],
project_root_relative_excludes=[],
include_dotfiles=False,
search_base=search_base,
project_root=".",
),
)
def test_glob_excludes_relative(self):
search_base = self.fake_path(
"foo",
glob_results={"**/*.java": ["foo/A.java", "foo/bar/B.java", "bar/C.java"]},
)
self.assertGlobMatches(
["foo/A.java", "foo/bar/B.java"],
self.do_glob(
includes=["**/*.java"],
excludes=["bar/*.java"],
project_root_relative_excludes=[],
include_dotfiles=False,
search_base=search_base,
project_root=".",
),
)
def test_glob_project_root_relative_excludes_relative(self):
search_base = self.fake_path(
"foo",
glob_results={"**/*.java": ["foo/A.java", "foo/bar/B.java", "bar/C.java"]},
)
self.assertGlobMatches(
["bar/C.java"],
self.do_glob(
includes=["**/*.java"],
excludes=[],
project_root_relative_excludes=["foo/foo/**"],
include_dotfiles=False,
search_base=search_base,
project_root=".",
),
)
def test_glob_includes_skips_dotfiles(self):
search_base = self.fake_path(
"foo", glob_results={"*.java": ["A.java", ".B.java"]}
)
self.assertGlobMatches(
["A.java"],
self.do_glob(
includes=["*.java"],
excludes=[],
project_root_relative_excludes=[],
include_dotfiles=False,
search_base=search_base,
project_root=".",
),
)
def test_glob_includes_skips_dot_directories(self):
search_base = self.fake_path(
"foo", glob_results={"*.java": ["A.java", ".test/B.java"]}
)
self.assertGlobMatches(
["A.java"],
self.do_glob(
includes=["*.java"],
excludes=[],
project_root_relative_excludes=[],
include_dotfiles=False,
search_base=search_base,
project_root=".",
),
)
def test_glob_includes_does_not_skip_dotfiles_if_include_dotfiles(self):
search_base = self.fake_path(
"foo", glob_results={"*.java": ["A.java", ".B.java"]}
)
self.assertGlobMatches(
[".B.java", "A.java"],
self.do_glob(
includes=["*.java"],
excludes=[],
project_root_relative_excludes=[],
include_dotfiles=True,
search_base=search_base,
project_root=".",
),
)
def test_explicit_exclude_with_file_separator_excludes(self):
search_base = self.fake_path(
"foo",
glob_results={"java/**/*.java": ["java/Include.java", "java/Exclude.java"]},
)
self.assertGlobMatches(
["java/Include.java"],
self.do_glob(
includes=["java/**/*.java"],
excludes=["java/Exclude.java"],
project_root_relative_excludes=[],
include_dotfiles=False,
search_base=search_base,
project_root=".",
),
)
class TestBuckSubdirGlobMixin(object):
def do_subdir_glob(self, *args, **kwargs):
# subclasses can override this to test a different glob implementation
return subdir_glob(*args, **kwargs)
def test_subdir_glob(self):
build_env = BuildFileContext(
self.fake_path(""),
None,
"BUCK",
None,
None,
None,
[],
None,
None,
None,
None,
False,
False,
{},
)
search_base = self.fake_path(
"foo",
glob_results={
"lib/bar/*.h": ["lib/bar/A.h", "lib/bar/B.h"],
"lib/baz/*.h": ["lib/baz/C.h", "lib/baz/D.h"],
},
)
self.assertGlobMatches(
{
"bar/B.h": "lib/bar/B.h",
"bar/A.h": "lib/bar/A.h",
"baz/D.h": "lib/baz/D.h",
"baz/C.h": "lib/baz/C.h",
},
self.do_subdir_glob(
[("lib", "bar/*.h"), ("lib", "baz/*.h")],
build_env=build_env,
search_base=search_base,
),
)
def test_subdir_glob_with_prefix(self):
build_env = BuildFileContext(
self.fake_path(""),
None,
"BUCK",
None,
None,
None,
[],
None,
None,
None,
None,
False,
False,
{},
)
search_base = self.fake_path(
"foo", glob_results={"lib/bar/*.h": ["lib/bar/A.h", "lib/bar/B.h"]}
)
self.assertGlobMatches(
{"Prefix/bar/B.h": "lib/bar/B.h", "Prefix/bar/A.h": "lib/bar/A.h"},
self.do_subdir_glob(
[("lib", "bar/*.h")],
prefix="Prefix",
build_env=build_env,
search_base=search_base,
),
)
class TestBuckPosix(TestBuckGlobMixin, TestBuckSubdirGlobMixin, unittest.TestCase):
@staticmethod
def fake_path(*args, **kwargs):
return fake_path(FakePosixPath, *args, **kwargs)
def assertGlobMatches(self, expected, actual):
self.assertEqual(expected, actual)
class TestBuckWindows(TestBuckGlobMixin, TestBuckSubdirGlobMixin, unittest.TestCase):
@staticmethod
def fake_path(*args, **kwargs):
return fake_path(FakeWindowsPath, *args, **kwargs)
def assertGlobMatches(self, expected, actual):
# Fix the path separator to make test writing easier
fixed_expected = None
if isinstance(expected, list):
fixed_expected = []
for path in expected:
fixed_expected.append(path.replace("/", "\\"))
else:
fixed_expected = {}
for key, value in expected.items():
fixed_expected.update(
{key.replace("/", "\\"): value.replace("/", "\\")}
)
self.assertEqual(fixed_expected, actual)
class TestBuck(unittest.TestCase):
def test_glob_double_star_integration(self):
d = tempfile.mkdtemp()
try:
subdir = os.path.join(d, "b", "a", "c", "a")
os.makedirs(subdir)
f = open(os.path.join(subdir, "A.java"), "w")
f.close()
f = open(os.path.join(subdir, "B.java"), "w")
f.close()
f = open(os.path.join(subdir, "Test.java"), "w")
f.close()
f = open(os.path.join(subdir, ".tmp.java"), "w")
f.close()
os.makedirs(os.path.join(subdir, "NotAFile.java"))
self.assertEquals(
[
os.path.join("b", "a", "c", "a", "A.java"),
os.path.join("b", "a", "c", "a", "B.java"),
],
glob_internal(
includes=["b/a/**/*.java"],
excludes=["**/*Test.java"],
project_root_relative_excludes=[],
include_dotfiles=False,
search_base=Path(d),
project_root=Path(d),
),
)
finally:
shutil.rmtree(d)
def test_glob_star_does_not_zero_match_dotfile(self):
# Verify the behavior of the "*." pattern. Note that in the shell,
# "*." does not match dotfiles by default:
#
# $ ls ~/*.gitconfig
# ls: /Users/mbolin/*.gitconfig: No such file or directory
#
# By comparison, glob() from the pathlib code in Python 3 will:
#
# >>> list(pathlib.Path(os.getenv('HOME')).glob('*.gitconfig'))
# [PosixPath('/Users/mbolin/.gitconfig')]
#
# Buck should follow what the shell does here. Note this must also
# hold true when Watchman is used to implement glob().
d = tempfile.mkdtemp()
try:
a_subdir = os.path.join(d, "a")
os.makedirs(a_subdir)
f = open(os.path.join(a_subdir, ".project.toml"), "w")
f.close()
f = open(os.path.join(a_subdir, "..project.toml"), "w")
f.close()
f = open(os.path.join(a_subdir, ".foo.project.toml"), "w")
f.close()
f = open(os.path.join(a_subdir, "Buck.project.toml"), "w")
f.close()
b_subdir = os.path.join(d, "b")
os.makedirs(b_subdir)
f = open(os.path.join(b_subdir, "B.project.toml"), "w")
f.close()
f = open(os.path.join(b_subdir, "B..project.toml"), "w")
f.close()
f = open(os.path.join(b_subdir, "Buck.project.toml"), "w")
f.close()
def do_glob(pattern, include_dotfiles):
return glob_internal(
includes=[pattern],
excludes=[],
project_root_relative_excludes=[],
include_dotfiles=include_dotfiles,
search_base=Path(d),
project_root=Path(d),
)
# Note that if include_dotfiles=False, the "*" in "*." will never
# do a zero-length match or match a sequence that starts with "."
# because "*." appears at the start of a path component boundary.
self.assertEquals(
[os.path.join("a", "Buck.project.toml")],
do_glob("a/*.project.toml", include_dotfiles=False),
)
self.assertEquals(
[
os.path.join("a", "..project.toml"),
os.path.join("a", ".foo.project.toml"),
os.path.join("a", ".project.toml"),
os.path.join("a", "Buck.project.toml"),
],
do_glob("a/*.project.toml", include_dotfiles=True),
)
# Note that "*." behaves differently if it is not at the start of a
# path component boundary.
self.assertEquals(
[
os.path.join("b", "B..project.toml"),
os.path.join("b", "B.project.toml"),
os.path.join("b", "Buck.project.toml"),
],
do_glob("b/B*.project.toml", include_dotfiles=False),
)
self.assertEquals(
[
os.path.join("b", "B..project.toml"),
os.path.join("b", "B.project.toml"),
os.path.join("b", "Buck.project.toml"),
],
do_glob("b/B*.project.toml", include_dotfiles=True),
)
finally:
shutil.rmtree(d)
def test_case_preserved(self):
d = tempfile.mkdtemp()
try:
subdir = os.path.join(d, "java")
os.makedirs(subdir)
open(os.path.join(subdir, "Main.java"), "w").close()
self.assertEquals(
[os.path.join("java", "Main.java")],
glob_internal(
includes=["java/Main.java"],
excludes=[],
project_root_relative_excludes=[],
include_dotfiles=False,
search_base=Path(d),
project_root=Path(d),
),
)
finally:
shutil.rmtree(d)
def test_watchman_query_params_includes(self):
query_params = format_watchman_query_params(
["**/*.java"], [], False, "/path/to/glob", False
)
self.assertEquals(
{
"relative_root": "/path/to/glob",
"path": [""],
"fields": ["name"],
"expression": [
"allof",
["anyof", ["type", "f"], ["type", "l"]],
"exists",
["anyof", ["match", "**/*.java", "wholename", {}]],
],
},
query_params,
)
def test_watchman_query_params_includes_and_excludes(self):
query_params = format_watchman_query_params(
["**/*.java"], ["**/*Test.java"], False, "/path/to/glob", False
)
self.assertEquals(
{
"relative_root": "/path/to/glob",
"path": [""],
"fields": ["name"],
"expression": [
"allof",
["anyof", ["type", "f"], ["type", "l"]],
["not", ["anyof", ["match", "**/*Test.java", "wholename", {}]]],
"exists",
["anyof", ["match", "**/*.java", "wholename", {}]],
],
},
query_params,
)
def test_watchman_query_params_glob_generator(self):
query_params = format_watchman_query_params(
["**/*.java"], ["**/*Test.java"], False, "/path/to/glob", True
)
self.assertEquals(
{
"relative_root": "/path/to/glob",
"glob": ["**/*.java"],
"glob_includedotfiles": False,
"fields": ["name"],
"expression": [
"allof",
["anyof", ["type", "f"], ["type", "l"]],
["not", ["anyof", ["match", "**/*Test.java", "wholename", {}]]],
],
},
query_params,
)
def test_flatten_dicts_overrides_earlier_keys_with_later_ones(self):
base = {"a": "foo", "b": "bar"}
override = {"a": "baz"}
override2 = {"a": 42, "c": "new"}
self.assertEquals({"a": "baz", "b": "bar"}, flatten_dicts(base, override))
self.assertEquals(
{"a": 42, "b": "bar", "c": "new"}, flatten_dicts(base, override, override2)
)
# assert none of the input dicts were changed:
self.assertEquals({"a": "foo", "b": "bar"}, base)
self.assertEquals({"a": "baz"}, override)
self.assertEquals({"a": 42, "c": "new"}, override2)
def test_path_component_starts_with_dot(self):
self.assertFalse(path_component_starts_with_dot(Path("")))
self.assertFalse(path_component_starts_with_dot(Path("foo")))
self.assertFalse(path_component_starts_with_dot(Path("foo/bar")))
self.assertTrue(path_component_starts_with_dot(Path(".foo/bar")))
self.assertTrue(path_component_starts_with_dot(Path("foo/.bar")))
self.assertTrue(path_component_starts_with_dot(Path(".foo/.bar")))
class TestHostInfo(unittest.TestCase):
def test_returns_correct_os(self):
test_data = {
"Darwin": "is_macos",
"Windows": "is_windows",
"Linux": "is_linux",
"FreeBSD": "is_freebsd",
"blarg": "is_unknown",
"unknown": "is_unknown",
}
for platform_value, expected_true_field in test_data.items():
struct = host_info(
platform_system=lambda: platform_value,
platform_machine=lambda: "x86_64",
)
self.validate_host_info_struct(
struct, "os", expected_true_field, "platform.system", platform_value
)
def test_returns_correct_arch(self):
test_data = {
"aarch64": "is_aarch64",
"arm": "is_arm",
"armeb": "is_armeb",
"i386": "is_i386",
"mips": "is_mips",
"mips64": "is_mips64",
"mipsel": "is_mipsel",
"mipsel64": "is_mipsel64",
"powerpc": "is_powerpc",
"ppc64": "is_ppc64",
"unknown": "is_unknown",
"blarg": "is_unknown",
"x86_64": "is_x86_64",
"amd64": "is_x86_64",
"arm64": "is_aarch64",
}
for platform_value, expected_true_field in test_data.items():
struct = host_info(
platform_system=lambda: "Darwin",
platform_machine=lambda: platform_value,
)
self.validate_host_info_struct(
struct, "arch", expected_true_field, "platform.machine", platform_value
)
def validate_host_info_struct(
self, struct, top_level, true_key, platform_func, platform_value
):
top_level_struct = getattr(struct, top_level)
for field in top_level_struct._fields:
if field == true_key:
continue
self.assertFalse(
getattr(top_level_struct, field),
"Expected {}.{} to be false in {} with {} returning "
"value {}".format(
top_level, field, struct, platform_func, platform_value
),
)
self.assertTrue(
getattr(top_level_struct, true_key),
"Expected {}.{} to be false in {} with {} returning "
"value {}".format(
top_level, true_key, struct, platform_func, platform_value
),
)
class TestMemoized(unittest.TestCase):
def _makeone(self, func, *args, **kwargs):
from .util import memoized
return memoized(*args, **kwargs)(func)
def test_cache_none(self):
decorated = self._makeone(lambda _retval=iter([None, "foo"]): next(_retval))
uncached = decorated()
cached = decorated()
self.assertEqual(uncached, cached)
self.assertTrue(cached is None)
def test_no_deepcopy(self):
decorated = self._makeone(lambda: [], deepcopy=False)
initial = decorated()
cached = decorated()
self.assertTrue(initial is cached)
def test_deepcopy(self):
decorated = self._makeone(lambda: [{}])
initial = decorated()
cached = decorated()
self.assertTrue(initial is not cached)
initial[0]["foo"] = "bar"
self.assertTrue(cached[0] == {})
def test_cachekey(self):
decorated = self._makeone(
# note that in Python 2 without hash randomisation, 'bar' and 'baz' will collide in
# a small dictionary, as their hash keys differ by 8.
lambda foo, bar="baz", baz="bar", _retval=itertools.count(): next(_retval)
)
initial = decorated(42, baz="spam", bar="eggs")
cached = decorated(42, bar="eggs", baz="spam")
different_keyword_values = decorated(42, bar="eric", baz="idle")
self.assertEqual(initial, cached)
self.assertNotEqual(initial, different_keyword_values)
def test_custom_cachekey(self):
decorated = self._makeone(
lambda foo, bar="baz", _retval=itertools.count(): next(_retval),
keyfunc=lambda foo, **kwargs: foo,
)
initial = decorated(42, bar="spam")
cached = decorated(42, bar="ignored")
different_foo = decorated(81, bar="spam")
self.assertEqual(initial, cached)
self.assertNotEqual(initial, different_foo)
def test_missing_foo(self):
def fn(foo, bar=1, baz=None):
pass
missing, extra = get_mismatched_args(fn, [], {})
self.assertEqual(missing, ["foo"])
self.assertEqual(extra, [])
def test_extra_kwargs(self):
def fn(foo, bar=1, baz=None):
pass
missing, extra = get_mismatched_args(
fn, [], {"parrot": "dead", "trout": "slapped"}
)
self.assertEqual(missing, ["foo"])
self.assertEqual(extra, ["parrot", "trout"])
def test_foo_as_kwarg(self):
def fn(foo, bar=1, baz=None):
pass
missing, extra = get_mismatched_args(fn, [], {"foo": "value"})
self.assertEqual(missing, [])
self.assertEqual(extra, [])
if __name__ == "__main__":
unittest.main()
|
rabbitt/artifactory
|
refs/heads/master
|
artifactory/exceptions.py
|
1
|
from .utils import export
@export
class ArtifactoryError(Exception):
pass
@export
class ImmutableConfigError(ArtifactoryError):
pass
|
tianon/hy
|
refs/heads/master
|
hy/lex/__init__.py
|
8
|
# Copyright (c) 2013 Paul Tagliamonte <paultag@debian.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from rply.errors import LexingError
from hy.lex.exceptions import LexException, PrematureEndOfInput # NOQA
from hy.lex.lexer import lexer
from hy.lex.parser import parser
def tokenize(buf):
"""
Tokenize a Lisp file or string buffer into internal Hy objects.
"""
try:
return parser.parse(lexer.lex(buf))
except LexingError as e:
pos = e.getsourcepos()
raise LexException("Could not identify the next token.",
pos.lineno, pos.colno)
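# Illustrative use of tokenize; the model objects shown are a sketch of Hy's
# internal types, for orientation only:
#   tokenize("(+ 1 2)")
#   -> [HyExpression([HySymbol('+'), HyInteger(1), HyInteger(2)])]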
|
sysalexis/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/test/test_asyncio/test_tasks.py
|
60
|
"""Tests for tasks.py."""
import re
import sys
import types
import unittest
import weakref
from test import support
from test.script_helper import assert_python_ok
from unittest import mock
import asyncio
from asyncio import coroutines
from asyncio import test_utils
PY34 = (sys.version_info >= (3, 4))
PY35 = (sys.version_info >= (3, 5))
@asyncio.coroutine
def coroutine_function():
pass
def format_coroutine(qualname, state, src, source_traceback, generator=False):
if generator:
state = '%s' % state
else:
state = '%s, defined' % state
if source_traceback is not None:
frame = source_traceback[-1]
return ('coro=<%s() %s at %s> created at %s:%s'
% (qualname, state, src, frame[0], frame[1]))
else:
return 'coro=<%s() %s at %s>' % (qualname, state, src)
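# Illustrative output of format_coroutine (names and locations hypothetical):
#   format_coroutine('f', 'running', 'mod.py:10', None, generator=True)
#   -> 'coro=<f() running at mod.py:10>'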
class Dummy:
def __repr__(self):
return '<Dummy>'
def __call__(self, *args):
pass
class TaskTests(test_utils.TestCase):
def setUp(self):
self.loop = self.new_test_loop()
def test_task_class(self):
@asyncio.coroutine
def notmuch():
return 'ok'
t = asyncio.Task(notmuch(), loop=self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
self.assertIs(t._loop, self.loop)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
t = asyncio.Task(notmuch(), loop=loop)
self.assertIs(t._loop, loop)
loop.run_until_complete(t)
loop.close()
def test_async_coroutine(self):
@asyncio.coroutine
def notmuch():
return 'ok'
t = asyncio.async(notmuch(), loop=self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
self.assertIs(t._loop, self.loop)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
t = asyncio.async(notmuch(), loop=loop)
self.assertIs(t._loop, loop)
loop.run_until_complete(t)
loop.close()
def test_async_future(self):
f_orig = asyncio.Future(loop=self.loop)
f_orig.set_result('ko')
f = asyncio.async(f_orig)
self.loop.run_until_complete(f)
self.assertTrue(f.done())
self.assertEqual(f.result(), 'ko')
self.assertIs(f, f_orig)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
with self.assertRaises(ValueError):
f = asyncio.async(f_orig, loop=loop)
loop.close()
f = asyncio.async(f_orig, loop=self.loop)
self.assertIs(f, f_orig)
def test_async_task(self):
@asyncio.coroutine
def notmuch():
return 'ok'
t_orig = asyncio.Task(notmuch(), loop=self.loop)
t = asyncio.async(t_orig)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
self.assertIs(t, t_orig)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
with self.assertRaises(ValueError):
t = asyncio.async(t_orig, loop=loop)
loop.close()
t = asyncio.async(t_orig, loop=self.loop)
self.assertIs(t, t_orig)
def test_async_neither(self):
with self.assertRaises(TypeError):
asyncio.async('ok')
def test_task_repr(self):
self.loop.set_debug(False)
@asyncio.coroutine
def notmuch():
yield from []
return 'abc'
# test coroutine function
self.assertEqual(notmuch.__name__, 'notmuch')
if PY35:
self.assertEqual(notmuch.__qualname__,
'TaskTests.test_task_repr.<locals>.notmuch')
self.assertEqual(notmuch.__module__, __name__)
filename, lineno = test_utils.get_function_source(notmuch)
src = "%s:%s" % (filename, lineno)
# test coroutine object
gen = notmuch()
if coroutines._DEBUG or PY35:
coro_qualname = 'TaskTests.test_task_repr.<locals>.notmuch'
else:
coro_qualname = 'notmuch'
self.assertEqual(gen.__name__, 'notmuch')
if PY35:
self.assertEqual(gen.__qualname__,
coro_qualname)
# test pending Task
t = asyncio.Task(gen, loop=self.loop)
t.add_done_callback(Dummy())
coro = format_coroutine(coro_qualname, 'running', src,
t._source_traceback, generator=True)
self.assertEqual(repr(t),
'<Task pending %s cb=[<Dummy>()]>' % coro)
# test cancelling Task
t.cancel() # Does not take immediate effect!
self.assertEqual(repr(t),
'<Task cancelling %s cb=[<Dummy>()]>' % coro)
# test cancelled Task
self.assertRaises(asyncio.CancelledError,
self.loop.run_until_complete, t)
coro = format_coroutine(coro_qualname, 'done', src,
t._source_traceback)
self.assertEqual(repr(t),
'<Task cancelled %s>' % coro)
# test finished Task
t = asyncio.Task(notmuch(), loop=self.loop)
self.loop.run_until_complete(t)
coro = format_coroutine(coro_qualname, 'done', src,
t._source_traceback)
self.assertEqual(repr(t),
"<Task finished %s result='abc'>" % coro)
def test_task_repr_coro_decorator(self):
self.loop.set_debug(False)
@asyncio.coroutine
def notmuch():
# notmuch() function doesn't use yield from: it will be wrapped by
# @coroutine decorator
return 123
# test coroutine function
self.assertEqual(notmuch.__name__, 'notmuch')
if PY35:
self.assertEqual(notmuch.__qualname__,
'TaskTests.test_task_repr_coro_decorator.<locals>.notmuch')
self.assertEqual(notmuch.__module__, __name__)
# test coroutine object
gen = notmuch()
if coroutines._DEBUG or PY35:
# On Python >= 3.5, generators now inherit the name of the
# function, as expected, and have a qualified name (__qualname__
# attribute).
coro_name = 'notmuch'
coro_qualname = 'TaskTests.test_task_repr_coro_decorator.<locals>.notmuch'
else:
# On Python < 3.5, generators inherit the name of the code, not of
# the function. See: http://bugs.python.org/issue21205
coro_name = coro_qualname = 'coro'
self.assertEqual(gen.__name__, coro_name)
if PY35:
self.assertEqual(gen.__qualname__, coro_qualname)
# test repr(CoroWrapper)
if coroutines._DEBUG:
# format the coroutine object
if coroutines._DEBUG:
filename, lineno = test_utils.get_function_source(notmuch)
frame = gen._source_traceback[-1]
coro = ('%s() running, defined at %s:%s, created at %s:%s'
% (coro_qualname, filename, lineno,
frame[0], frame[1]))
else:
code = gen.gi_code
coro = ('%s() running at %s:%s'
% (coro_qualname, code.co_filename, code.co_firstlineno))
self.assertEqual(repr(gen), '<CoroWrapper %s>' % coro)
# test pending Task
t = asyncio.Task(gen, loop=self.loop)
t.add_done_callback(Dummy())
# format the coroutine object
if coroutines._DEBUG:
src = '%s:%s' % test_utils.get_function_source(notmuch)
else:
code = gen.gi_code
src = '%s:%s' % (code.co_filename, code.co_firstlineno)
coro = format_coroutine(coro_qualname, 'running', src,
t._source_traceback,
generator=not coroutines._DEBUG)
self.assertEqual(repr(t),
'<Task pending %s cb=[<Dummy>()]>' % coro)
self.loop.run_until_complete(t)
def test_task_repr_wait_for(self):
self.loop.set_debug(False)
@asyncio.coroutine
def wait_for(fut):
return (yield from fut)
fut = asyncio.Future(loop=self.loop)
task = asyncio.Task(wait_for(fut), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertRegex(repr(task),
'<Task .* wait_for=%s>' % re.escape(repr(fut)))
fut.set_result(None)
self.loop.run_until_complete(task)
def test_task_basics(self):
@asyncio.coroutine
def outer():
a = yield from inner1()
b = yield from inner2()
return a+b
@asyncio.coroutine
def inner1():
return 42
@asyncio.coroutine
def inner2():
return 1000
t = outer()
self.assertEqual(self.loop.run_until_complete(t), 1042)
def test_cancel(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
yield 0
loop = self.new_test_loop(gen)
@asyncio.coroutine
def task():
yield from asyncio.sleep(10.0, loop=loop)
return 12
t = asyncio.Task(task(), loop=loop)
loop.call_soon(t.cancel)
with self.assertRaises(asyncio.CancelledError):
loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertTrue(t.cancelled())
self.assertFalse(t.cancel())
def test_cancel_yield(self):
@asyncio.coroutine
def task():
yield
yield
return 12
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop) # start coro
t.cancel()
self.assertRaises(
asyncio.CancelledError, self.loop.run_until_complete, t)
self.assertTrue(t.done())
self.assertTrue(t.cancelled())
self.assertFalse(t.cancel())
def test_cancel_inner_future(self):
f = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def task():
yield from f
return 12
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop) # start task
f.cancel()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(t)
self.assertTrue(f.cancelled())
self.assertTrue(t.cancelled())
def test_cancel_both_task_and_inner_future(self):
f = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def task():
yield from f
return 12
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop)
f.cancel()
t.cancel()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertTrue(f.cancelled())
self.assertTrue(t.cancelled())
def test_cancel_task_catching(self):
fut1 = asyncio.Future(loop=self.loop)
fut2 = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def task():
yield from fut1
try:
yield from fut2
except asyncio.CancelledError:
return 42
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertIs(t._fut_waiter, fut1) # White-box test.
fut1.set_result(None)
test_utils.run_briefly(self.loop)
self.assertIs(t._fut_waiter, fut2) # White-box test.
t.cancel()
self.assertTrue(fut2.cancelled())
res = self.loop.run_until_complete(t)
self.assertEqual(res, 42)
self.assertFalse(t.cancelled())
def test_cancel_task_ignoring(self):
fut1 = asyncio.Future(loop=self.loop)
fut2 = asyncio.Future(loop=self.loop)
fut3 = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def task():
yield from fut1
try:
yield from fut2
except asyncio.CancelledError:
pass
res = yield from fut3
return res
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertIs(t._fut_waiter, fut1) # White-box test.
fut1.set_result(None)
test_utils.run_briefly(self.loop)
self.assertIs(t._fut_waiter, fut2) # White-box test.
t.cancel()
self.assertTrue(fut2.cancelled())
test_utils.run_briefly(self.loop)
self.assertIs(t._fut_waiter, fut3) # White-box test.
fut3.set_result(42)
res = self.loop.run_until_complete(t)
self.assertEqual(res, 42)
self.assertFalse(fut3.cancelled())
self.assertFalse(t.cancelled())
def test_cancel_current_task(self):
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
@asyncio.coroutine
def task():
t.cancel()
self.assertTrue(t._must_cancel) # White-box test.
# The sleep should be cancelled immediately.
yield from asyncio.sleep(100, loop=loop)
return 12
t = asyncio.Task(task(), loop=loop)
self.assertRaises(
asyncio.CancelledError, loop.run_until_complete, t)
self.assertTrue(t.done())
self.assertFalse(t._must_cancel) # White-box test.
self.assertFalse(t.cancel())
def test_stop_while_run_in_complete(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0.1
self.assertAlmostEqual(0.2, when)
when = yield 0.1
self.assertAlmostEqual(0.3, when)
yield 0.1
loop = self.new_test_loop(gen)
x = 0
waiters = []
@asyncio.coroutine
def task():
nonlocal x
while x < 10:
waiters.append(asyncio.sleep(0.1, loop=loop))
yield from waiters[-1]
x += 1
if x == 2:
loop.stop()
t = asyncio.Task(task(), loop=loop)
with self.assertRaises(RuntimeError) as cm:
loop.run_until_complete(t)
self.assertEqual(str(cm.exception),
'Event loop stopped before Future completed.')
self.assertFalse(t.done())
self.assertEqual(x, 2)
self.assertAlmostEqual(0.3, loop.time())
# close generators
for w in waiters:
w.close()
t.cancel()
self.assertRaises(asyncio.CancelledError, loop.run_until_complete, t)
def test_wait_for(self):
def gen():
when = yield
self.assertAlmostEqual(0.2, when)
when = yield 0
self.assertAlmostEqual(0.1, when)
when = yield 0.1
loop = self.new_test_loop(gen)
foo_running = None
@asyncio.coroutine
def foo():
nonlocal foo_running
foo_running = True
try:
yield from asyncio.sleep(0.2, loop=loop)
finally:
foo_running = False
return 'done'
fut = asyncio.Task(foo(), loop=loop)
with self.assertRaises(asyncio.TimeoutError):
loop.run_until_complete(asyncio.wait_for(fut, 0.1, loop=loop))
self.assertTrue(fut.done())
# it should have been cancelled due to the timeout
self.assertTrue(fut.cancelled())
self.assertAlmostEqual(0.1, loop.time())
self.assertEqual(foo_running, False)
def test_wait_for_blocking(self):
loop = self.new_test_loop()
@asyncio.coroutine
def coro():
return 'done'
res = loop.run_until_complete(asyncio.wait_for(coro(),
timeout=None,
loop=loop))
self.assertEqual(res, 'done')
def test_wait_for_with_global_loop(self):
def gen():
when = yield
self.assertAlmostEqual(0.2, when)
when = yield 0
self.assertAlmostEqual(0.01, when)
yield 0.01
loop = self.new_test_loop(gen)
@asyncio.coroutine
def foo():
yield from asyncio.sleep(0.2, loop=loop)
return 'done'
asyncio.set_event_loop(loop)
try:
fut = asyncio.Task(foo(), loop=loop)
with self.assertRaises(asyncio.TimeoutError):
loop.run_until_complete(asyncio.wait_for(fut, 0.01))
finally:
asyncio.set_event_loop(None)
self.assertAlmostEqual(0.01, loop.time())
self.assertTrue(fut.done())
self.assertTrue(fut.cancelled())
def test_wait_for_race_condition(self):
def gen():
yield 0.1
yield 0.1
yield 0.1
loop = self.new_test_loop(gen)
fut = asyncio.Future(loop=loop)
task = asyncio.wait_for(fut, timeout=0.2, loop=loop)
loop.call_later(0.1, fut.set_result, "ok")
res = loop.run_until_complete(task)
self.assertEqual(res, "ok")
def test_wait(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(0.15, when)
yield 0.15
loop = self.new_test_loop(gen)
a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
b = asyncio.Task(asyncio.sleep(0.15, loop=loop), loop=loop)
@asyncio.coroutine
def foo():
done, pending = yield from asyncio.wait([b, a], loop=loop)
self.assertEqual(done, set([a, b]))
self.assertEqual(pending, set())
return 42
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertEqual(res, 42)
self.assertAlmostEqual(0.15, loop.time())
# Doing it again should take no time and exercise a different path.
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertAlmostEqual(0.15, loop.time())
self.assertEqual(res, 42)
def test_wait_with_global_loop(self):
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
when = yield 0
self.assertAlmostEqual(0.015, when)
yield 0.015
loop = self.new_test_loop(gen)
a = asyncio.Task(asyncio.sleep(0.01, loop=loop), loop=loop)
b = asyncio.Task(asyncio.sleep(0.015, loop=loop), loop=loop)
@asyncio.coroutine
def foo():
done, pending = yield from asyncio.wait([b, a])
self.assertEqual(done, set([a, b]))
self.assertEqual(pending, set())
return 42
asyncio.set_event_loop(loop)
res = loop.run_until_complete(
asyncio.Task(foo(), loop=loop))
self.assertEqual(res, 42)
def test_wait_duplicate_coroutines(self):
@asyncio.coroutine
def coro(s):
return s
c = coro('test')
task = asyncio.Task(
asyncio.wait([c, c, coro('spam')], loop=self.loop),
loop=self.loop)
done, pending = self.loop.run_until_complete(task)
self.assertFalse(pending)
self.assertEqual(set(f.result() for f in done), {'test', 'spam'})
def test_wait_errors(self):
self.assertRaises(
ValueError, self.loop.run_until_complete,
asyncio.wait(set(), loop=self.loop))
# -1 is an invalid return_when value
sleep_coro = asyncio.sleep(10.0, loop=self.loop)
wait_coro = asyncio.wait([sleep_coro], return_when=-1, loop=self.loop)
self.assertRaises(ValueError,
self.loop.run_until_complete, wait_coro)
sleep_coro.close()
def test_wait_first_completed(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
when = yield 0
self.assertAlmostEqual(0.1, when)
yield 0.1
loop = self.new_test_loop(gen)
a = asyncio.Task(asyncio.sleep(10.0, loop=loop), loop=loop)
b = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
task = asyncio.Task(
asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED,
loop=loop),
loop=loop)
done, pending = loop.run_until_complete(task)
self.assertEqual({b}, done)
self.assertEqual({a}, pending)
self.assertFalse(a.done())
self.assertTrue(b.done())
self.assertIsNone(b.result())
self.assertAlmostEqual(0.1, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_wait_really_done(self):
# there is possibility that some tasks in the pending list
# became done but their callbacks haven't all been called yet
@asyncio.coroutine
def coro1():
yield
@asyncio.coroutine
def coro2():
yield
yield
a = asyncio.Task(coro1(), loop=self.loop)
b = asyncio.Task(coro2(), loop=self.loop)
task = asyncio.Task(
asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED,
loop=self.loop),
loop=self.loop)
done, pending = self.loop.run_until_complete(task)
self.assertEqual({a, b}, done)
self.assertTrue(a.done())
self.assertIsNone(a.result())
self.assertTrue(b.done())
self.assertIsNone(b.result())
def test_wait_first_exception(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
yield 0
loop = self.new_test_loop(gen)
# first_exception, task already has exception
a = asyncio.Task(asyncio.sleep(10.0, loop=loop), loop=loop)
@asyncio.coroutine
def exc():
raise ZeroDivisionError('err')
b = asyncio.Task(exc(), loop=loop)
task = asyncio.Task(
asyncio.wait([b, a], return_when=asyncio.FIRST_EXCEPTION,
loop=loop),
loop=loop)
done, pending = loop.run_until_complete(task)
self.assertEqual({b}, done)
self.assertEqual({a}, pending)
self.assertAlmostEqual(0, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_wait_first_exception_in_wait(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
when = yield 0
self.assertAlmostEqual(0.01, when)
yield 0.01
loop = self.new_test_loop(gen)
# first_exception, exception during waiting
a = asyncio.Task(asyncio.sleep(10.0, loop=loop), loop=loop)
@asyncio.coroutine
def exc():
yield from asyncio.sleep(0.01, loop=loop)
raise ZeroDivisionError('err')
b = asyncio.Task(exc(), loop=loop)
task = asyncio.wait([b, a], return_when=asyncio.FIRST_EXCEPTION,
loop=loop)
done, pending = loop.run_until_complete(task)
self.assertEqual({b}, done)
self.assertEqual({a}, pending)
self.assertAlmostEqual(0.01, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_wait_with_exception(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(0.15, when)
yield 0.15
loop = self.new_test_loop(gen)
a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
@asyncio.coroutine
def sleeper():
yield from asyncio.sleep(0.15, loop=loop)
raise ZeroDivisionError('really')
b = asyncio.Task(sleeper(), loop=loop)
@asyncio.coroutine
def foo():
done, pending = yield from asyncio.wait([b, a], loop=loop)
self.assertEqual(len(done), 2)
self.assertEqual(pending, set())
errors = set(f for f in done if f.exception() is not None)
self.assertEqual(len(errors), 1)
loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertAlmostEqual(0.15, loop.time())
loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertAlmostEqual(0.15, loop.time())
def test_wait_with_timeout(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(0.15, when)
when = yield 0
self.assertAlmostEqual(0.11, when)
yield 0.11
loop = self.new_test_loop(gen)
a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
b = asyncio.Task(asyncio.sleep(0.15, loop=loop), loop=loop)
@asyncio.coroutine
def foo():
done, pending = yield from asyncio.wait([b, a], timeout=0.11,
loop=loop)
self.assertEqual(done, set([a]))
self.assertEqual(pending, set([b]))
loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertAlmostEqual(0.11, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_wait_concurrent_complete(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(0.15, when)
when = yield 0
self.assertAlmostEqual(0.1, when)
yield 0.1
loop = self.new_test_loop(gen)
a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
b = asyncio.Task(asyncio.sleep(0.15, loop=loop), loop=loop)
done, pending = loop.run_until_complete(
asyncio.wait([b, a], timeout=0.1, loop=loop))
self.assertEqual(done, set([a]))
self.assertEqual(pending, set([b]))
self.assertAlmostEqual(0.1, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_as_completed(self):
def gen():
yield 0
yield 0
yield 0.01
yield 0
loop = self.new_test_loop(gen)
# disable "slow callback" warning
loop.slow_callback_duration = 1.0
completed = set()
time_shifted = False
@asyncio.coroutine
def sleeper(dt, x):
nonlocal time_shifted
yield from asyncio.sleep(dt, loop=loop)
completed.add(x)
if not time_shifted and 'a' in completed and 'b' in completed:
time_shifted = True
loop.advance_time(0.14)
return x
a = sleeper(0.01, 'a')
b = sleeper(0.01, 'b')
c = sleeper(0.15, 'c')
@asyncio.coroutine
def foo():
values = []
for f in asyncio.as_completed([b, c, a], loop=loop):
values.append((yield from f))
return values
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertAlmostEqual(0.15, loop.time())
self.assertTrue('a' in res[:2])
self.assertTrue('b' in res[:2])
self.assertEqual(res[2], 'c')
# Doing it again should take no time and exercise a different path.
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertAlmostEqual(0.15, loop.time())
def test_as_completed_with_timeout(self):
def gen():
yield
yield 0
yield 0
yield 0.1
loop = self.new_test_loop(gen)
a = asyncio.sleep(0.1, 'a', loop=loop)
b = asyncio.sleep(0.15, 'b', loop=loop)
@asyncio.coroutine
def foo():
values = []
for f in asyncio.as_completed([a, b], timeout=0.12, loop=loop):
if values:
loop.advance_time(0.02)
try:
v = yield from f
values.append((1, v))
except asyncio.TimeoutError as exc:
values.append((2, exc))
return values
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertEqual(len(res), 2, res)
self.assertEqual(res[0], (1, 'a'))
self.assertEqual(res[1][0], 2)
self.assertIsInstance(res[1][1], asyncio.TimeoutError)
self.assertAlmostEqual(0.12, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_as_completed_with_unused_timeout(self):
def gen():
yield
yield 0
yield 0.01
loop = self.new_test_loop(gen)
a = asyncio.sleep(0.01, 'a', loop=loop)
@asyncio.coroutine
def foo():
for f in asyncio.as_completed([a], timeout=1, loop=loop):
v = yield from f
self.assertEqual(v, 'a')
loop.run_until_complete(asyncio.Task(foo(), loop=loop))
def test_as_completed_reverse_wait(self):
def gen():
yield 0
yield 0.05
yield 0
loop = self.new_test_loop(gen)
a = asyncio.sleep(0.05, 'a', loop=loop)
b = asyncio.sleep(0.10, 'b', loop=loop)
fs = {a, b}
futs = list(asyncio.as_completed(fs, loop=loop))
self.assertEqual(len(futs), 2)
x = loop.run_until_complete(futs[1])
self.assertEqual(x, 'a')
self.assertAlmostEqual(0.05, loop.time())
loop.advance_time(0.05)
y = loop.run_until_complete(futs[0])
self.assertEqual(y, 'b')
self.assertAlmostEqual(0.10, loop.time())
def test_as_completed_concurrent(self):
def gen():
when = yield
self.assertAlmostEqual(0.05, when)
when = yield 0
self.assertAlmostEqual(0.05, when)
yield 0.05
loop = self.new_test_loop(gen)
a = asyncio.sleep(0.05, 'a', loop=loop)
b = asyncio.sleep(0.05, 'b', loop=loop)
fs = {a, b}
futs = list(asyncio.as_completed(fs, loop=loop))
self.assertEqual(len(futs), 2)
waiter = asyncio.wait(futs, loop=loop)
done, pending = loop.run_until_complete(waiter)
self.assertEqual(set(f.result() for f in done), {'a', 'b'})
def test_as_completed_duplicate_coroutines(self):
@asyncio.coroutine
def coro(s):
return s
@asyncio.coroutine
def runner():
result = []
c = coro('ham')
for f in asyncio.as_completed([c, c, coro('spam')],
loop=self.loop):
result.append((yield from f))
return result
fut = asyncio.Task(runner(), loop=self.loop)
self.loop.run_until_complete(fut)
result = fut.result()
self.assertEqual(set(result), {'ham', 'spam'})
self.assertEqual(len(result), 2)
def test_sleep(self):
def gen():
when = yield
self.assertAlmostEqual(0.05, when)
when = yield 0.05
self.assertAlmostEqual(0.1, when)
yield 0.05
loop = self.new_test_loop(gen)
@asyncio.coroutine
def sleeper(dt, arg):
yield from asyncio.sleep(dt/2, loop=loop)
res = yield from asyncio.sleep(dt/2, arg, loop=loop)
return res
t = asyncio.Task(sleeper(0.1, 'yeah'), loop=loop)
loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'yeah')
self.assertAlmostEqual(0.1, loop.time())
def test_sleep_cancel(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
yield 0
loop = self.new_test_loop(gen)
t = asyncio.Task(asyncio.sleep(10.0, 'yeah', loop=loop),
loop=loop)
handle = None
orig_call_later = loop.call_later
def call_later(delay, callback, *args):
nonlocal handle
handle = orig_call_later(delay, callback, *args)
return handle
loop.call_later = call_later
test_utils.run_briefly(loop)
self.assertFalse(handle._cancelled)
t.cancel()
test_utils.run_briefly(loop)
self.assertTrue(handle._cancelled)
def test_task_cancel_sleeping_task(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(5000, when)
yield 0.1
loop = self.new_test_loop(gen)
@asyncio.coroutine
def sleep(dt):
yield from asyncio.sleep(dt, loop=loop)
@asyncio.coroutine
def doit():
sleeper = asyncio.Task(sleep(5000), loop=loop)
loop.call_later(0.1, sleeper.cancel)
try:
yield from sleeper
except asyncio.CancelledError:
return 'cancelled'
else:
return 'slept in'
doer = doit()
self.assertEqual(loop.run_until_complete(doer), 'cancelled')
self.assertAlmostEqual(0.1, loop.time())
def test_task_cancel_waiter_future(self):
fut = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def coro():
yield from fut
task = asyncio.Task(coro(), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertIs(task._fut_waiter, fut)
task.cancel()
test_utils.run_briefly(self.loop)
self.assertRaises(
asyncio.CancelledError, self.loop.run_until_complete, task)
self.assertIsNone(task._fut_waiter)
self.assertTrue(fut.cancelled())
def test_step_in_completed_task(self):
@asyncio.coroutine
def notmuch():
return 'ko'
gen = notmuch()
task = asyncio.Task(gen, loop=self.loop)
task.set_result('ok')
self.assertRaises(AssertionError, task._step)
gen.close()
def test_step_result(self):
@asyncio.coroutine
def notmuch():
yield None
yield 1
return 'ko'
self.assertRaises(
RuntimeError, self.loop.run_until_complete, notmuch())
def test_step_result_future(self):
# If coroutine returns future, task waits on this future.
class Fut(asyncio.Future):
def __init__(self, *args, **kwds):
self.cb_added = False
super().__init__(*args, **kwds)
def add_done_callback(self, fn):
self.cb_added = True
super().add_done_callback(fn)
fut = Fut(loop=self.loop)
result = None
@asyncio.coroutine
def wait_for_future():
nonlocal result
result = yield from fut
t = asyncio.Task(wait_for_future(), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertTrue(fut.cb_added)
res = object()
fut.set_result(res)
test_utils.run_briefly(self.loop)
self.assertIs(res, result)
self.assertTrue(t.done())
self.assertIsNone(t.result())
def test_step_with_baseexception(self):
@asyncio.coroutine
def notmutch():
raise BaseException()
task = asyncio.Task(notmutch(), loop=self.loop)
self.assertRaises(BaseException, task._step)
self.assertTrue(task.done())
self.assertIsInstance(task.exception(), BaseException)
def test_baseexception_during_cancel(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
yield 0
loop = self.new_test_loop(gen)
@asyncio.coroutine
def sleeper():
yield from asyncio.sleep(10, loop=loop)
base_exc = BaseException()
@asyncio.coroutine
def notmutch():
try:
yield from sleeper()
except asyncio.CancelledError:
raise base_exc
task = asyncio.Task(notmutch(), loop=loop)
test_utils.run_briefly(loop)
task.cancel()
self.assertFalse(task.done())
self.assertRaises(BaseException, test_utils.run_briefly, loop)
self.assertTrue(task.done())
self.assertFalse(task.cancelled())
self.assertIs(task.exception(), base_exc)
def test_iscoroutinefunction(self):
def fn():
pass
self.assertFalse(asyncio.iscoroutinefunction(fn))
def fn1():
yield
self.assertFalse(asyncio.iscoroutinefunction(fn1))
@asyncio.coroutine
def fn2():
yield
self.assertTrue(asyncio.iscoroutinefunction(fn2))
def test_yield_vs_yield_from(self):
fut = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def wait_for_future():
yield fut
task = wait_for_future()
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(task)
self.assertFalse(fut.done())
def test_yield_vs_yield_from_generator(self):
@asyncio.coroutine
def coro():
yield
@asyncio.coroutine
def wait_for_future():
gen = coro()
try:
yield gen
finally:
gen.close()
task = wait_for_future()
self.assertRaises(
RuntimeError,
self.loop.run_until_complete, task)
def test_coroutine_non_gen_function(self):
@asyncio.coroutine
def func():
return 'test'
self.assertTrue(asyncio.iscoroutinefunction(func))
coro = func()
self.assertTrue(asyncio.iscoroutine(coro))
res = self.loop.run_until_complete(coro)
self.assertEqual(res, 'test')
def test_coroutine_non_gen_function_return_future(self):
fut = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def func():
return fut
@asyncio.coroutine
def coro():
fut.set_result('test')
t1 = asyncio.Task(func(), loop=self.loop)
t2 = asyncio.Task(coro(), loop=self.loop)
res = self.loop.run_until_complete(t1)
self.assertEqual(res, 'test')
self.assertIsNone(t2.result())
def test_current_task(self):
self.assertIsNone(asyncio.Task.current_task(loop=self.loop))
@asyncio.coroutine
def coro(loop):
self.assertTrue(asyncio.Task.current_task(loop=loop) is task)
task = asyncio.Task(coro(self.loop), loop=self.loop)
self.loop.run_until_complete(task)
self.assertIsNone(asyncio.Task.current_task(loop=self.loop))
def test_current_task_with_interleaving_tasks(self):
self.assertIsNone(asyncio.Task.current_task(loop=self.loop))
fut1 = asyncio.Future(loop=self.loop)
fut2 = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def coro1(loop):
self.assertTrue(asyncio.Task.current_task(loop=loop) is task1)
yield from fut1
self.assertTrue(asyncio.Task.current_task(loop=loop) is task1)
fut2.set_result(True)
@asyncio.coroutine
def coro2(loop):
self.assertTrue(asyncio.Task.current_task(loop=loop) is task2)
fut1.set_result(True)
yield from fut2
self.assertTrue(asyncio.Task.current_task(loop=loop) is task2)
task1 = asyncio.Task(coro1(self.loop), loop=self.loop)
task2 = asyncio.Task(coro2(self.loop), loop=self.loop)
self.loop.run_until_complete(asyncio.wait((task1, task2),
loop=self.loop))
self.assertIsNone(asyncio.Task.current_task(loop=self.loop))
# Some thorough tests for cancellation propagation through
# coroutines, tasks and wait().
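    # Schematically, the chain exercised below is:
    #
    #     f = asyncio.async(outer(), loop=loop)
    #     f.cancel()
    #     # CancelledError surfaces at the paused 'yield from' in outer(),
    #     # flows into inner(), and finally reaches the awaited Future.
    #
    # Several tests below assert one link of that chain via a 'proof' counter.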
def test_yield_future_passes_cancel(self):
# Cancelling outer() cancels inner() cancels waiter.
proof = 0
waiter = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def inner():
nonlocal proof
try:
yield from waiter
except asyncio.CancelledError:
proof += 1
raise
else:
self.fail('got past sleep() in inner()')
@asyncio.coroutine
def outer():
nonlocal proof
try:
yield from inner()
except asyncio.CancelledError:
proof += 100 # Expect this path.
else:
proof += 10
f = asyncio.async(outer(), loop=self.loop)
test_utils.run_briefly(self.loop)
f.cancel()
self.loop.run_until_complete(f)
self.assertEqual(proof, 101)
self.assertTrue(waiter.cancelled())
def test_yield_wait_does_not_shield_cancel(self):
# Cancelling outer() makes wait() return early, leaves inner()
# running.
proof = 0
waiter = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def inner():
nonlocal proof
yield from waiter
proof += 1
@asyncio.coroutine
def outer():
nonlocal proof
d, p = yield from asyncio.wait([inner()], loop=self.loop)
proof += 100
f = asyncio.async(outer(), loop=self.loop)
test_utils.run_briefly(self.loop)
f.cancel()
self.assertRaises(
asyncio.CancelledError, self.loop.run_until_complete, f)
waiter.set_result(None)
test_utils.run_briefly(self.loop)
self.assertEqual(proof, 1)
def test_shield_result(self):
inner = asyncio.Future(loop=self.loop)
outer = asyncio.shield(inner)
inner.set_result(42)
res = self.loop.run_until_complete(outer)
self.assertEqual(res, 42)
def test_shield_exception(self):
inner = asyncio.Future(loop=self.loop)
outer = asyncio.shield(inner)
test_utils.run_briefly(self.loop)
exc = RuntimeError('expected')
inner.set_exception(exc)
test_utils.run_briefly(self.loop)
self.assertIs(outer.exception(), exc)
def test_shield_cancel(self):
inner = asyncio.Future(loop=self.loop)
outer = asyncio.shield(inner)
test_utils.run_briefly(self.loop)
inner.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(outer.cancelled())
def test_shield_shortcut(self):
fut = asyncio.Future(loop=self.loop)
fut.set_result(42)
res = self.loop.run_until_complete(asyncio.shield(fut))
self.assertEqual(res, 42)
def test_shield_effect(self):
# Cancelling outer() does not affect inner().
proof = 0
waiter = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def inner():
nonlocal proof
yield from waiter
proof += 1
@asyncio.coroutine
def outer():
nonlocal proof
yield from asyncio.shield(inner(), loop=self.loop)
proof += 100
f = asyncio.async(outer(), loop=self.loop)
test_utils.run_briefly(self.loop)
f.cancel()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(f)
waiter.set_result(None)
test_utils.run_briefly(self.loop)
self.assertEqual(proof, 1)
def test_shield_gather(self):
child1 = asyncio.Future(loop=self.loop)
child2 = asyncio.Future(loop=self.loop)
parent = asyncio.gather(child1, child2, loop=self.loop)
outer = asyncio.shield(parent, loop=self.loop)
test_utils.run_briefly(self.loop)
outer.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(outer.cancelled())
child1.set_result(1)
child2.set_result(2)
test_utils.run_briefly(self.loop)
self.assertEqual(parent.result(), [1, 2])
def test_gather_shield(self):
child1 = asyncio.Future(loop=self.loop)
child2 = asyncio.Future(loop=self.loop)
inner1 = asyncio.shield(child1, loop=self.loop)
inner2 = asyncio.shield(child2, loop=self.loop)
parent = asyncio.gather(inner1, inner2, loop=self.loop)
test_utils.run_briefly(self.loop)
parent.cancel()
        # This should cancel inner1 and inner2 but not child1 and child2.
test_utils.run_briefly(self.loop)
self.assertIsInstance(parent.exception(), asyncio.CancelledError)
self.assertTrue(inner1.cancelled())
self.assertTrue(inner2.cancelled())
child1.set_result(1)
child2.set_result(2)
test_utils.run_briefly(self.loop)
def test_as_completed_invalid_args(self):
fut = asyncio.Future(loop=self.loop)
# as_completed() expects a list of futures, not a future instance
self.assertRaises(TypeError, self.loop.run_until_complete,
asyncio.as_completed(fut, loop=self.loop))
coro = coroutine_function()
self.assertRaises(TypeError, self.loop.run_until_complete,
asyncio.as_completed(coro, loop=self.loop))
coro.close()
def test_wait_invalid_args(self):
fut = asyncio.Future(loop=self.loop)
# wait() expects a list of futures, not a future instance
self.assertRaises(TypeError, self.loop.run_until_complete,
asyncio.wait(fut, loop=self.loop))
coro = coroutine_function()
self.assertRaises(TypeError, self.loop.run_until_complete,
asyncio.wait(coro, loop=self.loop))
coro.close()
# wait() expects at least a future
self.assertRaises(ValueError, self.loop.run_until_complete,
asyncio.wait([], loop=self.loop))
def test_corowrapper_mocks_generator(self):
def check():
# A function that asserts various things.
# Called twice, with different debug flag values.
@asyncio.coroutine
def coro():
# The actual coroutine.
self.assertTrue(gen.gi_running)
yield from fut
# A completed Future used to run the coroutine.
fut = asyncio.Future(loop=self.loop)
fut.set_result(None)
# Call the coroutine.
gen = coro()
# Check some properties.
self.assertTrue(asyncio.iscoroutine(gen))
self.assertIsInstance(gen.gi_frame, types.FrameType)
self.assertFalse(gen.gi_running)
self.assertIsInstance(gen.gi_code, types.CodeType)
# Run it.
self.loop.run_until_complete(gen)
# The frame should have changed.
self.assertIsNone(gen.gi_frame)
# Save debug flag.
old_debug = asyncio.coroutines._DEBUG
try:
# Test with debug flag cleared.
asyncio.coroutines._DEBUG = False
check()
# Test with debug flag set.
asyncio.coroutines._DEBUG = True
check()
finally:
# Restore original debug flag.
asyncio.coroutines._DEBUG = old_debug
def test_yield_from_corowrapper(self):
old_debug = asyncio.coroutines._DEBUG
asyncio.coroutines._DEBUG = True
try:
@asyncio.coroutine
def t1():
return (yield from t2())
@asyncio.coroutine
def t2():
f = asyncio.Future(loop=self.loop)
asyncio.Task(t3(f), loop=self.loop)
return (yield from f)
@asyncio.coroutine
def t3(f):
f.set_result((1, 2, 3))
task = asyncio.Task(t1(), loop=self.loop)
val = self.loop.run_until_complete(task)
self.assertEqual(val, (1, 2, 3))
finally:
asyncio.coroutines._DEBUG = old_debug
def test_yield_from_corowrapper_send(self):
def foo():
a = yield
return a
def call(arg):
cw = asyncio.coroutines.CoroWrapper(foo(), foo)
cw.send(None)
try:
cw.send(arg)
except StopIteration as ex:
return ex.args[0]
else:
raise AssertionError('StopIteration was expected')
self.assertEqual(call((1, 2)), (1, 2))
self.assertEqual(call('spam'), 'spam')
def test_corowrapper_weakref(self):
wd = weakref.WeakValueDictionary()
def foo(): yield from []
cw = asyncio.coroutines.CoroWrapper(foo(), foo)
wd['cw'] = cw # Would fail without __weakref__ slot.
cw.gen = None # Suppress warning from __del__.
@unittest.skipUnless(PY34,
'need python 3.4 or later')
def test_log_destroyed_pending_task(self):
@asyncio.coroutine
def kill_me(loop):
future = asyncio.Future(loop=loop)
yield from future
# at this point, the only reference to kill_me() task is
# the Task._wakeup() method in future._callbacks
raise Exception("code never reached")
mock_handler = mock.Mock()
self.loop.set_debug(True)
self.loop.set_exception_handler(mock_handler)
# schedule the task
coro = kill_me(self.loop)
task = asyncio.async(coro, loop=self.loop)
self.assertEqual(asyncio.Task.all_tasks(loop=self.loop), {task})
# execute the task so it waits for future
self.loop._run_once()
self.assertEqual(len(self.loop._ready), 0)
# remove the future used in kill_me(), and references to the task
del coro.gi_frame.f_locals['future']
coro = None
source_traceback = task._source_traceback
task = None
# no more reference to kill_me() task: the task is destroyed by the GC
support.gc_collect()
self.assertEqual(asyncio.Task.all_tasks(loop=self.loop), set())
mock_handler.assert_called_with(self.loop, {
'message': 'Task was destroyed but it is pending!',
'task': mock.ANY,
'source_traceback': source_traceback,
})
mock_handler.reset_mock()
@mock.patch('asyncio.coroutines.logger')
def test_coroutine_never_yielded(self, m_log):
debug = asyncio.coroutines._DEBUG
try:
asyncio.coroutines._DEBUG = True
@asyncio.coroutine
def coro_noop():
pass
finally:
asyncio.coroutines._DEBUG = debug
tb_filename = __file__
tb_lineno = sys._getframe().f_lineno + 2
# create a coroutine object but don't use it
coro_noop()
support.gc_collect()
self.assertTrue(m_log.error.called)
message = m_log.error.call_args[0][0]
func_filename, func_lineno = test_utils.get_function_source(coro_noop)
regex = (r'^<CoroWrapper %s\(\) .* at %s:%s, .*> was never yielded from\n'
r'Coroutine object created at \(most recent call last\):\n'
r'.*\n'
r' File "%s", line %s, in test_coroutine_never_yielded\n'
r' coro_noop\(\)$'
% (re.escape(coro_noop.__qualname__),
re.escape(func_filename), func_lineno,
re.escape(tb_filename), tb_lineno))
self.assertRegex(message, re.compile(regex, re.DOTALL))
def test_task_source_traceback(self):
self.loop.set_debug(True)
task = asyncio.Task(coroutine_function(), loop=self.loop)
lineno = sys._getframe().f_lineno - 1
self.assertIsInstance(task._source_traceback, list)
self.assertEqual(task._source_traceback[-1][:3],
(__file__,
lineno,
'test_task_source_traceback'))
self.loop.run_until_complete(task)
class GatherTestsBase:
def setUp(self):
self.one_loop = self.new_test_loop()
self.other_loop = self.new_test_loop()
self.set_event_loop(self.one_loop, cleanup=False)
def _run_loop(self, loop):
while loop._ready:
test_utils.run_briefly(loop)
def _check_success(self, **kwargs):
a, b, c = [asyncio.Future(loop=self.one_loop) for i in range(3)]
fut = asyncio.gather(*self.wrap_futures(a, b, c), **kwargs)
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
b.set_result(1)
a.set_result(2)
self._run_loop(self.one_loop)
self.assertEqual(cb.called, False)
self.assertFalse(fut.done())
c.set_result(3)
self._run_loop(self.one_loop)
cb.assert_called_once_with(fut)
self.assertEqual(fut.result(), [2, 1, 3])
def test_success(self):
self._check_success()
self._check_success(return_exceptions=False)
def test_result_exception_success(self):
self._check_success(return_exceptions=True)
def test_one_exception(self):
a, b, c, d, e = [asyncio.Future(loop=self.one_loop) for i in range(5)]
fut = asyncio.gather(*self.wrap_futures(a, b, c, d, e))
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
exc = ZeroDivisionError()
a.set_result(1)
b.set_exception(exc)
self._run_loop(self.one_loop)
self.assertTrue(fut.done())
cb.assert_called_once_with(fut)
self.assertIs(fut.exception(), exc)
# Does nothing
c.set_result(3)
d.cancel()
e.set_exception(RuntimeError())
e.exception()
def test_return_exceptions(self):
a, b, c, d = [asyncio.Future(loop=self.one_loop) for i in range(4)]
fut = asyncio.gather(*self.wrap_futures(a, b, c, d),
return_exceptions=True)
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
exc = ZeroDivisionError()
exc2 = RuntimeError()
b.set_result(1)
c.set_exception(exc)
a.set_result(3)
self._run_loop(self.one_loop)
self.assertFalse(fut.done())
d.set_exception(exc2)
self._run_loop(self.one_loop)
self.assertTrue(fut.done())
cb.assert_called_once_with(fut)
self.assertEqual(fut.result(), [3, 1, exc, exc2])
def test_env_var_debug(self):
code = '\n'.join((
'import asyncio.coroutines',
'print(asyncio.coroutines._DEBUG)'))
# Test with -E to not fail if the unit test was run with
# PYTHONASYNCIODEBUG set to a non-empty string
sts, stdout, stderr = assert_python_ok('-E', '-c', code)
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='')
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='1')
self.assertEqual(stdout.rstrip(), b'True')
sts, stdout, stderr = assert_python_ok('-E', '-c', code,
PYTHONASYNCIODEBUG='1')
self.assertEqual(stdout.rstrip(), b'False')
class FutureGatherTests(GatherTestsBase, test_utils.TestCase):
def wrap_futures(self, *futures):
return futures
def _check_empty_sequence(self, seq_or_iter):
asyncio.set_event_loop(self.one_loop)
self.addCleanup(asyncio.set_event_loop, None)
fut = asyncio.gather(*seq_or_iter)
self.assertIsInstance(fut, asyncio.Future)
self.assertIs(fut._loop, self.one_loop)
self._run_loop(self.one_loop)
self.assertTrue(fut.done())
self.assertEqual(fut.result(), [])
fut = asyncio.gather(*seq_or_iter, loop=self.other_loop)
self.assertIs(fut._loop, self.other_loop)
def test_constructor_empty_sequence(self):
self._check_empty_sequence([])
self._check_empty_sequence(())
self._check_empty_sequence(set())
self._check_empty_sequence(iter(""))
def test_constructor_heterogenous_futures(self):
fut1 = asyncio.Future(loop=self.one_loop)
fut2 = asyncio.Future(loop=self.other_loop)
with self.assertRaises(ValueError):
asyncio.gather(fut1, fut2)
with self.assertRaises(ValueError):
asyncio.gather(fut1, loop=self.other_loop)
def test_constructor_homogenous_futures(self):
children = [asyncio.Future(loop=self.other_loop) for i in range(3)]
fut = asyncio.gather(*children)
self.assertIs(fut._loop, self.other_loop)
self._run_loop(self.other_loop)
self.assertFalse(fut.done())
fut = asyncio.gather(*children, loop=self.other_loop)
self.assertIs(fut._loop, self.other_loop)
self._run_loop(self.other_loop)
self.assertFalse(fut.done())
def test_one_cancellation(self):
a, b, c, d, e = [asyncio.Future(loop=self.one_loop) for i in range(5)]
fut = asyncio.gather(a, b, c, d, e)
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
a.set_result(1)
b.cancel()
self._run_loop(self.one_loop)
self.assertTrue(fut.done())
cb.assert_called_once_with(fut)
self.assertFalse(fut.cancelled())
self.assertIsInstance(fut.exception(), asyncio.CancelledError)
# Does nothing
c.set_result(3)
d.cancel()
e.set_exception(RuntimeError())
e.exception()
def test_result_exception_one_cancellation(self):
a, b, c, d, e, f = [asyncio.Future(loop=self.one_loop)
for i in range(6)]
fut = asyncio.gather(a, b, c, d, e, f, return_exceptions=True)
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
a.set_result(1)
zde = ZeroDivisionError()
b.set_exception(zde)
c.cancel()
self._run_loop(self.one_loop)
self.assertFalse(fut.done())
d.set_result(3)
e.cancel()
rte = RuntimeError()
f.set_exception(rte)
res = self.one_loop.run_until_complete(fut)
self.assertIsInstance(res[2], asyncio.CancelledError)
self.assertIsInstance(res[4], asyncio.CancelledError)
res[2] = res[4] = None
self.assertEqual(res, [1, zde, None, 3, None, rte])
cb.assert_called_once_with(fut)
class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
def setUp(self):
super().setUp()
asyncio.set_event_loop(self.one_loop)
def wrap_futures(self, *futures):
coros = []
for fut in futures:
@asyncio.coroutine
def coro(fut=fut):
return (yield from fut)
coros.append(coro())
return coros
def test_constructor_loop_selection(self):
@asyncio.coroutine
def coro():
return 'abc'
gen1 = coro()
gen2 = coro()
fut = asyncio.gather(gen1, gen2)
self.assertIs(fut._loop, self.one_loop)
self.one_loop.run_until_complete(fut)
self.set_event_loop(self.other_loop, cleanup=False)
gen3 = coro()
gen4 = coro()
fut2 = asyncio.gather(gen3, gen4, loop=self.other_loop)
self.assertIs(fut2._loop, self.other_loop)
self.other_loop.run_until_complete(fut2)
def test_duplicate_coroutines(self):
@asyncio.coroutine
def coro(s):
return s
c = coro('abc')
fut = asyncio.gather(c, c, coro('def'), c, loop=self.one_loop)
self._run_loop(self.one_loop)
self.assertEqual(fut.result(), ['abc', 'abc', 'def', 'abc'])
def test_cancellation_broadcast(self):
# Cancelling outer() cancels all children.
proof = 0
waiter = asyncio.Future(loop=self.one_loop)
@asyncio.coroutine
def inner():
nonlocal proof
yield from waiter
proof += 1
child1 = asyncio.async(inner(), loop=self.one_loop)
child2 = asyncio.async(inner(), loop=self.one_loop)
gatherer = None
@asyncio.coroutine
def outer():
nonlocal proof, gatherer
gatherer = asyncio.gather(child1, child2, loop=self.one_loop)
yield from gatherer
proof += 100
f = asyncio.async(outer(), loop=self.one_loop)
test_utils.run_briefly(self.one_loop)
self.assertTrue(f.cancel())
with self.assertRaises(asyncio.CancelledError):
self.one_loop.run_until_complete(f)
self.assertFalse(gatherer.cancel())
self.assertTrue(waiter.cancelled())
self.assertTrue(child1.cancelled())
self.assertTrue(child2.cancelled())
test_utils.run_briefly(self.one_loop)
self.assertEqual(proof, 0)
def test_exception_marking(self):
# Test for the first line marked "Mark exception retrieved."
@asyncio.coroutine
def inner(f):
yield from f
raise RuntimeError('should not be ignored')
a = asyncio.Future(loop=self.one_loop)
b = asyncio.Future(loop=self.one_loop)
@asyncio.coroutine
def outer():
yield from asyncio.gather(inner(a), inner(b), loop=self.one_loop)
f = asyncio.async(outer(), loop=self.one_loop)
test_utils.run_briefly(self.one_loop)
a.set_result(None)
test_utils.run_briefly(self.one_loop)
b.set_result(None)
test_utils.run_briefly(self.one_loop)
self.assertIsInstance(f.exception(), RuntimeError)
if __name__ == '__main__':
unittest.main()
|
home-assistant/home-assistant
|
refs/heads/dev
|
homeassistant/components/juicenet/__init__.py
|
1
|
"""The JuiceNet integration."""
from datetime import timedelta
import logging
import aiohttp
from pyjuicenet import Api, TokenError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_ACCESS_TOKEN
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import DOMAIN, JUICENET_API, JUICENET_COORDINATOR
from .device import JuiceNetApi
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["sensor", "switch"]
CONFIG_SCHEMA = vol.Schema(
vol.All(
cv.deprecated(DOMAIN),
{DOMAIN: vol.Schema({vol.Required(CONF_ACCESS_TOKEN): cv.string})},
),
extra=vol.ALLOW_EXTRA,
)
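# A minimal sketch (an assumption for illustration, not part of the original
# file) of the YAML this schema accepts; it exists only so legacy
# configuration.yaml setups can be imported into a config entry:
#
#   juicenet:
#     access_token: YOUR_JUICENET_API_TOKEN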
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the JuiceNet component."""
conf = config.get(DOMAIN)
hass.data.setdefault(DOMAIN, {})
if not conf:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up JuiceNet from a config entry."""
config = entry.data
session = async_get_clientsession(hass)
access_token = config[CONF_ACCESS_TOKEN]
api = Api(access_token, session)
juicenet = JuiceNetApi(api)
try:
await juicenet.setup()
except TokenError as error:
_LOGGER.error("JuiceNet Error %s", error)
return False
except aiohttp.ClientError as error:
_LOGGER.error("Could not reach the JuiceNet API %s", error)
raise ConfigEntryNotReady from error
if not juicenet.devices:
_LOGGER.error("No JuiceNet devices found for this account")
return False
_LOGGER.info("%d JuiceNet device(s) found", len(juicenet.devices))
async def async_update_data():
"""Update all device states from the JuiceNet API."""
for device in juicenet.devices:
await device.update_state(True)
return True
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="JuiceNet",
update_method=async_update_data,
update_interval=timedelta(seconds=30),
)
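    # Hedged sketch (an illustration, not part of the original file): platform
    # entities are expected to subscribe to this coordinator, typically via
    # CoordinatorEntity, so every entity refreshes on the same 30 s poll:
    #
    #   class JuiceNetEntity(CoordinatorEntity):  # hypothetical name
    #       def __init__(self, coordinator, device):
    #           super().__init__(coordinator)
    #           self.device = device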
hass.data[DOMAIN][entry.entry_id] = {
JUICENET_API: juicenet,
JUICENET_COORDINATOR: coordinator,
}
await coordinator.async_config_entry_first_refresh()
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
jcpowermac/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/panos/panos_security_rule.py
|
15
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_security_rule
short_description: Create security rule policy on PAN-OS devices or Panorama management console.
description:
- Security policies allow you to enforce rules and take action, and can be as general or specific as needed.
The policy rules are compared against the incoming traffic in sequence, and because the first rule that matches the traffic is applied,
the more specific rules must precede the more general ones.
author: "Ivan Bojer (@ivanbojer), Robert Hagen (@rnh556)"
version_added: "2.4"
requirements:
- pan-python can be obtained from PyPi U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPi U(https://pypi.python.org/pypi/pandevice)
- xmltodict can be obtained from PyPi U(https://pypi.python.org/pypi/xmltodict)
notes:
    - Check mode is not supported.
- Panorama is supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device being configured.
required: true
username:
description:
- Username credentials to use for auth unless I(api_key) is set.
default: "admin"
password:
description:
- Password credentials to use for auth unless I(api_key) is set.
required: true
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
operation:
description:
- The action to be taken. Supported values are I(add)/I(update)/I(find)/I(delete).
default: 'add'
rule_name:
description:
- Name of the security rule.
required: true
rule_type:
description:
- Type of security rule (version 6.1 of PanOS and above).
default: "universal"
description:
description:
- Description for the security rule.
default: "None"
tag_name:
description:
- Administrative tags that can be added to the rule. Note, tags must be already defined.
default: "None"
source_zone:
description:
- List of source zones.
default: "any"
destination_zone:
description:
- List of destination zones.
default: "any"
source_ip:
description:
- List of source addresses.
default: "any"
source_user:
description:
- Use users to enforce policy for individual users or a group of users.
default: "any"
hip_profiles:
description: >
- If you are using GlobalProtect with host information profile (HIP) enabled, you can also base the policy
        on information collected by GlobalProtect. For example, the user access level can be determined from the
        HIP, which notifies the firewall about the user's local configuration.
default: "any"
destination_ip:
description:
- List of destination addresses.
default: "any"
application:
description:
- List of applications.
default: "any"
service:
description:
- List of services.
default: "application-default"
log_start:
description:
- Whether to log at session start.
default: false
log_end:
description:
- Whether to log at session end.
default: true
action:
description:
      - Action to apply once a rule matches.
default: "allow"
group_profile:
description: >
- Security profile group that is already defined in the system. This property supersedes antivirus,
vulnerability, spyware, url_filtering, file_blocking, data_filtering, and wildfire_analysis properties.
default: None
antivirus:
description:
- Name of the already defined antivirus profile.
default: None
vulnerability:
description:
- Name of the already defined vulnerability profile.
default: None
spyware:
description:
- Name of the already defined spyware profile.
default: None
url_filtering:
description:
- Name of the already defined url_filtering profile.
default: None
file_blocking:
description:
- Name of the already defined file_blocking profile.
default: None
data_filtering:
description:
- Name of the already defined data_filtering profile.
default: None
wildfire_analysis:
description:
- Name of the already defined wildfire_analysis profile.
default: None
devicegroup:
description: >
      - Device groups are used for the Panorama interaction with Firewall(s). The group must exist on Panorama.
        If the device group is not defined, we assume that we are contacting a firewall.
default: None
commit:
description:
- Commit configuration if changed.
default: true
'''
EXAMPLES = '''
- name: add an SSH inbound rule to devicegroup
panos_security_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
operation: 'add'
rule_name: 'SSH permit'
description: 'SSH rule test'
tag_name: ['ProjectX']
source_zone: ['public']
destination_zone: ['private']
    source_ip: ['any']
    source_user: ['any']
    destination_ip: ['1.1.1.1']
category: ['any']
application: ['ssh']
service: ['application-default']
hip_profiles: ['any']
action: 'allow'
devicegroup: 'Cloud Edge'
- name: add a rule to allow HTTP multimedia only from CDNs
panos_security_rule:
ip_address: '10.5.172.91'
username: 'admin'
password: 'paloalto'
operation: 'add'
rule_name: 'HTTP Multimedia'
description: 'Allow HTTP multimedia only to host at 1.1.1.1'
source_zone: ['public']
destination_zone: ['private']
    source_ip: ['any']
    source_user: ['any']
    destination_ip: ['1.1.1.1']
category: ['content-delivery-networks']
application: ['http-video', 'http-audio']
service: ['service-http', 'service-https']
hip_profiles: ['any']
action: 'allow'
- name: add a more complex rule that uses security profiles
panos_security_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
operation: 'add'
rule_name: 'Allow HTTP w profile'
log_start: false
log_end: true
action: 'allow'
antivirus: 'default'
vulnerability: 'default'
spyware: 'default'
url_filtering: 'default'
wildfire_analysis: 'default'
- name: delete a devicegroup security rule
panos_security_rule:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
operation: 'delete'
rule_name: 'Allow telnet'
devicegroup: 'DC Firewalls'
- name: find a specific security rule
panos_security_rule:
ip_address: '{{ ip_address }}'
password: '{{ password }}'
operation: 'find'
rule_name: 'Allow RDP to DCs'
register: result
- debug: msg='{{result.stdout_lines}}'
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
try:
import pan.xapi
from pan.xapi import PanXapiError
import pandevice
from pandevice import base
from pandevice import firewall
from pandevice import panorama
from pandevice import objects
from pandevice import policies
import xmltodict
import json
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_devicegroup(device, devicegroup):
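    # Return the pandevice DeviceGroup matching 'devicegroup', or False if absent.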
dg_list = device.refresh_devices()
for group in dg_list:
if isinstance(group, pandevice.panorama.DeviceGroup):
if group.name == devicegroup:
return group
return False
def get_rulebase(device, devicegroup):
# Build the rulebase
if isinstance(device, pandevice.firewall.Firewall):
rulebase = pandevice.policies.Rulebase()
device.add(rulebase)
elif isinstance(device, pandevice.panorama.Panorama):
dg = panorama.DeviceGroup(devicegroup)
device.add(dg)
rulebase = policies.PreRulebase()
dg.add(rulebase)
else:
return False
policies.SecurityRule.refreshall(rulebase)
return rulebase
def find_rule(rulebase, rule_name):
# Search for the rule name
rule = rulebase.find(rule_name)
if rule:
return rule
else:
return False
def rule_is_match(propose_rule, current_rule):
match_check = ['name', 'description', 'group_profile', 'antivirus', 'vulnerability',
'spyware', 'url_filtering', 'file_blocking', 'data_filtering',
'wildfire_analysis', 'type', 'action', 'tag', 'log_start', 'log_end']
list_check = ['tozone', 'fromzone', 'source', 'source_user', 'destination', 'category',
'application', 'service', 'hip_profiles']
for check in match_check:
propose_check = getattr(propose_rule, check, None)
current_check = getattr(current_rule, check, None)
if propose_check != current_check:
return False
for check in list_check:
propose_check = getattr(propose_rule, check, [])
current_check = getattr(current_rule, check, [])
if set(propose_check) != set(current_check):
return False
return True
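# Usage sketch (illustrative, mirroring the flow in main() below; names are
# examples only):
#
#   proposed = create_security_rule(rule_name='demo', ...)  # defined next
#   existing = find_rule(rulebase, 'demo')
#   if existing and rule_is_match(proposed, existing):
#       pass  # rule already in the desired state; nothing to push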
def create_security_rule(**kwargs):
security_rule = policies.SecurityRule(
name=kwargs['rule_name'],
description=kwargs['description'],
fromzone=kwargs['source_zone'],
source=kwargs['source_ip'],
source_user=kwargs['source_user'],
hip_profiles=kwargs['hip_profiles'],
tozone=kwargs['destination_zone'],
destination=kwargs['destination_ip'],
application=kwargs['application'],
service=kwargs['service'],
category=kwargs['category'],
log_start=kwargs['log_start'],
log_end=kwargs['log_end'],
action=kwargs['action'],
type=kwargs['rule_type']
)
if 'tag_name' in kwargs:
security_rule.tag = kwargs['tag_name']
# profile settings
if 'group_profile' in kwargs:
security_rule.group = kwargs['group_profile']
else:
if 'antivirus' in kwargs:
security_rule.virus = kwargs['antivirus']
if 'vulnerability' in kwargs:
security_rule.vulnerability = kwargs['vulnerability']
if 'spyware' in kwargs:
security_rule.spyware = kwargs['spyware']
if 'url_filtering' in kwargs:
security_rule.url_filtering = kwargs['url_filtering']
if 'file_blocking' in kwargs:
security_rule.file_blocking = kwargs['file_blocking']
if 'data_filtering' in kwargs:
security_rule.data_filtering = kwargs['data_filtering']
if 'wildfire_analysis' in kwargs:
security_rule.wildfire_analysis = kwargs['wildfire_analysis']
return security_rule
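# Illustrative sketch (mirrors main() below; values are examples only): the
# object built here stays detached until added to a rulebase and pushed via
# add_rule():
#
#   rule = create_security_rule(
#       rule_name='allow-ssh', description='', source_zone=['any'],
#       destination_zone=['any'], source_ip=['any'], source_user=['any'],
#       destination_ip=['any'], category=['any'], application=['ssh'],
#       service=['application-default'], hip_profiles=['any'],
#       log_start=False, log_end=True, action='allow', rule_type='universal')
#   add_rule(rulebase, rule)  # rulebase.add(rule); rule.create()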
def add_rule(rulebase, sec_rule):
if rulebase:
rulebase.add(sec_rule)
sec_rule.create()
return True
else:
return False
def update_rule(rulebase, sec_rule):
    if rulebase:
        rulebase.add(sec_rule)
        sec_rule.apply()
        return True
    else:
        return False
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(no_log=True),
username=dict(default='admin'),
api_key=dict(no_log=True),
operation=dict(default='add', choices=['add', 'update', 'delete', 'find']),
rule_name=dict(required=True),
description=dict(default=''),
tag_name=dict(type='list'),
destination_zone=dict(type='list', default=['any']),
source_zone=dict(type='list', default=['any']),
source_ip=dict(type='list', default=["any"]),
source_user=dict(type='list', default=['any']),
destination_ip=dict(type='list', default=["any"]),
category=dict(type='list', default=['any']),
application=dict(type='list', default=['any']),
service=dict(type='list', default=['application-default']),
hip_profiles=dict(type='list', default=['any']),
group_profile=dict(),
antivirus=dict(),
vulnerability=dict(),
spyware=dict(),
url_filtering=dict(),
file_blocking=dict(),
data_filtering=dict(),
wildfire_analysis=dict(),
log_start=dict(type='bool', default=False),
log_end=dict(type='bool', default=True),
rule_type=dict(default='universal'),
action=dict(default='allow'),
devicegroup=dict(),
commit=dict(type='bool', default=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
required_one_of=[['api_key', 'password']])
if not HAS_LIB:
module.fail_json(msg='Missing required libraries.')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
api_key = module.params['api_key']
operation = module.params['operation']
rule_name = module.params['rule_name']
description = module.params['description']
tag_name = module.params['tag_name']
source_zone = module.params['source_zone']
source_ip = module.params['source_ip']
source_user = module.params['source_user']
hip_profiles = module.params['hip_profiles']
destination_zone = module.params['destination_zone']
destination_ip = module.params['destination_ip']
application = module.params['application']
service = module.params['service']
category = module.params['category']
log_start = module.params['log_start']
log_end = module.params['log_end']
action = module.params['action']
group_profile = module.params['group_profile']
antivirus = module.params['antivirus']
vulnerability = module.params['vulnerability']
spyware = module.params['spyware']
url_filtering = module.params['url_filtering']
file_blocking = module.params['file_blocking']
data_filtering = module.params['data_filtering']
wildfire_analysis = module.params['wildfire_analysis']
rule_type = module.params['rule_type']
devicegroup = module.params['devicegroup']
commit = module.params['commit']
# Create the device with the appropriate pandevice type
device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
# If Panorama, validate the devicegroup
dev_group = None
if devicegroup and isinstance(device, panorama.Panorama):
dev_group = get_devicegroup(device, devicegroup)
if dev_group:
device.add(dev_group)
else:
module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup)
# Get the rulebase
rulebase = get_rulebase(device, dev_group)
# Which action shall we take on the object?
if operation == "find":
# Search for the object
match = find_rule(rulebase, rule_name)
# If found, format and return the result
if match:
match_dict = xmltodict.parse(match.element_str())
module.exit_json(
stdout_lines=json.dumps(match_dict, indent=2),
msg='Rule matched'
)
else:
module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' % rule_name)
elif operation == "delete":
# Search for the object
match = find_rule(rulebase, rule_name)
# If found, delete it
if match:
            try:
                match.delete()
                if commit:
                    device.commit(sync=True)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
module.exit_json(changed=True, msg='Rule \'%s\' successfully deleted' % rule_name)
else:
module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' % rule_name)
elif operation == "add":
new_rule = create_security_rule(
rule_name=rule_name,
description=description,
tag_name=tag_name,
source_zone=source_zone,
destination_zone=destination_zone,
source_ip=source_ip,
source_user=source_user,
destination_ip=destination_ip,
category=category,
application=application,
service=service,
hip_profiles=hip_profiles,
group_profile=group_profile,
antivirus=antivirus,
vulnerability=vulnerability,
spyware=spyware,
url_filtering=url_filtering,
file_blocking=file_blocking,
data_filtering=data_filtering,
wildfire_analysis=wildfire_analysis,
log_start=log_start,
log_end=log_end,
rule_type=rule_type,
action=action
)
# Search for the rule. Fail if found.
match = find_rule(rulebase, rule_name)
if match:
if rule_is_match(match, new_rule):
module.exit_json(changed=False, msg='Rule \'%s\' is already in place' % rule_name)
else:
module.fail_json(msg='Rule \'%s\' already exists. Use operation: \'update\' to change it.' % rule_name)
else:
try:
changed = add_rule(rulebase, new_rule)
if changed and commit:
device.commit(sync=True)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
module.exit_json(changed=changed, msg='Rule \'%s\' successfully added' % rule_name)
elif operation == 'update':
# Search for the rule. Update if found.
match = find_rule(rulebase, rule_name)
if match:
try:
new_rule = create_security_rule(
rule_name=rule_name,
description=description,
tag_name=tag_name,
source_zone=source_zone,
destination_zone=destination_zone,
source_ip=source_ip,
source_user=source_user,
destination_ip=destination_ip,
category=category,
application=application,
service=service,
hip_profiles=hip_profiles,
group_profile=group_profile,
antivirus=antivirus,
vulnerability=vulnerability,
spyware=spyware,
url_filtering=url_filtering,
file_blocking=file_blocking,
data_filtering=data_filtering,
wildfire_analysis=wildfire_analysis,
log_start=log_start,
log_end=log_end,
rule_type=rule_type,
action=action
)
changed = update_rule(rulebase, new_rule)
if changed and commit:
device.commit(sync=True)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
module.exit_json(changed=changed, msg='Rule \'%s\' successfully updated' % rule_name)
else:
module.fail_json(msg='Rule \'%s\' does not exist. Use operation: \'add\' to add it.' % rule_name)
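# Hedged usage sketch (not part of the module): the 'find' branch in main()
# parses a matched rule's XML with xmltodict and renders it via json.dumps.
# The XML string below is a hypothetical stand-in for match.element_str().
def _demo_format_match():
    sample_xml = '<entry name="rule-1"><action>allow</action></entry>'
    return json.dumps(xmltodict.parse(sample_xml), indent=2)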
if __name__ == '__main__':
main()
|
ArcherSys/ArcherSys
|
refs/heads/master
|
Lib/site-packages/construct/protocols/layer4/__init__.py
|
15
|
"""
layer 4 (transportation) protocols
"""
|
neal-rogers/is210-week-05-warmup
|
refs/heads/master
|
tests/test_smoke.py
|
245
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Smoke test for test suite."""
# Import Python libs
import unittest
class SmokeTestCase(unittest.TestCase):
"""Test cases to ensure that the test suite is operational."""
def test_true(self):
"""Tests that True is True."""
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
|
google/ci_edit
|
refs/heads/master
|
app/unit_test_performance.py
|
1
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from timeit import timeit
import unittest
import app.parser
class PerformanceTestCases(unittest.TestCase):
def test_array_vs_getter(self):
setup = """data = ['a'] * 100\n"""
setup += """def get(n):\n"""
setup += """ return data[n]\n"""
setup += """class B:\n"""
setup += """ def get_via_member(self, n):\n"""
setup += """ return data[n]\n"""
setup += """ def __getitem__(self, n):\n"""
setup += """ return data[n]\n"""
setup += """b = B()\n"""
a = timeit("""x = data[5]\n""", setup=setup, number=10000)
b = timeit("""x = get(5)\n""", setup=setup, number=10000)
c = timeit("""x = b.get_via_member(5)\n""", setup=setup, number=10000)
d = timeit("""x = b[5]\n""", setup=setup, number=10000)
# print("\n%s | %s %s | %s %s | %s %s" % (a, b, a/b, c, a/c, d, a/d))
# Calling a function or member is significantly slower than direct
# access.
self.assertGreater(b, a * 1.5)
self.assertGreater(c, a * 2)
self.assertGreater(d, a * 2)
def test_slice_vs_startswith(self):
if 0:
setup = """x = 'a' * 100\n"""
a = timeit("""x[:2] == " "\n""", setup=setup, number=100000)
b = timeit("""x.startswith(" ")\n""", setup=setup, number=100000)
c = timeit("""x[0] == " " and x[1] == " "\n""", setup=setup, number=100000)
# print("\na %s, b %s, c %s | %s %s" % (a, b, c, c, a/c))
# Calling a function or member is significantly slower than direct
# access.
# This check is not performing the same in Python3.
self.assertGreater(b, a * 1.7) # b is much slower.
self.assertGreater(b, c * 1.9) # b is much slower.
self.assertGreater(a, c * 0.6) # a and c are similar.
self.assertGreater(c, a * 0.4) # a and c are similar.
def test_default_parameter(self):
setup = """def with_default(a, b=None):\n"""
setup += """ if b is not None: return b\n"""
setup += """ return a*a\n"""
setup += """def without_default(a, b):\n"""
setup += """ if b is -1: return b\n"""
setup += """ return a*b\n"""
a = timeit("""with_default(5);""" * 100, setup=setup, number=10000)
b = timeit("""without_default(5, 0);""" * 100, setup=setup, number=10000)
# Assert that neither too much faster than the other
# Note: empirically, this is affected (on a MacBook Air) by whether the
# machine is running from battery or plugged in.
# It also appears to vary between Python 2 and Python 3.
self.assertGreater(a, b * 0.6)
self.assertGreater(b, a * 0.6)
def test_char_vs_ord(self):
setup = """a="apple"\n"""
a = timeit("""a[0] > "z";""" * 100, setup=setup, number=10000)
b = timeit("""ord(a[0]) > 100;""" * 100, setup=setup, number=10000)
self.assertGreater(b, a)
def test_insert1(self):
# Disabled due to running time.
if 0:
# This tests a performance assumption. If this test fails, the
# program should still work fine, but it may not run as fast as it
# could by using different assumptions.
#
# Insert into an array of strings is expected to be faster than
# insert into a contiguous buffer of similar size.
#
# This is why ci_edit uses both a self.data buffer and a
# self.lines[] array. Though splitting the data into lines is also
# expensive, see tests below.
#
# At 1,000 bytes the performance is similar.
a = timeit(
'data1 = data1[:500] + "x" + data1[500:]',
setup='data1 = "a" * 1000',
number=10000,
)
b = timeit(
'data2[5] = data2[5][:50] + "x" + data2[5][50:]',
setup='data2 = ["a" * 100] * 10',
number=10000,
)
self.assertGreater(a, b * 0.8)
self.assertLess(a, b * 4)
# At 10,000 bytes the array of strings is 1.4 to 3 times faster.
a = timeit(
'data1 = data1[:5000] + "x" + data1[5000:]',
setup='data1 = "a" * 10000',
number=10000,
)
b = timeit(
'data2[50] = data2[50][:50] + "x" + data2[50][50:]',
setup='data2 = ["a" * 100] * 100',
number=10000,
)
self.assertGreater(a, b * 1.4)
self.assertLess(a, b * 4)
# At 100,000 bytes the array of strings is 12 to 24 times faster.
a = timeit(
'data1 = data1[:50000] + "x" + data1[50000:]',
setup='data1 = "a" * 100000',
number=10000,
)
b = timeit(
'data2[500] = data2[500][:50] + "x" + data2[500][50:]',
setup='data2 = ["a" * 100] * 1000',
number=10000,
)
self.assertGreater(a, b * 12)
self.assertLess(a, b * 24)
def test_split_insert(self):
# Disabled due to running time.
if 0:
# This tests a performance assumption. If this test fails, the
# program should still work fine, but it may not run as fast as it
# could by using different assumptions.
#
# With frequent splitting the performance reverses.
for lineCount in (100, 1000, 5000):
half = lineCount // 2
a = timeit(
r"""data2 = data1.split('\n'); \
data2[%s] = data2[%s][:50] + "x" + data2[%s][50:]; \
"""
% (half, half, half),
setup=r"""data1 = ("a" * 100 + '\n') * %s""" % (lineCount,),
number=10000,
)
b = timeit(
'data1 = data1[:%s] + "x" + data1[%s:]' % (half, half),
setup=r"""data1 = ("a" * 100 + '\n') * %s""" % (lineCount,),
number=10000,
)
print("\n%9s: %s %s" % (lineCount, a, b))
self.assertGreater(a, b)
def test_split_insert_balance(self):
# Disabled due to running time.
if 0:
# This tests a performance assumption. If this test fails, the
# program should still work fine, but it may not run as fast as it
# could by using different assumptions.
#
# With 5 inserts between splits, the performance is nearly the same.
for lineCount in (100, 1000, 5000):
half = lineCount // 2
a = timeit(
r"""data2 = data1.split('\n');"""
+ (
r"""data2[%s] = data2[%s][:50] + "x" + data2[%s][50:]; \
"""
% (half, half, half)
)
* 5,
setup=r"""data1 = ("a" * 100 + '\n') * %s""" % (lineCount,),
number=10000,
)
b = timeit(
('data1 = data1[:%s] + "x" + data1[%s:]; ' % (half, half)) * 5,
setup=r"""data1 = ("a" * 100 + '\n') * %s""" % (lineCount,),
number=10000,
)
print("\n%9s: %s %s" % (lineCount, a, b))
def test_instance_vs_tuple(self):
# Disabled due to running time.
if 0:
# This tests a performance assumption. If this test fails, the
# program should still work fine, but it may not run as fast as it
# could by using different assumptions.
for lineCount in (100, 1000, 5000):
a = timeit(
r"""
a = Node()
a.foo = 5
a.bar = 'hi'
a.blah = 7
foo.append(a)
""",
setup=r"""
foo = []
class Node:
def __init__(self):
self.foo = None
self.bar = None
self.blah = None
""",
number=10000,
)
b = timeit(
r"""
a = (5, 'hi', 7)
foo.append(a)
""",
setup=r"""
foo = []
""",
number=10000,
)
print("\n%9s: %s %s" % (lineCount, a, b))
|
arenadata/ambari
|
refs/heads/branch-adh-1.6
|
ambari-server/src/main/resources/stacks/ADH/1.4/services/OOZIE/package/scripts/check_oozie_server_status.py
|
8
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons import OSConst
@OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY)
def check_oozie_server_status():
import status_params
from resource_management.libraries.functions.windows_service_utils import check_windows_service_status
check_windows_service_status(status_params.oozie_server_win_service_name)
@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT)
def check_oozie_server_status():
import status_params
from resource_management.libraries.functions.check_process_status import check_process_status
check_process_status(status_params.pid_file)
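# Hedged analogue (not Ambari code) of the @OsFamilyFuncImpl dispatch above:
# one public name, one registered implementation per OS family. Every name in
# this sketch is illustrative and independent of resource_management.
import platform

_DEMO_IMPLS = {}

def _demo_os_impl(family):
    def register(fn):
        _DEMO_IMPLS[family] = fn
        return fn
    return register

@_demo_os_impl('winsrv')
def _demo_check_status():
    return 'check the Windows service'

@_demo_os_impl('default')
def _demo_check_status():  # intentional redefinition: the registry keeps both
    return 'check the pid file'

def _demo_dispatch():
    family = 'winsrv' if platform.system() == 'Windows' else 'default'
    return _DEMO_IMPLS[family]()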
|
saurabh6790/omn-lib
|
refs/heads/master
|
webnotes/tests/test_fmt_money.py
|
34
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
import webnotes
from webnotes import _
from webnotes.utils import flt, cstr
def fmt_money(amount, precision=None):
"""
Convert to string with commas for thousands, millions etc
"""
number_format = webnotes.conn.get_default("number_format") or "#,###.##"
decimal_str, comma_str, precision = get_number_format_info(number_format)
amount = '%.*f' % (precision, flt(amount))
if amount.find('.') == -1:
decimals = ''
else:
decimals = amount.split('.')[1]
parts = []
minus = ''
if flt(amount) < 0:
minus = '-'
amount = cstr(abs(flt(amount))).split('.')[0]
if len(amount) > 3:
parts.append(amount[-3:])
amount = amount[:-3]
val = number_format=="#,##,###.##" and 2 or 3
while len(amount) > val:
parts.append(amount[-val:])
amount = amount[:-val]
parts.append(amount)
parts.reverse()
amount = comma_str.join(parts) + (precision and (decimal_str + decimals) or "")
amount = minus + amount
return amount
def get_number_format_info(format):
if format=="#.###":
return "", ".", 0
elif format=="#,###":
return "", ",", 0
elif format=="#,###.##" or format=="#,##,###.##":
return ".", ",", 2
elif format=="#.###,##":
return ",", ".", 2
elif format=="# ###.##":
return ".", " ", 2
else:
return ".", ",", 2
import unittest
class TestFmtMoney(unittest.TestCase):
def test_standard(self):
webnotes.conn.set_default("number_format", "#,###.##")
self.assertEquals(fmt_money(100), "100.00")
self.assertEquals(fmt_money(1000), "1,000.00")
self.assertEquals(fmt_money(10000), "10,000.00")
self.assertEquals(fmt_money(100000), "100,000.00")
self.assertEquals(fmt_money(1000000), "1,000,000.00")
self.assertEquals(fmt_money(10000000), "10,000,000.00")
self.assertEquals(fmt_money(100000000), "100,000,000.00")
self.assertEquals(fmt_money(1000000000), "1,000,000,000.00")
def test_negative(self):
webnotes.conn.set_default("number_format", "#,###.##")
self.assertEquals(fmt_money(-100), "-100.00")
self.assertEquals(fmt_money(-1000), "-1,000.00")
self.assertEquals(fmt_money(-10000), "-10,000.00")
self.assertEquals(fmt_money(-100000), "-100,000.00")
self.assertEquals(fmt_money(-1000000), "-1,000,000.00")
self.assertEquals(fmt_money(-10000000), "-10,000,000.00")
self.assertEquals(fmt_money(-100000000), "-100,000,000.00")
self.assertEquals(fmt_money(-1000000000), "-1,000,000,000.00")
def test_decimal(self):
webnotes.conn.set_default("number_format", "#.###,##")
self.assertEquals(fmt_money(-100), "-100,00")
self.assertEquals(fmt_money(-1000), "-1.000,00")
self.assertEquals(fmt_money(-10000), "-10.000,00")
self.assertEquals(fmt_money(-100000), "-100.000,00")
self.assertEquals(fmt_money(-1000000), "-1.000.000,00")
self.assertEquals(fmt_money(-10000000), "-10.000.000,00")
self.assertEquals(fmt_money(-100000000), "-100.000.000,00")
self.assertEquals(fmt_money(-1000000000), "-1.000.000.000,00")
def test_lacs(self):
webnotes.conn.set_default("number_format", "#,##,###.##")
self.assertEquals(fmt_money(100), "100.00")
self.assertEquals(fmt_money(1000), "1,000.00")
self.assertEquals(fmt_money(10000), "10,000.00")
self.assertEquals(fmt_money(100000), "1,00,000.00")
self.assertEquals(fmt_money(1000000), "10,00,000.00")
self.assertEquals(fmt_money(10000000), "1,00,00,000.00")
self.assertEquals(fmt_money(100000000), "10,00,00,000.00")
self.assertEquals(fmt_money(1000000000), "1,00,00,00,000.00")
def test_no_precision(self):
webnotes.conn.set_default("number_format", "#,###")
self.assertEquals(fmt_money(0.3), "0")
self.assertEquals(fmt_money(100.3), "100")
self.assertEquals(fmt_money(1000.3), "1,000")
self.assertEquals(fmt_money(10000.3), "10,000")
self.assertEquals(fmt_money(-0.3), "0")
self.assertEquals(fmt_money(-100.3), "-100")
self.assertEquals(fmt_money(-1000.3), "-1,000")
if __name__=="__main__":
webnotes.connect()
unittest.main()
|
FreescaleSemiconductor/quantum
|
refs/heads/stable/folsom
|
quantum/tests/unit/test_db_rpc_base.py
|
3
|
# Copyright (c) 2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from quantum.db import dhcp_rpc_base
class TestDhcpAugmentContext(unittest.TestCase):
def test_augment_context(self):
context = mock.Mock()
context.user = 'quantum'
context.tenant = None
context.is_admin = True
new_context = dhcp_rpc_base.augment_context(context)
self.assertEqual(new_context.user_id, context.user)
self.assertEqual(new_context.roles, ['admin'])
class TestDhcpRpcCallackMixin(unittest.TestCase):
def setUp(self):
self.context_p = mock.patch('quantum.db.dhcp_rpc_base.augment_context')
self.context_p.start()
self.plugin_p = mock.patch('quantum.manager.QuantumManager.get_plugin')
get_plugin = self.plugin_p.start()
self.plugin = mock.Mock()
get_plugin.return_value = self.plugin
self.callbacks = dhcp_rpc_base.DhcpRpcCallbackMixin()
self.log_p = mock.patch('quantum.db.dhcp_rpc_base.LOG')
self.log = self.log_p.start()
def tearDown(self):
self.log_p.stop()
self.plugin_p.stop()
self.context_p.stop()
def test_get_active_networks(self):
plugin_retval = [dict(id='a'), dict(id='b')]
self.plugin.get_networks.return_value = plugin_retval
networks = self.callbacks.get_active_networks(mock.Mock(), host='host')
self.assertEqual(networks, ['a', 'b'])
self.plugin.assert_has_calls(
[mock.call.get_networks(mock.ANY,
filters=dict(admin_state_up=[True]))])
self.assertEqual(len(self.log.mock_calls), 1)
def test_get_network_info(self):
network_retval = dict(id='a')
subnet_retval = mock.Mock()
port_retval = mock.Mock()
self.plugin.get_network.return_value = network_retval
self.plugin.get_subnets.return_value = subnet_retval
self.plugin.get_ports.return_value = port_retval
retval = self.callbacks.get_network_info(mock.Mock(), network_id='a')
        self.assertEqual(retval, network_retval)
self.assertEqual(retval['subnets'], subnet_retval)
self.assertEqual(retval['ports'], port_retval)
def _test_get_dhcp_port_helper(self, port_retval, other_expectations=[],
update_port=None, create_port=None):
subnets_retval = [dict(id='a', enable_dhcp=True),
dict(id='b', enable_dhcp=False)]
self.plugin.get_subnets.return_value = subnets_retval
if port_retval:
self.plugin.get_ports.return_value = [port_retval]
else:
self.plugin.get_ports.return_value = []
self.plugin.update_port.return_value = update_port
self.plugin.create_port.return_value = create_port
retval = self.callbacks.get_dhcp_port(mock.Mock(),
network_id='netid',
device_id='devid',
host='host')
expected = [mock.call.get_subnets(mock.ANY,
filters=dict(network_id=['netid'])),
mock.call.get_ports(mock.ANY,
filters=dict(network_id=['netid'],
device_id=['devid']))]
expected.extend(other_expectations)
self.plugin.assert_has_calls(expected)
return retval
def test_get_dhcp_port_existing(self):
port_retval = dict(id='port_id', fixed_ips=[dict(subnet_id='a')])
expectations = [
mock.call.update_port(mock.ANY, 'port_id', dict(port=port_retval))]
retval = self._test_get_dhcp_port_helper(port_retval, expectations,
update_port=port_retval)
self.assertEqual(len(self.log.mock_calls), 1)
def test_get_dhcp_port_create_new(self):
self.plugin.get_network.return_value = dict(tenant_id='tenantid')
create_spec = dict(tenant_id='tenantid', device_id='devid',
network_id='netid', name='',
admin_state_up=True,
device_owner='network:dhcp',
mac_address=mock.ANY)
create_retval = create_spec.copy()
create_retval['id'] = 'port_id'
create_retval['fixed_ips'] = [dict(subnet_id='a', enable_dhcp=True)]
create_spec['fixed_ips'] = [dict(subnet_id='a')]
expectations = [
mock.call.get_network(mock.ANY, 'netid'),
mock.call.create_port(mock.ANY, dict(port=create_spec))]
retval = self._test_get_dhcp_port_helper(None, expectations,
create_port=create_retval)
self.assertEqual(create_retval, retval)
self.assertEqual(len(self.log.mock_calls), 2)
def test_release_dhcp_port(self):
port_retval = dict(id='port_id', fixed_ips=[dict(subnet_id='a')])
self.plugin.get_ports.return_value = [port_retval]
self.callbacks.release_dhcp_port(mock.ANY, network_id='netid',
device_id='devid')
self.plugin.assert_has_calls([
mock.call.get_ports(mock.ANY, filters=dict(network_id=['netid'],
device_id=['devid'])),
mock.call.delete_port(mock.ANY, 'port_id')])
def test_release_port_fixed_ip(self):
port_retval = dict(id='port_id', fixed_ips=[dict(subnet_id='a')])
port_update = dict(id='port_id', fixed_ips=[])
self.plugin.get_ports.return_value = [port_retval]
self.callbacks.release_port_fixed_ip(mock.ANY, network_id='netid',
device_id='devid', subnet_id='a')
self.plugin.assert_has_calls([
mock.call.get_ports(mock.ANY, filters=dict(network_id=['netid'],
device_id=['devid'])),
mock.call.update_port(mock.ANY, 'port_id',
dict(port=port_update))])
|
illume/numpy3k
|
refs/heads/master
|
numpy/numarray/functions.py
|
5
|
# missing Numarray defined names (in from numarray import *)
##__all__ = ['ClassicUnpickler', 'Complex32_fromtype',
## 'Complex64_fromtype', 'ComplexArray', 'Error',
## 'MAX_ALIGN', 'MAX_INT_SIZE', 'MAX_LINE_WIDTH',
## 'NDArray', 'NewArray', 'NumArray',
## 'NumError', 'PRECISION', 'Py2NumType',
## 'PyINT_TYPES', 'PyLevel2Type', 'PyNUMERIC_TYPES', 'PyREAL_TYPES',
## 'SUPPRESS_SMALL',
## 'SuitableBuffer', 'USING_BLAS',
## 'UsesOpPriority',
## 'codegenerator', 'generic', 'libnumarray', 'libnumeric',
## 'make_ufuncs', 'memory',
## 'numarrayall', 'numarraycore', 'numinclude', 'safethread',
## 'typecode', 'typecodes', 'typeconv', 'ufunc', 'ufuncFactory',
## 'ieeemask']
__all__ = ['asarray', 'ones', 'zeros', 'array', 'where']
__all__ += ['vdot', 'dot', 'matrixmultiply', 'ravel', 'indices',
'arange', 'concatenate', 'all', 'allclose', 'alltrue', 'and_',
'any', 'argmax', 'argmin', 'argsort', 'around', 'array_equal',
'array_equiv', 'arrayrange', 'array_str', 'array_repr',
'array2list', 'average', 'choose', 'CLIP', 'RAISE', 'WRAP',
'clip', 'compress', 'copy', 'copy_reg',
'diagonal', 'divide_remainder', 'e', 'explicit_type', 'pi',
'flush_caches', 'fromfile', 'os', 'sys', 'STRICT',
'SLOPPY', 'WARN', 'EarlyEOFError', 'SizeMismatchError',
'SizeMismatchWarning', 'FileSeekWarning', 'fromstring',
'fromfunction', 'fromlist', 'getShape', 'getTypeObject',
'identity', 'info', 'innerproduct', 'inputarray',
'isBigEndian', 'kroneckerproduct', 'lexsort', 'math',
'operator', 'outerproduct', 'put', 'putmask', 'rank',
'repeat', 'reshape', 'resize', 'round', 'searchsorted',
'shape', 'size', 'sometrue', 'sort', 'swapaxes', 'take',
'tcode', 'tname', 'tensormultiply', 'trace', 'transpose',
'types', 'value', 'cumsum', 'cumproduct', 'nonzero', 'newobj',
'togglebyteorder'
]
import copy
import copy_reg
import types
import os
import sys
import math
import operator
from numpy import dot as matrixmultiply, dot, vdot, ravel, concatenate, all,\
allclose, any, argsort, array_equal, array_equiv,\
array_str, array_repr, CLIP, RAISE, WRAP, clip, concatenate, \
diagonal, e, pi, inner as innerproduct, nonzero, \
outer as outerproduct, kron as kroneckerproduct, lexsort, putmask, rank, \
resize, searchsorted, shape, size, sort, swapaxes, trace, transpose
import numpy as np
from numerictypes import typefrom
isBigEndian = sys.byteorder != 'little'
value = tcode = 'f'
tname = 'Float32'
# If dtype is not None, then it is used
# If type is not None, then it is used
# If typecode is not None then it is used
# If use_default is True, then the default
# data-type is returned if all are None
def type2dtype(typecode, type, dtype, use_default=True):
if dtype is None:
if type is None:
if use_default or typecode is not None:
dtype = np.dtype(typecode)
else:
dtype = np.dtype(type)
if use_default and dtype is None:
dtype = np.dtype('int')
return dtype
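# Illustrative check of the precedence implemented above: an explicit dtype
# beats type, which beats typecode; with everything unset, np.dtype(None)
# resolves to float64.
assert type2dtype('f', None, np.dtype('i4')) == np.dtype('int32')
assert type2dtype('f', None, None) == np.dtype('float32')
assert type2dtype(None, None, None) == np.dtype('float64')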
def fromfunction(shape, dimensions, type=None, typecode=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, 1)
return np.fromfunction(shape, dimensions, dtype=dtype)
def ones(shape, type=None, typecode=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, 1)
return np.ones(shape, dtype)
def zeros(shape, type=None, typecode=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, 1)
return np.zeros(shape, dtype)
def where(condition, x=None, y=None, out=None):
if x is None and y is None:
arr = np.where(condition)
else:
arr = np.where(condition, x, y)
if out is not None:
out[...] = arr
return out
return arr
def indices(shape, type=None):
return np.indices(shape, type)
def arange(a1, a2=None, stride=1, type=None, shape=None,
typecode=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, 0)
return np.arange(a1, a2, stride, dtype)
arrayrange = arange
def alltrue(x, axis=0):
return np.alltrue(x, axis)
def and_(a, b):
"""Same as a & b
"""
return a & b
def divide_remainder(a, b):
a, b = asarray(a), asarray(b)
return (a/b,a%b)
def around(array, digits=0, output=None):
ret = np.around(array, digits, output)
if output is None:
return ret
return
def array2list(arr):
return arr.tolist()
def choose(selector, population, outarr=None, clipmode=RAISE):
a = np.asarray(selector)
ret = a.choose(population, out=outarr, mode=clipmode)
if outarr is None:
return ret
return
def compress(condition, a, axis=0):
return np.compress(condition, a, axis)
# only returns a view
def explicit_type(a):
x = a.view()
return x
# stub
def flush_caches():
pass
class EarlyEOFError(Exception):
"Raised in fromfile() if EOF unexpectedly occurs."
pass
class SizeMismatchError(Exception):
"Raised in fromfile() if file size does not match shape."
pass
class SizeMismatchWarning(Warning):
"Issued in fromfile() if file size does not match shape."
pass
class FileSeekWarning(Warning):
"Issued in fromfile() if there is unused data and seek() fails"
pass
STRICT, SLOPPY, WARN = range(3)
_BLOCKSIZE=1024
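# The incomplete-shape read loop in fromfile() below references a _resizebuf
# helper that is missing from this fragment; a minimal sketch, adapted from
# numarray's growable-buffer idiom, is supplied here:
def _resizebuf(buf, newsize):
    "Return a copy of buf grown or truncated to newsize bytes."
    newbuf = np.newbuffer(newsize)
    if newsize > len(buf):
        newbuf[:len(buf)] = buf
    else:
        newbuf[:] = buf[:len(newbuf)]
    return newbuf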
# taken and adapted directly from numarray
def fromfile(infile, type=None, shape=None, sizing=STRICT,
typecode=None, dtype=None):
if isinstance(infile, (str, unicode)):
infile = open(infile, 'rb')
dtype = type2dtype(typecode, type, dtype, True)
if shape is None:
shape = (-1,)
if not isinstance(shape, tuple):
shape = (shape,)
if (list(shape).count(-1)>1):
raise ValueError("At most one unspecified dimension in shape")
if -1 not in shape:
if sizing != STRICT:
raise ValueError("sizing must be STRICT if size complete")
arr = np.empty(shape, dtype)
bytesleft=arr.nbytes
bytesread=0
while(bytesleft > _BLOCKSIZE):
data = infile.read(_BLOCKSIZE)
if len(data) != _BLOCKSIZE:
raise EarlyEOFError("Unexpected EOF reading data for size complete array")
arr.data[bytesread:bytesread+_BLOCKSIZE]=data
bytesread += _BLOCKSIZE
bytesleft -= _BLOCKSIZE
if bytesleft > 0:
data = infile.read(bytesleft)
if len(data) != bytesleft:
raise EarlyEOFError("Unexpected EOF reading data for size complete array")
arr.data[bytesread:bytesread+bytesleft]=data
return arr
##shape is incompletely specified
##read until EOF
##implementation 1: naively use memory blocks
##problematic because memory allocation can be double what is
##necessary (!)
##the most common case, namely reading in data from an unchanging
##file whose size may be determined before allocation, should be
##quick -- only one allocation will be needed.
recsize = dtype.itemsize * np.product([i for i in shape if i != -1])
blocksize = max(_BLOCKSIZE/recsize, 1)*recsize
##try to estimate file size
try:
curpos=infile.tell()
infile.seek(0,2)
endpos=infile.tell()
infile.seek(curpos)
except (AttributeError, IOError):
initsize=blocksize
else:
initsize=max(1,(endpos-curpos)/recsize)*recsize
buf = np.newbuffer(initsize)
bytesread=0
while 1:
data=infile.read(blocksize)
if len(data) != blocksize: ##eof
break
##do we have space?
if len(buf) < bytesread+blocksize:
buf=_resizebuf(buf,len(buf)+blocksize)
## or rather a=resizebuf(a,2*len(a)) ?
assert len(buf) >= bytesread+blocksize
buf[bytesread:bytesread+blocksize]=data
bytesread += blocksize
if len(data) % recsize != 0:
if sizing == STRICT:
raise SizeMismatchError("Filesize does not match specified shape")
if sizing == WARN:
_warnings.warn("Filesize does not match specified shape",
SizeMismatchWarning)
try:
infile.seek(-(len(data) % recsize),1)
except AttributeError:
_warnings.warn("Could not rewind (no seek support)",
FileSeekWarning)
except IOError:
_warnings.warn("Could not rewind (IOError in seek)",
FileSeekWarning)
datasize = (len(data)/recsize) * recsize
if len(buf) != bytesread+datasize:
buf=_resizebuf(buf,bytesread+datasize)
buf[bytesread:bytesread+datasize]=data[:datasize]
##deduce shape from len(buf)
shape = list(shape)
uidx = shape.index(-1)
shape[uidx]=len(buf) / recsize
    a = np.ndarray(shape=shape, dtype=dtype, buffer=buf)
if a.dtype.char == '?':
np.not_equal(a, 0, a)
return a
def fromstring(datastring, type=None, shape=None, typecode=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, True)
if shape is None:
count = -1
else:
count = np.product(shape)
res = np.fromstring(datastring, dtype=dtype, count=count)
if shape is not None:
res.shape = shape
return res
# check_overflow is ignored
def fromlist(seq, type=None, shape=None, check_overflow=0, typecode=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, False)
return np.array(seq, dtype)
def array(sequence=None, typecode=None, copy=1, savespace=0,
type=None, shape=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, 0)
if sequence is None:
if shape is None:
return None
if dtype is None:
dtype = 'l'
return np.empty(shape, dtype)
if isinstance(sequence, file):
return fromfile(sequence, dtype=dtype, shape=shape)
if isinstance(sequence, str):
return fromstring(sequence, dtype=dtype, shape=shape)
if isinstance(sequence, buffer):
arr = np.frombuffer(sequence, dtype=dtype)
else:
arr = np.array(sequence, dtype, copy=copy)
if shape is not None:
arr.shape = shape
return arr
def asarray(seq, type=None, typecode=None, dtype=None):
if isinstance(seq, np.ndarray) and type is None and \
typecode is None and dtype is None:
return seq
return array(seq, type=type, typecode=typecode, copy=0, dtype=dtype)
inputarray = asarray
def getTypeObject(sequence, type):
if type is not None:
return type
try:
return typefrom(np.array(sequence))
except:
raise TypeError("Can't determine a reasonable type from sequence")
def getShape(shape, *args):
try:
if shape is () and not args:
return ()
if len(args) > 0:
shape = (shape, ) + args
else:
shape = tuple(shape)
dummy = np.array(shape)
if not issubclass(dummy.dtype.type, np.integer):
raise TypeError
if len(dummy) > np.MAXDIMS:
raise TypeError
except:
raise TypeError("Shape must be a sequence of integers")
return shape
def identity(n, type=None, typecode=None, dtype=None):
dtype = type2dtype(typecode, type, dtype, True)
return np.identity(n, dtype)
def info(obj, output=sys.stdout, numpy=0):
if numpy:
bp = lambda x: x
else:
bp = lambda x: int(x)
cls = getattr(obj, '__class__', type(obj))
if numpy:
nm = getattr(cls, '__name__', cls)
else:
nm = cls
print >> output, "class: ", nm
print >> output, "shape: ", obj.shape
strides = obj.strides
print >> output, "strides: ", strides
if not numpy:
print >> output, "byteoffset: 0"
if len(strides) > 0:
bs = obj.strides[0]
else:
bs = obj.itemsize
print >> output, "bytestride: ", bs
print >> output, "itemsize: ", obj.itemsize
print >> output, "aligned: ", bp(obj.flags.aligned)
print >> output, "contiguous: ", bp(obj.flags.contiguous)
if numpy:
print >> output, "fortran: ", obj.flags.fortran
if not numpy:
print >> output, "buffer: ", repr(obj.data)
if not numpy:
extra = " (DEBUG ONLY)"
tic = "'"
else:
extra = ""
tic = ""
print >> output, "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra)
print >> output, "byteorder: ",
endian = obj.dtype.byteorder
if endian in ['|','=']:
print >> output, "%s%s%s" % (tic, sys.byteorder, tic)
byteswap = False
elif endian == '>':
print >> output, "%sbig%s" % (tic, tic)
byteswap = sys.byteorder != "big"
else:
print >> output, "%slittle%s" % (tic, tic)
byteswap = sys.byteorder != "little"
print >> output, "byteswap: ", bp(byteswap)
if not numpy:
print >> output, "type: ", typefrom(obj).name
else:
print >> output, "type: %s" % obj.dtype
#clipmode is ignored if axis is not 0 and array is not 1d
def put(array, indices, values, axis=0, clipmode=RAISE):
if not isinstance(array, np.ndarray):
raise TypeError("put only works on subclass of ndarray")
work = asarray(array)
if axis == 0:
if array.ndim == 1:
work.put(indices, values, clipmode)
else:
work[indices] = values
elif isinstance(axis, (int, long, np.integer)):
work = work.swapaxes(0, axis)
work[indices] = values
work = work.swapaxes(0, axis)
else:
def_axes = range(work.ndim)
for x in axis:
def_axes.remove(x)
axis = list(axis)+def_axes
work = work.transpose(axis)
work[indices] = values
work = work.transpose(axis)
def repeat(array, repeats, axis=0):
return np.repeat(array, repeats, axis)
def reshape(array, shape, *args):
if len(args) > 0:
shape = (shape,) + args
return np.reshape(array, shape)
import warnings as _warnings
def round(*args, **keys):
_warnings.warn("round() is deprecated. Switch to around()",
DeprecationWarning)
return around(*args, **keys)
def sometrue(array, axis=0):
return np.sometrue(array, axis)
#clipmode is ignored if axis is not an integer
def take(array, indices, axis=0, outarr=None, clipmode=RAISE):
array = np.asarray(array)
if isinstance(axis, (int, long, np.integer)):
res = array.take(indices, axis, outarr, clipmode)
if outarr is None:
return res
return
else:
def_axes = range(array.ndim)
for x in axis:
def_axes.remove(x)
axis = list(axis) + def_axes
work = array.transpose(axis)
res = work[indices]
if outarr is None:
return res
outarr[...] = res
return
def tensormultiply(a1, a2):
a1, a2 = np.asarray(a1), np.asarray(a2)
if (a1.shape[-1] != a2.shape[0]):
raise ValueError("Unmatched dimensions")
shape = a1.shape[:-1] + a2.shape[1:]
return np.reshape(dot(np.reshape(a1, (-1, a1.shape[-1])),
np.reshape(a2, (a2.shape[0],-1))),
shape)
def cumsum(a1, axis=0, out=None, type=None, dim=0):
return np.asarray(a1).cumsum(axis,dtype=type,out=out)
def cumproduct(a1, axis=0, out=None, type=None, dim=0):
return np.asarray(a1).cumprod(axis,dtype=type,out=out)
def argmax(x, axis=-1):
return np.argmax(x, axis)
def argmin(x, axis=-1):
return np.argmin(x, axis)
def newobj(self, type):
if type is None:
return np.empty_like(self)
else:
return np.empty(self.shape, type)
def togglebyteorder(self):
self.dtype=self.dtype.newbyteorder()
def average(a, axis=0, weights=None, returned=0):
return np.average(a, axis, weights, returned)
|
ndebuhr/thermo-state-solver
|
refs/heads/master
|
thermo-env/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/universaldetector.py
|
1775
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2022, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
def __init__(self):
self._highBitDetector = re.compile(b'[\x80-\xFF]')
self._escDetector = re.compile(b'(\033|~{)')
self._mEscCharSetProber = None
self._mCharSetProbers = []
self.reset()
def reset(self):
self.result = {'encoding': None, 'confidence': 0.0}
self.done = False
self._mStart = True
self._mGotData = False
self._mInputState = ePureAscii
self._mLastChar = b''
if self._mEscCharSetProber:
self._mEscCharSetProber.reset()
for prober in self._mCharSetProbers:
prober.reset()
def feed(self, aBuf):
if self.done:
return
aLen = len(aBuf)
if not aLen:
return
if not self._mGotData:
# If the data starts with BOM, we know it is UTF
if aBuf[:3] == codecs.BOM_UTF8:
# EF BB BF UTF-8 with BOM
self.result = {'encoding': "UTF-8-SIG", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_LE:
# FF FE 00 00 UTF-32, little-endian BOM
self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
elif aBuf[:4] == codecs.BOM_UTF32_BE:
# 00 00 FE FF UTF-32, big-endian BOM
self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
elif aBuf[:4] == b'\xFE\xFF\x00\x00':
# FE FF 00 00 UCS-4, unusual octet order BOM (3412)
self.result = {
'encoding': "X-ISO-10646-UCS-4-3412",
'confidence': 1.0
}
elif aBuf[:4] == b'\x00\x00\xFF\xFE':
# 00 00 FF FE UCS-4, unusual octet order BOM (2143)
self.result = {
'encoding': "X-ISO-10646-UCS-4-2143",
'confidence': 1.0
}
elif aBuf[:2] == codecs.BOM_LE:
# FF FE UTF-16, little endian BOM
self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
elif aBuf[:2] == codecs.BOM_BE:
# FE FF UTF-16, big endian BOM
self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
self._mGotData = True
if self.result['encoding'] and (self.result['confidence'] > 0.0):
self.done = True
return
if self._mInputState == ePureAscii:
if self._highBitDetector.search(aBuf):
self._mInputState = eHighbyte
elif ((self._mInputState == ePureAscii) and
self._escDetector.search(self._mLastChar + aBuf)):
self._mInputState = eEscAscii
self._mLastChar = aBuf[-1:]
if self._mInputState == eEscAscii:
if not self._mEscCharSetProber:
self._mEscCharSetProber = EscCharSetProber()
if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
'confidence': self._mEscCharSetProber.get_confidence()}
self.done = True
elif self._mInputState == eHighbyte:
if not self._mCharSetProbers:
self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
Latin1Prober()]
for prober in self._mCharSetProbers:
if prober.feed(aBuf) == constants.eFoundIt:
self.result = {'encoding': prober.get_charset_name(),
'confidence': prober.get_confidence()}
self.done = True
break
def close(self):
if self.done:
return
if not self._mGotData:
if constants._debug:
sys.stderr.write('no data received!\n')
return
self.done = True
if self._mInputState == ePureAscii:
self.result = {'encoding': 'ascii', 'confidence': 1.0}
return self.result
if self._mInputState == eHighbyte:
proberConfidence = None
maxProberConfidence = 0.0
maxProber = None
for prober in self._mCharSetProbers:
if not prober:
continue
proberConfidence = prober.get_confidence()
if proberConfidence > maxProberConfidence:
maxProberConfidence = proberConfidence
maxProber = prober
if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
self.result = {'encoding': maxProber.get_charset_name(),
'confidence': maxProber.get_confidence()}
return self.result
if constants._debug:
            sys.stderr.write('no probers hit minimum threshold\n')
for prober in self._mCharSetProbers[0].mProbers:
if not prober:
continue
sys.stderr.write('%s confidence = %s\n' %
(prober.get_charset_name(),
prober.get_confidence()))
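# Hedged usage sketch mirroring the documented chardet pattern (the sample
# chunks are whatever byte strings the caller has): feed() data incrementally,
# stop early once done, then close() and read the final result dict.
def _demo_detect(chunks):
    detector = UniversalDetector()
    for chunk in chunks:
        detector.feed(chunk)
        if detector.done:
            break
    detector.close()
    return detector.result  # e.g. {'encoding': 'UTF-8-SIG', 'confidence': 1.0}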
|
elena/django
|
refs/heads/master
|
tests/postgres_tests/test_introspection.py
|
27
|
from io import StringIO
from django.core.management import call_command
from django.test.utils import modify_settings
from . import PostgreSQLTestCase
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.postgres'})
class InspectDBTests(PostgreSQLTestCase):
def assertFieldsInModel(self, model, field_outputs):
out = StringIO()
call_command(
'inspectdb',
table_name_filter=lambda tn: tn.startswith(model),
stdout=out,
)
output = out.getvalue()
for field_output in field_outputs:
self.assertIn(field_output, output)
def test_range_fields(self):
self.assertFieldsInModel(
'postgres_tests_rangesmodel',
[
'ints = django.contrib.postgres.fields.IntegerRangeField(blank=True, null=True)',
'bigints = django.contrib.postgres.fields.BigIntegerRangeField(blank=True, null=True)',
'decimals = django.contrib.postgres.fields.DecimalRangeField(blank=True, null=True)',
'timestamps = django.contrib.postgres.fields.DateTimeRangeField(blank=True, null=True)',
'dates = django.contrib.postgres.fields.DateRangeField(blank=True, null=True)',
],
)
|