| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, may be null) |
|---|---|---|---|---|
bclau/nova | refs/heads/master | nova/tests/integrated/v3/test_instance_actions.py | 11 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.compute import api as compute_api
from nova import db
from nova.tests import fake_instance_actions
from nova.tests.integrated.v3 import api_sample_base
from nova.tests import utils as test_utils
class InstanceActionsSampleJsonTest(api_sample_base.ApiSampleTestBaseV3):
extension_name = 'os-instance-actions'
def setUp(self):
super(InstanceActionsSampleJsonTest, self).setUp()
self.actions = fake_instance_actions.FAKE_ACTIONS
self.events = fake_instance_actions.FAKE_EVENTS
self.instance = test_utils.get_test_instance()
def fake_instance_action_get_by_request_id(context, uuid, request_id):
return copy.deepcopy(self.actions[uuid][request_id])
def fake_instance_actions_get(context, uuid):
return [copy.deepcopy(value) for value in
self.actions[uuid].itervalues()]
def fake_instance_action_events_get(context, action_id):
return copy.deepcopy(self.events[action_id])
def fake_instance_get_by_uuid(context, instance_id):
return self.instance
def fake_get(self, context, instance_uuid):
return {'uuid': instance_uuid}
self.stubs.Set(db, 'action_get_by_request_id',
fake_instance_action_get_by_request_id)
self.stubs.Set(db, 'actions_get', fake_instance_actions_get)
self.stubs.Set(db, 'action_events_get',
fake_instance_action_events_get)
self.stubs.Set(db, 'instance_get_by_uuid', fake_instance_get_by_uuid)
self.stubs.Set(compute_api.API, 'get', fake_get)
def test_instance_action_get(self):
fake_uuid = fake_instance_actions.FAKE_UUID
fake_request_id = fake_instance_actions.FAKE_REQUEST_ID1
fake_action = self.actions[fake_uuid][fake_request_id]
response = self._do_get('servers/%s/os-instance-actions/%s' %
(fake_uuid, fake_request_id))
subs = self._get_regexes()
subs['action'] = '(reboot)|(resize)'
subs['instance_uuid'] = fake_uuid
subs['integer_id'] = '[0-9]+'
subs['request_id'] = fake_action['request_id']
subs['start_time'] = fake_action['start_time']
subs['result'] = '(Success)|(Error)'
subs['event'] = '(schedule)|(compute_create)'
self._verify_response('instance-action-get-resp', subs, response, 200)
def test_instance_actions_list(self):
fake_uuid = fake_instance_actions.FAKE_UUID
response = self._do_get('servers/%s/os-instance-actions' % (fake_uuid))
subs = self._get_regexes()
subs['action'] = '(reboot)|(resize)'
subs['integer_id'] = '[0-9]+'
subs['request_id'] = ('req-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
'-[0-9a-f]{4}-[0-9a-f]{12}')
self._verify_response('instance-actions-list-resp', subs,
response, 200)
class InstanceActionsSampleXmlTest(InstanceActionsSampleJsonTest):
ctype = 'xml'
|
caneruguz/osf.io | refs/heads/develop | website/archiver/signals.py | 79 |
import blinker
signals = blinker.Namespace()
archive_fail = signals.signal('archive-fail')
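# A minimal sketch (not part of the original module) of how a receiver could hook into
# this signal; the sender value, keyword argument, and handler name below are hypothetical.
def _on_archive_fail(sender, **kwargs):
    # Hypothetical receiver: report which node failed and why.
    print('archive failed for {0}: {1}'.format(sender, kwargs.get('errors')))

archive_fail.connect(_on_archive_fail)
archive_fail.send('node-abc12', errors=['upload timed out'])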
|
intgr/django | refs/heads/master | tests/fixtures_model_package/tests.py | 84 |
from django.core import management
from django.core.management import CommandError
from django.test import TestCase
from .models import Article
class SampleTestCase(TestCase):
fixtures = ['fixture1.json', 'fixture2.json']
def testClassFixtures(self):
"Test cases can load fixture objects into models defined in packages"
self.assertEqual(Article.objects.count(), 3)
self.assertQuerysetEqual(
Article.objects.all(), [
"Django conquers world!",
"Copyright is fine the way it is",
"Poker has no place on ESPN",
],
lambda a: a.headline
)
class FixtureTestCase(TestCase):
def test_loaddata(self):
"Fixtures can load data into models defined in packages"
# Load fixture 1. Single JSON file, with two objects
management.call_command("loaddata", "fixture1.json", verbosity=0)
self.assertQuerysetEqual(
Article.objects.all(), [
"Time to reform copyright",
"Poker has no place on ESPN",
],
lambda a: a.headline,
)
# Load fixture 2. JSON file imported by default. Overwrites some
# existing objects
management.call_command("loaddata", "fixture2.json", verbosity=0)
self.assertQuerysetEqual(
Article.objects.all(), [
"Django conquers world!",
"Copyright is fine the way it is",
"Poker has no place on ESPN",
],
lambda a: a.headline,
)
# Load a fixture that doesn't exist
with self.assertRaisesMessage(CommandError, "No fixture named 'unknown' found."):
management.call_command("loaddata", "unknown.json", verbosity=0)
self.assertQuerysetEqual(
Article.objects.all(), [
"Django conquers world!",
"Copyright is fine the way it is",
"Poker has no place on ESPN",
],
lambda a: a.headline,
)
|
spektom/incubator-airflow | refs/heads/master | airflow/providers/google/cloud/hooks/tasks.py | 4 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains a CloudTasksHook
which allows you to connect to the GCP Cloud Tasks service
and perform actions on queues or tasks.
"""
from typing import Dict, List, Optional, Sequence, Tuple, Union
from google.api_core.retry import Retry
from google.cloud.tasks_v2 import CloudTasksClient, enums
from google.cloud.tasks_v2.types import FieldMask, Queue, Task
from airflow import AirflowException
from airflow.providers.google.cloud.hooks.base import CloudBaseHook
class CloudTasksHook(CloudBaseHook):
"""
Hook for Google Cloud Tasks APIs. Cloud Tasks allows developers to manage
the execution of background work in their applications.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:type gcp_conn_id: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
"""
def __init__(self, gcp_conn_id="google_cloud_default", delegate_to=None):
super().__init__(gcp_conn_id, delegate_to)
self._client = None
def get_conn(self):
"""
Provides a client for interacting with the Cloud Tasks API.
:return: GCP Cloud Tasks API Client
:rtype: google.cloud.tasks_v2.CloudTasksClient
"""
if not self._client:
self._client = CloudTasksClient(
credentials=self._get_credentials(),
client_info=self.client_info
)
return self._client
@CloudBaseHook.catch_http_exception
@CloudBaseHook.fallback_to_default_project_id
def create_queue(
self,
location: str,
task_queue: Union[Dict, Queue],
project_id: Optional[str] = None,
queue_name: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Queue:
"""
Creates a queue in Cloud Tasks.
:param location: The location name in which the queue will be created.
:type location: str
:param task_queue: The task queue to create.
Queue's name cannot be the same as an existing queue.
If a dict is provided, it must be of the same form as the protobuf message Queue.
:type task_queue: dict or class google.cloud.tasks_v2.types.Queue
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
:type queue_name: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
if queue_name:
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
if isinstance(task_queue, Queue):
task_queue.name = full_queue_name
elif isinstance(task_queue, dict):
task_queue['name'] = full_queue_name
else:
raise AirflowException('Unable to set queue_name.')
full_location_path = CloudTasksClient.location_path(project_id, location)
return client.create_queue(
parent=full_location_path,
queue=task_queue,
retry=retry,
timeout=timeout,
metadata=metadata,
)
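# Usage sketch (added for illustration, not part of the original file): create_queue
# accepts either a plain dict or a google.cloud.tasks_v2.types.Queue; the connection,
# location and queue name below are hypothetical.
#
#   hook = CloudTasksHook(gcp_conn_id="google_cloud_default")
#   queue = hook.create_queue(location="europe-west1",
#                             task_queue={},
#                             queue_name="demo-queue")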
@CloudBaseHook.catch_http_exception
@CloudBaseHook.fallback_to_default_project_id
def update_queue(
self,
task_queue: Queue,
project_id: Optional[str] = None,
location: Optional[str] = None,
queue_name: Optional[str] = None,
update_mask: Optional[FieldMask] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Queue:
"""
Updates a queue in Cloud Tasks.
:param task_queue: The task queue to update.
This method creates the queue if it does not exist and updates the queue if
it does exist. The queue's name must be specified.
:type task_queue: dict or class google.cloud.tasks_v2.types.Queue
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param location: (Optional) The location name in which the queue will be updated.
If provided, it will be used to construct the full queue path.
:type location: str
:param queue_name: (Optional) The queue's name.
If provided, it will be used to construct the full queue path.
:type queue_name: str
:param update_mask: A mask used to specify which fields of the queue are being updated.
If empty, then all fields will be updated.
If a dict is provided, it must be of the same form as the protobuf message.
:type update_mask: dict or class google.cloud.tasks_v2.types.FieldMask
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
if queue_name and location:
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
if isinstance(task_queue, Queue):
task_queue.name = full_queue_name
elif isinstance(task_queue, dict):
task_queue['name'] = full_queue_name
else:
raise AirflowException('Unable to set queue_name.')
return client.update_queue(
queue=task_queue,
update_mask=update_mask,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@CloudBaseHook.catch_http_exception
@CloudBaseHook.fallback_to_default_project_id
def get_queue(
self,
location: str,
queue_name: str,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Queue:
"""
Gets a queue from Cloud Tasks.
:param location: The location name in which the queue was created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Queue
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
return client.get_queue(
name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata
)
@CloudBaseHook.catch_http_exception
@CloudBaseHook.fallback_to_default_project_id
def list_queues(
self,
location: str,
project_id: Optional[str] = None,
results_filter: Optional[str] = None,
page_size: Optional[int] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> List[Queue]:
"""
Lists queues from Cloud Tasks.
:param location: The location name in which the queues were created.
:type location: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param results_filter: (Optional) Filter used to specify a subset of queues.
:type results_filter: str
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:type page_size: int
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_location_path = CloudTasksClient.location_path(project_id, location)
queues = client.list_queues(
parent=full_location_path,
filter_=results_filter,
page_size=page_size,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(queues)
@CloudBaseHook.catch_http_exception
@CloudBaseHook.fallback_to_default_project_id
def delete_queue(
self,
location: str,
queue_name: str,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> None:
"""
Deletes a queue from Cloud Tasks, even if it has tasks in it.
:param location: The location name in which the queue will be deleted.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
client.delete_queue(
name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata
)
@CloudBaseHook.catch_http_exception
@CloudBaseHook.fallback_to_default_project_id
def purge_queue(
self,
location: str,
queue_name: str,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> List[Queue]:
"""
Purges a queue by deleting all of its tasks from Cloud Tasks.
:param location: The location name in which the queue will be purged.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
return client.purge_queue(
name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata
)
@CloudBaseHook.catch_http_exception
@CloudBaseHook.fallback_to_default_project_id
def pause_queue(
self,
location: str,
queue_name: str,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> List[Queue]:
"""
Pauses a queue in Cloud Tasks.
:param location: The location name in which the queue will be paused.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
return client.pause_queue(
name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata
)
@CloudBaseHook.catch_http_exception
@CloudBaseHook.fallback_to_default_project_id
def resume_queue(
self,
location: str,
queue_name: str,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> List[Queue]:
"""
Resumes a queue in Cloud Tasks.
:param location: The location name in which the queue will be resumed.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: list[google.cloud.tasks_v2.types.Queue]
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
return client.resume_queue(
name=full_queue_name, retry=retry, timeout=timeout, metadata=metadata
)
@CloudBaseHook.catch_http_exception
@CloudBaseHook.fallback_to_default_project_id
def create_task(
self,
location: str,
queue_name: str,
task: Union[Dict, Task],
project_id: Optional[str] = None,
task_name: Optional[str] = None,
response_view: Optional[enums.Task.View] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Task:
"""
Creates a task in Cloud Tasks.
:param location: The location name in which the task will be created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task: The task to add.
If a dict is provided, it must be of the same form as the protobuf message Task.
:type task: dict or class google.cloud.tasks_v2.types.Task
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param task_name: (Optional) The task's name.
If provided, it will be used to construct the full task path.
:type task_name: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.enums.Task.View
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Task
"""
client = self.get_conn()
if task_name:
full_task_name = CloudTasksClient.task_path(
project_id, location, queue_name, task_name
)
if isinstance(task, Task):
task.name = full_task_name
elif isinstance(task, dict):
task['name'] = full_task_name
else:
raise AirflowException('Unable to set task_name.')
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
return client.create_task(
parent=full_queue_name,
task=task,
response_view=response_view,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@CloudBaseHook.catch_http_exception
@CloudBaseHook.fallback_to_default_project_id
def get_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: Optional[str] = None,
response_view: Optional[enums.Task.View] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Task:
"""
Gets a task from Cloud Tasks.
:param location: The location name in which the task was created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task_name: The task's name.
:type task_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.enums.Task.View
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Task
"""
client = self.get_conn()
full_task_name = CloudTasksClient.task_path(project_id, location, queue_name, task_name)
return client.get_task(
name=full_task_name,
response_view=response_view,
retry=retry,
timeout=timeout,
metadata=metadata,
)
@CloudBaseHook.catch_http_exception
@CloudBaseHook.fallback_to_default_project_id
def list_tasks(
self,
location: str,
queue_name: str,
project_id: Optional[str] = None,
response_view: Optional[enums.Task.View] = None,
page_size: Optional[int] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> List[Task]:
"""
Lists the tasks in Cloud Tasks.
:param location: The location name in which the tasks were created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.enums.Task.View
:param page_size: (Optional) The maximum number of resources contained in the
underlying API response.
:type page_size: int
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: list[google.cloud.tasks_v2.types.Task]
"""
client = self.get_conn()
full_queue_name = CloudTasksClient.queue_path(project_id, location, queue_name)
tasks = client.list_tasks(
parent=full_queue_name,
response_view=response_view,
page_size=page_size,
retry=retry,
timeout=timeout,
metadata=metadata,
)
return list(tasks)
@CloudBaseHook.catch_http_exception
@CloudBaseHook.fallback_to_default_project_id
def delete_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> None:
"""
Deletes a task from Cloud Tasks.
:param location: The location name in which the task will be deleted.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task_name: The task's name.
:type task_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
"""
client = self.get_conn()
full_task_name = CloudTasksClient.task_path(project_id, location, queue_name, task_name)
client.delete_task(
name=full_task_name, retry=retry, timeout=timeout, metadata=metadata
)
@CloudBaseHook.catch_http_exception
@CloudBaseHook.fallback_to_default_project_id
def run_task(
self,
location: str,
queue_name: str,
task_name: str,
project_id: Optional[str] = None,
response_view: Optional[enums.Task.View] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Optional[Sequence[Tuple[str, str]]] = None
) -> Task:
"""
Forces a task to run in Cloud Tasks.
:param location: The location name in which the task was created.
:type location: str
:param queue_name: The queue's name.
:type queue_name: str
:param task_name: The task's name.
:type task_name: str
:param project_id: (Optional) The ID of the GCP project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the GCP connection is used.
:type project_id: str
:param response_view: (Optional) This field specifies which subset of the Task will
be returned.
:type response_view: google.cloud.tasks_v2.enums.Task.View
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:type retry: google.api_core.retry.Retry
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:type timeout: float
:param metadata: (Optional) Additional metadata that is provided to the method.
:type metadata: sequence[tuple[str, str]]
:rtype: google.cloud.tasks_v2.types.Task
"""
client = self.get_conn()
full_task_name = CloudTasksClient.task_path(project_id, location, queue_name, task_name)
return client.run_task(
name=full_task_name,
response_view=response_view,
retry=retry,
timeout=timeout,
metadata=metadata,
)
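# A minimal end-to-end sketch (not part of the original file) of how the hook above
# might be used directly; the connection, location, queue, task name and URL are
# hypothetical, and the task payload is a dict in the shape of the
# google.cloud.tasks_v2 Task message.
from airflow.providers.google.cloud.hooks.tasks import CloudTasksHook

hook = CloudTasksHook(gcp_conn_id="google_cloud_default")

# Create a queue, enqueue an HTTP task, force-run it, then clean up.
hook.create_queue(location="europe-west1", task_queue={}, queue_name="demo-queue")
hook.create_task(
    location="europe-west1",
    queue_name="demo-queue",
    task={"http_request": {"http_method": "POST", "url": "https://example.com/handler"}},
    task_name="demo-task",
)
hook.run_task(location="europe-west1", queue_name="demo-queue", task_name="demo-task")
hook.delete_task(location="europe-west1", queue_name="demo-queue", task_name="demo-task")
hook.delete_queue(location="europe-west1", queue_name="demo-queue")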
|
ComputationalRadiationPhysics/pyDive | refs/heads/master | pyDive/arrays/gpu_ndarray.py | 2 |
"""
Copyright 2015 Heiko Burau
This file is part of pyDive.
pyDive is free software: you can redistribute it and/or modify
it under the terms of of either the GNU General Public License or
the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyDive is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License and the GNU Lesser General Public License
for more details.
You should have received a copy of the GNU General Public License
and the GNU Lesser General Public License along with pyDive.
If not, see <http://www.gnu.org/licenses/>.
"""
__doc__ = None
import numpy as np
import pyDive.distribution.multiple_axes as multiple_axes
from pyDive.distribution.interengine import GPU_copier
import pyDive.arrays.local.gpu_ndarray
gpu_ndarray = multiple_axes.distribute(pyDive.arrays.local.gpu_ndarray.gpu_ndarray, "gpu_ndarray",\
"pyDive.arrays.local.gpu_ndarray", interengine_copier=GPU_copier)
factories = multiple_axes.generate_factories(gpu_ndarray, ("empty", "zeros"), np.float)
factories.update(multiple_axes.generate_factories_like(gpu_ndarray, ("empty_like", "zeros_like")))
globals().update(factories)
def ones(shape, dtype=np.float, distaxes='all', **kwargs):
result = zeros(shape, dtype, distaxes, **kwargs)
result += 1
return result
def ones_like(other, **kwargs):
result = zeros_like(other, **kwargs)
result += 1
return result
import pyDive.IPParallelClient as com
import pyDive.arrays.ndarray
def to_cpu(self):
"""Copy array data to cpu main memory.
:result pyDive.ndarray: distributed cpu array.
"""
result = pyDive.arrays.ndarray.hollow_like(self)
view = com.getView()
view.execute("{0} = {1}.to_cpu()".format(result.name, self.name), targets=result.target_ranks)
return result
gpu_ndarray.to_cpu = to_cpu
del to_cpu
def hollow(shape, dtype=np.float, distaxes='all'):
"""Create a pyDive.gpu_ndarray instance distributed across all engines without allocating a local
gpu-array.
:param ints shape: shape of array
:param dtype: datatype of a single element
:param ints distaxes: distributed axes. Defaults to 'all' meaning each axis is distributed.
"""
return gpu_ndarray(shape, dtype, distaxes, None, None, True)
def hollow_like(other):
"""Create a pyDive.gpu_ndarray instance with the same
shape, distribution and type as ``other`` without allocating a local gpu-array.
"""
return gpu_ndarray(other.shape, other.dtype, other.distaxes, other.target_offsets, other.target_ranks, True)
def array(array_like, distaxes='all'):
"""Create a pyDive.gpu_ndarray instance from an array-like object.
:param array_like: Any object exposing the array interface, e.g. numpy-array, python sequence, ...
:param ints distaxes: distributed axes. Defaults to 'all' meaning each axis is distributed.
"""
result_cpu = pyDive.arrays.ndarray.array(array_like, distaxes)
result = hollow_like(result_cpu)
view = com.getView()
view.execute("{0} = pyDive.arrays.local.gpu_ndarray.gpu_ndarray_cast(pycuda.gpuarray.to_gpu({1}))"\
.format(repr(result), repr(result_cpu)), targets=result.target_ranks)
return result
#ufunc_names = [key for key, value in np.__dict__.items() if isinstance(value, np.ufunc)]
#ufuncs = multiple_axes.generate_ufuncs(ufunc_names, "np")
#globals().update(ufuncs)
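# A short usage sketch (not part of the original file), assuming an IPython.parallel
# cluster with GPU-equipped engines has already been started; pyDive.init() and the
# shape below are assumptions based on pyDive's documented workflow.
import numpy as np
import pyDive
import pyDive.arrays.gpu_ndarray as gpu

pyDive.init()  # assumption: connect to the running engine cluster

a = gpu.ones((1024, 1024), dtype=np.float32)  # distributed gpu array, chunks live on the engines
a += 5.0                                      # element-wise work happens on the GPUs
cpu_copy = a.to_cpu()                         # pull the data back as a distributed cpu ndarray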
|
yiwen-luo/LeetCode | refs/heads/master | Python/summary-ranges.py | 3 |
# Time: O(n)
# Space: O(1)
#
# Given a sorted integer array without duplicates,
# return the summary of its ranges.
#
# For example, given [0,1,2,4,5,7],
# return ["0->2","4->5","7"].
#
import itertools
import re

class Solution:
# @param {integer[]} nums
# @return {string[]}
def summaryRanges(self, nums):
ranges = []
if not nums:
return ranges
start, end = nums[0], nums[0]
for i in xrange(1, len(nums) + 1):
if i < len(nums) and nums[i] == end + 1:
end = nums[i]
else:
interval = str(start)
if start != end:
interval += "->" + str(end)
ranges.append(interval)
if i < len(nums):
start = end = nums[i]
return ranges
# Time: O(n)
# Space: O(n)
class Solution2:
# @param {integer[]} nums
# @return {string[]}
def summaryRanges(self, nums):
return [re.sub('->.*>', '->', '->'.join(`n` for _, n in g))
for _, g in itertools.groupby(enumerate(nums), lambda (i, n): n-i)]
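# A quick self-check (not part of the original file), using the example from the
# problem statement above; run under Python 2, matching the xrange and backtick
# syntax used here.
if __name__ == '__main__':
    print Solution().summaryRanges([0, 1, 2, 4, 5, 7])   # ['0->2', '4->5', '7']
    print Solution2().summaryRanges([0, 1, 2, 4, 5, 7])  # ['0->2', '4->5', '7']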
|
o3project/odenos | refs/heads/develop | src/test/python/org/o3project/odenos/core/component/network/flow/ofpflow/test_ofp_flow_action_pop_mpls.py | 6 |
# -*- coding:utf-8 -*-
# Copyright 2015 NEC Corporation. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
from org.o3project.odenos.core.component.network.flow.ofpflow.ofp_flow_action_pop_mpls\
import OFPFlowActionPopMpls
import unittest
class OFPFlowActionPopMplsTest(unittest.TestCase):
def setUp(self):
self.target = OFPFlowActionPopMpls("OFPFlowActionPopMpls",
1234)
def tearDown(self):
self.target = None
def test_constructor(self):
self.assertEqual(self.target._body[self.target.TYPE],
"OFPFlowActionPopMpls")
self.assertEqual(self.target._body[self.target.ETH_TYPE],
1234)
def test_eth_type(self):
self.assertEqual(self.target.eth_type, 1234)
def test_create_from_packed(self):
self.value = {self.target.TYPE: "OFPFlowActionPopMpls",
self.target.ETH_TYPE: 4321}
self.result = OFPFlowActionPopMpls.create_from_packed(self.value)
self.assertEqual(self.result._body[self.target.TYPE],
"OFPFlowActionPopMpls")
self.assertEqual(self.result._body[self.target.ETH_TYPE],
4321)
def test_packed_object(self):
self.result = self.target.packed_object()
self.assertEqual(self.result[self.target.TYPE],
"OFPFlowActionPopMpls")
self.assertEqual(self.result[self.target.ETH_TYPE],
1234)
if __name__ == '__main__':
unittest.main()
|
Noahs-ARK/ARKcat | refs/heads/master | src/generate_vocab.py | 1 |
import sys, re
from sklearn.feature_extraction.text import TfidfVectorizer
from cnn_methods import *
import argparse
# import scipy
def main(files):
word2vec_file_path = files[0]
output_file_path = files[1]
input_file_paths = files[2:]
print 'processing files:', input_file_paths, 'using word vector file', word2vec_file_path
print 'outputting to', output_file_path
vocab = []
line_counter = 0
vectorizer = TfidfVectorizer(input='filename')
vectorizer.fit(input_file_paths)
for word in vectorizer.vocabulary_:
word = re.sub(r"[^A-Za-z0-9(),!?\'\`]", "", word)
if not (word in vocab):
vocab.append(word.encode('ascii', 'replace'))
print "len vocab =", len(vocab)
with open(output_file_path, 'w') as output_file:
with open(word2vec_file_path) as word2vec:
while True:
line = word2vec.readline()
if not line:
break
else:
tokens = tokenize(line)
word, vector = tokens[0], tokens[1:]
word = re.sub(r"[^A-Za-z0-9(),!?\'\`]", "", word)
if word in vocab:
output_file.write(word + ' ')
for token in vector:
output_file.write(token + ' ')
output_file.write('\n')
del vocab[vocab.index(word)]
line_counter += 1
print 'len file =', line_counter
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='need to write one')
parser.add_argument('files', nargs='+', help='')
main(vars(parser.parse_args(sys.argv[1:]))['files'])
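# Example invocation (illustrative, not part of the original script; all paths are
# hypothetical): the first argument is the word2vec text file, the second the output
# path, and the remaining arguments are the corpus files the TfidfVectorizer is fit on.
#
#   python generate_vocab.py vectors.txt filtered_vectors.txt train.txt dev.txt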
|
sandvine/horizon | refs/heads/master | openstack_dashboard/test/test_plugins/panel_group_config/_40_admin_add_panel_to_second_group.py | 69 |
# The name of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'second_panel'
# The name of the dashboard the PANEL associated with. Required.
PANEL_DASHBOARD = 'admin'
# The name of the panel group the PANEL is associated with.
PANEL_GROUP = 'second_panel_group'
# Python panel class of the PANEL to be added.
ADD_PANEL = \
'openstack_dashboard.test.test_panels.second_panel.panel.SecondPanel'
|
ashhher3/cvxpy | refs/heads/master | cvxpy/constraints/eq_constraint.py | 7 |
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.constraints.leq_constraint import LeqConstraint
import cvxpy.lin_ops.lin_utils as lu
import numpy as np
class EqConstraint(LeqConstraint):
OP_NAME = "=="
# Both sides must be affine.
def is_dcp(self):
return self._expr.is_affine()
@property
def value(self):
"""Does the constraint hold?
Returns
-------
bool
"""
if self._expr.value is None:
return None
else:
return np.all(np.abs(self._expr.value) <= self.TOLERANCE)
@property
def violation(self):
"""How much is this constraint off by?
Returns
-------
NumPy matrix
"""
if self._expr.value is None:
return None
else:
return np.abs(self._expr.value)
def canonicalize(self):
"""Returns the graph implementation of the object.
Marks the top level constraint as the dual_holder,
so the dual value will be saved to the EqConstraint.
Returns:
A tuple of (affine expression, [constraints]).
"""
obj, constraints = self._expr.canonical_form
dual_holder = lu.create_eq(obj, constr_id=self.id)
return (None, constraints + [dual_holder])
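# A small usage sketch (not part of the original file), assuming the CVXPY API of the
# same era as this module: the == operator on an expression builds an EqConstraint,
# whose value and violation properties are defined above.
import cvxpy as cvx

x = cvx.Variable()
constraint = (x == 2)  # an EqConstraint

prob = cvx.Problem(cvx.Minimize(cvx.square(x - 1)), [constraint])
prob.solve()

print(constraint.value)      # True once |x - 2| is within TOLERANCE
print(constraint.violation)  # element-wise residual |x - 2|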
|
ThinkOpen-Solutions/odoo | refs/heads/stable | doc/_themes/odoodoc/odoo_pygments.py | 129 |
# -*- coding: utf-8 -*-
import imp
import sys
from pygments.style import Style
from pygments.token import *
# extracted from getbootstrap.com
class OdooStyle(Style):
background_color = '#ffffcc'
highlight_color = '#fcf8e3'
styles = {
Whitespace: '#BBB',
Error: 'bg:#FAA #A00',
Keyword: '#069',
Keyword.Type: '#078',
Name.Attribute: '#4F9FCF',
Name.Builtin: '#366',
Name.Class: '#0A8',
Name.Constant: '#360',
Name.Decorator: '#99F',
Name.Entity: '#999',
Name.Exception: '#C00',
Name.Function: '#C0F',
Name.Label: '#99F',
Name.Namespace: '#0CF',
Name.Tag: '#2F6F9F',
Name.Variable: '#033',
String: '#d44950',
String.Backtick: '#C30',
String.Char: '#C30',
String.Doc: 'italic #C30',
String.Double: '#C30',
String.Escape: '#C30',
String.Heredoc: '#C30',
String.Interpol: '#C30',
String.Other: '#C30',
String.Regex: '#3AA',
String.Single: '#C30',
String.Symbol: '#FC3',
Number: '#F60',
Operator: '#555',
Operator.Word: '#000',
Comment: '#999',
Comment.Preproc: '#099',
Generic.Deleted: 'bg:#FCC border:#c00',
Generic.Emph: 'italic',
Generic.Error: '#F00',
Generic.Heading: '#030',
Generic.Inserted: 'bg:#CFC border:#0C0',
Generic.Output: '#AAA',
Generic.Prompt: '#009',
Generic.Strong: '',
Generic.Subheading: '#030',
Generic.Traceback: '#9C6',
}
modname = 'pygments.styles.odoo'
m = imp.new_module(modname)
m.OdooStyle = OdooStyle
sys.modules[modname] = m
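# A short sketch (not part of the original file) of why the sys.modules registration
# above matters: once this theme module has been imported, Pygments can resolve the
# style by the name 'odoo'.
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

# 'pygments.styles.odoo' is already in sys.modules, so style='odoo' resolves to OdooStyle.
html = highlight("print('hello')", PythonLexer(), HtmlFormatter(style='odoo'))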
|
coldeasy/python-driver | refs/heads/master | tests/unit/cqlengine/test_udt.py | 1 |
# Copyright 2013-2017 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra.cqlengine import columns
from cassandra.cqlengine.models import Model
from cassandra.cqlengine.usertype import UserType
class UDTTest(unittest.TestCase):
def test_initialization_without_existing_connection(self):
"""
Test that users can define models with UDTs without initializing
connections.
Written to reproduce PYTHON-649.
"""
class Value(UserType):
t = columns.Text()
class DummyUDT(Model):
__keyspace__ = 'ks'
primary_key = columns.Integer(primary_key=True)
value = columns.UserDefinedType(Value)
|
ventrixcode/yowsup | refs/heads/pr/2 | yowsup/layers/protocol_media/protocolentities/message_media.py | 1 |
from yowsup.structs import ProtocolEntity, ProtocolTreeNode
from yowsup.layers.protocol_messages.protocolentities import MessageProtocolEntity
class MediaMessageProtocolEntity(MessageProtocolEntity):
'''
<message t="{{TIME_STAMP}}" from="{{CONTACT_JID}}"
offline="{{OFFLINE}}" type="text" id="{{MESSAGE_ID}}" notify="{{NOTIFY_NAME}}">
<media type="{{DOWNLOADABLE_MEDIA_TYPE: (image | audio | video)}}"
> {{THUMBNAIL_RAWDATA (JPEG?)}}
</media>
</message>
'''
'''
<message t="{{TIME_STAMP}}" from="{{CONTACT_JID}}"
offline="{{OFFLINE}}" type="text" id="{{MESSAGE_ID}}" notify="{{NOTIFY_NAME}}">
<media type="audio"
mimetype="audio/aac"
filehash="86nonv++wq864nlkmbhJXZyPbILLlQ2KyYZxsLo8z1g="
url="https://mms884.whatsapp.net/d/h72eS2EAV6YrHfZLBaqzFVRffGUABQdt_-pVhg/Arlt8j7XkRsfFw22i-KRffXxl7j9iVsYLbJN4APwsKGJ.aac"
ip="174.37.199.214"
size="6003"
file="Arlt8j7XkRsfFw22i-KRffXxl7j9iVsYLbJN4APwsKGJ.aac"
origin="live"
seconds="1"
abitrate="32"
duration="1"
asampfreq="22050"
acodec="aac"
></media>
<media type="video"
mimetype="video/mp4"
filehash="b/f7d5bF7wK7GPqo4kH7qeka/JG0KzkFjUC4Veq9Iwg="
url="https://mms880.whatsapp.net/d/Y00AgRfYWMWZQXTXgy_FJ1Rfe9YABQdt914Dvg/ArZwkA1pbhHdyC5eXRzgPb-DCK4K7PooUUi0kYGxm-wj.mp4"
ip="173.193.205.8"
size="112928"
file="ArZwkA1pbhHdyC5eXRzgPb-DCK4K7PooUUi0kYGxm-wj.mp4"
fps="25"
encoding="raw"
seconds="1"
vcodec="h264"
abitrate="60"
vbitrate="726"
height="360"
asampfmt="flt"
duration="1"
asampfreq="44100"
acodec="aac"
width="480"
>{{THUMBNAIL_RAWDATA}}</media>
<media
latitude="52.52393"
type="location"
longitude="13.41747"
encoding="raw"
>{{THUMBNAIL_RAWDATA}}</media>
<media type="vcard">
<vcard name="Hany Yasser">
BEGIN:VCARD
VERSION:3.0
N:Yasser;Hany;;;
FN:Hany Yasser
PHOTO;BASE64:/9j/4AAQSkZJRgABAQEASABIAAD/4QBYRXhpZgAATU0AKgAAAAgAAgESAAMAAAABAAEAAIdpAAQAAAABAAAAJgAAAAAAA6ABAAMAAAABAAEAAKACAAQAAAABAAAAQKADAAQAAAABAAAAQAAAAAD/7QA4UGhvdG9zaG9wIDMuMAA4QklNBAQAAAAAAAA4QklNBCUAAAAAABDUHYzZjwCyBOmACZjs+EJ+/8AAEQgAQABAAwEiAAIRAQMRAf/EAB8AAAEFAQEBAQEBAAAAAAAAAAABAgMEBQYHCAkKC//EALUQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNRYQcicRQygZGhCCNCscEVUtHwJDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6g4SFhoeIiYqSk5SVlpeYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk5ebn6Onq8fLz9PX29/j5+v/EAB8BAAMBAQEBAQEBAQEAAAAAAAABAgMEBQYHCAkKC//EALURAAIBAgQEAwQHBQQEAAECdwABAgMRBAUhMQYSQVEHYXETIjKBCBRCkaGxwQkjM1LwFWJy0QoWJDThJfEXGBkaJicoKSo1Njc4OTpDREVGR0hJSlNUVVZXWFlaY2RlZmdoaWpzdHV2d3h5eoKDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uLj5OXm5+jp6vLz9PX29/j5+v/bAEMABgYGBgYGCgYGCg4KCgoOEg4ODg4SFxISEhISFxwXFxcXFxccHBwcHBwcHCIiIiIiIicnJycnLCwsLCwsLCwsLP/bAEMBBwcHCwoLEwoKEy4fGh8uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLi4uLv/dAAQABP/aAAwDAQACEQMRAD8A83lPGaqzn/iXgnqZB/WpWbKjNV7kgWC5/wCen9DXix3PoGtCreFJG3OcbVFZmx2XL8A9PoOa9u0b4TDVLH+0tavDZMyBkiUDKqRkGQsQBkc49O9ebeJ9Am8PXX2bzkuYJAWhmjOVcA4Pc4I7jNelCm1BOx5M6kXNpM5VnX77EEn17D6Vt6aVaNtnABxitnwn4DvPEUS3lxIIoH5HG5yPUL059zVTxLoUPhDUYGs7gzRO+yXOCB6A7eOlTUSa5U9SqbcXzNaGdenbYxhevymsPc0rGVyDg9O1a96d9uPT5RWK/C/d6ck0qK0HXd5H/9Dy2U9B2rpPCNgmp6xp9vKgeNJWmdSMgrGN3P44qponhvVvE9ybLSYw8iKXYs21VHTk+56V7B4T8F3nhSKS91gx/anHlxqjbgqty2TgcnA/KvNo0m2n0PYr1oxi431F8R3d7Jef6MbaZ964huDhSCBlsZ5OfXp2rwzxZdyS6rLC0C26xuRhCNrkHbvAHTpivUvEdrdiaWZ4DIXXarrwVJ/oQce9eZXfg3WLgNc22ySNSIzufawc9Bz6116uTucbUYwSRreFb23sLCG6v72RraFjGbVOQwOeo78HjvTtavfDdvpyRWNo4LyIx3sSTg5xz3Hfr9a4n7Bd6bfW9orxSSSyBAqncpYnGDxx161614T8JXet3/8AbXidRHZaVuxDu3FmXLMWPp+XtxQqTk9Be2UYnj94ymFB64zWSxDnJ5UenGas3bmaWRkG1Gdii+iknA/AVQKsoyRwO1ONJxVmTKrGTuj/0e3+D9iLfR5tRZcSXUu0H/ZjxjH4k165fQG4tXRADJ/Dnpn3ri/BVt9h8OaXCMf6lSw772BY/wDoVdm90qSCPHJ6VUI2gkE581RyPNdQQJKVkj3smCpYZYY6Ae+elcT43e78M+F43twI57u4+Y4B25BYgA8cYHNe3ytbtK7lFLttwcc8nHX8K8V+OF5EdK0+BOrXJP4BD/jQkrlTk7aHjPgmztp/E8FxetsgtUluZH7hYULZ+oOK7XQEsNN+G2ra/bNMLu8mNmC8hI2uwwMdCdpJJOTnPSvOdNuPI0/V5lOG+wOg/wC2ksSH9Ca7DXwNH8A6Fpak7rxpL6X6kAL+QJrVLTQwe5545Qc9u1Z104cbe1Pkl3fSqW4szj8qzbLSP//S+ghGIfJjAA2gDHpgY49qZIxN2T2Rf1NULK5XVL66u4+YLaQ20ZH8Tp/rD+BO38DUyzlndWHclT6r2rVkR3KV7eLb3cELIx8zI3DGAM/mcdT6DmvBPjZdfvNLj6bvMfHoOAP0r6JMqujxnoyH9P8A9dfK/wAZrozeILeFOTHbDA9NzMSfyAqLblyZ57arv0vUmzjbbZ/8ixj+ddd8QbxpW0W0PHk6ZASB0G8Fq86ecx2c8Y6SIqn6bg39K6TxS0pv7dpTnNjabfZREuBWqfumdtTmpG2rmqUT/vDnvU07YGKpx4EoySvuKyZZ/9k=
BDAY;value=date:1989-01-05
ORG:Vodafone Egypt;
item1.EMAIL;type=INTERNET:hanyyasser@hotmail.com
item1.X-ABLabel:INTERNET
item2.EMAIL;type=INTERNET:hanybotbot@hotmail.com
item2.X-ABLabel:INTERNET
item3.ADR;type=HOME:;;Heliopolis;Cairo;Al Qahirah;;Egypt
item4.ADR;type=HOME:;;;cairo;;;Egypt
item5.URL:http://www.facebook.com/profile.php?id=626850952
item5.X-ABLabel:_$!<HomePage>!$_
X-FACEBOOK:hany.yasser1
END:VCARD
</vcard>
</media>
</message>
'''
MEDIA_TYPE_IMAGE = "image"
MEDIA_TYPE_VIDEO = "video"
MEDIA_TYPE_AUDIO = "audio"
MEDIA_TYPE_VCARD = "vcard"
MEDIA_TYPE_LOCATION = "location"
TYPES_MEDIA = (MEDIA_TYPE_AUDIO, MEDIA_TYPE_IMAGE, MEDIA_TYPE_VIDEO, MEDIA_TYPE_VCARD, MEDIA_TYPE_LOCATION)
def __init__(self, mediaType, _id = None, _from = None, to = None, notify = None, timestamp = None, participant = None, preview = None, offline = None, retry = None):
super(MediaMessageProtocolEntity, self).__init__("media", _id, _from, to, notify, timestamp, participant, offline, retry)
self.setMediaType(mediaType)
self.setPreview(preview)
def __str__(self):
out = super(MediaMessageProtocolEntity, self).__str__()
out += "Media Type: %s\n" % self.mediaType
out += "Has Preview: %s\n" % (self.preview is not None)
return out
def setPreview(self, preview):
self.preview = preview
def getPreview(self):
return self.preview
def setMediaType(self, mediaType):
self.mediaType = mediaType
def getMediaType(self):
return self.mediaType
def toProtocolTreeNode(self):
node = super(MediaMessageProtocolEntity, self).toProtocolTreeNode()
mediaNode = ProtocolTreeNode("enc", {"type": self.mediaType}, None, None)
node.addChild(mediaNode)
if self.preview:
mediaNode.setData(self.preview)
return node
@staticmethod
def fromProtocolTreeNode(node):
entity = MessageProtocolEntity.fromProtocolTreeNode(node)
entity.__class__ = MediaMessageProtocolEntity
entity.setMediaType(node.getChild("media").getAttributeValue("type"))
preview = node.getChild("media").getData()
entity.setPreview(preview)
return entity
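# An illustrative construction sketch (not part of the original file), using only the
# class defined above; the message id, JID and thumbnail value are hypothetical.
entity = MediaMessageProtocolEntity(
    MediaMessageProtocolEntity.MEDIA_TYPE_IMAGE,
    _id="message-id-1",
    _from="491234567890@s.whatsapp.net",
    preview=None,  # raw JPEG thumbnail bytes would go here
)
node = entity.toProtocolTreeNode()  # serializes the media type (and preview, if any) into a child node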
|
mollstam/UnrealPy | refs/heads/master | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Twisted-15.2.1/twisted/python/test/test_hashlib.py | 3 |
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.hashlib}
"""
from twisted.trial.unittest import TestCase
from twisted.trial import util
class HashObjectTests(TestCase):
"""
Tests for the hash object APIs presented by L{hashlib}, C{md5} and C{sha1}.
"""
def test_deprecation(self):
"""
Ensure the deprecation of L{twisted.python.hashlib} is working.
"""
__import__('twisted.python.hashlib')
warnings = self.flushWarnings(
offendingFunctions=[self.test_deprecation])
self.assertIdentical(warnings[0]['category'], DeprecationWarning)
self.assertEqual(len(warnings), 1)
self.assertEqual(warnings[0]['message'],
"twisted.python.hashlib was deprecated in "
"Twisted 13.1.0: Please use hashlib from stdlib.")
def test_md5(self):
"""
L{hashlib.md5} returns an object which can be used to compute an MD5
hash as defined by U{RFC 1321<http://www.ietf.org/rfc/rfc1321.txt>}.
"""
from twisted.python.hashlib import md5
# Test the result using values from section A.5 of the RFC.
self.assertEqual(
md5().hexdigest(), "d41d8cd98f00b204e9800998ecf8427e")
self.assertEqual(
md5("a").hexdigest(), "0cc175b9c0f1b6a831c399e269772661")
self.assertEqual(
md5("abc").hexdigest(), "900150983cd24fb0d6963f7d28e17f72")
self.assertEqual(
md5("message digest").hexdigest(),
"f96b697d7cb7938d525a2f31aaf161d0")
self.assertEqual(
md5("abcdefghijklmnopqrstuvwxyz").hexdigest(),
"c3fcd3d76192e4007dfb496cca67e13b")
self.assertEqual(
md5("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
"0123456789").hexdigest(),
"d174ab98d277d9f5a5611c2c9f419d9f")
self.assertEqual(
md5("1234567890123456789012345678901234567890123456789012345678901"
"2345678901234567890").hexdigest(),
"57edf4a22be3c955ac49da2e2107b67a")
# It should have digest and update methods, too.
self.assertEqual(
md5().digest().encode('hex'),
"d41d8cd98f00b204e9800998ecf8427e")
hash = md5()
hash.update("a")
self.assertEqual(
hash.digest().encode('hex'),
"0cc175b9c0f1b6a831c399e269772661")
# Instances of it should have a digest_size attribute
self.assertEqual(md5().digest_size, 16)
test_md5.suppress = [util.suppress(message="twisted.python.hashlib "
"was deprecated in Twisted 13.1.0: Please use hashlib from stdlib.")]
def test_sha1(self):
"""
L{hashlib.sha1} returns an object which can be used to compute a SHA1
hash as defined by U{RFC 3174<http://tools.ietf.org/rfc/rfc3174.txt>}.
"""
from twisted.python.hashlib import sha1
def format(s):
return ''.join(s.split()).lower()
# Test the result using values from section 7.3 of the RFC.
self.assertEqual(
sha1("abc").hexdigest(),
format(
"A9 99 3E 36 47 06 81 6A BA 3E 25 71 78 50 C2 6C 9C D0 D8 9D"))
self.assertEqual(
sha1("abcdbcdecdefdefgefghfghighijhi"
"jkijkljklmklmnlmnomnopnopq").hexdigest(),
format(
"84 98 3E 44 1C 3B D2 6E BA AE 4A A1 F9 51 29 E5 E5 46 70 F1"))
# It should have digest and update methods, too.
self.assertEqual(
sha1("abc").digest().encode('hex'),
format(
"A9 99 3E 36 47 06 81 6A BA 3E 25 71 78 50 C2 6C 9C D0 D8 9D"))
hash = sha1()
hash.update("abc")
self.assertEqual(
hash.digest().encode('hex'),
format(
"A9 99 3E 36 47 06 81 6A BA 3E 25 71 78 50 C2 6C 9C D0 D8 9D"))
# Instances of it should have a digest_size attribute.
self.assertEqual(
sha1().digest_size, 20)
test_sha1.suppress = [util.suppress(message="twisted.python.hashlib "
"was deprecated in Twisted 13.1.0: Please use hashlib from stdlib.")]
|
cordis/pycloudia | refs/heads/master | pycloudia/explorer/consts.py | 1 |
class EXPLORER(object):
PROTOCOL_PREFIX = 'CLD'
IMMEDIATE_HEARTBEAT_INTERVAL = 5
BROADCAST_HEARTBEAT_INTERVAL = 2
|
alexus37/AugmentedRealityChess | refs/heads/master | pythonAnimations/pyOpenGLChess/engineDirectory/oglc-env/lib/python2.7/site-packages/OpenGL/raw/GL/ARB/indirect_parameters.py | 9 |
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
# End users want this...
from OpenGL.raw.GL._types import *
from OpenGL.raw.GL import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GL_ARB_indirect_parameters'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GL,'GL_ARB_indirect_parameters',error_checker=_errors._error_checker)
GL_PARAMETER_BUFFER_ARB=_C('GL_PARAMETER_BUFFER_ARB',0x80EE)
GL_PARAMETER_BUFFER_BINDING_ARB=_C('GL_PARAMETER_BUFFER_BINDING_ARB',0x80EF)
@_f
@_p.types(None,_cs.GLenum,_cs.GLintptr,_cs.GLintptr,_cs.GLsizei,_cs.GLsizei)
def glMultiDrawArraysIndirectCountARB(mode,indirect,drawcount,maxdrawcount,stride):pass
@_f
@_p.types(None,_cs.GLenum,_cs.GLenum,_cs.GLintptr,_cs.GLintptr,_cs.GLsizei,_cs.GLsizei)
def glMultiDrawElementsIndirectCountARB(mode,type,indirect,drawcount,maxdrawcount,stride):pass
|
refnode/django-material | refs/heads/master | material/templatetags/material_form.py | 10 |
import os
from collections import defaultdict
from django.forms.forms import BoundField
from django.template.base import (
TemplateSyntaxError, Library,
Node, Variable, token_kwargs)
from django.template.loader import get_template
from django.template.loader_tags import IncludeNode
register = Library()
def _render_parts(context, parts_list):
parts = context['form_parts']
for partnode in parts_list:
part = partnode.resolve_part(context)
if partnode.section not in parts[part]:
value = partnode.render(context)
parts[part][partnode.section] = value
@register.tag('form')
class FormNode(Node):
"""
Template based form rendering
Example::
{% form template='material/form.html' form=form layout=view.layout %}
{% part form.email prepend %}<span class="input-group-addon" id="basic-addon1">@</span>{% endpart %}
{% endform %}
"""
def __init__(self, parser, token):
bits = token.split_contents()
remaining_bits = bits[1:]
self.kwargs = token_kwargs(remaining_bits, parser)
if remaining_bits:
raise TemplateSyntaxError("%r received an invalid token: %r" %
(bits[0], remaining_bits[0]))
for key in self.kwargs:
if key not in ('form', 'layout', 'template'):
raise TemplateSyntaxError("%r received an invalid key: %r" %
(bits[0], key))
self.kwargs[key] = self.kwargs[key]
self.nodelist = parser.parse(('end{}'.format(bits[0]),))
parser.delete_first_token()
def render(self, context):
form = self.kwargs.get('form')
form = form.resolve(context) if form else context.get('form')
if form is None:
return ''
# Take one of view.layout or form.layout
layout = self.kwargs.get('layout')
if layout is not None:
layout = layout.resolve(context)
if layout is None:
if 'view' in context:
view = context['view']
if hasattr(view, 'layout'):
layout = view.layout
if layout is None:
if hasattr(form, 'layout'):
layout = form.layout
template_name = self.kwargs.get('template', 'material/form.html')
template = get_template(template_name)
# Render form and parts
parts = defaultdict(dict) # part -> section -> value
with context.push(
form=form,
layout=layout,
form_template_pack=os.path.dirname(template_name),
form_parts=parts):
# direct children
children = (node for node in self.nodelist if isinstance(node, FormPartNode))
_render_parts(context, children)
# include
children = (node for node in self.nodelist if isinstance(node, IncludeNode))
for included_list in children:
included = included_list.template.resolve(context)
children = (node for node in included.nodelist if isinstance(node, FormPartNode))
_render_parts(context, children)
return template.render(context)
@register.tag('part')
class FormPartNode(Node):
def __init__(self, parser, token):
bits = token.split_contents()
if len(bits) > 5:
raise TemplateSyntaxError(
"%r accepts at most 4 arguments (part_id, section, asvar, varname), got: {}" %
(bits[0], ','.join(bits[1:])))
self.part_id = Variable(bits[1])
self.section = bits[2] if len(bits) >= 3 else None
self.varname = None
if len(bits) > 3:
if bits[3] != 'asvar':
                raise TemplateSyntaxError('Fourth argument should be asvar, got {}'.format(bits[3]))
            if len(bits) < 5:
                raise TemplateSyntaxError('Variable name not provided')
else:
self.varname = Variable(bits[4])
self.nodelist = parser.parse(('end{}'.format(bits[0]),))
parser.delete_first_token()
def resolve_part(self, context):
part = self.part_id.resolve(context)
if isinstance(part, BoundField):
part = part.field
return part
def render(self, context):
part = self.resolve_part(context)
parts = context['form_parts']
if self.section in parts[part]:
# already rendered
if self.varname is not None:
context[self.varname.resolve(context)] = parts[part][self.section]
return ""
else:
return parts[part][self.section]
# child parts
children = (node for node in self.nodelist if isinstance(node, FormPartNode))
_render_parts(context, children)
# render own content
value = self.nodelist.render(context).strip()
if self.varname is not None:
context[self.varname.resolve(context)] = value
return ''
else:
if not value:
return ''
return value
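# Usage sketch (an assumption derived from the argument parsing in __init__
# above, not an example shipped with this file): a {% part %} tag may carry an
# "asvar" clause so the rendered content is stored in the context under the
# resolved variable name instead of being emitted inline. The section name and
# quoted variable name below are hypothetical:
#
#   {% part form.email help_text asvar "email_help" %}Check your inbox{% endpart %}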
|
aYukiSekiguchi/ACCESS-Chromium
|
refs/heads/master
|
native_client_sdk/src/build_tools/tests/update_manifest_test.py
|
7
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for update_manifest.py."""
__author__ = 'mball@google.com (Matt Ball)'
import errno
import os
import SimpleHTTPServer
import SocketServer
import sys
import tempfile
import threading
import unittest
import urllib
import urlparse
from build_tools.sdk_tools import sdk_update
from build_tools.sdk_tools import update_manifest
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
def RemoveFile(filename):
'''Remove a filename if it exists and do nothing if it doesn't exist'''
try:
os.remove(filename)
except OSError as error:
if error.errno != errno.ENOENT:
raise
def GetHTTPHandler(path, length=None):
  '''Returns a simple HTTP Request Handler that only serves up a given file
Args:
path: path and filename of the file to serve up
length: (optional) only serve up the first |length| bytes
Returns:
A SimpleHTTPRequestHandler class'''
class HTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
with open(path, 'rb') as f:
# This code is largely lifted from SimpleHTTPRequestHandler.send_head
self.send_response(200)
self.send_header("Content-type", self.guess_type(path))
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
if length != None:
self.wfile.write(f.read(length))
else:
self.copyfile(f, self.wfile)
return HTTPHandler
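# Wiring sketch (mirrors testUpdatePartialFile further down; shown here only to
# illustrate the handler): serve the first 10 bytes of this file on an
# ephemeral port so the partial-download error path can be exercised.
#
#   server = SocketServer.TCPServer(("", 0), GetHTTPHandler(__file__, 10))
#   ip, port = server.server_address
#   threading.Thread(target=server.serve_forever).start()
#   ...later...
#   server.shutdown()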
class FakeOptions(object):
''' Just a place holder for options '''
def __init__(self):
self.archive_id = None
self.bundle_desc_url = None
self.bundle_name = None
self.bundle_version = None
self.bundle_revision = None
self.desc = None
self.gsutil = os.path.join(TEST_DIR, 'fake_gsutil.bat'
if sys.platform == 'win32' else 'fake_gsutil.py')
self.linux_arch_url = None
self.mac_arch_url = None
self.manifest_file = os.path.join(TEST_DIR, 'naclsdk_manifest_test.json')
self.manifest_version = None
self.recommended = None
self.root_url = 'file://%s' % urllib.pathname2url(TEST_DIR)
self.stability = None
self.upload = False
self.win_arch_url = None
class TestUpdateManifest(unittest.TestCase):
''' Test basic functionality of the update_manifest package.
Note that update_manifest.py now imports sdk_update.py, so this file
tests the update_manifest features within sdk_update.'''
def setUp(self):
self._json_boilerplate=(
'{\n'
' "bundles": [],\n'
' "manifest_version": 1\n'
'}\n')
self._temp_dir = tempfile.gettempdir()
# os.path.join('build_tools', 'tests', 'test_archive')
self._manifest = update_manifest.UpdateSDKManifest()
def testJSONBoilerplate(self):
''' Test creating a manifest object'''
self.assertEqual(self._manifest.GetManifestString(),
self._json_boilerplate)
# Test using a manifest file with a version that is too high
self.assertRaises(sdk_update.Error,
self._manifest.LoadManifestString,
'{"manifest_version": 2}')
def testWriteLoadManifestFile(self):
''' Test writing to and loading from a manifest file'''
# Remove old test file
file_path = os.path.join(self._temp_dir, 'temp_manifest.json')
if os.path.exists(file_path):
os.remove(file_path)
# Create a basic manifest file
manifest_file = sdk_update.SDKManifestFile(file_path)
    manifest_file.WriteFile()
self.assertTrue(os.path.exists(file_path))
# Test re-loading the file
manifest_file._manifest._manifest_data['manifest_version'] = 0
manifest_file._LoadFile()
self.assertEqual(manifest_file._manifest.GetManifestString(),
self._json_boilerplate)
os.remove(file_path)
def testValidateBundleName(self):
''' Test validating good and bad bundle names '''
self.assertTrue(
self._manifest._ValidateBundleName('A_Valid.Bundle-Name(1)'))
self.assertFalse(self._manifest._ValidateBundleName('A bad name'))
self.assertFalse(self._manifest._ValidateBundleName('A bad/name'))
self.assertFalse(self._manifest._ValidateBundleName('A bad;name'))
self.assertFalse(self._manifest._ValidateBundleName('A bad,name'))
def testUpdateManifestVersion(self):
''' Test updating the manifest version number '''
options = FakeOptions()
options.manifest_version = 99
self.assertEqual(self._manifest._manifest_data['manifest_version'], 1)
self._manifest._UpdateManifestVersion(options)
self.assertEqual(self._manifest._manifest_data['manifest_version'], 99)
def testVerifyAllOptionsConsumed(self):
''' Test function _VerifyAllOptionsConsumed '''
options = FakeOptions()
options.opt1 = None
self.assertTrue(self._manifest._VerifyAllOptionsConsumed(options, None))
options.opt2 = 'blah'
self.assertRaises(update_manifest.Error,
self._manifest._VerifyAllOptionsConsumed,
options,
'no bundle name')
def testBundleUpdate(self):
''' Test function Bundle.Update '''
bundle = sdk_update.Bundle('test')
options = FakeOptions()
options.bundle_revision = 1
options.bundle_version = 2
options.desc = 'What a hoot'
options.stability = 'dev'
options.recommended = 'yes'
update_manifest.UpdateBundle(bundle, options)
self.assertEqual(bundle['revision'], 1)
def testUpdateManifestModifyTopLevel(self):
''' Test function UpdateManifest: modifying top-level info '''
options = FakeOptions()
options.manifest_version = 0
options.bundle_name = None
self._manifest.UpdateManifest(options)
self.assertEqual(self._manifest._manifest_data['manifest_version'], 0)
def testUpdateManifestModifyBundle(self):
''' Test function UpdateManifest: adding/modifying a bundle '''
# Add a bundle
options = FakeOptions()
options.manifest_version = 1
options.bundle_name = 'test'
options.bundle_revision = 2
options.bundle_version = 3
options.desc = 'nice bundle'
options.stability = 'canary'
options.recommended = 'yes'
self._manifest.UpdateManifest(options)
bundle = self._manifest.GetBundle('test')
self.assertNotEqual(bundle, None)
# Modify the same bundle
options = FakeOptions()
options.manifest_version = None
options.bundle_name = 'test'
options.desc = 'changed'
self._manifest.UpdateManifest(options)
bundle = self._manifest.GetBundle('test')
self.assertEqual(bundle['description'], 'changed')
def testUpdateManifestBadBundle1(self):
''' Test function UpdateManifest: bad bundle data '''
options = FakeOptions()
options.manifest_version = None
options.bundle_name = 'test'
options.stability = 'excellent'
self.assertRaises(sdk_update.Error,
self._manifest.UpdateManifest,
options)
def testUpdateManifestBadBundle2(self):
''' Test function UpdateManifest: incomplete bundle data '''
options = FakeOptions()
options.manifest_version = None
options.bundle_name = 'another_bundle'
self.assertRaises(sdk_update.Error,
self._manifest.UpdateManifest,
options)
def testUpdateManifestArchiveComputeSha1AndSize(self):
''' Test function Archive.Update '''
temp_file_path = None
try:
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
# Create a temp file with some data
temp_file.write(r'abcdefghijklmnopqrstuvwxyz0123456789')
temp_file_path = temp_file.name
# Windows requires that we close the file before reading from it.
temp_file.close()
# Create an archive with a url to the file we created above.
url_parts = urlparse.ParseResult('file', '', temp_file_path, '', '', '')
url = urlparse.urlunparse(url_parts)
archive = sdk_update.Archive('mac')
archive.Update(url)
self.assertEqual(archive['checksum']['sha1'],
'd2985049a677bbc4b4e8dea3b89c4820e5668e3a')
finally:
if temp_file_path and os.path.exists(temp_file_path):
os.remove(temp_file_path)
def testUpdateManifestArchiveValidate(self):
''' Test function Archive.Validate '''
# Test invalid host-os name
archive = sdk_update.Archive('atari')
self.assertRaises(sdk_update.Error, archive.Validate)
# Test missing url
archive['host_os'] = 'mac'
self.assertRaises(sdk_update.Error, archive.Validate)
# Valid archive
archive['url'] = 'http://www.google.com'
archive.Validate()
# Test invalid key name
archive['guess'] = 'who'
self.assertRaises(sdk_update.Error, archive.Validate)
def testUpdatePartialFile(self):
'''Test updating with a partially downloaded file'''
server = None
server_thread = None
temp_filename = os.path.join(self._temp_dir,
'testUpdatePartialFile_temp.txt')
try:
# Create a new local server on an arbitrary port that just serves-up
# the first 10 bytes of this file.
server = SocketServer.TCPServer(
("", 0), GetHTTPHandler(__file__, 10))
ip, port = server.server_address
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
archive = sdk_update.Archive('mac')
self.assertRaises(sdk_update.Error,
archive.Update,
'http://localhost:%s' % port)
try:
self.assertRaises(sdk_update.Error,
archive.DownloadToFile,
temp_filename)
finally:
RemoveFile(temp_filename)
finally:
if server_thread and server_thread.isAlive():
server.shutdown()
server_thread.join()
def testUpdateManifestMain(self):
''' test the main function from update_manifest '''
temp_filename = os.path.join(self._temp_dir, 'testUpdateManifestMain.json')
try:
argv = ['--bundle-version', '0',
'--bundle-revision', '0',
'--description', 'test bundle for update_manifest unit tests',
'--bundle-name', 'test_bundle',
'--stability', 'dev',
'--recommended', 'no',
'--manifest-file', temp_filename]
update_manifest.main(argv)
finally:
RemoveFile(temp_filename)
def testPush(self):
'''Test whether the push function does the right thing'''
options = FakeOptions()
argv = ['-g', options.gsutil, 'push']
update_manifest.main(argv)
def testHandleSDKTools(self):
'''Test the handling of the sdk_tools bundle'''
options = FakeOptions()
options.bundle_name = 'sdk_tools'
options.upload = True
options.bundle_version = 0
self.assertRaises(
update_manifest.Error,
update_manifest.UpdateSDKManifestFile(options).HandleBundles)
options.bundle_version = None
options.bundle_revision = 0
self.assertRaises(
update_manifest.Error,
update_manifest.UpdateSDKManifestFile(options).HandleBundles)
options.bundle_revision = None
update_manifest.UpdateSDKManifestFile(options).HandleBundles()
def testHandlePepper(self):
'''Test the handling of pepper bundles'''
options = FakeOptions()
options.bundle_name = 'pepper'
options.bundle_version = None
self.assertRaises(
update_manifest.Error,
update_manifest.UpdateSDKManifestFile(options).HandleBundles)
options.bundle_name = 'pepper'
options.bundle_version = 1
options.bundle_revision = None
self.assertRaises(
update_manifest.Error,
update_manifest.UpdateSDKManifestFile(options).HandleBundles)
options.bundle_name = 'pepper'
options.bundle_revision = 0
manifest_object = update_manifest.UpdateSDKManifestFile(options)
manifest_object.HandleBundles()
manifest_object.UpdateWithOptions()
options = FakeOptions()
options.bundle_name = 'pepper_1'
options.bundle_revision = 0
manifest_object = update_manifest.UpdateSDKManifestFile(options)
manifest_object.HandleBundles()
manifest_object.UpdateWithOptions()
# Verify that the bundle can be found via the --archive-id option.
options = FakeOptions()
options.archive_id = 'pepper_1_0'
options.bundle_name = 'pepper_phony'
options.bundle_version = -1
options.bundle_revision = -1
options.stability = 'dev'
options.recommended = 'no'
manifest_object = update_manifest.UpdateSDKManifestFile(options)
manifest_object.HandleBundles()
manifest_object.UpdateWithOptions()
def main():
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateManifest)
result = unittest.TextTestRunner(verbosity=2).run(suite)
return int(not result.wasSuccessful())
if __name__ == '__main__':
sys.exit(main())
|
sightmachine/SimpleCV
|
refs/heads/master
|
SimpleCV/examples/display/videowriter.py
|
13
|
#!/usr/bin/python
from SimpleCV import *
import time
c = Camera()
vs = VideoStream("foo.avi")
for i in range(0,500):
c.getImage().edges().invert().save(vs)
time.sleep(0.05)
|
timj/scons
|
refs/heads/master
|
test/DVIPS/DVIPS.py
|
1
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import TestSCons
_python_ = TestSCons._python_
test = TestSCons.TestSCons()
test.write('mytex.py', r"""
import os
import sys
import getopt
cmd_opts, arg = getopt.getopt(sys.argv[1:], 'i:r:', [])
base_name = os.path.splitext(arg[0])[0]
infile = open(arg[0], 'rb')
out_file = open(base_name+'.dvi', 'wb')
for l in infile.readlines():
if l[:4] != '#tex':
out_file.write(l)
sys.exit(0)
""")
test.write('mylatex.py', r"""
import os
import sys
import getopt
cmd_opts, arg = getopt.getopt(sys.argv[1:], 'i:r:', [])
base_name = os.path.splitext(arg[0])[0]
infile = open(arg[0], 'rb')
out_file = open(base_name+'.dvi', 'wb')
for l in infile.readlines():
if l[:6] != '#latex':
out_file.write(l)
sys.exit(0)
""")
test.write('mydvips.py', r"""
import os
import sys
infile = open(sys.argv[3], 'rb')
out_file = open(sys.argv[2], 'wb')
for l in infile.readlines():
if l[:6] != '#dvips':
out_file.write(l)
sys.exit(0)
""")
test.write('SConstruct', """
env = Environment(TEX = r'%(_python_)s mytex.py',
LATEX = r'%(_python_)s mylatex.py',
DVIPS = r'%(_python_)s mydvips.py',
tools=['tex', 'latex', 'dvips'])
dvi = env.DVI(target = 'test1.dvi', source = 'test1.tex')
env.PostScript(target = 'test1.ps', source = dvi)
env.PostScript(target = 'test2.ps', source = 'test2.tex')
env.PostScript(target = 'test3.ps', source = 'test3.ltx')
env.PostScript(target = 'test4.ps', source = 'test4.latex')
""" % locals())
test.write('test1.tex', r"""This is a .dvi test.
#tex
#dvips
""")
test.write('test2.tex', r"""This is a .tex test.
#tex
#dvips
""")
test.write('test3.ltx', r"""This is a .ltx test.
#latex
#dvips
""")
test.write('test4.latex', r"""This is a .latex test.
#latex
#dvips
""")
test.run(arguments = '.', stderr = None)
test.must_match('test1.ps', "This is a .dvi test.\n")
test.must_match('test2.ps', "This is a .tex test.\n")
test.must_match('test3.ps', "This is a .ltx test.\n")
test.must_match('test4.ps', "This is a .latex test.\n")
have_latex = test.where_is('latex')
if not have_latex:
test.skip_test('Could not find latex; skipping test(s).\n')
dvips = test.where_is('dvips')
if dvips:
test.write("wrapper.py", """import os
import sys
cmd = " ".join(sys.argv[1:])
open('%s', 'ab').write("%%s\\n" %% cmd)
os.system(cmd)
""" % test.workpath('wrapper.out').replace('\\', '\\\\'))
test.write('SConstruct', """
import os
ENV = { 'PATH' : os.environ['PATH'] }
foo = Environment(ENV = ENV)
dvips = foo.Dictionary('DVIPS')
bar = Environment(ENV = ENV, DVIPS = r'%(_python_)s wrapper.py ' + dvips)
foo.PostScript(target = 'foo.ps', source = 'foo.tex')
bar.PostScript(target = 'bar1', source = 'bar1.tex')
bar.PostScript(target = 'bar2', source = 'bar2.ltx')
bar.PostScript(target = 'bar3', source = 'bar3.latex')
""" % locals())
tex = r"""
This is the %s TeX file.
\end
"""
latex = r"""
\documentclass{letter}
\begin{document}
This is the %s LaTeX file.
\end{document}
"""
test.write('foo.tex', tex % 'foo.tex')
test.write('bar1.tex', tex % 'bar1.tex')
test.write('bar2.ltx', latex % 'bar2.ltx')
test.write('bar3.latex', latex % 'bar3.latex')
test.run(arguments = 'foo.dvi', stderr = None)
test.must_not_exist(test.workpath('wrapper.out'))
test.must_exist(test.workpath('foo.dvi'))
test.run(arguments = 'bar1.ps bar2.ps bar3.ps', stderr = None)
expect = """dvips -o bar1.ps bar1.dvi
dvips -o bar2.ps bar2.dvi
dvips -o bar3.ps bar3.dvi
"""
test.must_match('wrapper.out', expect)
test.must_exist(test.workpath('bar1.ps'))
test.must_exist(test.workpath('bar2.ps'))
test.must_exist(test.workpath('bar3.ps'))
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
SivagnanamCiena/pynos
|
refs/heads/master
|
tests/versions/base/test_lldp.py
|
1
|
#!/usr/bin/env python
"""
Copyright 2015 Brocade Communications Systems, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import xml.etree.ElementTree as ET
import pynos.versions.base.lldp
import pynos.utilities
class TestLLDP(unittest.TestCase):
"""
LLDP unit tests. Compare expected XML to generated XML.
"""
def setUp(self):
self.lldp = pynos.versions.base.lldp.LLDP(pynos.utilities.return_xml)
self.namespace = 'urn:brocade.com:mgmt:brocade-lldp-ext'
self.netconf_namespace = 'urn:ietf:params:xml:ns:netconf:base:1.0'
def lldp_neighbors_xml(self, *args):
message_id = 'urn:uuid:528cdf32-2e86-11e5-bb27-080027b782e4'
neighbor_xml = '<ns0:rpc-reply xmlns:ns0="{0}" xmlns:ns1="{1}" '\
'message-id="{2}"><ns1:lldp-neighbor-detail>'\
'<ns1:local-interface-name>Te 226/0/7'\
'</ns1:local-interface-name>'\
'<ns1:local-interface-ifindex>402882566'\
'</ns1:local-interface-ifindex>'\
'<ns1:local-interface-mac>0005.33e5.d764'\
'</ns1:local-interface-mac><ns1:remote-interface-name>'\
'port0</ns1:remote-interface-name>'\
'<ns1:remote-interface-mac>8c7c.ff02.f100'\
'</ns1:remote-interface-mac><ns1:dead-interval>120'\
'</ns1:dead-interval><ns1:remaining-life>102'\
'</ns1:remaining-life><ns1:remote-chassis-id>'\
'8c7c.ff02.f100</ns1:remote-chassis-id>'\
'<ns1:lldp-pdu-transmitted>5397'\
'</ns1:lldp-pdu-transmitted><ns1:lldp-pdu-received>'\
'5263</ns1:lldp-pdu-received>'\
'</ns1:lldp-neighbor-detail><ns1:has-more>false'\
'</ns1:has-more>'\
'</ns0:rpc-reply>'.format(self.netconf_namespace,
self.namespace,
message_id)
return ET.fromstring(neighbor_xml)
def test_neighbors(self):
expected = {'local-int-name': 'Te 226/0/7',
'local-int-mac': '0005.33e5.d764',
'remote-int-name': 'port0',
'remote-int-mac': '8c7c.ff02.f100',
'remote-chassis-id': '8c7c.ff02.f100',
'remote-system-name': 'placeholder'}
self.lldp._callback = self.lldp_neighbors_xml
results = self.lldp.neighbors
self.assertIsInstance(results, list)
self.assertDictEqual(expected, results[0])
|
smsaladi/moltemplate
|
refs/heads/master
|
moltemplate/bonds_by_type.py
|
1
|
#!/usr/bin/env python
# Author: Andrew Jewett (jewett.aij at g mail)
# License: MIT License (See LICENSE.md)
# Copyright (c) 2013
"""
bonds_by_type.py reads a LAMMPS data file (or an excerpt of a LAMMPS)
data file containing bonded many-body interactions by atom type
(and bond type), and generates a list of additional interactions
in LAMMPS format consistent with those type (to the standard out).
Typical Usage:
bonds_by_type.py -atoms atoms.data \\
-bonds bonds.data \\
-bondsbytype bonds_by_type.data \\
> new_bonds.data
"""
g_program_name = __file__.split('/')[-1] # = 'bonds_by_type.py'
g_date_str = '2016-12-21'
g_version_str = '0.12.0'
import re
import sys
try:
from . import ttree_lex
from .lttree_styles import AtomStyle2ColNames, ColNames2AidAtypeMolid
except (ImportError, SystemError, ValueError):
# not installed as a package
import ttree_lex
from lttree_styles import AtomStyle2ColNames, ColNames2AidAtypeMolid
def LookupBondTypes(bond_types,
bond_ids,
bond_pairs,
lines_atoms,
lines_bonds,
lines_bondsbytype,
atom_style,
section_name,
prefix='',
suffix='',
bond_ids_offset=0):
# report_progress = False):
"""
LookupBondTypes() looks up bond types.
Output:
...It looks up the corresponding type of each bond and store it in the
"bond_types" list. (If the bond_ids were not specified by the user,
generate them and store them in the bond_ids list.)
Input (continued):
This function requires:
...a list of bonded pairs of atoms
stored in the lines_bonds variable (from the "Data Bond List"
or "Data Bonds AtomId AtomId" sections)
...and a list of atom types
stored in the lines_atoms variable (from the "Data Atoms" section)
...and a list of bond-types-as-a-function-of-atom-types
stored in the lines_bondsbytype (from the "Data Bonds By Type" section)
Generated bond_ids (if applicable) are of the form
prefix + str(number) + suffix
(where "number" begins at bond_ids_offset+1)
"""
column_names = AtomStyle2ColNames(atom_style)
i_atomid, i_atomtype, i_molid = ColNames2AidAtypeMolid(column_names)
atomids = []
atomtypes = []
atomids2types = {}
for iv in range(0, len(lines_atoms)):
line = lines_atoms[iv].strip()
if '#' in line:
icomment = line.find('#')
line = (line[:icomment]).strip()
if len(line) > 0:
tokens = ttree_lex.SplitQuotedString(line)
if ((len(tokens) <= i_atomid) or (len(tokens) <= i_atomtype)):
sys.stderr.write("\"" + line + "\"\n")
raise(ttree_lex.InputError(
                                           'Error: not enough columns on line ' + str(iv + 1) + ' of \"Atoms\" section.'))
tokens = ttree_lex.SplitQuotedString(line)
atomid = ttree_lex.EscCharStrToChar(tokens[i_atomid])
atomids.append(atomid)
atomtype = ttree_lex.EscCharStrToChar(tokens[i_atomtype])
atomtypes.append(atomtype)
atomids2types[atomid] = atomtype
assert(isinstance(bond_ids, list))
assert(isinstance(bond_types, list))
assert(isinstance(bond_pairs, list))
del bond_ids[:]
del bond_types[:]
del bond_pairs[:]
for ie in range(0, len(lines_bonds)):
line = lines_bonds[ie].strip()
if '#' in line:
icomment = line.find('#')
line = (line[:icomment]).strip()
if len(line) == 0:
continue
tokens = ttree_lex.SplitQuotedString(line)
if section_name == "Data Bonds AtomId AtomId":
if len(tokens) == 2:
bondid_n = bond_ids_offset + len(bond_ids) + 1
bond_ids.append(prefix + str(bondid_n) + suffix)
bond_pairs.append((ttree_lex.EscCharStrToChar(tokens[0]),
ttree_lex.EscCharStrToChar(tokens[1])))
else:
raise(ttree_lex.InputError('Incorrect number of columns on line ' +
str(ie + 1) + ' of \"' + section_name + '\" section.'))
elif section_name == "Data Bond List":
if len(tokens) == 3:
bond_ids.append(ttree_lex.EscCharStrToChar(tokens[0]))
bond_pairs.append((ttree_lex.EscCharStrToChar(tokens[1]),
ttree_lex.EscCharStrToChar(tokens[2])))
else:
raise(ttree_lex.InputError('Incorrect number of columns on line ' +
str(ie + 1) + ' of \"' + section_name + '\" section.'))
else:
raise(ttree_lex.InputError('Internal Error (' + g_program_name +
'): Unknown section name: \"' + section_name + '\"'))
assert(len(bond_types) == 0)
typepattern_to_coefftypes = []
for i in range(0, len(lines_bondsbytype)):
line = lines_bondsbytype[i].strip()
if '#' in line:
icomment = line.find('#')
line = (line[:icomment]).strip()
if len(line) > 0:
tokens = ttree_lex.SplitQuotedString(line)
if (len(tokens) != 3):
raise(ttree_lex.InputError('Error: Wrong number of columns in the \"Bonds By Type\" section of data file.\n'
'Offending line:\n' +
'\"' + line + '\"\n'
'Expected 3 columns\n'))
coefftype = ttree_lex.EscCharStrToChar(tokens[0])
typepattern = []
for typestr in tokens[1:]:
if ((len(typestr) >= 2) and
(typestr[0] == '/') and (typestr[-1] == '/')):
regex_str = typestr[1:-1]
typepattern.append(re.compile(regex_str))
else:
typepattern.append(ttree_lex.EscCharStrToChar(typestr))
typepattern_to_coefftypes.append([typepattern, coefftype])
assert(len(bond_ids) == len(bond_pairs))
for ie in range(0, len(bond_ids)):
bond_types.append(None)
for ie in range(0, len(bond_ids)):
bondid = bond_ids[ie]
(atomid1, atomid2) = bond_pairs[ie]
if atomid1 not in atomids2types:
raise ttree_lex.InputError('Error: atom \"' + atomid1 + '\" not defined in \"Data Atoms\".\n'
' This usually happens when the user mistypes one of the names of the\n'
' $atoms in either a \"Data Atoms\" or \"Data Bond List\" section.\n'
                                       ' To find out where the mistake occurred, search the \n'
' \"ttree_assignments.txt\" file for:\n'
' \"' + atomid1 + '\"\n')
if atomid2 not in atomids2types:
raise ttree_lex.InputError('Error: atom \"' + atomid2 + '\" not defined in \"Data Atoms\".\n'
' This usually happens when the user mistypes one of the names of the\n'
' $atoms in either a \"Data Atoms\" or \"Data Bond List\" section.\n'
                                       ' To find out where the mistake occurred, search the \n'
' \"ttree_assignments.txt\" file for:\n'
' \"' + atomid2 + '\"\n')
atomtype1 = atomids2types[atomid1]
atomtype2 = atomids2types[atomid2]
for typepattern, coefftype in typepattern_to_coefftypes:
# use string comparisons to check if atom types match the pattern
if (ttree_lex.MatchesAll((atomtype1, atomtype2), typepattern) or
ttree_lex.MatchesAll((atomtype2, atomtype1), typepattern)):
# ("MatchesAll()" defined in "ttree_lex.py")
bond_types[ie] = coefftype
for ie in range(0, len(bond_ids)):
if not bond_types[ie]:
(atomid1, atomid2) = bond_pairs[ie]
atomtype1 = atomids2types[atomid1]
atomtype2 = atomids2types[atomid2]
raise ttree_lex.InputError('Error: No bond types defined for the bond between\n'
' atoms ' + atomid1 +
' (type ' + atomtype1 + ')\n'
' and ' + atomid2 + ' (type ' + atomtype2 + ')\n'
'\n'
' (If you are using a force field, then it probably means that you made a\n'
' mistake choosing at least one of these two @atom types from the list\n'
' of available atom types supplied by the force field. To fix it, edit\n'
' the corresponding lines in the "Data Atoms" section of your LT file.)\n')
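# Minimal call sketch (an illustration, not part of the original module). The
# lines_* arguments are plain lists of strings; the atom lines assume the
# LAMMPS "full" atom style (atom-ID molecule-ID atom-type q x y z), and every
# $/@ name below is made up:
#
#   bond_types, bond_ids, bond_pairs = [], [], []
#   LookupBondTypes(bond_types, bond_ids, bond_pairs,
#                   lines_atoms=['$atom:a1 $mol:m1 @atom:A 0.0 0.0 0.0 0.0',
#                                '$atom:a2 $mol:m1 @atom:B 0.0 1.0 0.0 0.0'],
#                   lines_bonds=['$bond:b1 $atom:a1 $atom:a2'],
#                   lines_bondsbytype=['@bond:AB @atom:A @atom:B'],
#                   atom_style='full',
#                   section_name='Data Bond List')
#   # expected afterwards: bond_types == ['@bond:AB']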
def main():
sys.stderr.write(g_program_name + ' v' +
g_version_str + ' ' + g_date_str + ' ')
if sys.version < '3':
sys.stderr.write(' (python version < 3)\n')
else:
sys.stderr.write('\n')
try:
fname_atoms = None
fname_bond_list = None
fname_bondsbytype = None
section_name = 'Data Bond List' # (This will be replaced later.)
atom_style = 'full'
prefix = ''
suffix = ''
bond_lack_types = False
argv = [arg for arg in sys.argv]
# Loop over the remaining arguments not processed yet.
# These arguments are specific to the lttree.py program
# and are not understood by ttree.py:
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if ((argv[i].lower() == '-?') or
(argv[i].lower() == '--?') or
(argv[i].lower() == '-help') or
                (argv[i].lower() == '--help')):
if i + 1 >= len(argv):
                    sys.stdout.write(__doc__ + '\n')
                    sys.exit(0)
elif argv[i].lower() == '-atoms':
if i + 1 >= len(argv):
raise ttree_lex.InputError('Error: ' + argv[i] + ' flag should be followed by a file name containing lines of\n'
' text which might appear in the "Atoms" section of a LAMMPS data file.\n')
fname_atoms = argv[i + 1]
del(argv[i:i + 2])
elif argv[i].lower() == '-bonds':
if i + 1 >= len(argv):
raise ttree_lex.InputError('Error: ' + argv[i] + ' flag should be followed by a file name containing lines of\n'
' text which might appear in the "Bonds" section of a LAMMPS data file.\n')
fname_bond_list = argv[i + 1]
del(argv[i:i + 2])
elif argv[i].lower() == '-bond-list':
if i + 1 >= len(argv):
raise ttree_lex.InputError(
'Error: ' + argv[i] + ' flag should be followed by a file name\n')
# raise ttree_lex.InputError('Error: '+argv[i]+' flag should be followed by a file name containing lines of\n'
# ' text which might appear in the "Bonds No Types" section of a LAMMPS data file.\n')
fname_bond_list = argv[i + 1]
section_name = "Data Bond List"
del(argv[i:i + 2])
elif argv[i].lower() == '-bondsbytype':
if i + 1 >= len(argv):
raise ttree_lex.InputError(
'Error: ' + argv[i] + ' flag should be followed by a file name\n')
# raise ttree_lex.InputError('Error: '+argv[i]+' flag should be followed by a file name containing\n'
# ' text which might appear in the "'+section_name+' By Type" section\n'
# ' of a LAMMPS data file.\n')
fname_bondsbytype = argv[i + 1]
del(argv[i:i + 2])
elif ((argv[i].lower() == '-atom-style') or
(argv[i].lower() == '-atom_style')):
if i + 1 >= len(argv):
raise ttree_lex.InputError('Error: ' + argv[i] + ' flag should be followed by a an atom_style name.\n'
' (Or single quoted string which includes a space-separated\n'
' list of column names.)\n')
atom_style = argv[i + 1]
del(argv[i:i + 2])
elif argv[i].lower() == '-prefix':
if i + 1 >= len(argv):
raise ttree_lex.InputError('Error: ' + argv[i] + ' flag should be followed by a prefix string\n'
' (a string you want to appear to the left of the integer\n'
' which counts the bonded interactions you have generated.)\n')
prefix = argv[i + 1]
del(argv[i:i + 2])
elif argv[i].lower() == '-suffix':
if i + 1 >= len(argv):
raise ttree_lex.InputError('Error: ' + argv[i] + ' flag should be followed by a suffix string\n'
' (a string you want to appear to the right of the integer\n'
' which counts the bonded interactions you have generated.)\n')
                suffix = argv[i + 1]
del(argv[i:i + 2])
elif argv[i][0] == '-':
raise ttree_lex.InputError('Error(' + g_program_name + '):\n'
                                       'Unrecognized command line argument \"' + argv[i] + '\"\n')
else:
i += 1
if len(argv) != 1:
# if there are more than 2 remaining arguments,
problem_args = ['\"' + arg + '\"' for arg in argv[1:]]
raise ttree_lex.InputError('Syntax Error(' + g_program_name + '):\n\n'
' Problem with argument list.\n'
' The remaining arguments are:\n\n'
' ' +
(' '.join(problem_args)) + '\n\n'
' (The actual problem may be earlier in the argument list.)\n')
bond_types = []
bond_ids = []
bond_pairs = []
fatoms = open(fname_atoms, 'r')
fbonds = open(fname_bond_list, 'r')
fbondsbytype = open(fname_bondsbytype, 'r')
lines_atoms = fatoms.readlines()
lines_bonds = fbonds.readlines()
lines_bondsbytype = fbondsbytype.readlines()
fatoms.close()
fbonds.close()
fbondsbytype.close()
LookupBondTypes(bond_types,
bond_ids,
bond_pairs,
lines_atoms,
lines_bonds,
lines_bondsbytype,
atom_style,
section_name,
prefix='',
suffix='')
assert(len(bond_types) == len(bond_ids) == len(bond_pairs))
ie = 0
N = len(bond_types)
for ie in range(0, N):
sys.stdout.write(bond_ids[ie] + ' ' +
bond_types[ie] + ' ' +
bond_pairs[ie][0] + ' ' +
bond_pairs[ie][1] + '\n')
except (ValueError, ttree_lex.InputError) as err:
sys.stderr.write('\n' + str(err) + '\n')
sys.exit(-1)
return
if __name__ == "__main__":
main()
|
coreyoconnor/nixops
|
refs/heads/master
|
nixops/resources/azure_availability_set.py
|
6
|
# -*- coding: utf-8 -*-
# Automatic provisioning of Azure availability sets.
import os
import azure
from nixops.util import attr_property
from nixops.azure_common import ResourceDefinition, ResourceState, normalize_location
from azure.mgmt.compute import AvailabilitySet
class AzureAvailabilitySetDefinition(ResourceDefinition):
"""Definition of an Azure Availability Set"""
@classmethod
def get_type(cls):
return "azure-availability-set"
@classmethod
def get_resource_type(cls):
return "azureAvailabilitySets"
def __init__(self, xml):
ResourceDefinition.__init__(self, xml)
self.availability_set_name = self.get_option_value(xml, 'name', str)
self.copy_option(xml, 'resourceGroup', 'resource')
self.copy_location(xml)
self.copy_tags(xml)
self.copy_option(xml, 'platformUpdateDomainCount', int)
self.copy_option(xml, 'platformFaultDomainCount', int)
def show_type(self):
return "{0} [{1}]".format(self.get_type(), self.location)
class AzureAvailabilitySetState(ResourceState):
"""State of an Azure Availability Set"""
availability_set_name = attr_property("azure.name", None)
resource_group = attr_property("azure.resourceGroup", None)
location = attr_property("azure.location", None)
tags = attr_property("azure.tags", {}, 'json')
platform_update_domain_count = attr_property("azure.platformUpdateDomainCount", None, int)
platform_fault_domain_count = attr_property("azure.platformFaultDomainCount", None, int)
@classmethod
def get_type(cls):
return "azure-availability-set"
def show_type(self):
s = super(AzureAvailabilitySetState, self).show_type()
if self.state == self.UP: s = "{0} [{1}]".format(s, self.location)
return s
@property
def resource_id(self):
return self.availability_set_name
@property
def full_name(self):
return "Azure availability set '{0}'".format(self.resource_id)
def get_resource(self):
try:
return self.cmc().availability_sets.get(
self.resource_group, self.resource_id).availability_set
except azure.common.AzureMissingResourceHttpError:
return None
def destroy_resource(self):
self.cmc().availability_sets.delete(self.resource_group, self.resource_id)
def is_settled(self, resource):
return True
defn_properties = [ 'location', 'tags', 'platform_update_domain_count',
'platform_fault_domain_count' ]
def _create_or_update(self, defn):
self.cmc().availability_sets.create_or_update(
defn.resource_group,
AvailabilitySet(
name = defn.availability_set_name,
location = defn.location,
tags = defn.tags,
platform_update_domain_count = defn.platform_update_domain_count,
platform_fault_domain_count = defn.platform_fault_domain_count,
))
self.state = self.UP
self.copy_properties(defn)
def create(self, defn, check, allow_reboot, allow_recreate):
self.no_subscription_id_change(defn)
self.no_location_change(defn)
self.no_property_change(defn, 'resource_group')
self.no_property_change(defn, 'platform_update_domain_count')
self.no_property_change(defn, 'platform_fault_domain_count')
self.copy_mgmt_credentials(defn)
self.availability_set_name = defn.availability_set_name
self.resource_group = defn.resource_group
if check:
aset = self.get_settled_resource()
if not aset:
self.warn_missing_resource()
elif self.state == self.UP:
self.handle_changed_property('location', normalize_location(aset.location),
can_fix = False)
self.handle_changed_property('tags', aset.tags)
self.handle_changed_property('platform_update_domain_count',
aset.platform_update_domain_count,
can_fix = False)
self.handle_changed_property('platform_fault_domain_count',
aset.platform_fault_domain_count,
can_fix = False)
else:
self.warn_not_supposed_to_exist()
self.confirm_destroy()
if self.state != self.UP:
if self.get_settled_resource():
raise Exception("tried creating an availability set that already exists; "
"please run 'deploy --check' to fix this")
self.log("creating {0} in {1}...".format(self.full_name, defn.location))
self._create_or_update(defn)
if self.properties_changed(defn):
self.log("updating properties of {0}...".format(self.full_name))
self.get_settled_resource_assert_exists()
self._create_or_update(defn)
def create_after(self, resources, defn):
from nixops.resources.azure_resource_group import AzureResourceGroupState
return {r for r in resources
if isinstance(r, AzureResourceGroupState)}
|
tboyce021/home-assistant
|
refs/heads/dev
|
tests/components/ambient_station/test_config_flow.py
|
21
|
"""Define tests for the Ambient PWS config flow."""
import json
from unittest.mock import patch
import aioambient
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.ambient_station import CONF_APP_KEY, DOMAIN, config_flow
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_API_KEY
from tests.common import MockConfigEntry, load_fixture, mock_coro
@pytest.fixture
def get_devices_response():
"""Define a fixture for a successful /devices response."""
return mock_coro()
@pytest.fixture
def mock_aioambient(get_devices_response):
"""Mock the aioambient library."""
with patch("homeassistant.components.ambient_station.config_flow.Client") as Client:
Client().api.get_devices.return_value = get_devices_response
yield Client
async def test_duplicate_error(hass):
"""Test that errors are shown when duplicates are added."""
conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"}
MockConfigEntry(
domain=DOMAIN, unique_id="67890fghij67890fghij", data=conf
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
@pytest.mark.parametrize(
"get_devices_response", [mock_coro(exception=aioambient.errors.AmbientError)]
)
async def test_invalid_api_key(hass, mock_aioambient):
"""Test that an invalid API/App Key throws an error."""
conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"}
flow = config_flow.AmbientStationFlowHandler()
flow.hass = hass
flow.context = {"source": SOURCE_USER}
result = await flow.async_step_user(user_input=conf)
assert result["errors"] == {"base": "invalid_key"}
@pytest.mark.parametrize("get_devices_response", [mock_coro(return_value=[])])
async def test_no_devices(hass, mock_aioambient):
"""Test that an account with no associated devices throws an error."""
conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"}
flow = config_flow.AmbientStationFlowHandler()
flow.hass = hass
flow.context = {"source": SOURCE_USER}
result = await flow.async_step_user(user_input=conf)
assert result["errors"] == {"base": "no_devices"}
async def test_show_form(hass):
"""Test that the form is served with no input."""
flow = config_flow.AmbientStationFlowHandler()
flow.hass = hass
flow.context = {"source": SOURCE_USER}
result = await flow.async_step_user(user_input=None)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
@pytest.mark.parametrize(
"get_devices_response",
[mock_coro(return_value=json.loads(load_fixture("ambient_devices.json")))],
)
async def test_step_import(hass, mock_aioambient):
"""Test that the import step works."""
conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"}
flow = config_flow.AmbientStationFlowHandler()
flow.hass = hass
flow.context = {"source": SOURCE_USER}
result = await flow.async_step_import(import_config=conf)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "67890fghij67"
assert result["data"] == {
CONF_API_KEY: "12345abcde12345abcde",
CONF_APP_KEY: "67890fghij67890fghij",
}
@pytest.mark.parametrize(
"get_devices_response",
[mock_coro(return_value=json.loads(load_fixture("ambient_devices.json")))],
)
async def test_step_user(hass, mock_aioambient):
"""Test that the user step works."""
conf = {CONF_API_KEY: "12345abcde12345abcde", CONF_APP_KEY: "67890fghij67890fghij"}
flow = config_flow.AmbientStationFlowHandler()
flow.hass = hass
flow.context = {"source": SOURCE_USER}
result = await flow.async_step_user(user_input=conf)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "67890fghij67"
assert result["data"] == {
CONF_API_KEY: "12345abcde12345abcde",
CONF_APP_KEY: "67890fghij67890fghij",
}
|
knoopr/ShotForTheHeart
|
refs/heads/master
|
mailin.py
|
2
|
import httplib2
import json
class Mailin:
""" This is the Mailin client class
"""
def __init__(self,base_url,api_key):
self.base_url = base_url
self.api_key = api_key
def do_request(self,resource,method,indata):
url = self.base_url + "/" + resource
h = httplib2.Http(disable_ssl_certificate_validation=True)
content_type = "application/json"
r,c = h.request(url,method,body=indata,headers={'api-key':self.api_key, 'content-type':content_type})
return json.loads(c)
def get(self,resource,indata):
return self.do_request(resource,"GET",indata)
def post(self,resource,indata):
return self.do_request(resource,"POST",indata)
def put(self,resource,indata):
return self.do_request(resource,"PUT",indata)
def delete(self,resource,indata):
return self.do_request(resource,"DELETE",indata)
# Get Account.
# No input required
def get_account(self,):
return self.get("account","")
# Get SMTP details.
# No input required
def get_smtp_details(self,):
return self.get("account/smtpdetail","")
# Create Child Account.
# @param {Array} data contains php array with key value pair.
# @options data {String} child_email: Email address of Reseller child [Mandatory]
# @options data {String} password: Password of Reseller child to login [Mandatory]
# @options data {String} company_org: Name of Reseller child's company [Mandatory]
# @options data {String} first_name: First name of Reseller child [Mandatory]
# @options data {String} last_name: Last name of Reseller child [Mandatory]
# @options data {Array} credits: Number of email & sms credits respectively, which will be assigned to the Reseller child's account [Optional]
# - email_credit {Integer} number of email credits
    # - sms_credit {Integer} Number of sms credits
# @options data {Array} associate_ip: Associate dedicated IPs to reseller child. You can use commas to separate multiple IPs [Optional]
def create_child_account(self,data):
return self.post("account",json.dumps(data))
# Update Child Account.
# @param {Array} data contains php array with key value pair.
# @options data {String} auth_key: 16 character authorization key of Reseller child to be modified [Mandatory]
# @options data {String} company_org: Name of Reseller child's company [Optional]
# @options data {String} first_name: First name of Reseller child [Optional]
# @options data {String} last_name: Last name of Reseller child [Optional]
# @options data {String} password: Password of Reseller child to login [Optional]
# @options data {Array} associate_ip: Associate dedicated IPs to reseller child. You can use commas to separate multiple IPs [Optional]
# @options data {Array} disassociate_ip: Disassociate dedicated IPs from reseller child. You can use commas to separate multiple IPs [Optional]
def update_child_account(self,data):
return self.put("account",json.dumps(data))
# Delete Child Account.
# @param {Array} data contains php array with key value pair.
# @options data {String} auth_key: 16 character authorization key of Reseller child to be deleted [Mandatory]
def delete_child_account(self,data):
return self.delete("account/" + data['auth_key'],"")
# Get Reseller child Account.
# @param {Array} data contains php array with key value pair.
# @options data {String} auth_key: 16 character authorization key of Reseller child. Example : To get the details of more than one child account, use, {"key1":"abC01De2fGHI3jkL","key2":"mnO45Pq6rSTU7vWX"} [Mandatory]
def get_reseller_child(self,data):
return self.post("account/getchildv2",json.dumps(data))
# Add/Remove Reseller child's Email/Sms credits.
# @param {Array} data contains php array with key value pair.
# @options data {String} auth_key: 16 character authorization key of Reseller child to modify credits [Mandatory]
# @options data {Array} add_credit: Number of email & sms credits to be added. You can assign either email or sms credits, one at a time other will remain 0. [Mandatory: if rmv_credit is empty]
# - email_credit {Integer} number of email credits
    # - sms_credit {Integer} Number of sms credits
# @options data {Array} rmv_credit: Number of email & sms credits to be removed. You can assign either email or sms credits, one at a time other will remain 0. [Mandatory: if add_credits is empty]
# - email_credit {Integer} number of email credits
    # - sms_credit {Integer} Number of sms credits
def add_remove_child_credits(self,data):
return self.post("account/addrmvcredit",json.dumps(data))
# Send a transactional SMS.
# @param {Array} data contains php array with key value pair.
# @options data {String} to: The mobile number to send SMS to with country code [Mandatory]
# @options data {String} from: The name of the sender. The number of characters is limited to 11 (alphanumeric format) [Mandatory]
# @options data {String} text: The text of the message. The maximum characters used per SMS is 160, if used more than that, it will be counted as more than one SMS [Mandatory]
# @options data {String} web_url: The web URL that can be called once the message is successfully delivered [Optional]
# @options data {String} tag: The tag that you can associate with the message [Optional]
# @options data {String} type: Type of message. Possible values - marketing (default) & transactional. You can use marketing for sending marketing SMS, & for sending transactional SMS, use transactional type [Optional]
def send_sms(self,data):
return self.post("sms",json.dumps(data))
# Create & Schedule your SMS campaigns.
# @param {Array} data contains php array with key value pair.
# @options data {String} name: Name of the SMS campaign [Mandatory]
# @options data {String} sender: This allows you to customize the SMS sender. The number of characters is limited to 11 ( alphanumeric format ) [Optional]
# @options data {String} content: Content of the message. The maximum characters used per SMS is 160, if used more than that, it will be counted as more than one SMS [Optional]
# @options data {String} bat: Mobile number with the country code to send test SMS. The mobile number defined here should belong to one of your contacts in SendinBlue account and should not be blacklisted [Optional]
# @options data {Array} listid: These are the list ids to which the SMS campaign is sent [Mandatory: if scheduled_date is not empty]
# @options data {Array} exclude_list: These are the list ids which will be excluded from the SMS campaign [Optional]
# @options data {String} scheduled_date: The day on which the SMS campaign is supposed to run [Optional]
# @options data {Integer} send_now: Flag to send campaign now. Possible values = 0 (default) & 1. send_now = 0 means campaign can't be send now, & send_now = 1 means campaign ready to send now [Optional]
def create_sms_campaign(self,data):
return self.post("sms",json.dumps(data))
# Update your SMS campaigns.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of the SMS campaign [Mandatory]
# @options data {String} name: Name of the SMS campaign [Optional]
# @options data {String} sender: This allows you to customize the SMS sender. The number of characters is limited to 11 ( alphanumeric format ) [Optional]
# @options data {String} content: Content of the message. The maximum characters used per SMS is 160, if used more than that, it will be counted as more than one SMS [Optional]
# @options data {String} bat: Mobile number with the country code to send test SMS. The mobile number defined here should belong to one of your contacts in SendinBlue account and should not be blacklisted [Optional]
    # @options data {Array} listid: These are the list ids to which the SMS campaign is sent [Mandatory: if scheduled_date is not empty]
# @options data {Array} exclude_list: These are the list ids which will be excluded from the SMS campaign [Optional]
# @options data {String} scheduled_date: The day on which the SMS campaign is supposed to run [Optional]
# @options data {Integer} send_now: Flag to send campaign now. Possible values = 0 (default) & 1. send_now = 0 means campaign can't be send now, & send_now = 1 means campaign ready to send now [Optional]
def update_sms_campaign(self,data):
id = str(data['id'])
return self.put("sms/" + id,json.dumps(data))
# Send a Test SMS.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of the SMS campaign [Mandatory]
# @options data {String} to: Mobile number with the country code to send test SMS. The mobile number defined here should belong to one of your contacts in SendinBlue account and should not be blacklisted [Mandatory]
def send_bat_sms(self,data):
id = str(data['id'])
return self.get("sms/" + id,json.dumps(data))
# Get all campaigns detail.
# @param {Array} data contains php array with key value pair.
# @options data {String} type: Type of campaign. Possible values - classic, trigger, sms, template ( case sensitive ) [Optional]
# @options data {String} status: Status of campaign. Possible values - draft, sent, archive, queued, suspended, in_process, temp_active, temp_inactive ( case sensitive ) [Optional]
# @options data {Integer} page: Maximum number of records per request is 500, if there are more than 500 campaigns then you can use this parameter to get next 500 results [Optional]
# @options data {Integer} page_limit: This should be a valid number between 1-500 [Optional]
def get_campaigns_v2(self,data):
type = data.get("type")
status = data.get("status")
page = data.get("page")
page_limit = data.get("page_limit")
if type is None and status is None and page is None and page_limit is None:
return self.get("campaign/detailsv2/","")
else:
return self.get("campaign/detailsv2/type/" + type + "/status/" + status + "/page/" + str(page) + "/page_limit/" + str(page_limit) + "/","")
# Get a particular campaign detail.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Unique Id of the campaign [Mandatory]
def get_campaign_v2(self,data):
id = str(data['id'])
return self.get("campaign/" + id + "/detailsv2/","")
# Create and Schedule your campaigns. It returns the ID of the created campaign.
# @param {Array} data contains php array with key value pair.
# @options data {String} category: Tag name of the campaign [Optional]
# @options data {String} from_name: Sender name from which the campaign emails are sent [Mandatory: for Dedicated IP clients, please make sure that the sender details are defined here, and in case of no sender, you can add them also via API & for Shared IP clients, if sender exists]
# @options data {String} name: Name of the campaign [Mandatory]
# @options data {String} bat: Email address for test mail [Optional]
# @options data {String} html_content: Body of the content. The HTML content field must have more than 10 characters [Mandatory: if html_url is empty]
# @options data {String} html_url: Url which content is the body of content [Mandatory: if html_content is empty]
# @options data {Array} listid: These are the lists to which the campaign has been sent [Mandatory: if scheduled_date is not empty]
# @options data {String} scheduled_date: The day on which the campaign is supposed to run[Optional]
# @options data {String} subject: Subject of the campaign [Mandatory]
# @options data {String} from_email: Sender email from which the campaign emails are sent [Mandatory: for Dedicated IP clients, please make sure that the sender details are defined here, and in case of no sender, you can add them also via API & for Shared IP clients, if sender exists]
# @options data {String} reply_to: The reply to email in the campaign emails [Optional]
# @options data {String} to_field: This is to personalize the <<To>> Field. If you want to include the first name and last name of your recipient, add [PRENOM] [NOM] To use the contact attributes here, these should already exist in SendinBlue account [Optional]
# @options data {Array} exclude_list: These are the lists which must be excluded from the campaign [Optional]
# @options data {String} attachment_url: Provide the absolute url of the attachment [Optional]
# @options data {Integer} inline_image: Status of inline image. Possible values = 0 (default) & 1. inline_image = 0 means image can't be embedded, & inline_image = 1 means image can be embedded, in the email [Optional]
# @options data {Integer} mirror_active: Status of mirror links in campaign. Possible values = 0 & 1 (default). mirror_active = 0 means mirror links are deactivated, & mirror_active = 1 means mirror links are activated, in the campaign [Optional]
# @options data {Integer} send_now: Flag to send campaign now. Possible values = 0 (default) & 1. send_now = 0 means campaign can't be send now, & send_now = 1 means campaign ready to send now [Optional]
def create_campaign(self,data):
return self.post("campaign",json.dumps(data))
# Delete your campaigns.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of campaign to be deleted [Mandatory]
def delete_campaign(self,data):
id = str(data['id'])
return self.delete("campaign/" + id,"")
# Update your campaign.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of campaign to be modified [Mandatory]
# @options data {String} category: Tag name of the campaign [Optional]
# @options data {String} from_name: Sender name from which the campaign emails are sent [Mandatory: for Dedicated IP clients, please make sure that the sender details are defined here, and in case of no sender, you can add them also via API & for Shared IP clients, if sender exists]
# @options data {String} name: Name of the campaign [Optional]
# @options data {String} bat: Email address for test mail [Optional]
# @options data {String} html_content: Body of the content. The HTML content field must have more than 10 characters [Optional]
# @options data {String} html_url: Url which content is the body of content [Optional]
# @options data {Array} listid These are the lists to which the campaign has been sent [Mandatory: if scheduled_date is not empty]
# @options data {String} scheduled_date: The day on which the campaign is supposed to run[Optional]
# @options data {String} subject: Subject of the campaign.
# @options data {String} from_email: Sender email from which the campaign emails are sent [Mandatory: for Dedicated IP clients, please make sure that the sender details are defined here, and in case of no sender, you can add them also via API & for Shared IP clients, if sender exists]
# @options data {String} reply_to: The reply to email in the campaign emails [Optional]
    # @options data {String} to_field: This is to personalize the <<To>> Field. If you want to include the first name and last name of your recipient, add [PRENOM] [NOM]. To use the contact attributes here, these should already exist in SendinBlue account [Optional]
# @options data {Array} exclude_list: These are the lists which must be excluded from the campaign [Optional]
# @options data {String} attachment_url: Provide the absolute url of the attachment [Optional]
# @options data {Integer} inline_image: Status of inline image. Possible values = 0 (default) & 1. inline_image = 0 means image can't be embedded, & inline_image = 1 means image can be embedded, in the email [Optional]
# @options data {Integer} mirror_active: Status of mirror links in campaign. Possible values = 0 & 1 (default). mirror_active = 0 means mirror links are deactivated, & mirror_active = 1 means mirror links are activated, in the campaign [Optional]
# @options data {Integer} send_now: Flag to send campaign now. Possible values = 0 (default) & 1. send_now = 0 means campaign can't be send now, & send_now = 1 means campaign ready to send now [Optional]
def update_campaign(self,data):
id = str(data['id'])
return self.put("campaign/" + id,json.dumps(data))
# Send report of Sent and Archived campaign.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of campaign to send its report [Mandatory]
# @options data {String} lang: Language of email content. Possible values - fr (default), en, es, it & pt [Optional]
# @options data {String} email_subject: Message subject [Mandatory]
# @options data {Array} email_to: Email address of the recipient(s). Example: "test@example.net". You can use commas to separate multiple recipients [Mandatory]
# @options data {String} email_content_type: Body of the message in text/HTML version. Possible values - text & html [Mandatory]
# @options data {Array} email_bcc: Same as email_to but for Bcc [Optional]
# @options data {Array} email_cc: Same as email_to but for Cc [Optional]
# @options data {String} email_body: Body of the message [Mandatory]
def campaign_report_email(self,data):
id = str(data['id'])
return self.post("campaign/" + id + "/report",json.dumps(data))
# Export the recipients of a specified campaign.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of campaign to export its recipients [Mandatory]
# @options data {String} notify_url: URL that will be called once the export process is finished [Mandatory]
# @options data {String} type: Type of recipients. Possible values - all, non_clicker, non_opener, clicker, opener, soft_bounces, hard_bounces & unsubscribes [Mandatory]
def campaign_recipients_export(self,data):
id = str(data['id'])
return self.post("campaign/" + id + "/recipients",json.dumps(data))
# Send a Test Campaign.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of the campaign [Mandatory]
# @options data {Array} emails: Email address of recipient(s) existing in the one of the lists & should not be blacklisted. Example: "test@example.net". You can use commas to separate multiple recipients [Mandatory]
def send_bat_email(self,data):
id = str(data['id'])
return self.post("campaign/" + id + "/test",json.dumps(data))
# Create and schedule your Trigger campaigns.
# @param {Array} data contains php array with key value pair.
# @options data {String} category: Tag name of the campaign [Optional]
# @options data {String} from_name: Sender name from which the campaign emails are sent [Mandatory: for Dedicated IP clients, please make sure that the sender details are defined here, and in case of no sender, you can add them also via API & for Shared IP clients, if sender exists]
# @options data {String} trigger_name: Name of the campaign [Mandatory]
# @options data {String} bat: Email address for test mail [Optional]
# @options data {String} html_content: Body of the content. The HTML content field must have more than 10 characters [Mandatory: if html_url is empty]
# @options data {String} html_url: Url which content is the body of content [Mandatory: if html_content is empty]
# @options data {Array} listid: These are the lists to which the campaign has been sent [Mandatory: if scheduled_date is not empty]
# @options data {String} scheduled_date: The day on which the campaign is supposed to run[Optional]
# @options data {String} subject: Subject of the campaign [Mandatory]
# @options data {String} from_email: Sender email from which the campaign emails are sent [Mandatory: for Dedicated IP clients, please make sure that the sender details are defined here, and in case of no sender, you can add them also via API & for Shared IP clients, if sender exists]
# @options data {String} reply_to: The reply to email in the campaign emails [Optional]
# @options data {String} to_field: This is to personalize the <<To>> Field. If you want to include the first name and last name of your recipient, add [PRENOM] [NOM]. To use the contact attributes here, these should already exist in SendinBlue account [Optional]
# @options data {Array} exclude_list: These are the lists which must be excluded from the campaign [Optional]
# @options data {Integer} recurring: Type of trigger campaign. Possible values = 0 (default) & 1. recurring = 0 means contact can receive the same Trigger campaign only once, & recurring = 1 means contact can receive the same Trigger campaign several times [Optional]
# @options data {String} attachment_url: Provide the absolute url of the attachment [Optional]
# @options data {Integer} inline_image: Status of inline image. Possible values = 0 (default) & 1. inline_image = 0 means image can't be embedded, & inline_image = 1 means image can be embedded, in the email [Optional]
# @options data {Integer} mirror_active: Status of mirror links in campaign. Possible values = 0 & 1 (default). mirror_active = 0 means mirror links are deactivated, & mirror_active = 1 means mirror links are activated, in the campaign [Optional]
# @options data {Integer} send_now: Flag to send campaign now. Possible values = 0 (default) & 1. send_now = 0 means campaign can't be sent now, & send_now = 1 means campaign is ready to send now [Optional]
def create_trigger_campaign(self,data):
return self.post("campaign",json.dumps(data))
# Update and schedule your Trigger campaigns.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of Trigger campaign to be modified [Mandatory]
# @options data {String} category: Tag name of the campaign [Optional]
# @options data {String} from_name: Sender name from which the campaign emails are sent [Mandatory: for Dedicated IP clients, please make sure that the sender details are defined here, and in case of no sender, you can add them also via API & for Shared IP clients, if sender exists]
# @options data {String} trigger_name: Name of the campaign [Mandatory]
# @options data {String} bat: Email address for test mail [Optional]
# @options data {String} html_content: Body of the content. The HTML content field must have more than 10 characters [Mandatory: if html_url is empty]
# @options data {String} html_url: Url which content is the body of content [Mandatory: if html_content is empty]
# @options data {Array} listid: These are the lists to which the campaign has been sent [Mandatory: if scheduled_date is not empty]
# @options data {String} scheduled_date: The day on which the campaign is supposed to run[Optional]
# @options data {String} subject: Subject of the campaign [Mandatory]
# @options data {String} from_email: Sender email from which the campaign emails are sent [Mandatory: for Dedicated IP clients, please make sure that the sender details are defined here, and in case of no sender, you can add them also via API & for Shared IP clients, if sender exists]
# @options data {String} reply_to: The reply to email in the campaign emails [Optional]
# @options data {String} to_field: This is to personalize the <<To>> Field. If you want to include the first name and last name of your recipient, add [PRENOM] [NOM]. To use the contact attributes here, these should already exist in SendinBlue account [Optional]
# @options data {Array} exclude_list: These are the lists which must be excluded from the campaign [Optional]
# @options data {Integer} recurring: Type of trigger campaign. Possible values = 0 (default) & 1. recurring = 0 means contact can receive the same Trigger campaign only once, & recurring = 1 means contact can receive the same Trigger campaign several times [Optional]
# @options data {String} attachment_url: Provide the absolute url of the attachment [Optional]
# @options data {Integer} inline_image: Status of inline image. Possible values = 0 (default) & 1. inline_image = 0 means image can't be embedded, & inline_image = 1 means image can be embedded, in the email [Optional]
# @options data {Integer} mirror_active: Status of mirror links in campaign. Possible values = 0 & 1 (default). mirror_active = 0 means mirror links are deactivated, & mirror_active = 1 means mirror links are activated, in the campaign [Optional]
# @options data {Integer} send_now: Flag to send campaign now. Possible values = 0 (default) & 1. send_now = 0 means campaign can't be sent now, & send_now = 1 means campaign is ready to send now [Optional]
def update_trigger_campaign(self,data):
id = str(data['id'])
return self.put("campaign/" + id,json.dumps(data))
# Get the campaign name, subject and share link of sent classic campaigns only. Campaigns which have not been sent, and all other campaign types (trigger, template & sms), will return an error message saying the share link is not available.
# @param {Array} data contains php array with key value pair.
# @options data {Array} camp_ids: Id of campaign to get share link. You can use commas to separate multiple ids [Mandatory]
def share_campaign(self,data):
return self.post("campaign/sharelinkv2",json.dumps(data))
# Update the Campaign status.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of campaign to update its status [Mandatory]
# @options data {String} status: Types of status. Possible values - suspended, archive, darchive, sent, queued, replicate and replicate_template ( case sensitive ) [Mandatory]
def update_campaign_status(self,data):
id = str(data['id'])
return self.put("campaign/" + id + "/updatecampstatus",json.dumps(data))
# Get all the processes information under the account.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} page: Maximum number of records per request is 50, if there are more than 50 processes then you can use this parameter to get next 50 results [Mandatory]
# @options data {Integer} page_limit: This should be a valid number between 1-50 [Mandatory]
def get_processes(self,data):
return self.get("process",json.dumps(data))
# Get a particular process information.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of the process to get its details [Mandatory]
def get_process(self,data):
id = str(data['id'])
return self.get("process/" + id,"")
# Get all lists detail.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} list_parent: This is the existing folder id & can be used to get all lists belonging to it [Optional]
# @options data {Integer} page: Maximum number of records per request is 50, if there are more than 50 lists then you can use this parameter to get the next 50 results [Mandatory]
# @options data {Integer} page_limit: This should be a valid number between 1-50 [Mandatory]
def get_lists(self,data):
return self.get("list",json.dumps(data))
# Get a particular list detail.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of list to get details [Mandatory]
def get_list(self,data):
id = str(data['id'])
return self.get("list/" + id,"")
# Create a new list.
# @param {Array} data contains php array with key value pair.
# @options data {String} list_name: Desired name of the list to be created [Mandatory]
# @options data {Integer} list_parent: Folder ID [Mandatory]
def create_list(self,data):
return self.post("list",json.dumps(data))
# Delete a specific list.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of list to be deleted [Mandatory]
def delete_list(self,data):
id = str(data['id'])
return self.delete("list/" + id,"")
# Update a list.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of list to be modified [Mandatory]
# @options data {String} list_name: Desired name of the list to be modified [Optional]
# @options data {Integer} list_parent: Folder ID [Mandatory]
def update_list(self,data):
id = str(data['id'])
return self.put("list/" + id,json.dumps(data))
# Add already existing users in the SendinBlue contacts to the list.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of list to link users in it [Mandatory]
# @options data {Array} users: Email address of the already existing user(s) in the SendinBlue contacts. Example: "test@example.net". You can use commas to separate multiple users [Mandatory]
def add_users_list(self,data):
id = str(data['id'])
return self.post("list/" + id + "/users",json.dumps(data))
# Delete already existing users in the SendinBlue contacts from the list.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of list to unlink users from it [Mandatory]
# @options data {Array} users: Email address of the already existing user(s) in the SendinBlue contacts to be modified. Example: "test@example.net". You can use commas to separate multiple users [Mandatory]
def delete_users_list(self,data):
id = str(data['id'])
return self.delete("list/" + id + "/delusers",json.dumps(data))
# Send Transactional Email.
# @param {Array} data contains php array with key value pair.
# @options data {Array} to: Email address of the recipient(s). It should be sent as an associative array. Example: array("to@example.net"=>"to whom"). You can use commas to separate multiple recipients [Mandatory]
# @options data {String} subject: Message subject [Mandatory]
# @options data {Array} from: Email address for From header. It should be sent as an array. Example: array("from@email.com","from email") [Mandatory]
# @options data {String} html: Body of the message. (HTML version) [Mandatory]. To send inline images, use <img src="{YourFileName.Extension}" alt="image" border="0" >, the 'src' attribute value inside {} (curly braces) should be same as the filename used in 'inline_image' parameter
# @options data {String} text: Body of the message. (text version) [Optional]
# @options data {Array} cc: Same as to but for Cc. Example: array("cc@example.net","cc whom") [Optional]
# @options data {Array} bcc: Same as to but for Bcc. Example: array("bcc@example.net","bcc whom") [Optional]
# @options data {Array} replyto: Same as from but for Reply To. Example: array("from@email.com","from email") [Optional]
# @options data {Array} attachment: Provide the absolute url of the attachment/s. Possible extension values = gif, png, bmp, cgm, jpg, jpeg, txt, css, shtml, html, htm, csv, zip, pdf, xml, doc, xls, ppt, tar, and ez. To send attachment/s generated on the fly you have to pass your attachment/s filename & its base64 encoded chunk data as an associative array. Example: array("YourFileName.Extension"=>"Base64EncodedChunkData"). You can use commas to separate multiple attachments [Optional]
# @options data {Array} headers: The headers will be sent along with the mail headers in original email. Example: array("Content-Type"=>"text/html; charset=iso-8859-1"). You can use commas to separate multiple headers [Optional]
# @options data {Array} inline_image: Pass your inline image/s filename & its base64 encoded chunk data as an associative array. Example: array("YourFileName.Extension"=>"Base64EncodedChunkData"). You can use commas to separate multiple inline images [Optional]
def send_email(self,data):
return self.post("email",json.dumps(data))
# To retrieve details of all webhooks.
# @param {Array} data contains php array with key value pair.
# @options data {String} is_plat: Flag to get webhooks. Possible values - 0 & 1. Example: to get Transactional webhooks, use $is_plat=0, to get Marketing webhooks, use $is_plat=1, & to get all webhooks, use $is_plat="" [Optional]
def get_webhooks(self,data):
return self.get("webhook",json.dumps(data))
# To retrieve details of any particular webhook.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of webhook to get details [Mandatory]
def get_webhook(self,data):
id = str(data['id'])
return self.get("webhook/" + id,"")
# Create a Webhook.
# @param {Array} data contains php array with key value pair.
# @options data {String} url: URL that will be triggered by a webhook [Mandatory]
# @options data {String} description: Webhook description [Optional]
# @options data {Array} events: Set of events. You can use commas to separate multiple events. Possible values for Transactional webhook - request, delivered, hard_bounce, soft_bounce, blocked, spam, invalid_email, deferred, click, & opened and Possible Values for Marketing webhook - spam, opened, click, hard_bounce, unsubscribe, soft_bounce & list_addition ( case sensitive ) [Mandatory]
# @options data {Integer} is_plat: Flag to create webhook type. Possible values - 0 (default) & 1. Example: to create Transactional webhooks, use $is_plat=0, & to create Marketing webhooks, use $is_plat=1 [Optional]
def create_webhook(self,data):
return self.post("webhook",json.dumps(data))
# Delete a webhook.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of webhook to be deleted [Mandatory]
def delete_webhook(self,data):
id = str(data['id'])
return self.delete("webhook/" + id,"")
# Update a webhook.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of webhook to be modified [Mandatory]
# @options data {String} url: URL that will be triggered by a webhook [Mandatory]
# @options data {String} description: Webhook description [Optional]
# @options data {Array} events: Set of events. You can use commas to separate multiple events. Possible values for Transactional webhook - request, delivered, hard_bounce, soft_bounce, blocked, spam, invalid_email, deferred, click, & opened and Possible Values for Marketing webhook - spam, opened, click, hard_bounce, unsubscribe, soft_bounce & list_addition ( case sensitive ) [Mandatory]
def update_webhook(self,data):
id = str(data['id'])
return self.put("webhook/" + id,json.dumps(data))
# Aggregate / date-wise report of the SendinBlue SMTP account.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} aggregate: This is used to indicate, you are interested in all-time totals. Possible values - 0 & 1. aggregate = 0 means it will not aggregate records, and will show stats per day/date wise [Optional]
# @options data {String} start_date: The start date to look up statistics. Date must be in YYYY-MM-DD format and should be before the end_date [Optional]
# @options data {String} end_date: The end date to look up statistics. Date must be in YYYY-MM-DD format and should be after the start_date [Optional]
# @options data {Integer} days: Number of days in the past to include statistics ( Includes today ). It must be an integer greater than 0 [Optional]
# @options data {String} tag: The tag you will specify to retrieve detailed stats. It must be an existing tag that has statistics [Optional]
def get_statistics(self,data):
return self.post("statistics",json.dumps(data))
# Get access to a specific user's information.
# @param {Array} data contains php array with key value pair.
# @options data {String} email: Email address of the already existing user in the SendinBlue contacts [Mandatory]
def get_user(self,data):
id = str(data['id'])
return self.get("user/" + id,"")
# Unlink existing user from all lists.
# @param {Array} data contains php array with key value pair.
# @options data {String} email: Email address of the already existing user in the SendinBlue contacts to be unlinked from all lists [Mandatory]
def delete_user(self,data):
id = str(data['id'])
return self.delete("user/" + id,"")
# Import Users Information.
# @param {Array} data contains php array with key value pair.
# @options data {String} url: The URL of the file to be imported. Possible file types - .txt, .csv [Mandatory: if body is empty]
# @options data {String} body: The Body with csv content to be imported. Example: 'NAME;SURNAME;EMAIL\n"Name1";"Surname1";"example1@example.net"\n"Name2";"Surname2";"example2@example.net"', where \n separates each user data. You can use semicolon to separate multiple attributes [Mandatory: if url is empty]
# @options data {Array} listids: These are the list ids in which the users will be imported [Mandatory: if name is empty]
# @options data {String} notify_url: URL that will be called once the import process is finished [Optional] In notify_url, we are sending the content using POST method
# @options data {String} name: This is new list name which will be created first & then users will be imported in it [Mandatory: if listids is empty]
# @options data {Integer} list_parent: This is the existing folder id & can be used with name parameter to make newly created list's desired parent [Optional]
def import_users(self,data):
return self.post("user/import",json.dumps(data))
# Export Users Information.
# @param {Array} data contains php array with key value pair.
# @options data {String} export_attrib: The name of attribute present in your SendinBlue account. You can use commas to separate multiple attributes. Example: "EMAIL,NAME,SMS" [Optional]
# @options data {String} filter: Filter can be added to export users. Example: "{\"blacklisted\":1}", will export all blacklisted users [Mandatory]
# @options data {String} notify_url: URL that will be called once the export process is finished [Optional]
def export_users(self,data):
return self.post("user/export",json.dumps(data))
# Create a new user if the email provided as input doesn't exist in the contact list of your SendinBlue account, otherwise it will update the existing user.
# @param {Array} data contains php array with key value pair.
# @options data {String} email: Email address of the user to be created in SendinBlue contacts. Already existing email address of user in the SendinBlue contacts to be modified [Mandatory]
# @options data {Array} attributes: The name of attribute present in your SendinBlue account. It should be sent as an associative array. Example: array("NAME"=>"name"). You can use commas to separate multiple attributes [Optional]
# @options data {Integer} blacklisted: This is used to blacklist/ Unblacklist a user. Possible values - 0 & 1. blacklisted = 1 means user has been blacklisted [Optional]
# @options data {Array} listid: The list id(s) to be linked from user [Optional]
# @options data {Array} listid_unlink: The list id(s) to be unlinked from user [Optional]
# @options data {Array} blacklisted_sms: This is used to blacklist/ Unblacklist a user's SMS number. Possible values - 0 & 1. blacklisted_sms = 1 means user's SMS number has been blacklisted [Optional]
def create_update_user(self,data):
return self.post("user/createdituser",json.dumps(data))
# Access all the attributes information under the account.
# No input required
def get_attributes(self,):
return self.get("attribute","")
# Access the specific type of attribute information.
# @param {Array} data contains php array with key value pair.
# @options data {String} type: Type of attribute. Possible values - normal, transactional, category, calculated & global [Optional]
def get_attribute(self,data):
type = data['type']
return self.get("attribute/" + type,"")
# Create an Attribute.
# @param {Array} data contains php array with key value pair.
# @options data {String} type: Type of attribute. Possible values - normal, transactional, category, calculated & global ( case sensitive ) [Mandatory]
# @options data {Array} data: The name and data type of 'normal' & 'transactional' attribute to be created in your SendinBlue account. It should be sent as an associative array. Example: array('ATTRIBUTE_NAME1' => 'DATA_TYPE1', 'ATTRIBUTE_NAME2'=> 'DATA_TYPE2').
# The name and data value of 'category', 'calculated' & 'global', should be sent as JSON string. Example: '[{ "name":"ATTRIBUTE_NAME1", "value":"Attribute_value1" }, { "name":"ATTRIBUTE_NAME2", "value":"Attribute_value2" }]'. You can use commas to separate multiple attributes [Mandatory]
def create_attribute(self,data):
return self.post("attribute",json.dumps(data))
# Delete a specific type of attribute information.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} type: Type of attribute to be deleted [Mandatory]
def delete_attribute(self,data):
type = data['type']
return self.post("attribute/" + type,json.dumps(data))
# Get Email Event report.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} limit: To limit the number of results returned. It should be an integer [Optional]
# @options data {String} start_date: The start date to get report from. Date must be in YYYY-MM-DD format and should be before the end_date [Optional]
# @options data {String} end_date: The end date to get report till date. Date must be in YYYY-MM-DD format and should be after the start_date [Optional]
# @options data {Integer} offset: Beginning point in the list to retrieve from. It should be an integer [Optional]
# @options data {String} date: Specific date to get its report. Date must be in YYYY-MM-DD format and should be earlier than today's date [Optional]
# @options data {Integer} days: Number of days in the past (includes today). If specified, must be an integer greater than 0 [Optional]
# @options data {String} email: Email address to search report for [Optional]
def get_report(self,data):
return self.post("report",json.dumps(data))
# Get all folders detail.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} page: Maximum number of records per request is 50, if there are more than 50 folders then you can use this parameter to get next 50 results [Mandatory]
# @options data {Integer} page_limit: This should be a valid number between 1-50 [Mandatory]
def get_folders(self,data):
return self.get("folder",json.dumps(data))
# Get a particular folder detail.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of folder to get details [Mandatory]
def get_folder(self,data):
id = str(data['id'])
return self.get("folder/" + id,"")
# Create a new folder.
# @param {Array} data contains php array with key value pair.
# @options data {String} name: Desired name of the folder to be created [Mandatory]
def create_folder(self,data):
return self.post("folder",json.dumps(data))
# Delete a specific folder information.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of folder to be deleted [Mandatory]
def delete_folder(self,data):
id = str(data['id'])
return self.delete("folder/" + id,"")
# Update an existing folder.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of folder to be modified [Mandatory]
# @options data {String} name: Desired name of the folder to be modified [Mandatory]
def update_folder(self,data):
id = str(data['id'])
return self.put("folder/" + id,json.dumps(data))
# Delete any hardbounce, which actually would have been blocked due to some temporary ISP failures.
# @param {Array} data contains php array with key value pair.
# @options data {String} start_date: The start date to get report from. Date must be in YYYY-MM-DD format and should be before the end_date [Optional]
# @options data {String} end_date: The end date to get report till date. Date must be in YYYY-MM-DD format and should be after the start_date [Optional]
# @options data {String} email: Email address to delete its bounces [Optional]
def delete_bounces(self,data):
return self.post("bounces",json.dumps(data))
# Send templates created on SendinBlue, through SendinBlue SMTP (transactional mails).
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of the template created on SendinBlue account [Mandatory]
# @options data {String} to: Email address of the recipient(s). You can use pipe ( | ) to separate multiple recipients. Example: "to-example@example.net|to2-example@example.net" [Mandatory]
# @options data {String} cc: Same as to but for Cc [Optional]
# @options data {String} bcc: Same as to but for Bcc [Optional]
# @options data {Array} attr: The name of attribute present in your SendinBlue account. It should be sent as an associative array. Example: array("NAME"=>"name"). You can use commas to separate multiple attributes [Optional]
# @options data {String} attachment_url: Provide the absolute url of the attachment. Url not allowed from local machine. File must be hosted somewhere [Optional]
# @options data {Array} attachment: To send attachment/s generated on the fly you have to pass your attachment/s filename & its base64 encoded chunk data as an associative array [Optional]
# @options data {Array} headers: The headers will be sent along with the mail headers in original email. Example: array("Content-Type"=>"text/html; charset=iso-8859-1"). You can use commas to separate multiple headers [Optional]
def send_transactional_template(self,data):
id = str(data['id'])
return self.put("template/" + id,json.dumps(data))
# Create a Template.
# @param {Array} data contains php array with key value pair.
# @options data {String} from_name: Sender name from which the campaign emails are sent [Mandatory: for Dedicated IP clients & for Shared IP clients, if sender exists]
# @options data {String} template_name: Name of the Template [Mandatory]
# @options data {String} bat: Email address for test mail [Optional]
# @options data {String} html_content: Body of the content. The HTML content field must have more than 10 characters [Mandatory: if html_url is empty]
# @options data {String} html_url: Url which content is the body of content [Mandatory: if html_content is empty]
# @options data {String} subject: Subject of the campaign [Mandatory]
# @options data {String} from_email: Sender email from which the campaign emails are sent [Mandatory: for Dedicated IP clients & for Shared IP clients, if sender exists]
# @options data {String} reply_to: The reply to email in the campaign emails [Optional]
# @options data {String} to_field: This is to personalize the <<To>> Field. If you want to include the first name and last name of your recipient, add [PRENOM] [NOM]. To use the contact attributes here, these should already exist in SendinBlue account [Optional]
# @options data {Integer} status: Status of template. Possible values = 0 (default) & 1. status = 0 means template is inactive, & status = 1 means template is active [Optional]
# @options data {Integer} attachment: Status of attachment. Possible values = 0 (default) & 1. attach = 0 means an attachment can't be sent, & attach = 1 means an attachment can be sent, in the email [Optional]
def create_template(self,data):
return self.post("template",json.dumps(data))
# Update a Template.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of Template to be modified [Mandatory]
# @options data {String} from_name: Sender name from which the campaign emails are sent [Mandatory: for Dedicated IP clients & for Shared IP clients, if sender exists]
# @options data {String} template_name: Name of the Template [Mandatory]
# @options data {String} bat: Email address for test mail [Optional]
# @options data {String} html_content: Body of the content. The HTML content field must have more than 10 characters [Mandatory: if html_url is empty]
# @options data {String} html_url: Url which content is the body of content [Mandatory: if html_content is empty]
# @options data {String} subject: Subject of the campaign [Mandatory]
# @options data {String} from_email: Sender email from which the campaign emails are sent [Mandatory: for Dedicated IP clients & for Shared IP clients, if sender exists]
# @options data {String} reply_to: The reply to email in the campaign emails [Optional]
# @options data {String} to_field: This is to personalize the <<To>> Field. If you want to include the first name and last name of your recipient, add [PRENOM] [NOM]. To use the contact attributes here, these should already exist in SendinBlue account [Optional]
# @options data {Integer} status: Status of template. Possible values = 0 (default) & 1. status = 0 means template is inactive, & status = 1 means template is active [Optional]
# @options data {Integer} attachment: Status of attachment. Possible values = 0 (default) & 1. attach = 0 means an attachment can't be sent, & attach = 1 means an attachment can be sent, in the email [Optional]
def update_template(self,data):
id = str(data['id'])
return self.put("template/" + id,json.dumps(data))
# Get Access of created senders information.
# @param {Array} data contains php array with key value pair.
# @options data {String} option: Options to get senders. Possible options - IP-wise, & Domain-wise ( only for dedicated IP clients ). Example: to get senders with specific IP, use $option='1.2.3.4', to get senders with specific domain use, $option='domain.com', & to get all senders, use $option="" [Optional]
def get_senders(self,data):
return self.get("advanced",json.dumps(data))
# Create your Senders.
# @param {Array} data contains php array with key value pair.
# @options data {String} name: Name of the sender [Mandatory]
# @options data {String} email: Email address of the sender [Mandatory]
# @options data {Array} ip_domain: Pass pipe ( | ) separated Dedicated IP and its associated Domain. Example: "1.2.3.4|mydomain.com". You can use commas to separate multiple ip_domain's [Mandatory: Only for Dedicated IP clients, for Shared IP clients, it should be kept blank]
def create_sender(self,data):
return self.post("advanced",json.dumps(data))
# Update your Senders.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of sender to be modified [Mandatory]
# @options data {String} name: Name of the sender [Mandatory]
# @options data {Array} ip_domain: Pass pipe ( | ) separated Dedicated IP and its associated Domain. Example: "1.2.3.4|mydomain.com". You can use commas to separate multiple ip_domain's [Mandatory: Only for Dedicated IP clients, for Shared IP clients, it should be kept blank]
def update_sender(self,data):
id = str(data['id'])
return self.put("advanced/" + id,json.dumps(data))
# Delete your Sender Information.
# @param {Array} data contains php array with key value pair.
# @options data {Integer} id: Id of sender to be deleted [Mandatory]
def delete_sender(self,data):
id = str(data['id'])
return self.delete("advanced/" + id,"")
|
bradojevic/django-prod
|
refs/heads/master
|
src/manage.py
|
404
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
jbouse-debian/paramiko
|
refs/heads/master
|
paramiko/sftp_server.py
|
3
|
# Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Server-mode SFTP support.
"""
import os
import errno
import sys
from hashlib import md5, sha1
from paramiko import util
from paramiko.sftp import (
BaseSFTP, Message, SFTP_FAILURE, SFTP_PERMISSION_DENIED, SFTP_NO_SUCH_FILE,
)
from paramiko.sftp_si import SFTPServerInterface
from paramiko.sftp_attr import SFTPAttributes
from paramiko.common import DEBUG
from paramiko.py3compat import long, string_types, bytes_types, b
from paramiko.server import SubsystemHandler
# known hash algorithms for the "check-file" extension
from paramiko.sftp import (
CMD_HANDLE, SFTP_DESC, CMD_STATUS, SFTP_EOF, CMD_NAME, SFTP_BAD_MESSAGE,
CMD_EXTENDED_REPLY, SFTP_FLAG_READ, SFTP_FLAG_WRITE, SFTP_FLAG_APPEND,
SFTP_FLAG_CREATE, SFTP_FLAG_TRUNC, SFTP_FLAG_EXCL, CMD_NAMES, CMD_OPEN,
CMD_CLOSE, SFTP_OK, CMD_READ, CMD_DATA, CMD_WRITE, CMD_REMOVE, CMD_RENAME,
CMD_MKDIR, CMD_RMDIR, CMD_OPENDIR, CMD_READDIR, CMD_STAT, CMD_ATTRS,
CMD_LSTAT, CMD_FSTAT, CMD_SETSTAT, CMD_FSETSTAT, CMD_READLINK, CMD_SYMLINK,
CMD_REALPATH, CMD_EXTENDED, SFTP_OP_UNSUPPORTED,
)
_hash_class = {
'sha1': sha1,
'md5': md5,
}
class SFTPServer (BaseSFTP, SubsystemHandler):
"""
Server-side SFTP subsystem support. Since this is a `.SubsystemHandler`,
it can be (and is meant to be) set as the handler for ``"sftp"`` requests.
Use `.Transport.set_subsystem_handler` to activate this class.
"""
def __init__(self, channel, name, server, sftp_si=SFTPServerInterface,
*largs, **kwargs):
"""
The constructor for SFTPServer is meant to be called from within the
`.Transport` as a subsystem handler. ``server`` and any additional
parameters or keyword parameters are passed from the original call to
`.Transport.set_subsystem_handler`.
:param .Channel channel: channel passed from the `.Transport`.
:param str name: name of the requested subsystem.
:param .ServerInterface server:
the server object associated with this channel and subsystem
:param sftp_si:
a subclass of `.SFTPServerInterface` to use for handling individual
requests.
"""
BaseSFTP.__init__(self)
SubsystemHandler.__init__(self, channel, name, server)
transport = channel.get_transport()
self.logger = util.get_logger(transport.get_log_channel() + '.sftp')
self.ultra_debug = transport.get_hexdump()
self.next_handle = 1
# map of handle-string to SFTPHandle for files & folders:
self.file_table = {}
self.folder_table = {}
self.server = sftp_si(server, *largs, **kwargs)
def _log(self, level, msg):
if issubclass(type(msg), list):
for m in msg:
super(SFTPServer, self)._log(
level,
"[chan " + self.sock.get_name() + "] " + m)
else:
super(SFTPServer, self)._log(
level,
"[chan " + self.sock.get_name() + "] " + msg)
def start_subsystem(self, name, transport, channel):
self.sock = channel
self._log(DEBUG, 'Started sftp server on channel {!r}'.format(channel))
self._send_server_version()
self.server.session_started()
while True:
try:
t, data = self._read_packet()
except EOFError:
self._log(DEBUG, 'EOF -- end of session')
return
except Exception as e:
self._log(DEBUG, 'Exception on channel: ' + str(e))
self._log(DEBUG, util.tb_strings())
return
msg = Message(data)
request_number = msg.get_int()
try:
self._process(t, request_number, msg)
except Exception as e:
self._log(DEBUG, 'Exception in server processing: ' + str(e))
self._log(DEBUG, util.tb_strings())
# send some kind of failure message, at least
try:
self._send_status(request_number, SFTP_FAILURE)
except:
pass
def finish_subsystem(self):
self.server.session_ended()
super(SFTPServer, self).finish_subsystem()
# close any file handles that were left open
# (so we can return them to the OS quickly)
for f in self.file_table.values():
f.close()
for f in self.folder_table.values():
f.close()
self.file_table = {}
self.folder_table = {}
@staticmethod
def convert_errno(e):
"""
Convert an errno value (as from an ``OSError`` or ``IOError``) into a
standard SFTP result code. This is a convenience function for trapping
exceptions in server code and returning an appropriate result.
:param int e: an errno code, as from ``OSError.errno``.
:return: an `int` SFTP error code like ``SFTP_NO_SUCH_FILE``.
"""
if e == errno.EACCES:
# permission denied
return SFTP_PERMISSION_DENIED
elif (e == errno.ENOENT) or (e == errno.ENOTDIR):
# no such file
return SFTP_NO_SUCH_FILE
else:
return SFTP_FAILURE
@staticmethod
def set_file_attr(filename, attr):
"""
Change a file's attributes on the local filesystem. The contents of
``attr`` are used to change the permissions, owner, group ownership,
and/or modification & access time of the file, depending on which
attributes are present in ``attr``.
This is meant to be a handy helper function for translating SFTP file
requests into local file operations.
:param str filename:
name of the file to alter (should usually be an absolute path).
:param .SFTPAttributes attr: attributes to change.
"""
if sys.platform != 'win32':
# mode operations are meaningless on win32
if attr._flags & attr.FLAG_PERMISSIONS:
os.chmod(filename, attr.st_mode)
if attr._flags & attr.FLAG_UIDGID:
os.chown(filename, attr.st_uid, attr.st_gid)
if attr._flags & attr.FLAG_AMTIME:
os.utime(filename, (attr.st_atime, attr.st_mtime))
if attr._flags & attr.FLAG_SIZE:
with open(filename, 'w+') as f:
f.truncate(attr.st_size)
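    # A brief sketch of how convert_errno and set_file_attr are typically used
    # from an SFTPServerInterface subclass (the subclass below is hypothetical
    # and not part of this module):
    #
    #   class MyServer(SFTPServerInterface):
    #       def chattr(self, path, attr):
    #           try:
    #               SFTPServer.set_file_attr(path, attr)
    #           except OSError as e:
    #               return SFTPServer.convert_errno(e.errno)
    #           return SFTP_OK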
# ...internals...
def _response(self, request_number, t, *arg):
msg = Message()
msg.add_int(request_number)
for item in arg:
if isinstance(item, long):
msg.add_int64(item)
elif isinstance(item, int):
msg.add_int(item)
elif isinstance(item, (string_types, bytes_types)):
msg.add_string(item)
elif type(item) is SFTPAttributes:
item._pack(msg)
else:
raise Exception(
'unknown type for {!r} type {!r}'.format(
item, type(item)))
self._send_packet(t, msg)
def _send_handle_response(self, request_number, handle, folder=False):
if not issubclass(type(handle), SFTPHandle):
# must be error code
self._send_status(request_number, handle)
return
handle._set_name(b('hx{:d}'.format(self.next_handle)))
self.next_handle += 1
if folder:
self.folder_table[handle._get_name()] = handle
else:
self.file_table[handle._get_name()] = handle
self._response(request_number, CMD_HANDLE, handle._get_name())
def _send_status(self, request_number, code, desc=None):
if desc is None:
try:
desc = SFTP_DESC[code]
except IndexError:
desc = 'Unknown'
        # some clients expect a "language" tag at the end
# (but don't mind it being blank)
self._response(request_number, CMD_STATUS, code, desc, '')
def _open_folder(self, request_number, path):
resp = self.server.list_folder(path)
if issubclass(type(resp), list):
# got an actual list of filenames in the folder
folder = SFTPHandle()
folder._set_files(resp)
self._send_handle_response(request_number, folder, True)
return
# must be an error code
self._send_status(request_number, resp)
def _read_folder(self, request_number, folder):
flist = folder._get_next_files()
if len(flist) == 0:
self._send_status(request_number, SFTP_EOF)
return
msg = Message()
msg.add_int(request_number)
msg.add_int(len(flist))
for attr in flist:
msg.add_string(attr.filename)
msg.add_string(attr)
attr._pack(msg)
self._send_packet(CMD_NAME, msg)
def _check_file(self, request_number, msg):
# this extension actually comes from v6 protocol, but since it's an
        # extension, I feel like we can reasonably support it backported.
# it's very useful for verifying uploaded files or checking for
# rsync-like differences between local and remote files.
handle = msg.get_binary()
alg_list = msg.get_list()
start = msg.get_int64()
length = msg.get_int64()
block_size = msg.get_int()
if handle not in self.file_table:
self._send_status(
request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
return
f = self.file_table[handle]
for x in alg_list:
if x in _hash_class:
algname = x
alg = _hash_class[x]
break
else:
self._send_status(
request_number, SFTP_FAILURE, 'No supported hash types found')
return
if length == 0:
st = f.stat()
if not issubclass(type(st), SFTPAttributes):
self._send_status(request_number, st, 'Unable to stat file')
return
length = st.st_size - start
if block_size == 0:
block_size = length
if block_size < 256:
self._send_status(
request_number, SFTP_FAILURE, 'Block size too small')
return
sum_out = bytes()
offset = start
while offset < start + length:
blocklen = min(block_size, start + length - offset)
# don't try to read more than about 64KB at a time
chunklen = min(blocklen, 65536)
count = 0
hash_obj = alg()
while count < blocklen:
data = f.read(offset, chunklen)
if not isinstance(data, bytes_types):
self._send_status(
request_number, data, 'Unable to hash file')
return
hash_obj.update(data)
count += len(data)
offset += count
sum_out += hash_obj.digest()
msg = Message()
msg.add_int(request_number)
msg.add_string('check-file')
msg.add_string(algname)
msg.add_bytes(sum_out)
self._send_packet(CMD_EXTENDED_REPLY, msg)
def _convert_pflags(self, pflags):
"""convert SFTP-style open() flags to Python's os.open() flags"""
if (pflags & SFTP_FLAG_READ) and (pflags & SFTP_FLAG_WRITE):
flags = os.O_RDWR
elif pflags & SFTP_FLAG_WRITE:
flags = os.O_WRONLY
else:
flags = os.O_RDONLY
if pflags & SFTP_FLAG_APPEND:
flags |= os.O_APPEND
if pflags & SFTP_FLAG_CREATE:
flags |= os.O_CREAT
if pflags & SFTP_FLAG_TRUNC:
flags |= os.O_TRUNC
if pflags & SFTP_FLAG_EXCL:
flags |= os.O_EXCL
return flags
def _process(self, t, request_number, msg):
self._log(DEBUG, 'Request: {}'.format(CMD_NAMES[t]))
if t == CMD_OPEN:
path = msg.get_text()
flags = self._convert_pflags(msg.get_int())
attr = SFTPAttributes._from_msg(msg)
self._send_handle_response(
request_number, self.server.open(path, flags, attr))
elif t == CMD_CLOSE:
handle = msg.get_binary()
if handle in self.folder_table:
del self.folder_table[handle]
self._send_status(request_number, SFTP_OK)
return
if handle in self.file_table:
self.file_table[handle].close()
del self.file_table[handle]
self._send_status(request_number, SFTP_OK)
return
self._send_status(
request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
elif t == CMD_READ:
handle = msg.get_binary()
offset = msg.get_int64()
length = msg.get_int()
if handle not in self.file_table:
self._send_status(
request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
return
data = self.file_table[handle].read(offset, length)
if isinstance(data, (bytes_types, string_types)):
if len(data) == 0:
self._send_status(request_number, SFTP_EOF)
else:
self._response(request_number, CMD_DATA, data)
else:
self._send_status(request_number, data)
elif t == CMD_WRITE:
handle = msg.get_binary()
offset = msg.get_int64()
data = msg.get_binary()
if handle not in self.file_table:
self._send_status(
request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
return
self._send_status(
request_number, self.file_table[handle].write(offset, data))
elif t == CMD_REMOVE:
path = msg.get_text()
self._send_status(request_number, self.server.remove(path))
elif t == CMD_RENAME:
oldpath = msg.get_text()
newpath = msg.get_text()
self._send_status(
request_number, self.server.rename(oldpath, newpath))
elif t == CMD_MKDIR:
path = msg.get_text()
attr = SFTPAttributes._from_msg(msg)
self._send_status(request_number, self.server.mkdir(path, attr))
elif t == CMD_RMDIR:
path = msg.get_text()
self._send_status(request_number, self.server.rmdir(path))
elif t == CMD_OPENDIR:
path = msg.get_text()
self._open_folder(request_number, path)
return
elif t == CMD_READDIR:
handle = msg.get_binary()
if handle not in self.folder_table:
self._send_status(
request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
return
folder = self.folder_table[handle]
self._read_folder(request_number, folder)
elif t == CMD_STAT:
path = msg.get_text()
resp = self.server.stat(path)
if issubclass(type(resp), SFTPAttributes):
self._response(request_number, CMD_ATTRS, resp)
else:
self._send_status(request_number, resp)
elif t == CMD_LSTAT:
path = msg.get_text()
resp = self.server.lstat(path)
if issubclass(type(resp), SFTPAttributes):
self._response(request_number, CMD_ATTRS, resp)
else:
self._send_status(request_number, resp)
elif t == CMD_FSTAT:
handle = msg.get_binary()
if handle not in self.file_table:
self._send_status(
request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
return
resp = self.file_table[handle].stat()
if issubclass(type(resp), SFTPAttributes):
self._response(request_number, CMD_ATTRS, resp)
else:
self._send_status(request_number, resp)
elif t == CMD_SETSTAT:
path = msg.get_text()
attr = SFTPAttributes._from_msg(msg)
self._send_status(request_number, self.server.chattr(path, attr))
elif t == CMD_FSETSTAT:
handle = msg.get_binary()
attr = SFTPAttributes._from_msg(msg)
if handle not in self.file_table:
self._response(
request_number, SFTP_BAD_MESSAGE, 'Invalid handle')
return
self._send_status(
request_number, self.file_table[handle].chattr(attr))
elif t == CMD_READLINK:
path = msg.get_text()
resp = self.server.readlink(path)
if isinstance(resp, (bytes_types, string_types)):
self._response(
request_number, CMD_NAME, 1, resp, '', SFTPAttributes())
else:
self._send_status(request_number, resp)
elif t == CMD_SYMLINK:
# the sftp 2 draft is incorrect here!
# path always follows target_path
target_path = msg.get_text()
path = msg.get_text()
self._send_status(
request_number, self.server.symlink(target_path, path))
elif t == CMD_REALPATH:
path = msg.get_text()
rpath = self.server.canonicalize(path)
self._response(
request_number, CMD_NAME, 1, rpath, '', SFTPAttributes())
elif t == CMD_EXTENDED:
tag = msg.get_text()
if tag == 'check-file':
self._check_file(request_number, msg)
elif tag == 'posix-rename@openssh.com':
oldpath = msg.get_text()
newpath = msg.get_text()
self._send_status(
request_number, self.server.posix_rename(oldpath, newpath)
)
else:
self._send_status(request_number, SFTP_OP_UNSUPPORTED)
else:
self._send_status(request_number, SFTP_OP_UNSUPPORTED)
from paramiko.sftp_handle import SFTPHandle
|
ibab/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/histogram_ops.py
|
9
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""## Histograms
@@histogram_fixed_width
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import math_ops
def histogram_fixed_width(values,
value_range,
nbins=100,
dtype=dtypes.int32,
name=None):
"""Return histogram of values.
Given the tensor `values`, this operation returns a rank 1 histogram counting
the number of entries in `values` that fell into every bin. The bins are
equal width and determined by the arguments `value_range` and `nbins`.
Args:
values: Numeric `Tensor`.
value_range: Shape [2] `Tensor`. new_values <= value_range[0] will be
mapped to hist[0], values >= value_range[1] will be mapped to hist[-1].
Must be same dtype as new_values.
nbins: Scalar `int32 Tensor`. Number of histogram bins.
dtype: dtype for returned histogram.
name: A name for this operation (defaults to 'histogram_fixed_width').
Returns:
A 1-D `Tensor` holding histogram of values.
Examples:
```python
# Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf)
nbins = 5
value_range = [0.0, 5.0]
new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15]
  with tf.Session() as sess:
    hist = tf.histogram_fixed_width(new_values, value_range, nbins=5)
    sess.run(hist) => [2, 1, 1, 0, 2]
```
"""
with ops.op_scope([values, value_range, nbins], name,
'histogram_fixed_width') as scope:
values = ops.convert_to_tensor(values, name='values')
values = array_ops.reshape(values, [-1])
value_range = ops.convert_to_tensor(value_range, name='value_range')
nbins = ops.convert_to_tensor(nbins, dtype=dtypes.int32, name='nbins')
nbins_float = math_ops.to_float(nbins)
# Map tensor values that fall within value_range to [0, 1].
scaled_values = math_ops.truediv(values - value_range[0],
value_range[1] - value_range[0],
name='scaled_values')
# map tensor values within the open interval value_range to {0,.., nbins-1},
# values outside the open interval will be zero or less, or nbins or more.
indices = math_ops.floor(nbins_float * scaled_values, name='indices')
# Clip edge cases (e.g. value = value_range[1]) or "outliers."
indices = math_ops.cast(
clip_ops.clip_by_value(indices, 0, nbins_float - 1), dtypes.int32)
# TODO(langmore) This creates an array of ones to add up and place in the
# bins. This is inefficient, so replace when a better Op is available.
return math_ops.unsorted_segment_sum(
array_ops.ones_like(indices, dtype=dtype),
indices,
nbins,
name=scope)
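# A quick hand check of the docstring example above (this only restates the
# arithmetic of the algorithm; it is not additional library behavior):
#   scaled_values = (new_values - 0.0) / (5.0 - 0.0) = [-0.2, 0.0, 0.3, 0.4, 1.0, 3.0]
#   indices = floor(5 * scaled_values)               = [-1, 0, 1, 2, 5, 15]
#   clipped to [0, nbins - 1] = [0, 4]               = [0, 0, 1, 2, 4, 4]
# so the per-bin counts are [2, 1, 1, 0, 2], matching the expected output.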
|
shsingh/ansible
|
refs/heads/devel
|
lib/ansible/modules/web_infrastructure/ansible_tower/tower_receive.py
|
57
|
#!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, John Westcott IV <john.westcott.iv@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_receive
author: "John Westcott IV (@john-westcott-iv)"
version_added: "2.8"
short_description: Receive assets from Ansible Tower.
description:
- Receive assets from Ansible Tower. See
U(https://www.ansible.com/tower) for an overview.
options:
all:
description:
- Export all assets
type: bool
default: 'False'
organization:
description:
- List of organization names to export
default: []
user:
description:
- List of user names to export
default: []
team:
description:
- List of team names to export
default: []
credential_type:
description:
- List of credential type names to export
default: []
credential:
description:
- List of credential names to export
default: []
notification_template:
description:
- List of notification template names to export
default: []
inventory_script:
description:
- List of inventory script names to export
default: []
inventory:
description:
- List of inventory names to export
default: []
project:
description:
- List of project names to export
default: []
job_template:
description:
- List of job template names to export
default: []
workflow:
description:
- List of workflow names to export
default: []
requirements:
- "ansible-tower-cli >= 3.3.0"
notes:
- Specifying a name of "all" for any asset type will export all items of that asset type.
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Export all tower assets
tower_receive:
all: True
tower_config_file: "~/tower_cli.cfg"
- name: Export all inventories
tower_receive:
inventory:
- all
- name: Export a job template named "My Template" and all Credentials
tower_receive:
job_template:
- "My Template"
credential:
- all
'''
RETURN = '''
assets:
description: The exported assets
returned: success
type: dict
sample: [ {}, {} ]
'''
from ansible.module_utils.ansible_tower import TowerModule, tower_auth_config, HAS_TOWER_CLI
try:
from tower_cli.cli.transfer.receive import Receiver
from tower_cli.cli.transfer.common import SEND_ORDER
from tower_cli.utils.exceptions import TowerCLIError
from tower_cli.conf import settings
TOWER_CLI_HAS_EXPORT = True
except ImportError:
TOWER_CLI_HAS_EXPORT = False
def main():
argument_spec = dict(
all=dict(type='bool', default=False),
credential=dict(type='list', default=[]),
credential_type=dict(type='list', default=[]),
inventory=dict(type='list', default=[]),
inventory_script=dict(type='list', default=[]),
job_template=dict(type='list', default=[]),
notification_template=dict(type='list', default=[]),
organization=dict(type='list', default=[]),
project=dict(type='list', default=[]),
team=dict(type='list', default=[]),
user=dict(type='list', default=[]),
workflow=dict(type='list', default=[]),
)
module = TowerModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
if not TOWER_CLI_HAS_EXPORT:
module.fail_json(msg='ansible-tower-cli version does not support export')
export_all = module.params.get('all')
assets_to_export = {}
for asset_type in SEND_ORDER:
assets_to_export[asset_type] = module.params.get(asset_type)
result = dict(
assets=None,
changed=False,
message='',
)
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
try:
receiver = Receiver()
result['assets'] = receiver.export_assets(all=export_all, asset_input=assets_to_export)
module.exit_json(**result)
except TowerCLIError as e:
result['message'] = e.message
module.fail_json(msg='Receive Failed', **result)
if __name__ == '__main__':
main()
|
akash1808/nova
|
refs/heads/master
|
nova/tests/functional/v3/test_create_backup.py
|
29
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import mock
from nova.tests.functional.v3 import test_servers
from nova.tests.unit.image import fake
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
class CreateBackupSamplesJsonTest(test_servers.ServersSampleBase):
extension_name = "os-create-backup"
extra_extensions_to_load = ["os-access-ips"]
# TODO(park): Overriding '_api_version' till all functional tests
# are merged between v2 and v2.1. After that base class variable
# itself can be changed to 'v2'
_api_version = 'v2'
def _get_flags(self):
f = super(CreateBackupSamplesJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.admin_actions.Admin_actions')
return f
def setUp(self):
"""setUp Method for PauseServer api samples extension
This method creates the server that will be used in each tests
"""
super(CreateBackupSamplesJsonTest, self).setUp()
self.uuid = self._post_server()
@mock.patch.object(fake._FakeImageService, 'detail', return_value=[])
def test_post_backup_server(self, mock_method):
# Get api samples to backup server request.
response = self._do_post('servers/%s/action' % self.uuid,
'create-backup-req', {})
self.assertEqual(202, response.status_code)
|
twitter/pants
|
refs/heads/master
|
src/python/pants/util/socket.py
|
2
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import errno
import io
import select
import socket
from builtins import object
def teardown_socket(s):
"""Shuts down and closes a socket."""
try:
s.shutdown(socket.SHUT_WR)
except socket.error:
pass
finally:
s.close()
def safe_select(*args, **kwargs):
# N.B. This while loop is purely to facilitate SA_RESTART-like behavior for select(), which is
# (apparently) not covered by signal.siginterrupt(signal.SIGINT, False) when a timeout is passed.
# This helps avoid an unhandled select.error(4, 'Interrupted system call') on SIGINT.
# See https://bugs.python.org/issue12224 for more info.
while 1:
try:
return select.select(*args, **kwargs)
except (OSError, select.error) as e:
if e[0] != errno.EINTR:
raise
class RecvBufferedSocket(object):
"""A socket wrapper that simplifies recv() buffering."""
def __init__(self, sock, chunk_size=io.DEFAULT_BUFFER_SIZE, select_timeout=None):
"""
:param socket sock: The socket.socket object to wrap.
:param int chunk_size: The smallest max read size for calls to recv() in bytes.
:param float select_timeout: The select timeout for a socket read in seconds. An integer value
effectively makes self.recv non-blocking (default: None, blocking).
"""
self._socket = sock
self._chunk_size = chunk_size
self._select_timeout = select_timeout
self._buffer = b''
self._maybe_tune_socket(sock)
def _maybe_tune_socket(self, sock):
try:
# Disable Nagle's algorithm to improve latency.
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except (OSError, IOError):
# This can fail in tests where `socket.socketpair()` is used, or potentially
# in odd environments - but we shouldn't ever crash over it.
return
def recv(self, bufsize):
"""Buffers up to _chunk_size bytes when the internal buffer has less than `bufsize` bytes."""
assert bufsize > 0, 'a positive bufsize is required'
if len(self._buffer) < bufsize:
readable, _, _ = safe_select([self._socket], [], [], self._select_timeout)
if readable:
recvd = self._socket.recv(max(self._chunk_size, bufsize))
self._buffer = self._buffer + recvd
return_buf, self._buffer = self._buffer[:bufsize], self._buffer[bufsize:]
return return_buf
def __getattr__(self, attr):
return getattr(self._socket, attr)
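# Added usage sketch, not part of the original pants module: wrap one end of a
# Unix socketpair and read a fixed-size message through the buffering wrapper.
if __name__ == '__main__':
  left, right = socket.socketpair()
  buffered = RecvBufferedSocket(left)
  right.sendall(b'hello world')
  # The first recv() buffers the whole payload but returns only 5 bytes; the
  # second call is served from the internal buffer without another select().
  print(buffered.recv(5))   # b'hello'
  print(buffered.recv(6))   # b' world'
  teardown_socket(right)
  teardown_socket(left)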
|
chirpradio/chirpradio-volunteers
|
refs/heads/master
|
site-packages/django/core/management/commands/sqlflush.py
|
36
|
from django.core.management.base import NoArgsCommand
class Command(NoArgsCommand):
help = "Returns a list of the SQL statements required to return all tables in the database to the state they were in just after they were installed."
output_transaction = True
def handle_noargs(self, **options):
from django.core.management.sql import sql_flush
return u'\n'.join(sql_flush(self.style, only_django=True)).encode('utf-8')
|
AleCandido/Lab3
|
refs/heads/master
|
Esercitazione7/Esercitazione7.py
|
1
|
import getpass
import sys
from uncertainties import unumpy
import numpy as np
if getpass.getuser() == "alessandro":
path = "/home/alessandro/Documents/Università/3°anno/Laboratorio3/Lab3/"
elif getpass.getuser() == "Roberto":
path = "C:\\Users\\Roberto\\Documents\\GitHub\\Lab3\\"
elif getpass.getuser() == "Studenti":
path = "C:\\Users\\Studenti\\Desktop\\Lab3\\"
else:
raise Error("unknown user, please specify it and the path in the file Esercitazione*.py")
sys.path = sys.path + [path]
from BobLightyear import *
dir= path + "Esercitazione7/"
###########################################################################
#GAIN-BANDWIDTH PLOT#
file="gain_bandwidth"
def f(x, a, b):
return a+b*x
VIN2 = umme(156e-3,"volt_ar","osc")
p0=[1,1]
def XYfun(a):
return unumpy.log10(a[1]), 20*unumpy.log10(a[0]/VIN2)
unit=[("volt_ar","osc"),("freq","osc")]
titolo="Grafico gain-bandwidth"
Xlab="Frequenza [decadi]"
Ylab="Guadagno [dB]"
tab=["$V_{OUT}$ [$V$]","Freq. [Hz]"]
fit(dir,file,unit,f,p0,titolo,Xlab,Ylab,XYfun, table=True, tab=tab, out=True)
#(a,b)=uncertainties.correlated_values_norm([(-19.5 , 0.7),(125 , 4)], [[ 1. ,-0.99871014],[-0.99871014, 1. ]])
#print ("Prodotto gain-bandwidth =", 10**(-b/a))
###########################################################################
##
file="V_t"
def f(x, a, b):
return a*np.log(x/b)
p0=[1,1]
def XYfun(a):
return a[0], a[1]
unit=[("volt_nc","osc"),("time","osc")]
titolo="$V_{IN}$ vs TOT"
Xlab="Tensione in ingresso [$V$]"
Ylab="Durata segnale in uscita [s]"
tab=["$V_{IN}$ [$V$]","TOT [s]"]
fit(dir,file,unit,f,p0,titolo,Xlab,Ylab,XYfun, table=True, tab=tab)
###########################################################################
|
wavicles/pycode-browser
|
refs/heads/master
|
Code/Physics/spring3d.py
|
6
|
#spring3d.py
from visual import *
base = box (pos=(0,-1,0), length=16, height=0.1, width=4, color=color.blue)
wall = box (pos=(0,0,0), length=0.1, height=2, width=4, color=color.white)
ball = sphere (pos=(4,0,0), radius=1, color=color.red)
spring = helix(pos=(0,0,0), axis=(4,0,0), radius=0.5, color=color.red)
ball2 = sphere (pos=(-4,0,0), radius=1, color=color.green)
spring2 = helix(pos=(0,0,0), axis=(-4,0,0), radius=0.5, color=color.green)
t = 0.0
dt = 0.01
x1 = 2.0
x2 = -2.0
v1 = 0.0
v2 = 0.0
k = 1000.0
m = 1.0
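# Added note: with k = 1000 N/m and m = 1 kg the undamped natural angular
# frequency is omega = sqrt(k/m) ~= 31.6 rad/s (a period of about 0.2 s);
# the green ball also feels a damping force proportional to its velocity.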
while 1:
rate(20)
f1 = -k * x1
v1 = v1 + (f1/m ) * dt # Acceleration = Force / mass ; dv = a.dt
f2 = -k * x2 - v2 # damping proportional to velocity
v2 = v2 + (f2/m ) * dt # Acceleration = Force / mass ; dv = a.dt
x1 = x1 + v1 * dt
x2 = x2 + v2 * dt
t = t + dt
spring.length = 4 + x1
ball.x = x1 + 4
spring2.length = 4 - x2
ball2.x = x2 - 4
|
wilebeast/FireFox-OS
|
refs/heads/master
|
B2G/gecko/testing/mozbase/moztest/moztest/output/base.py
|
12
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import with_statement
from contextlib import closing
from StringIO import StringIO
try:
from abc import abstractmethod
except ImportError:
# abc is python 2.6+
# from https://github.com/mozilla/mozbase/blob/master/mozdevice/mozdevice/devicemanager.py
def abstractmethod(method):
line = method.func_code.co_firstlineno
filename = method.func_code.co_filename
def not_implemented(*args, **kwargs):
raise NotImplementedError('Abstract method %s at File "%s", line %s should be implemented by a concrete class' %
(repr(method), filename,line))
return not_implemented
class Output(object):
""" Abstract base class for outputting test results """
@abstractmethod
def serialize(self, results_collection, file_obj):
""" Writes the string representation of the results collection
to the given file object"""
def dump_string(self, results_collection):
""" Returns the string representation of the results collection """
with closing(StringIO()) as s:
self.serialize(results_collection, s)
return s.getvalue()
# helper functions
def count(iterable):
""" Return the count of an iterable. Useful for generators. """
c = 0
for i in iterable:
c += 1
return c
def long_name(test):
if test.test_class:
return '%s.%s' % (test.test_class, test.name)
return test.name
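# Added illustration, not part of the original mozbase module: a minimal
# concrete Output that serializes a results collection with str().
if __name__ == '__main__':
    class PlainTextOutput(Output):
        def serialize(self, results_collection, file_obj):
            file_obj.write(str(results_collection))
    print(PlainTextOutput().dump_string(['test-1: PASS', 'test-2: FAIL']))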
|
ktok07b6/polyphony
|
refs/heads/master
|
tests/testbench/tb08.py
|
1
|
from polyphony import testbench
@testbench
def test():
sum = 0
for i in range(2, 5):
sum += i-1
print(sum)
assert sum == 6
test()
|
ProjectSWGCore/NGECore2
|
refs/heads/master
|
scripts/object/tangible/wearables/armor/clone_trooper/armor_clone_trooper_rebel_s01_bicep_l.py
|
14
|
import sys
def setup(core, object):
object.setStringAttribute('armor_category', '@obj_attr_n:armor_battle')
object.setStringAttribute('required_faction', 'Rebel')
object.setIntAttribute('cat_armor_standard_protection.kinetic', 5664)
object.setIntAttribute('cat_armor_standard_protection.energy', 5664)
object.setIntAttribute('cat_armor_special_protection.special_protection_type_heat', 5664)
object.setIntAttribute('cat_armor_special_protection.special_protection_type_cold', 5664)
object.setIntAttribute('cat_armor_special_protection.special_protection_type_acid', 5664)
object.setIntAttribute('cat_armor_special_protection.special_protection_type_electricity', 5664)
return
|
gevero/py_matrix
|
refs/heads/master
|
py_matrix/utils.py
|
1
|
import numpy as np
import scipy as sp
import scipy.constants  # explicitly load the constants subpackage used below
z0 = sp.constants.value('characteristic impedance of vacuum')
def rot_ell(m_rt_ps):
'''Utility to compute rotation and ellipticity
starting from reflection and transmission matrix
Parameters
----------
'm_RTsp' = sp reflection and transmission matrix
Returns
-------
'a dictionary' = {'theta_p':theta_p,
'eps_p':eps_p,
'theta_s':theta_s,
'eps_s':eps_s}
'''
# extracting values from the matrix
rt_pp = m_rt_ps[0,0]
rt_ps = m_rt_ps[0,1]
rt_sp = m_rt_ps[1,0]
rt_ss = m_rt_ps[1,1]
# calculating the values
theta_p = np.real(rt_sp/rt_pp)
eps_p = np.imag(rt_sp/rt_pp)
theta_s = np.real(rt_ps/rt_ss)
eps_s = np.imag(rt_ps/rt_ss)
out_dict = {'theta_p':theta_p,
'eps_p':eps_p,
'theta_s':theta_s,
'eps_s':eps_s}
return out_dict
def R_ps_rl(m_r_ps):
'''Utility to compute reflectance for p,s and right,left
circular polarization
Parameters
----------
'm_r_ps' = ps reflection matrix
Returns
-------
'a dictionary' = {'R_p':R_p, #reflectances
'R_s':R_s,
'R_l':R_l,
'R_r':R_r}
'''
# extracting values from the matrix
r_pp = m_r_ps[0,0]
r_ps = m_r_ps[0,1]
r_sp = m_r_ps[1,0]
r_ss = m_r_ps[1,1]
# calculating the reflectance
R_p = np.abs(r_pp)**2 + np.abs(r_sp)**2
R_s = np.abs(r_ss)**2 + np.abs(r_ps)**2
R_r = 0.5*(np.abs(r_pp)**2 + np.abs(r_ss)**2 +
np.abs(r_sp)**2 + np.abs(r_ps)**2 +
np.real(1j*r_pp*np.conj(r_ps)) +
np.real(1j*r_sp*np.conj(r_ss)))
R_l = 0.5*(np.abs(r_pp)**2 + np.abs(r_ss)**2 +
np.abs(r_sp)**2 + np.abs(r_ps)**2 -
np.real(1j*r_pp*np.conj(r_ps)) -
np.real(1j*r_sp*np.conj(r_ss)))
out_dict = {'R_p':R_p,
'R_s':R_s,
'R_r':R_r,
'R_l':R_l}
return out_dict
def T_ps_rl(m_t_ps,theta_0,n_0,n_s):
'''Utility to compute transmittance for p,s and right.left
circular polarization
Parameters
----------
'm_t_ps' = ps transmission matrix
Returns
-------
'a dictionary' = {'T_p':T_p, #transmittances
'T_s':T_s,
'T_l':T_l,
'T_r':T_r
'A_p':-np.log10(T_p), #absorbances
'A_s':-np.log10(T_s),
'A_r':-np.log10(T_r),
'A_l':-np.log10(T_l)}
'''
# extracting values from the matrix
t_pp = m_t_ps[0,0]
t_ps = m_t_ps[0,1]
t_sp = m_t_ps[1,0]
t_ss = m_t_ps[1,1]
# calculating the transmittance
theta_s = sp.arcsin(np.real_if_close(np.sin(theta_0)*n_0/n_s))
norm = (np.real(n_s*np.conj(np.cos(theta_s))) /
np.real(n_0*np.conj(np.cos(theta_0))))
T_p = norm*(np.abs(t_pp)**2 + np.abs(t_sp)**2)
T_s = norm*(np.abs(t_ss)**2 + np.abs(t_ps)**2)
T_r = 0.5*norm*(np.abs(t_pp)**2 + np.abs(t_ss)**2 +
np.abs(t_sp)**2 + np.abs(t_ps)**2 +
2.0*np.real(1j*t_pp*np.conj(t_ps)) +
2.0*np.real(1j*t_sp*np.conj(t_ss)))
T_l = 0.5*norm*(np.abs(t_pp)**2 + np.abs(t_ss)**2 +
np.abs(t_sp)**2 + np.abs(t_ps)**2 -
2.0*np.real(1j*t_pp*np.conj(t_ps)) -
2.0*np.real(1j*t_sp*np.conj(t_ss)))
out_dict = {'T_p':T_p,
'T_s':T_s,
'T_l':T_l,
'T_r':T_r,
'A_p':-np.log10(T_p),
'A_s':-np.log10(T_s),
'A_r':-np.log10(T_r),
'A_l':-np.log10(T_l)}
return out_dict
def field(m_K,m_E,m_H,e_list_3x3,d_list,x,y,z,pol):
'''Starting from field amplitudes and wavevectors in each layer, and from
the multilayer structure (e_list_3x3,d_list), calculates the complex
electric and magnetic field, and along with that the Poynting vector and
energy absorption at the location (x,y,z) in the multilayer
Parameters
----------
'm_K' = wavevectors shape = (n_layers,n_k=4,n_xyz=3)
'm_E,m_H' = field amplitudes shape = (n_layers,n_k=4,n_xyz=3,n_pol=2)
'e_list_3x3'= [n_layer+2,3,3] numpy array: it contains n_layers+2 3x3
dielectric tensors:
e_list_3x3[0]= 3x3 incident medium dielectric tensor: must be real,diagonal
and isotropic,
e_list_3x3[n_layers+1] = 3x3 substrate dielectric tensor: must be real,
diagonal and isotropic,
e_list_3x3[n]=3x3 dielectric tensor of the n_th layers: arbitrary
'd_list'= n_layers+2 numpy array: contains layer thinknesses:
d_list[0]=d_list[n_layers+1]=0: for the incident medium and substrate
d_list[n]=d_n n_th layer thickness in nm
x,y,z = coordinates in nm
pol = 'TE' or 'TM', polarization state
Returns
-------
'a dictionary'= {'E': v_E, # electric field vector
'H': v_H, # magnetic field vector
'S': v_S, # Poynting vector
'normE': np.linalg.norm(v_E), # normalized E
'normH': np.linalg.norm(v_H), # normalized H
'normS': np.linalg.norm(v_S), # normalized S
'absor':absor} # absorption at (x,y,z)
'''
# auxiliary computations
n_layers = len(e_list_3x3)-2 # recovering number of layers
v_z = np.cumsum(d_list)[:-1] # starting coordinate of each layers (except inc)
n_l = np.count_nonzero(z > v_z) # current layers
# output vectors
v_E = np.zeros(3,dtype=np.complex128)
v_H = np.zeros(3,dtype=np.complex128)
v_S = np.zeros(3)
v_dE = np.zeros(3,dtype=np.complex128)
v_dH = np.zeros(3,dtype=np.complex128)
# selecting the polarization
if pol == 'TE':
i_p = 0
else:
i_p = 1
# right surface coordinate
if n_l <= n_layers:
z_n = v_z[n_l]
else:
z_n = v_z[n_l-1]
# summing the four components of the fields
for m in range(4):
v_E = v_E + m_E[n_l,m,:,i_p]*np.exp(1j*(m_K[n_l,m,0]*x +
m_K[n_l,m,1]*y +
m_K[n_l,m,2]*(z-z_n)))
v_dE = v_dE + 1j*m_K[n_l,m,2]*m_E[n_l,m,:,i_p]*np.exp(1j*(m_K[n_l,m,0]*x +
m_K[n_l,m,1]*y +
m_K[n_l,m,2]*(z-z_n)))
v_H = v_H + m_H[n_l,m,:,i_p]*np.exp(1j*(m_K[n_l,m,0]*x +
m_K[n_l,m,1]*y +
m_K[n_l,m,2]*(z-z_n)))
v_dH = v_dH + 1j*m_K[n_l,m,2]*m_H[n_l,m,:,i_p]*np.exp(1j*(m_K[n_l,m,0]*x +
m_K[n_l,m,1]*y +
m_K[n_l,m,2]*(z-z_n)))
v_S = 0.5*np.real(np.cross(v_E,np.conj(v_H/z0)))
I_abs = (0.5*np.real(np.cross(v_dE,np.conj(v_H/z0)))[2]+
0.5*np.real(np.cross(v_E,np.conj(v_dH/z0)))[2])
return {'E': v_E, 'H': v_H,'S': v_S,'normE': np.linalg.norm(v_E), 'normH': np.linalg.norm(v_H),'normS': np.linalg.norm(v_S),'abs':I_abs}
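# Added usage sketch, not part of the original py_matrix module: rotation,
# ellipticity and reflectances for a toy 2x2 reflection matrix with a small
# amount of p<->s polarization mixing.
if __name__ == '__main__':
    m_r_example = np.array([[0.90 + 0.00j, 0.01 + 0.005j],
                            [0.01 + 0.005j, 0.80 + 0.00j]])
    print(rot_ell(m_r_example))
    print(R_ps_rl(m_r_example))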
|
stevehof/location-ninja
|
refs/heads/master
|
lib/sqlalchemy/ext/orderinglist.py
|
22
|
# ext/orderinglist.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""A custom list that manages index/position information for contained
elements.
:author: Jason Kirtland
``orderinglist`` is a helper for mutable ordered relationships. It will
intercept list operations performed on a :func:`.relationship`-managed
collection and
automatically synchronize changes in list position onto a target scalar
attribute.
Example: A ``slide`` table, where each row refers to zero or more entries
in a related ``bullet`` table. The bullets within a slide are
displayed in order based on the value of the ``position`` column in the
``bullet`` table. As entries are reordered in memory, the value of the
``position`` attribute should be updated to reflect the new sort order::
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position")
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
The standard relationship mapping will produce a list-like attribute on each
``Slide`` containing all related ``Bullet`` objects,
but coping with changes in ordering is not handled automatically.
When appending a ``Bullet`` into ``Slide.bullets``, the ``Bullet.position``
attribute will remain unset until manually assigned. When the ``Bullet``
is inserted into the middle of the list, the following ``Bullet`` objects
will also need to be renumbered.
The :class:`.OrderingList` object automates this task, managing the
``position`` attribute on all ``Bullet`` objects in the collection. It is
constructed using the :func:`.ordering_list` factory::
from sqlalchemy.ext.orderinglist import ordering_list
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
With the above mapping the ``Bullet.position`` attribute is managed::
s = Slide()
s.bullets.append(Bullet())
s.bullets.append(Bullet())
s.bullets[1].position
>>> 1
s.bullets.insert(1, Bullet())
s.bullets[2].position
>>> 2
The :class:`.OrderingList` construct only works with **changes** to a
collection, and not the initial load from the database, and requires that the
list be sorted when loaded. Therefore, be sure to specify ``order_by`` on the
:func:`.relationship` against the target ordering attribute, so that the
ordering is correct when first loaded.
.. warning::
:class:`.OrderingList` only provides limited functionality when a primary
key column or unique column is the target of the sort. Operations
that are unsupported or are problematic include:
* two entries must trade values. This is not supported directly in the
case of a primary key or unique constraint because it means at least
one row would need to be temporarily removed first, or changed to
a third, neutral value while the switch occurs.
* an entry must be deleted in order to make room for a new entry.
SQLAlchemy's unit of work performs all INSERTs before DELETEs within a
single flush. In the case of a primary key, it will trade
an INSERT/DELETE of the same primary key for an UPDATE statement in order
to lessen the impact of this limitation, however this does not take place
for a UNIQUE column.
A future feature will allow the "DELETE before INSERT" behavior to be
possible, alleviating this limitation, though this feature will require
explicit configuration at the mapper level for sets of columns that
are to be handled in this way.
:func:`.ordering_list` takes the name of the related object's ordering
attribute as an argument. By default, the zero-based integer index of the
object's position in the :func:`.ordering_list` is synchronized with the
ordering attribute: index 0 will get position 0, index 1 position 1, etc. To
start numbering at 1 or some other integer, provide ``count_from=1``.
"""
from ..orm.collections import collection, collection_adapter
from .. import util
__all__ = ['ordering_list']
def ordering_list(attr, count_from=None, **kw):
"""Prepares an :class:`OrderingList` factory for use in mapper definitions.
Returns an object suitable for use as an argument to a Mapper
relationship's ``collection_class`` option. e.g.::
from sqlalchemy.ext.orderinglist import ordering_list
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
:param attr:
Name of the mapped attribute to use for storage and retrieval of
ordering information
:param count_from:
Set up an integer-based ordering, starting at ``count_from``. For
example, ``ordering_list('pos', count_from=1)`` would create a 1-based
list in SQL, storing the value in the 'pos' column. Ignored if
``ordering_func`` is supplied.
Additional arguments are passed to the :class:`.OrderingList` constructor.
"""
kw = _unsugar_count_from(count_from=count_from, **kw)
return lambda: OrderingList(attr, **kw)
# Ordering utility functions
def count_from_0(index, collection):
"""Numbering function: consecutive integers starting at 0."""
return index
def count_from_1(index, collection):
"""Numbering function: consecutive integers starting at 1."""
return index + 1
def count_from_n_factory(start):
"""Numbering function: consecutive integers starting at arbitrary start."""
def f(index, collection):
return index + start
try:
f.__name__ = 'count_from_%i' % start
except TypeError:
pass
return f
def _unsugar_count_from(**kw):
"""Builds counting functions from keyword arguments.
Keyword argument filter, prepares a simple ``ordering_func`` from a
``count_from`` argument, otherwise passes ``ordering_func`` on unchanged.
"""
count_from = kw.pop('count_from', None)
if kw.get('ordering_func', None) is None and count_from is not None:
if count_from == 0:
kw['ordering_func'] = count_from_0
elif count_from == 1:
kw['ordering_func'] = count_from_1
else:
kw['ordering_func'] = count_from_n_factory(count_from)
return kw
class OrderingList(list):
"""A custom list that manages position information for its children.
The :class:`.OrderingList` object is normally set up using the
:func:`.ordering_list` factory function, used in conjunction with
the :func:`.relationship` function.
"""
def __init__(self, ordering_attr=None, ordering_func=None,
reorder_on_append=False):
"""A custom list that manages position information for its children.
``OrderingList`` is a ``collection_class`` list implementation that
syncs position in a Python list with a position attribute on the
mapped objects.
This implementation relies on the list starting in the proper order,
so be **sure** to put an ``order_by`` on your relationship.
:param ordering_attr:
Name of the attribute that stores the object's order in the
relationship.
:param ordering_func: Optional. A function that maps the position in
the Python list to a value to store in the
``ordering_attr``. Values returned are usually (but need not be!)
integers.
An ``ordering_func`` is called with two positional parameters: the
index of the element in the list, and the list itself.
If omitted, Python list indexes are used for the attribute values.
Two basic pre-built numbering functions are provided in this module:
``count_from_0`` and ``count_from_1``. For more exotic examples
like stepped numbering, alphabetical and Fibonacci numbering, see
the unit tests.
:param reorder_on_append:
Default False. When appending an object with an existing (non-None)
ordering value, that value will be left untouched unless
``reorder_on_append`` is true. This is an optimization to avoid a
variety of dangerous unexpected database writes.
SQLAlchemy will add instances to the list via append() when your
object loads. If for some reason the result set from the database
skips a step in the ordering (say, row '1' is missing but you get
'2', '3', and '4'), reorder_on_append=True would immediately
renumber the items to '1', '2', '3'. If you have multiple sessions
making changes, any of whom happen to load this collection even in
passing, all of the sessions would try to "clean up" the numbering
in their commits, possibly causing all but one to fail with a
concurrent modification error.
Recommend leaving this with the default of False, and just call
``reorder()`` if you're doing ``append()`` operations with
previously ordered instances or when doing some housekeeping after
manual sql operations.
"""
self.ordering_attr = ordering_attr
if ordering_func is None:
ordering_func = count_from_0
self.ordering_func = ordering_func
self.reorder_on_append = reorder_on_append
# More complex serialization schemes (multi column, e.g.) are possible by
# subclassing and reimplementing these two methods.
def _get_order_value(self, entity):
return getattr(entity, self.ordering_attr)
def _set_order_value(self, entity, value):
setattr(entity, self.ordering_attr, value)
def reorder(self):
"""Synchronize ordering for the entire collection.
Sweeps through the list and ensures that each object has accurate
ordering information set.
"""
for index, entity in enumerate(self):
self._order_entity(index, entity, True)
# As of 0.5, _reorder is no longer semi-private
_reorder = reorder
def _order_entity(self, index, entity, reorder=True):
have = self._get_order_value(entity)
# Don't disturb existing ordering if reorder is False
if have is not None and not reorder:
return
should_be = self.ordering_func(index, self)
if have != should_be:
self._set_order_value(entity, should_be)
def append(self, entity):
super(OrderingList, self).append(entity)
self._order_entity(len(self) - 1, entity, self.reorder_on_append)
def _raw_append(self, entity):
"""Append without any ordering behavior."""
super(OrderingList, self).append(entity)
_raw_append = collection.adds(1)(_raw_append)
def insert(self, index, entity):
super(OrderingList, self).insert(index, entity)
self._reorder()
def remove(self, entity):
super(OrderingList, self).remove(entity)
adapter = collection_adapter(self)
if adapter and adapter._referenced_by_owner:
self._reorder()
def pop(self, index=-1):
entity = super(OrderingList, self).pop(index)
self._reorder()
return entity
def __setitem__(self, index, entity):
if isinstance(index, slice):
step = index.step or 1
start = index.start or 0
if start < 0:
start += len(self)
stop = index.stop or len(self)
if stop < 0:
stop += len(self)
for i in range(start, stop, step):
self.__setitem__(i, entity[i])
else:
self._order_entity(index, entity, True)
super(OrderingList, self).__setitem__(index, entity)
def __delitem__(self, index):
super(OrderingList, self).__delitem__(index)
self._reorder()
def __setslice__(self, start, end, values):
super(OrderingList, self).__setslice__(start, end, values)
self._reorder()
def __delslice__(self, start, end):
super(OrderingList, self).__delslice__(start, end)
self._reorder()
def __reduce__(self):
return _reconstitute, (self.__class__, self.__dict__, list(self))
for func_name, func in list(locals().items()):
if (util.callable(func) and func.__name__ == func_name and
not func.__doc__ and hasattr(list, func_name)):
func.__doc__ = getattr(list, func_name).__doc__
del func_name, func
def _reconstitute(cls, dict_, items):
""" Reconstitute an :class:`.OrderingList`.
This is the adjoint to :meth:`.OrderingList.__reduce__`. It is used for
unpickling :class:`.OrderingList` objects.
"""
obj = cls.__new__(cls)
obj.__dict__.update(dict_)
list.extend(obj, items)
return obj
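# Added illustration, not part of the original SQLAlchemy module: assuming the
# package imports above resolve, OrderingList renumbers plain objects that
# expose the configured ordering attribute.
if __name__ == '__main__':
    class _Item(object):
        def __init__(self):
            self.position = None
    items = OrderingList('position', ordering_func=count_from_1)
    items.append(_Item())
    items.insert(0, _Item())
    print([item.position for item in items])  # [1, 2]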
|
geerlingguy/ansible
|
refs/heads/devel
|
test/units/plugins/inventory/test_constructed.py
|
48
|
# -*- coding: utf-8 -*-
# Copyright 2019 Alan Rominger <arominge@redhat.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import pytest
from ansible.errors import AnsibleParserError
from ansible.plugins.inventory.constructed import InventoryModule
from ansible.inventory.data import InventoryData
from ansible.template import Templar
@pytest.fixture()
def inventory_module():
r = InventoryModule()
r.inventory = InventoryData()
r.templar = Templar(None)
return r
def test_group_by_value_only(inventory_module):
inventory_module.inventory.add_host('foohost')
inventory_module.inventory.set_variable('foohost', 'bar', 'my_group_name')
host = inventory_module.inventory.get_host('foohost')
keyed_groups = [
{
'prefix': '',
'separator': '',
'key': 'bar'
}
]
inventory_module._add_host_to_keyed_groups(
keyed_groups, host.vars, host.name, strict=False
)
assert 'my_group_name' in inventory_module.inventory.groups
group = inventory_module.inventory.groups['my_group_name']
assert group.hosts == [host]
def test_keyed_group_separator(inventory_module):
inventory_module.inventory.add_host('farm')
inventory_module.inventory.set_variable('farm', 'farmer', 'mcdonald')
inventory_module.inventory.set_variable('farm', 'barn', {'cow': 'betsy'})
host = inventory_module.inventory.get_host('farm')
keyed_groups = [
{
'prefix': 'farmer',
'separator': '_old_',
'key': 'farmer'
},
{
'separator': 'mmmmmmmmmm',
'key': 'barn'
}
]
inventory_module._add_host_to_keyed_groups(
keyed_groups, host.vars, host.name, strict=False
)
for group_name in ('farmer_old_mcdonald', 'mmmmmmmmmmcowmmmmmmmmmmbetsy'):
assert group_name in inventory_module.inventory.groups
group = inventory_module.inventory.groups[group_name]
assert group.hosts == [host]
def test_keyed_group_empty_construction(inventory_module):
inventory_module.inventory.add_host('farm')
inventory_module.inventory.set_variable('farm', 'barn', {})
host = inventory_module.inventory.get_host('farm')
keyed_groups = [
{
'separator': 'mmmmmmmmmm',
'key': 'barn'
}
]
inventory_module._add_host_to_keyed_groups(
keyed_groups, host.vars, host.name, strict=True
)
assert host.groups == []
def test_keyed_group_host_confusion(inventory_module):
inventory_module.inventory.add_host('cow')
inventory_module.inventory.add_group('cow')
host = inventory_module.inventory.get_host('cow')
host.vars['species'] = 'cow'
keyed_groups = [
{
'separator': '',
'prefix': '',
'key': 'species'
}
]
inventory_module._add_host_to_keyed_groups(
keyed_groups, host.vars, host.name, strict=True
)
group = inventory_module.inventory.groups['cow']
# group cow has host of cow
assert group.hosts == [host]
def test_keyed_parent_groups(inventory_module):
inventory_module.inventory.add_host('web1')
inventory_module.inventory.add_host('web2')
inventory_module.inventory.set_variable('web1', 'region', 'japan')
inventory_module.inventory.set_variable('web2', 'region', 'japan')
host1 = inventory_module.inventory.get_host('web1')
host2 = inventory_module.inventory.get_host('web2')
keyed_groups = [
{
'prefix': 'region',
'key': 'region',
'parent_group': 'region_list'
}
]
for host in [host1, host2]:
inventory_module._add_host_to_keyed_groups(
keyed_groups, host.vars, host.name, strict=False
)
assert 'region_japan' in inventory_module.inventory.groups
assert 'region_list' in inventory_module.inventory.groups
region_group = inventory_module.inventory.groups['region_japan']
all_regions = inventory_module.inventory.groups['region_list']
assert all_regions.child_groups == [region_group]
assert region_group.hosts == [host1, host2]
def test_parent_group_templating(inventory_module):
inventory_module.inventory.add_host('cow')
inventory_module.inventory.set_variable('cow', 'sound', 'mmmmmmmmmm')
inventory_module.inventory.set_variable('cow', 'nickname', 'betsy')
host = inventory_module.inventory.get_host('cow')
keyed_groups = [
{
'key': 'sound',
'prefix': 'sound',
'parent_group': '{{ nickname }}'
},
{
'key': 'nickname',
'prefix': '',
'separator': '',
'parent_group': 'nickname' # statically-named parent group, conflicting with hostvar
},
{
'key': 'nickname',
'separator': '',
'parent_group': '{{ location | default("field") }}'
}
]
inventory_module._add_host_to_keyed_groups(
keyed_groups, host.vars, host.name, strict=True
)
# first keyed group, "betsy" is a parent group name dynamically generated
betsys_group = inventory_module.inventory.groups['betsy']
assert [child.name for child in betsys_group.child_groups] == ['sound_mmmmmmmmmm']
# second keyed group, "nickname" is a statically-named root group
nicknames_group = inventory_module.inventory.groups['nickname']
assert [child.name for child in nicknames_group.child_groups] == ['betsy']
# second keyed group actually generated the parent group of the first keyed group
# assert that these are, in fact, the same object
assert nicknames_group.child_groups[0] == betsys_group
# second keyed group has two parents
locations_group = inventory_module.inventory.groups['field']
assert [child.name for child in locations_group.child_groups] == ['betsy']
def test_parent_group_templating_error(inventory_module):
inventory_module.inventory.add_host('cow')
inventory_module.inventory.set_variable('cow', 'nickname', 'betsy')
host = inventory_module.inventory.get_host('cow')
keyed_groups = [
{
'key': 'nickname',
'separator': '',
'parent_group': '{{ location.barn-yard }}'
}
]
with pytest.raises(AnsibleParserError) as err_message:
inventory_module._add_host_to_keyed_groups(
keyed_groups, host.vars, host.name, strict=True
)
assert 'Could not generate parent group' in err_message
# invalid parent group did not raise an exception with strict=False
inventory_module._add_host_to_keyed_groups(
keyed_groups, host.vars, host.name, strict=False
)
# assert group was never added with invalid parent
assert 'betsy' not in inventory_module.inventory.groups
|
amwelch/a10sdk-python
|
refs/heads/master
|
a10sdk/core/vrrp/vrrp_a_vrid_tracking_options_gateway_ipv6_gateway.py
|
2
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Ipv6Gateway(A10BaseClass):
"""Class Description::
IPv6 Gateway.
Class ipv6-gateway supports CRUD Operations and inherits from `common/A10BaseClass`.
This class is the `"PARENT"` class for this module.`
:param ipv6_address: {"optional": false, "type": "string", "description": "IPV6 address", "format": "ipv6-address"}
:param priority_cost: {"description": "The amount the priority will decrease. help-val Priority", "format": "number", "type": "number", "maximum": 255, "minimum": 1, "optional": true}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/vrrp-a/vrid/{vrid_val}/tracking-options/gateway/ipv6-gateway/{ipv6_address}`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required = [ "ipv6_address"]
self.b_key = "ipv6-gateway"
self.a10_url="/axapi/v3/vrrp-a/vrid/{vrid_val}/tracking-options/gateway/ipv6-gateway/{ipv6_address}"
self.DeviceProxy = ""
self.ipv6_address = ""
self.priority_cost = ""
for keys, value in kwargs.items():
setattr(self,keys, value)
|
alirizakeles/zato
|
refs/heads/dsuch-f-gh723-add-exe-agent
|
code/zato-web-admin/src/zato/admin/static/brython/_brython/Lib/external_import.py
|
9
|
import os
import sys
from browser import doc
import urllib.request
## this module is able to download modules that are external to
## localhost/src
## so we could download from any URL
class ModuleFinder:
def __init__(self, path_entry):
print("external_import here..")
#print(path_entry)
self._module=None
if path_entry.startswith('http://'):
self.path_entry=path_entry
else:
raise ImportError()
def __str__(self):
return '<%s for "%s">' % (self.__class__.__name__, self.path_entry)
def find_module(self, fullname, path=None):
path = path or self.path_entry
#print('looking for "%s" in %s ...' % (fullname, path))
for _ext in ['js', 'pyj', 'py']:
_fp,_url,_headers=urllib.request.urlopen(path + '/' + '%s.%s' % (fullname, _ext))
self._module=_fp.read()
_fp.close()
if self._module is not None:
print("module found at %s:%s" % (path, fullname))
return ModuleLoader(path, fullname, self._module)
print('module %s not found' % fullname)
raise ImportError()
return None
class ModuleLoader:
"""Load source for modules"""
def __init__(self, filepath, name, module_source):
self._filepath=filepath
self._name=name
self._module_source=module_source
def get_source(self):
return self._module_source
def is_package(self):
return '.' in self._name
def load_module(self):
if self._name in sys.modules:
#print('reusing existing module from previous import of "%s"' % fullname)
mod = sys.modules[self._name]
return mod
_src=self.get_source()
if self._filepath.endswith('.js'):
mod=JSObject(import_js_module(_src, self._filepath, self._name))
elif self._filepath.endswith('.py'):
mod=JSObject(import_py_module(_src, self._filepath, self._name))
elif self._filepath.endswith('.pyj'):
mod=JSObject(import_pyj_module(_src, self._filepath, self._name))
else:
raise ImportError('Invalid Module: %s' % self._filepath)
# Set a few properties required by PEP 302
mod.__file__ = self._filepath
mod.__name__ = self._name
mod.__path__ = os.path.abspath(self._filepath)
mod.__loader__ = self
mod.__package__ = '.'.join(self._name.split('.')[:-1])
if self.is_package():
print('adding path for package')
# Set __path__ for packages
# so we can find the sub-modules.
mod.__path__ = [ self._filepath ]
else:
print('imported as regular module')
print('creating a new module object for "%s"' % self._name)
sys.modules.setdefault(self._name, mod)
JSObject(__BRYTHON__.imported)[self._name]=mod
return mod
|
san-mate/python-social-auth
|
refs/heads/master
|
examples/pyramid_example/example/views.py
|
62
|
from pyramid.view import view_config
@view_config(route_name='home', renderer='templates/home.pt')
def home(request):
return {}
@view_config(route_name='done', renderer='templates/done.pt')
def done(request):
return {}
|
dgzurita/odoo
|
refs/heads/8.0
|
addons/web_kanban_gauge/__openerp__.py
|
428
|
{
'name': 'Gauge Widget for Kanban',
'category': 'Hidden',
'description': """
This widget allows displaying gauges using the justgage library.
""",
'version': '1.0',
'depends': ['web_kanban'],
'data' : [
'views/web_kanban_gauge.xml',
],
'qweb': [
],
'auto_install': True,
}
|
otherness-space/myProject003
|
refs/heads/master
|
my_project_003/lib/python2.7/site-packages/django/contrib/gis/tests/inspectapp/tests.py
|
109
|
from __future__ import absolute_import
import os
from django.db import connections
from django.test import TestCase
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geometry.test_data import TEST_DATA
from django.contrib.gis.tests.utils import HAS_SPATIAL_DB
from django.utils.unittest import skipUnless
if HAS_GDAL:
from django.contrib.gis.gdal import Driver
from django.contrib.gis.utils.ogrinspect import ogrinspect
from .models import AllOGRFields
@skipUnless(HAS_GDAL and HAS_SPATIAL_DB, "GDAL and spatial db are required.")
class OGRInspectTest(TestCase):
maxDiff = 1024
def test_poly(self):
shp_file = os.path.join(TEST_DATA, 'test_poly', 'test_poly.shp')
model_def = ogrinspect(shp_file, 'MyModel')
expected = [
'# This is an auto-generated Django model module created by ogrinspect.',
'from django.contrib.gis.db import models',
'',
'class MyModel(models.Model):',
' float = models.FloatField()',
' int = models.FloatField()',
' str = models.CharField(max_length=80)',
' geom = models.PolygonField(srid=-1)',
' objects = models.GeoManager()',
]
self.assertEqual(model_def, '\n'.join(expected))
def test_date_field(self):
shp_file = os.path.join(TEST_DATA, 'cities', 'cities.shp')
model_def = ogrinspect(shp_file, 'City')
expected = [
'# This is an auto-generated Django model module created by ogrinspect.',
'from django.contrib.gis.db import models',
'',
'class City(models.Model):',
' name = models.CharField(max_length=80)',
' population = models.FloatField()',
' density = models.FloatField()',
' created = models.DateField()',
' geom = models.PointField(srid=-1)',
' objects = models.GeoManager()',
]
self.assertEqual(model_def, '\n'.join(expected))
def test_time_field(self):
        # Only possible to test this on PostGIS at the moment. MySQL
        # complains about permissions, and SpatiaLite/Oracle are
        # insanely difficult to get support compiled into GDAL.
if not connections['default'].ops.postgis:
self.skipTest("This database does not support 'ogrinspect'ion")
# Getting the database identifier used by OGR, if None returned
# GDAL does not have the support compiled in.
ogr_db = get_ogr_db_string()
if not ogr_db:
self.skipTest("Your GDAL installation does not support PostGIS databases")
# Writing shapefiles via GDAL currently does not support writing OGRTime
# fields, so we need to actually use a database
model_def = ogrinspect(ogr_db, 'Measurement',
layer_key=AllOGRFields._meta.db_table,
decimal=['f_decimal'])
self.assertTrue(model_def.startswith(
'# This is an auto-generated Django model module created by ogrinspect.\n'
'from django.contrib.gis.db import models\n'
'\n'
'class Measurement(models.Model):\n'
))
# The ordering of model fields might vary depending on several factors (version of GDAL, etc.)
self.assertIn(' f_decimal = models.DecimalField(max_digits=0, decimal_places=0)', model_def)
self.assertIn(' f_int = models.IntegerField()', model_def)
self.assertIn(' f_datetime = models.DateTimeField()', model_def)
self.assertIn(' f_time = models.TimeField()', model_def)
self.assertIn(' f_float = models.FloatField()', model_def)
self.assertIn(' f_char = models.CharField(max_length=10)', model_def)
self.assertIn(' f_date = models.DateField()', model_def)
self.assertTrue(model_def.endswith(
' geom = models.PolygonField()\n'
' objects = models.GeoManager()'
))
def get_ogr_db_string():
"""
Construct the DB string that GDAL will use to inspect the database.
GDAL will create its own connection to the database, so we re-use the
connection settings from the Django test.
"""
db = connections.databases['default']
# Map from the django backend into the OGR driver name and database identifier
# http://www.gdal.org/ogr/ogr_formats.html
#
# TODO: Support Oracle (OCI).
drivers = {
'django.contrib.gis.db.backends.postgis': ('PostgreSQL', "PG:dbname='%(db_name)s'", ' '),
'django.contrib.gis.db.backends.mysql': ('MySQL', 'MYSQL:"%(db_name)s"', ','),
'django.contrib.gis.db.backends.spatialite': ('SQLite', '%(db_name)s', '')
}
drv_name, db_str, param_sep = drivers[db['ENGINE']]
# Ensure that GDAL library has driver support for the database.
try:
Driver(drv_name)
except:
return None
# SQLite/Spatialite in-memory databases
if db['NAME'] == ":memory:":
return None
# Build the params of the OGR database connection string
params = [db_str % {'db_name': db['NAME']}]
def add(key, template):
value = db.get(key, None)
# Don't add the parameter if it is not in django's settings
if value:
params.append(template % value)
add('HOST', "host='%s'")
add('PORT', "port='%s'")
add('USER', "user='%s'")
add('PASSWORD', "password='%s'")
return param_sep.join(params)
|
ncloudioj/splice
|
refs/heads/master
|
migrations/versions/171b15035012_.py
|
3
|
"""empty message
Revision ID: 171b15035012
Revises: 456611d35239
Create Date: 2016-03-02 16:14:18.962790
"""
# revision identifiers, used by Alembic.
revision = '171b15035012'
down_revision = '456611d35239'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade_():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def upgrade_stats():
### commands auto generated by Alembic - please adjust! ###
op.create_table('activity_stream_stats_daily',
sa.Column('client_id', sa.String(length=64), nullable=False),
sa.Column('tab_id', sa.String(length=64), nullable=False),
sa.Column('addon_version', sa.String(length=16), nullable=False),
sa.Column('load_reason', sa.String(length=64), nullable=False),
sa.Column('source', sa.String(length=64), nullable=False),
sa.Column('unload_reason', sa.String(length=64), nullable=False),
sa.Column('max_scroll_depth', sa.Integer(), nullable=False),
sa.Column('load_latency', sa.Integer(), nullable=False),
sa.Column('click_position', sa.Integer(), nullable=False),
sa.Column('total_bookmarks', sa.Integer(), nullable=False),
sa.Column('total_history_size', sa.Integer(), nullable=False),
sa.Column('session_duration', sa.Integer(), nullable=False),
sa.Column('receive_at', sa.DateTime(), nullable=False),
sa.Column('locale', sa.String(length=14), nullable=False),
sa.Column('country_code', sa.String(length=5), nullable=False),
sa.Column('os', sa.String(length=64), nullable=False),
sa.Column('browser', sa.String(length=64), nullable=False),
sa.Column('version', sa.String(length=64), nullable=False),
sa.Column('device', sa.String(length=64), nullable=False)
)
### end Alembic commands ###
def downgrade_stats():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('activity_stream_stats_daily')
### end Alembic commands ###
|
merfii/PythonExercises
|
refs/heads/master
|
llluiop/0005/changeResolution.py
|
38
|
#!/usr/bin/env python
from __future__ import division
import os
import Image
resolution_IP4 = (800, 600)
def ChgResolution(image):
img = Image.open(image)
ratio = max(img.size[0]/resolution_IP4[0], img.size[1]/resolution_IP4[1])
if ratio > 1:
imgNew = img.resize(( int(img.size[0]/ratio), int(img.size[1]/ratio) ))
imgNew.save(os.path.split(image)[-1])
def ImageFiles(path):
if os.path.exists(path):
for file in os.listdir(path):
            if file.lower().endswith('.jpg'):
yield os.path.join(path, file)
def main():
for image in ImageFiles("images"):
ChgResolution(image)
if __name__ == "__main__":
main()
|
peri-source/peri
|
refs/heads/master
|
peri/viz/plots.py
|
1
|
# monkey-patch the matplotlibrc for peri:
from peri.viz import base
from builtins import range
import matplotlib as mpl
import matplotlib.pylab as pl
from matplotlib import ticker
from matplotlib.gridspec import GridSpec
from matplotlib.offsetbox import AnchoredText
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1 import ImageGrid
from matplotlib.patches import Circle, Rectangle
from peri.test import analyze
from peri import util
from peri.logger import log
import numpy as np
import time
import pickle
plt = mpl.pyplot
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Matt's uncommented plots for generating figures
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
def lbl(axis, label, size=22):
""" Put a figure label in an axis """
at = AnchoredText(label, loc=2, prop=dict(size=size), frameon=True)
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.0")
axis.add_artist(at)
def summary_plot(state, samples, zlayer=None, xlayer=None, truestate=None):
def MAD(d):
return np.median(np.abs(d - np.median(d)))
s = state
t = s.get_model_image()
if zlayer is None:
zlayer = t.shape[0]//2
if xlayer is None:
xlayer = t.shape[2]//2
mu = samples.mean(axis=0)
std = samples.std(axis=0)
fig, axs = pl.subplots(3,3, figsize=(20,12))
axs[0][0].imshow(s.image[zlayer], vmin=0, vmax=1)
axs[0][1].imshow(t[zlayer], vmin=0, vmax=1)
axs[0][2].imshow((s.image-t)[zlayer], vmin=-1, vmax=1)
axs[0][0].set_xticks([])
axs[0][0].set_yticks([])
axs[0][1].set_xticks([])
axs[0][1].set_yticks([])
axs[0][2].set_xticks([])
axs[0][2].set_yticks([])
axs[1][0].imshow(s.image[:,:,xlayer], vmin=0, vmax=1)
axs[1][1].imshow(t[:,:,xlayer], vmin=0, vmax=1)
axs[1][2].imshow((s.image-t)[:,:,xlayer], vmin=-1, vmax=1)
axs[1][0].set_xticks([])
axs[1][0].set_yticks([])
axs[1][1].set_xticks([])
axs[1][1].set_yticks([])
axs[1][2].set_xticks([])
axs[1][2].set_yticks([])
try:
alpha = 0.5 if truestate is not None else 0.8
axs[2][0].hist(std[s.b_rad], bins=np.logspace(-3,0,50), label='Radii',
histtype='stepfilled', alpha=alpha, color='red')
if truestate is not None:
d = np.abs(mu - truestate)
            axs[2][0].hist(d[s.b_rad], bins=np.logspace(-3,0,50), color='red',
histtype='step', alpha=1)
axs[2][0].semilogx()
axs[2][0].hist(std[s.b_pos], bins=np.logspace(-3,0,50), label='Positions',
histtype='stepfilled', alpha=alpha, color='blue')
if truestate is not None:
d = np.abs(mu - truestate)
            axs[2][0].hist(d[s.b_pos], bins=np.logspace(-3,0,50), color='blue',
histtype='step', alpha=1)
axs[2][0].semilogx()
axs[2][0].legend(loc='upper right')
axs[2][0].set_xlabel("Estimated standard deviation")
axs[2][0].set_ylim(bottom=0)
except Exception as e:
pass
d = s.state[s.b_rad]
m = 2*1.4826 * MAD(d)
mb = d.mean()
d = d[(d > mb - m) & (d < mb +m)]
d = s.state[s.b_rad]
axs[2][1].hist(d, bins=50, histtype='stepfilled', alpha=0.8)
axs[2][1].set_xlabel("Radii")
axs[2][1].set_ylim(bottom=0)
if truestate is not None:
axs[2][1].hist(truestate[s.b_rad], bins=50, histtype='step', alpha=0.8)
axs[2][2].hist((s.image-t)[s.image_mask==1].ravel(), bins=150,
histtype='stepfilled', alpha=0.8)
axs[2][2].set_xlim(-0.35, 0.35)
axs[2][2].semilogy()
axs[2][2].set_ylim(bottom=0)
axs[2][2].set_xlabel("Pixel value differences")
pl.subplots_adjust(left=0, right=1, bottom=0, top=1, wspace=0.05, hspace=0.05)
pl.tight_layout()
def pretty_summary(state, samples, zlayer=None, xlayer=None, vertical=False):
s = state
h = np.array(samples)
slicez = zlayer or s.image.shape[0]//2
slicex = xlayer or s.image.shape[2]//2
slicer1 = np.s_[slicez,s.pad:-s.pad,s.pad:-s.pad]
slicer2 = np.s_[s.pad:-s.pad,s.pad:-s.pad,slicex]
center = (slicez, s.image.shape[1]//2, slicex)
if vertical:
fig = pl.figure(figsize=(12,24))
else:
fig = pl.figure(figsize=(24,8))
#=========================================================================
#=========================================================================
if vertical:
gs1 = ImageGrid(fig, rect=[0.02, 0.55, 0.99, 0.40], nrows_ncols=(2,3), axes_pad=0.1)
else:
gs1 = ImageGrid(fig, rect=[0.02, 0.0, 0.4, 1.00], nrows_ncols=(2,3), axes_pad=0.1)
for i,slicer in enumerate([slicer1, slicer2]):
ax_real = gs1[3*i+0]
ax_fake = gs1[3*i+1]
ax_diff = gs1[3*i+2]
diff = s.get_model_image() - s.image
ax_real.imshow(s.image[slicer], cmap=pl.cm.bone_r)
ax_real.set_xticks([])
ax_real.set_yticks([])
ax_fake.imshow(s.get_model_image()[slicer], cmap=pl.cm.bone_r)
ax_fake.set_xticks([])
ax_fake.set_yticks([])
ax_diff.imshow(diff[slicer], cmap=pl.cm.RdBu, vmin=-1.0, vmax=1.0)
ax_diff.set_xticks([])
ax_diff.set_yticks([])
if i == 0:
ax_real.set_title("Confocal image", fontsize=24)
ax_fake.set_title("Model image", fontsize=24)
ax_diff.set_title("Difference", fontsize=24)
ax_real.set_ylabel('x-y')
else:
ax_real.set_ylabel('x-z')
#=========================================================================
#=========================================================================
mu = h.mean(axis=0)
std = h.std(axis=0)
if vertical:
gs2 = GridSpec(2,2, left=0.10, bottom=0.10, right=0.99, top=0.52,
wspace=0.45, hspace=0.45)
else:
gs2 = GridSpec(2,2, left=0.50, bottom=0.12, right=0.95, top=0.95,
wspace=0.35, hspace=0.35)
ax_hist = pl.subplot(gs2[0,0])
ax_hist.hist(std[s.b_pos], bins=np.logspace(-2.5, 0, 50), alpha=0.7, label='POS', histtype='stepfilled')
ax_hist.hist(std[s.b_rad], bins=np.logspace(-2.5, 0, 50), alpha=0.7, label='RAD', histtype='stepfilled')
ax_hist.set_xlim((10**-2.4, 1))
ax_hist.semilogx()
ax_hist.set_xlabel(r"$\bar{\sigma}$")
ax_hist.set_ylabel(r"$P(\bar{\sigma})$")
ax_hist.legend(loc='upper right')
ax_diff = pl.subplot(gs2[0,1])
ax_diff.hist((s.get_model_image() - s.image)[s.image_mask==1.].ravel(), bins=1000, histtype='stepfilled', alpha=0.7)
ax_diff.semilogy()
ax_diff.set_ylabel(r"$P(\delta)$")
ax_diff.set_xlabel(r"$\delta = M_i - d_i$")
ax_diff.locator_params(axis='x', nbins=5)
pos = mu[s.b_pos].reshape(-1,3)
rad = mu[s.b_rad]
mask = analyze.trim_box(s, pos)
pos = pos[mask]
rad = rad[mask]
gx, gy = analyze.gofr(pos, rad, mu[s.b_zscale][0], resolution=5e-2,mask_start=0.5)
mask = gx < 5
gx = gx[mask]
gy = gy[mask]
ax_gofr = pl.subplot(gs2[1,0])
ax_gofr.plot(gx, gy, '-', lw=1)
ax_gofr.set_xlabel(r"$r/d$")
ax_gofr.set_ylabel(r"$g(r/d)$")
ax_gofr.locator_params(axis='both', nbins=5)
gx, gy = analyze.gofr(pos, rad, mu[s.b_zscale][0], method='surface')
mask = gx < 5
gx = gx[mask]
gy = gy[mask]
gy[gy <= 0.] = gy[gy>0].min()
ax_gofrs = pl.subplot(gs2[1,1])
ax_gofrs.plot(gx, gy, '-', lw=1)
ax_gofrs.set_xlabel(r"$r/d$")
ax_gofrs.set_ylabel(r"$g_{\rm{surface}}(r/d)$")
ax_gofrs.locator_params(axis='both', nbins=5)
ax_gofrs.grid(b=False, which='minor', axis='y')
#ax_gofrs.semilogy()
ylim = ax_gofrs.get_ylim()
ax_gofrs.set_ylim(gy.min(), ylim[1])
#gy = gy[mask] / s.state[s.b_typ].sum()/(0.64 /(1.333*np.pi*rad.mean()**3))
#gy /= gy[-1]
def scan(im, cycles=1, sleep=0.3, vmin=0, vmax=1, cmap='bone'):
pl.figure(1)
pl.show()
time.sleep(3)
for c in range(cycles):
for i, sl in enumerate(im):
log.info('{}'.format(i))
pl.clf()
pl.imshow(sl, cmap=cmap, interpolation='nearest',
origin='lower', vmin=vmin, vmax=vmax)
pl.draw()
time.sleep(sleep)
def scan_together(im, p, delay=2, vmin=0, vmax=1, cmap='bone'):
pl.figure(1)
pl.show()
time.sleep(3)
z,y,x = p.T
for i in range(len(im)):
log.info('{}'.format(i))
sl = im[i]
pl.clf()
pl.imshow(sl, cmap=cmap, interpolation='nearest', origin='lower',
vmin=vmin, vmax=vmax)
m = z.astype('int') == i
pl.plot(x[m], y[m], 'o')
pl.xlim(0, sl.shape[0])
pl.ylim(0, sl.shape[1])
pl.draw()
time.sleep(delay)
def sample_compare(N, samples, truestate, burn=0):
h = samples[burn:]
strue = truestate
mu = h.mean(axis=0)
std = h.std(axis=0)
pl.figure(figsize=(20,4))
pl.errorbar(range(len(mu)), (mu-strue), yerr=5*std/np.sqrt(h.shape[0]),
fmt='.', lw=0.15, alpha=0.5)
pl.vlines([0,3*N-0.5, 4*N-0.5], -1, 1, linestyle='dashed', lw=4, alpha=0.5)
pl.hlines(0, 0, len(mu), linestyle='dashed', lw=5, alpha=0.5)
pl.xlim(0, len(mu))
pl.ylim(-0.02, 0.02)
pl.show()
def generative_model(s,x,y,z,r, factor=1.1):
"""
Samples x,y,z,r are created by:
b = s.blocks_particle(#)
h = runner.sample_state(s, b, stepout=0.05, N=2000, doprint=True)
z,y,x,r = h.get_histogram().T
"""
pl.close('all')
slicez = int(round(z.mean()))
slicex = s.image.shape[2]//2
slicer1 = np.s_[slicez,s.pad:-s.pad,s.pad:-s.pad]
slicer2 = np.s_[s.pad:-s.pad,s.pad:-s.pad,slicex]
center = (slicez, s.image.shape[1]//2, slicex)
fig = pl.figure(figsize=(factor*13,factor*10))
#=========================================================================
#=========================================================================
gs1 = ImageGrid(fig, rect=[0.0, 0.6, 1.0, 0.35], nrows_ncols=(1,3),
axes_pad=0.1)
ax_real = gs1[0]
ax_fake = gs1[1]
ax_diff = gs1[2]
diff = s.get_model_image() - s.image
ax_real.imshow(s.image[slicer1], cmap=pl.cm.bone_r)
ax_real.set_xticks([])
ax_real.set_yticks([])
ax_real.set_title("Confocal image", fontsize=24)
ax_fake.imshow(s.get_model_image()[slicer1], cmap=pl.cm.bone_r)
ax_fake.set_xticks([])
ax_fake.set_yticks([])
ax_fake.set_title("Model image", fontsize=24)
ax_diff.imshow(diff[slicer1], cmap=pl.cm.RdBu, vmin=-0.1, vmax=0.1)
ax_diff.set_xticks([])
ax_diff.set_yticks([])
ax_diff.set_title("Difference", fontsize=24)
#=========================================================================
#=========================================================================
gs2 = ImageGrid(fig, rect=[0.1, 0.0, 0.4, 0.55], nrows_ncols=(3,2),
axes_pad=0.1)
ax_plt1 = fig.add_subplot(gs2[0])
ax_plt2 = fig.add_subplot(gs2[1])
ax_ilm1 = fig.add_subplot(gs2[2])
ax_ilm2 = fig.add_subplot(gs2[3])
ax_psf1 = fig.add_subplot(gs2[4])
ax_psf2 = fig.add_subplot(gs2[5])
c = int(z.mean()), int(y.mean())+s.pad, int(x.mean())+s.pad
if s.image.shape[0] > 2*s.image.shape[1]//3:
w = s.image.shape[2] - 2*s.pad
h = 2*w//3
else:
h = s.image.shape[0] - 2*s.pad
w = 3*h//2
w,h = w//2, h//2
xyslice = np.s_[slicez, c[1]-h:c[1]+h, c[2]-w:c[2]+w]
yzslice = np.s_[c[0]-h:c[0]+h, c[1]-w:c[1]+w, slicex]
#h = s.image.shape[2]/2 - s.image.shape[0]/2
#slicer2 = np.s_[s.pad:-s.pad, s.pad:-s.pad, slicex]
#slicer3 = np.s_[slicez, s.pad+h:-s.pad-h, s.pad:-s.pad]
ax_plt1.imshow(1-s.obj.get_field()[xyslice], cmap=pl.cm.bone_r, vmin=0, vmax=1)
ax_plt1.set_xticks([])
ax_plt1.set_yticks([])
ax_plt1.set_ylabel("Platonic", fontsize=22)
ax_plt1.set_title("x-y", fontsize=24)
ax_plt2.imshow(1-s._platonic_image()[yzslice], cmap=pl.cm.bone_r, vmin=0, vmax=1)
ax_plt2.set_xticks([])
ax_plt2.set_yticks([])
ax_plt2.set_title("y-z", fontsize=24)
ax_ilm1.imshow(s.ilm.get_field()[xyslice], cmap=pl.cm.bone_r)
ax_ilm1.set_xticks([])
ax_ilm1.set_yticks([])
ax_ilm1.set_ylabel("ILM", fontsize=22)
ax_ilm2.imshow(s.ilm.get_field()[yzslice], cmap=pl.cm.bone_r)
ax_ilm2.set_xticks([])
ax_ilm2.set_yticks([])
t = s.ilm.get_field().copy()
t *= 0
t[c] = 1
s.psf.set_tile(util.Tile(t.shape))
psf = (s.psf.execute(t)+5e-5)**0.1
ax_psf1.imshow(psf[xyslice], cmap=pl.cm.bone)
ax_psf1.set_xticks([])
ax_psf1.set_yticks([])
ax_psf1.set_ylabel("PSF", fontsize=22)
ax_psf2.imshow(psf[yzslice], cmap=pl.cm.bone)
ax_psf2.set_xticks([])
ax_psf2.set_yticks([])
#=========================================================================
#=========================================================================
ax_zoom = fig.add_axes([0.48, 0.018, 0.45, 0.52])
#s.model_to_true_image()
im = s.image[slicer1]
sh = np.array(im.shape)
cx = x.mean()
cy = y.mean()
extent = [0,sh[0],0,sh[1]]
ax_zoom.set_xticks([])
ax_zoom.set_yticks([])
ax_zoom.imshow(im, extent=extent, cmap=pl.cm.bone_r)
ax_zoom.set_xlim(cx-12, cx+12)
ax_zoom.set_ylim(cy-12, cy+12)
ax_zoom.set_title("Sampled positions", fontsize=24)
ax_zoom.hexbin(x,y, gridsize=32, mincnt=0, cmap=pl.cm.hot)
zoom1 = zoomed_inset_axes(ax_zoom, 30, loc=3)
zoom1.imshow(im, extent=extent, cmap=pl.cm.bone_r)
zoom1.set_xlim(cx-1.0/6, cx+1.0/6)
zoom1.set_ylim(cy-1.0/6, cy+1.0/6)
zoom1.hexbin(x,y,gridsize=32, mincnt=5, cmap=pl.cm.hot)
zoom1.set_xticks([])
zoom1.set_yticks([])
zoom1.hlines(cy-1.0/6 + 1.0/32, cx-1.0/6+5e-2, cx-1.0/6+5e-2+1e-1, lw=3)
zoom1.text(cx-1.0/6 + 1.0/24, cy-1.0/6+5e-2, '0.1px')
mark_inset(ax_zoom, zoom1, loc1=2, loc2=4, fc="none", ec="0.0")
#zoom2 = zoomed_inset_axes(ax_zoom, 10, loc=4)
#zoom2.imshow(im, extent=extent, cmap=pl.cm.bone_r)
#zoom2.set_xlim(cx-1.0/2, cx+1.0/2)
#zoom2.set_ylim(cy-1.0/2, cy+1.0/2)
#zoom2.hexbin(x,y,gridsize=32, mincnt=1, cmap=pl.cm.hot)
#zoom2.set_xticks([])
#zoom2.set_yticks([])
#mark_inset(zoom1, zoom2, loc1=1, loc2=3, fc="none", ec="0.5")
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Visualizations for Quality of Fits
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
def examine_unexplained_noise(state, bins=1000, xlim=(-10,10)):
"""
    Compares a state's residuals in real and Fourier space with a Gaussian.
    Note that the Fourier-space residuals of a well-fit state should always
    be Gaussian and white.
Parameters
----------
state : `peri.states.State`
The state to examine.
bins : int or sequence of scalars or str, optional
The number of bins in the histogram, as passed to numpy.histogram
Default is 1000
xlim : 2-element tuple, optional
The range, in sigma, of the x-axis on the plot. Default (-10,10).
Returns
-------
list
The axes handles for the real and Fourier space subplots.
"""
r = state.residuals
q = np.fft.fftn(r)
#Get the expected values of `sigma`:
calc_sig = lambda x: np.sqrt(np.dot(x,x) / x.size)
rh, xr = np.histogram(r.ravel() / calc_sig(r.ravel()), bins=bins,
density=True)
bigq = np.append(q.real.ravel(), q.imag.ravel())
qh, xq = np.histogram(bigq / calc_sig(q.real.ravel()), bins=bins,
density=True)
xr = 0.5*(xr[1:] + xr[:-1])
xq = 0.5*(xq[1:] + xq[:-1])
gauss = lambda t : np.exp(-t*t*0.5) / np.sqrt(2*np.pi)
plt.figure(figsize=[16,8])
axes = []
for a, (x, r, lbl) in enumerate([[xr, rh, 'Real'], [xq, qh, 'Fourier']]):
ax = plt.subplot(1,2,a+1)
ax.semilogy(x, r, label='Data')
ax.plot(x, gauss(x), label='Gauss Fit', scalex=False, scaley=False)
        ax.set_xlabel(r'Residuals value $r/\sigma$')
        ax.set_ylabel(r'Probability $P(r/\sigma)$')
ax.legend(loc='upper right')
ax.set_title('{}-Space'.format(lbl))
ax.set_xlim(xlim)
axes.append(ax)
return axes
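# A minimal, self-contained sketch of the idea behind examine_unexplained_noise
# (illustration only, not part of the original module): for a well-fit state the
# normalized residuals should follow a unit Gaussian, which can be checked on
# synthetic noise without any state object:
#
#     import numpy as np
#     noise = np.random.randn(64, 64, 64).ravel()
#     sig = np.sqrt(np.dot(noise, noise) / noise.size)
#     h, edges = np.histogram(noise / sig, bins=1000, density=True)
#     centers = 0.5 * (edges[1:] + edges[:-1])
#     gauss = np.exp(-centers**2 / 2) / np.sqrt(2 * np.pi)
#     # h and gauss should agree up to sampling noise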
def compare_data_model_residuals(s, tile, data_vmin='calc', data_vmax='calc',
res_vmin=-0.1, res_vmax=0.1, edgepts='calc', do_imshow=True,
data_cmap=plt.cm.bone, res_cmap=plt.cm.RdBu):
"""
Compare the data, model, and residuals of a state.
    Makes an image of any 2D slice of a state, split into three triangular
    regions showing the raw data, the generative model, and the residuals.
    Either plots the image using plt.imshow() or returns a np.ndarray of
    the image pixels for later use.
Parameters
----------
    s : peri.ImageState object
        The state to plot.
tile : peri.util.Tile object
The slice of the image to plot. Can be any xy, xz, or yz
projection, but it must return a valid 2D slice (the slice is
squeezed internally).
data_vmin : {Float, `calc`}, optional
vmin for the imshow for the data and generative model (shared).
Default is 'calc' = 0.5(data.min() + model.min())
data_vmax : {Float, `calc`}, optional
vmax for the imshow for the data and generative model (shared).
Default is 'calc' = 0.5(data.max() + model.max())
    res_vmin : Float, optional
        vmin for the imshow for the residuals. Default is -0.1
    res_vmax : Float, optional
        vmax for the imshow for the residuals. Default is +0.1
edgepts : {Nested list-like, Float, 'calc'}, optional.
The vertices of the triangles which determine the splitting of
the image. The vertices are at (image corner, (edge, y), and
(x,edge), where edge is the appropriate edge of the image.
edgepts[0] : (x,y) points for the upper edge
edgepts[1] : (x,y) points for the lower edge
where `x` is the coordinate along the image's 0th axis and `y`
along the images 1st axis. Default is 'calc,' which calculates
edge points by splitting the image into 3 regions of equal
area. If edgepts is a float scalar, calculates the edge points
based on a constant fraction of distance from the edge.
    do_imshow : Bool
        If True, displays the composite with plt.imshow() and returns the
        resulting handle. If False, returns the image as an [M,N,4] array.
data_cmap : matplotlib colormap instance
The colormap to use for the data and model.
res_cmap : matplotlib colormap instance
The colormap to use for the residuals.
Returns
-------
image : {matplotlib.pyplot.AxesImage, numpy.ndarray}
If `do_imshow` == True, the returned handle from imshow.
If `do_imshow` == False, an [M,N,4] np.ndarray of the image
pixels.
"""
# This could be modified to alpha the borderline... or to embiggen
# the image and slice it more finely
residuals = s.residuals[tile.slicer].squeeze()
data = s.data[tile.slicer].squeeze()
model = s.model[tile.slicer].squeeze()
if data.ndim != 2:
raise ValueError('tile does not give a 2D slice')
im = np.zeros([data.shape[0], data.shape[1], 4])
if data_vmin == 'calc':
data_vmin = 0.5*(data.min() + model.min())
if data_vmax == 'calc':
data_vmax = 0.5*(data.max() + model.max())
#1. Get masks:
upper_mask, center_mask, lower_mask = trisect_image(im.shape, edgepts)
#2. Get colorbar'd images
gm = data_cmap(center_data(model, data_vmin, data_vmax))
dt = data_cmap(center_data(data, data_vmin, data_vmax))
rs = res_cmap(center_data(residuals, res_vmin, res_vmax))
for a in range(4):
im[:,:,a][upper_mask] = rs[:,:,a][upper_mask]
im[:,:,a][center_mask] = gm[:,:,a][center_mask]
im[:,:,a][lower_mask] = dt[:,:,a][lower_mask]
if do_imshow:
return plt.imshow(im)
else:
return im
def trisect_image(imshape, edgepts='calc'):
"""
Returns 3 masks that trisect an image into 3 triangular portions.
Parameters
----------
imshape : 2-element list-like of ints
The shape of the image. Elements after the first 2 are ignored.
edgepts : Nested list-like, float, or `calc`, optional.
The vertices of the triangles which determine the splitting of
the image. The vertices are at (image corner, (edge, y), and
(x,edge), where edge is the appropriate edge of the image.
edgepts[0] : (x,y) points for the upper edge
edgepts[1] : (x,y) points for the lower edge
where `x` is the coordinate along the image's 0th axis and `y`
along the images 1st axis. Default is 'calc,' which calculates
edge points by splitting the image into 3 regions of equal
area. If edgepts is a float scalar, calculates the edge points
based on a constant fraction of distance from the edge.
Returns
-------
upper_mask : numpy.ndarray
Boolean array; True in the image's upper region.
center_mask : numpy.ndarray
Boolean array; True in the image's center region.
lower_mask : numpy.ndarray
Boolean array; True in the image's lower region.
"""
im_x, im_y = np.meshgrid(np.arange(imshape[0]), np.arange(imshape[1]),
indexing='ij')
if np.size(edgepts) == 1:
#Gets equal-area sections, at sqrt(2/3) of the sides
f = np.sqrt(2./3.) if edgepts == 'calc' else edgepts
# f = np.sqrt(2./3.)
lower_edge = (imshape[0] * (1-f), imshape[1] * f)
upper_edge = (imshape[0] * f, imshape[1] * (1-f))
else:
upper_edge, lower_edge = edgepts
#1. Get masks
lower_slope = lower_edge[1] / max(float(imshape[0] - lower_edge[0]), 1e-9)
upper_slope = (imshape[1] - upper_edge[1]) / float(upper_edge[0])
#and the edge points are the x or y intercepts
lower_intercept = -lower_slope * lower_edge[0]
upper_intercept = upper_edge[1]
lower_mask = im_y < (im_x * lower_slope + lower_intercept)
upper_mask = im_y > (im_x * upper_slope + upper_intercept)
    center_mask = ~(lower_mask | upper_mask)
return upper_mask, center_mask, lower_mask
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# More of Matt's uncommented plots for generating figures
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
def center_data(data, vmin, vmax):
"""Clips data on [vmin, vmax]; then rescales to [0,1]"""
ans = data - vmin
ans /= (vmax - vmin)
return np.clip(ans, 0, 1)
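# Illustrative sketch (not in the original source) of how trisect_image() and
# center_data() compose into the same kind of image that
# compare_data_model_residuals() builds, here with random stand-in arrays:
#
#     import numpy as np
#     import matplotlib.pyplot as plt
#     shape = (100, 150)
#     data, model, res = [np.random.rand(*shape) for _ in range(3)]
#     upper, center, lower = trisect_image(shape)
#     im = np.zeros(shape + (4,))
#     im[upper] = plt.cm.RdBu(center_data(res, -0.1, 0.1))[upper]
#     im[center] = plt.cm.bone(center_data(model, 0, 1))[center]
#     im[lower] = plt.cm.bone(center_data(data, 0, 1))[lower]
#     plt.imshow(im)
#     plt.show()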
def sim_crb_diff(std0, std1, N=10000):
""" each element of std0 should correspond with the element of std1 """
a = std0*np.random.randn(N, len(std0))
b = std1*np.random.randn(N, len(std1))
return a - b
def diag_crb_particles(state):
crbpos = []
crbrad = []
for i in np.arange(state.N)[state.state[state.b_typ]==1.]:
log.info('{}'.format(i))
bl = state.blocks_particle(i)
for b in bl[:-1]:
crbpos.append(np.sqrt(1.0/state.fisher_information(blocks=[b])))
crbrad.append(np.sqrt(1.0/state.fisher_information(blocks=[bl[-1]])))
cx, cr = np.array(crbpos).reshape(-1,3), np.squeeze(np.array(crbrad))
cx[np.isinf(cx)] = 0
cr[np.isinf(cr)] = 0
return cx, cr
def crb_compare(state0, samples0, state1, samples1, crb0=None, crb1=None,
zlayer=None, xlayer=None):
"""
To run, do:
s,h = pickle...
s1,h1 = pickle...
    e.g. /media/scratch/bamf/vacancy/vacancy_zoom-1.tif_t002.tif-featured-v2.pkl
    e.g. /media/scratch/bamf/frozen-particles/0.tif-featured-full.pkl
crb0 = diag_crb_particles(s); crb1 = diag_crb_particles(s1)
crb_compare(s,h[-25:],s1,h1[-25:], crb0, crb1)
"""
s0 = state0
s1 = state1
h0 = np.array(samples0)
h1 = np.array(samples1)
slicez = zlayer or s0.image.shape[0]//2
slicex = xlayer or s0.image.shape[2]//2
slicer1 = np.s_[slicez,s0.pad:-s0.pad,s0.pad:-s0.pad]
slicer2 = np.s_[s0.pad:-s0.pad,s0.pad:-s0.pad,slicex]
center = (slicez, s0.image.shape[1]//2, slicex)
mu0 = h0.mean(axis=0)
mu1 = h1.mean(axis=0)
std0 = h0.std(axis=0)
std1 = h1.std(axis=0)
mask0 = (s0.state[s0.b_typ]==1.) & (
analyze.trim_box(s0, mu0[s0.b_pos].reshape(-1,3)))
mask1 = (s1.state[s1.b_typ]==1.) & (
analyze.trim_box(s1, mu1[s1.b_pos].reshape(-1,3)))
active0 = np.arange(s0.N)[mask0]#s0.state[s0.b_typ]==1.]
active1 = np.arange(s1.N)[mask1]#s1.state[s1.b_typ]==1.]
pos0 = mu0[s0.b_pos].reshape(-1,3)[active0]
pos1 = mu1[s1.b_pos].reshape(-1,3)[active1]
rad0 = mu0[s0.b_rad][active0]
rad1 = mu1[s1.b_rad][active1]
link = analyze.nearest(pos0, pos1)
dpos = pos0 - pos1[link]
drad = rad0 - rad1[link]
drift = dpos.mean(axis=0)
log.info('drift {}'.format(drift))
dpos -= drift
fig = pl.figure(figsize=(24,10))
#=========================================================================
#=========================================================================
gs0 = ImageGrid(fig, rect=[0.02, 0.4, 0.4, 0.60], nrows_ncols=(2,3), axes_pad=0.1)
lbl(gs0[0], 'A')
for i,slicer in enumerate([slicer1, slicer2]):
ax_real = gs0[3*i+0]
ax_fake = gs0[3*i+1]
ax_diff = gs0[3*i+2]
diff0 = s0.get_model_image() - s0.image
diff1 = s1.get_model_image() - s1.image
a = (s0.image - s1.image)
b = (s0.get_model_image() - s1.get_model_image())
c = (diff0 - diff1)
ptp = 0.7*max([np.abs(a).max(), np.abs(b).max(), np.abs(c).max()])
cmap = pl.cm.RdBu_r
ax_real.imshow(a[slicer], cmap=cmap, vmin=-ptp, vmax=ptp)
ax_real.set_xticks([])
ax_real.set_yticks([])
ax_fake.imshow(b[slicer], cmap=cmap, vmin=-ptp, vmax=ptp)
ax_fake.set_xticks([])
ax_fake.set_yticks([])
ax_diff.imshow(c[slicer], cmap=cmap, vmin=-ptp, vmax=ptp)#cmap=pl.cm.RdBu, vmin=-1.0, vmax=1.0)
ax_diff.set_xticks([])
ax_diff.set_yticks([])
if i == 0:
ax_real.set_title(r"$\Delta$ Confocal image", fontsize=24)
ax_fake.set_title(r"$\Delta$ Model image", fontsize=24)
ax_diff.set_title(r"$\Delta$ Difference", fontsize=24)
ax_real.set_ylabel('x-y')
else:
ax_real.set_ylabel('x-z')
#=========================================================================
#=========================================================================
gs1 = GridSpec(1,3, left=0.05, bottom=0.125, right=0.42, top=0.37,
wspace=0.15, hspace=0.05)
spos0 = std0[s0.b_pos].reshape(-1,3)[active0]
spos1 = std1[s1.b_pos].reshape(-1,3)[active1]
srad0 = std0[s0.b_rad][active0]
srad1 = std1[s1.b_rad][active1]
def hist(ax, vals, bins, *args, **kwargs):
y,x = np.histogram(vals, bins=bins)
x = (x[1:] + x[:-1])/2
        y = y / float(len(vals))
ax.plot(x,y, *args, **kwargs)
def pp(ind, tarr, tsim, tcrb, var='x'):
bins = 10**np.linspace(-3, 0.0, 30)
bin2 = 10**np.linspace(-3, 0.0, 100)
bins = np.linspace(0.0, 0.2, 30)
bin2 = np.linspace(0.0, 0.2, 100)
xlim = (0.0, 0.12)
#xlim = (1e-3, 1e0)
ylim = (1e-2, 30)
ticks = ticker.FuncFormatter(lambda x, pos: '{:0.0f}'.format(np.log10(x)))
scaler = lambda x: x #np.log10(x)
ax_crb = pl.subplot(gs1[0,ind])
ax_crb.hist(scaler(np.abs(tarr)), bins=bins,
normed=True, alpha=0.7, histtype='stepfilled', lw=1)
ax_crb.hist(scaler(np.abs(tcrb)).ravel(), bins=bin2,
normed=True, alpha=1.0, histtype='step', ls='solid', lw=1.5, color='k')
ax_crb.hist(scaler(np.abs(tsim).ravel()), bins=bin2,
normed=True, alpha=1.0, histtype='step', lw=3)
ax_crb.set_xlabel(r"$\Delta = |%s(t_1) - %s(t_0)|$" % (var,var), fontsize=24)
#ax_crb.semilogx()
ax_crb.set_xlim(xlim)
#ax_crb.semilogy()
#ax_crb.set_ylim(ylim)
#ax_crb.xaxis.set_major_formatter(ticks)
ax_crb.grid(b=False, which='both', axis='both')
if ind == 0:
lbl(ax_crb, 'B')
ax_crb.set_ylabel(r"$P(\Delta)$")
else:
ax_crb.set_yticks([])
ax_crb.locator_params(axis='x', nbins=3)
f,g = 1.5, 1.95
sim = f*sim_crb_diff(spos0[:,1], spos1[:,1][link])
crb = g*sim_crb_diff(crb0[0][:,1][active0], crb1[0][:,1][active1][link])
pp(0, dpos[:,1], sim, crb, 'x')
sim = f*sim_crb_diff(spos0[:,0], spos1[:,0][link])
crb = g*sim_crb_diff(crb0[0][:,0][active0], crb1[0][:,0][active1][link])
pp(1, dpos[:,0], sim, crb, 'z')
sim = f*sim_crb_diff(srad0, srad1[link])
crb = g*sim_crb_diff(crb0[1][active0], crb1[1][active1][link])
pp(2, drad, sim, crb, 'a')
#ax_crb_r.locator_params(axis='both', nbins=3)
#gs1.tight_layout(fig)
#=========================================================================
#=========================================================================
gs2 = GridSpec(2,2, left=0.48, bottom=0.12, right=0.99, top=0.95,
wspace=0.35, hspace=0.35)
ax_hist = pl.subplot(gs2[0,0])
ax_hist.hist(std0[s0.b_pos], bins=np.logspace(-3.0, 0, 50), alpha=0.7, label='POS', histtype='stepfilled')
ax_hist.hist(std0[s0.b_rad], bins=np.logspace(-3.0, 0, 50), alpha=0.7, label='RAD', histtype='stepfilled')
ax_hist.set_xlim((10**-3.0, 1))
ax_hist.semilogx()
ax_hist.set_xlabel(r"$\bar{\sigma}$")
ax_hist.set_ylabel(r"$P(\bar{\sigma})$")
ax_hist.legend(loc='upper right')
lbl(ax_hist, 'C')
imdiff = ((s0.get_model_image() - s0.image)/s0._sigma_field)[s0.image_mask==1.].ravel()
mu = imdiff.mean()
#sig = imdiff.std()
#print mu, sig
x = np.linspace(-5,5,10000)
ax_diff = pl.subplot(gs2[0,1])
ax_diff.plot(x, 1.0/np.sqrt(2*np.pi) * np.exp(-(x-mu)**2 / 2), '-', alpha=0.7, color='k', lw=2)
ax_diff.hist(imdiff, bins=1000, histtype='step', alpha=0.7, normed=True)
ax_diff.semilogy()
ax_diff.set_ylabel(r"$P(\delta)$")
ax_diff.set_xlabel(r"$\delta = (M_i - d_i)/\sigma_i$")
ax_diff.locator_params(axis='x', nbins=5)
ax_diff.grid(b=False, which='minor', axis='y')
ax_diff.set_xlim(-5, 5)
ax_diff.set_ylim(1e-4, 1e0)
lbl(ax_diff, 'D')
pos = mu0[s0.b_pos].reshape(-1,3)
rad = mu0[s0.b_rad]
mask = analyze.trim_box(s0, pos)
pos = pos[mask]
rad = rad[mask]
gx, gy = analyze.gofr(pos, rad, mu0[s0.b_zscale][0], resolution=5e-2,mask_start=0.5)
mask = gx < 5
gx = gx[mask]
gy = gy[mask]
ax_gofr = pl.subplot(gs2[1,0])
ax_gofr.plot(gx, gy, '-', lw=1)
ax_gofr.set_xlabel(r"$r/a$")
ax_gofr.set_ylabel(r"$g(r/a)$")
ax_gofr.locator_params(axis='both', nbins=5)
#ax_gofr.semilogy()
lbl(ax_gofr, 'E')
gx, gy = analyze.gofr(pos, rad, mu0[s0.b_zscale][0], method='surface')
mask = gx < 5
gx = gx[mask]
gy = gy[mask]
gy[gy <= 0.] = gy[gy>0].min()
ax_gofrs = pl.subplot(gs2[1,1])
ax_gofrs.plot(gx, gy, '-', lw=1)
ax_gofrs.set_xlabel(r"$r/a$")
ax_gofrs.set_ylabel(r"$g_{\rm{surface}}(r/a)$")
ax_gofrs.locator_params(axis='both', nbins=5)
ax_gofrs.grid(b=False, which='minor', axis='y')
#ax_gofrs.semilogy()
lbl(ax_gofrs, 'F')
ylim = ax_gofrs.get_ylim()
ax_gofrs.set_ylim(gy.min(), ylim[1])
#gs2.tight_layout(fig)
def crb_rad(state0, samples0, state1, samples1, crb0, crb1):
s0 = state0
s1 = state1
h0 = np.array(samples0)
h1 = np.array(samples1)
mu0 = h0.mean(axis=0)
mu1 = h1.mean(axis=0)
std0 = h0.std(axis=0)
std1 = h1.std(axis=0)
mask0 = (s0.state[s0.b_typ]==1.) & (
analyze.trim_box(s0, mu0[s0.b_pos].reshape(-1,3)))
mask1 = (s1.state[s1.b_typ]==1.) & (
analyze.trim_box(s1, mu1[s1.b_pos].reshape(-1,3)))
active0 = np.arange(s0.N)[mask0]#s0.state[s0.b_typ]==1.]
active1 = np.arange(s1.N)[mask1]#s1.state[s1.b_typ]==1.]
pos0 = mu0[s0.b_pos].reshape(-1,3)[active0]
pos1 = mu1[s1.b_pos].reshape(-1,3)[active1]
rad0 = mu0[s0.b_rad][active0]
rad1 = mu1[s1.b_rad][active1]
link = analyze.nearest(pos0, pos1)
dpos = pos0 - pos1[link]
drad = rad0 - rad1[link]
spos0 = std0[s0.b_pos].reshape(-1,3)[active0]
spos1 = std1[s1.b_pos].reshape(-1,3)[active1]
srad0 = std0[s0.b_rad][active0]
srad1 = std1[s1.b_rad][active1]
def pp(ax, tarr, tsim, tcrb, var='x'):
bins = 10**np.linspace(-3, 0.0, 30)
bin2 = 10**np.linspace(-3, 0.0, 100)
bins = np.linspace(0.0, 0.1, 30)
bin2 = np.linspace(0.0, 0.1, 100)
xlim = (0, 0.1)
#xlim = (1e-3, 1e0)
ylim = (1e-2, 30)
ticks = ticker.FuncFormatter(lambda x, pos: '{:0.0f}'.format(np.log10(x)))
scaler = lambda x: x #np.log10(x)
ax_crb = ax
ax_crb.hist(scaler(np.abs(tarr)), bins=bins,
normed=True, alpha=0.7, histtype='stepfilled', lw=1, label='Radii differences')
y,x = np.histogram(np.abs(tcrb).ravel(), bins=bin2, normed=True)
x = (x[1:] + x[:-1])/2
ax_crb.step(x, y, lw=3, color='k', ls='solid', label='CRB')
y,x = np.histogram(np.abs(tsim).ravel(), bins=bin2, normed=True)
x = (x[1:] + x[:-1])/2
ax_crb.step(x, y, lw=3, ls='solid', label='Estimated Error')
ax_crb.set_xlabel(r"$\Delta = |%s(t_1) - %s(t_0)|$" % (var,var), fontsize=28)
ax_crb.set_ylabel(r"$P(\Delta)$", fontsize=28)
ax_crb.set_xlim(xlim)
ax_crb.grid(b=False, which='both', axis='both')
ax_crb.legend(loc='best', ncol=1)
fig = pl.figure()
ax = pl.gca()
f,g = 1.5, 1.85
sim = f*sim_crb_diff(srad0, srad1[link])
crb = g*sim_crb_diff(crb0[1][active0], crb1[1][active1][link])
pp(ax, drad, sim, crb, 'a')
def twoslice(field, center=None, size=6.0, cmap='bone_r', vmin=0, vmax=1,
orientation='vertical', figpad=1.09, off=0.01):
"""
Plot two parts of the ortho view, the two sections given by ``orientation``.
"""
center = center or [i//2 for i in field.shape]
slices = []
for i,c in enumerate(center):
blank = [np.s_[:]]*len(center)
blank[i] = c
slices.append(tuple(blank))
z,y,x = [float(i) for i in field.shape]
w = float(x + z)
h = float(y + z)
def show(field, ax, slicer, transpose=False):
tmp = field[slicer] if not transpose else field[slicer].T
ax.imshow(
tmp, cmap=cmap, interpolation='nearest',
vmin=vmin, vmax=vmax
)
ax.set_xticks([])
ax.set_yticks([])
ax.grid('off')
if orientation.startswith('v'):
# rect = l,b,w,h
log.info('{} {} {} {} {} {}'.format(x, y, z, w, h, x/h))
r = x/h
q = y/h
f = 1 / (1 + 3*off)
fig = pl.figure(figsize=(size*r, size*f))
ax1 = fig.add_axes((off, f*(1-q)+2*off, f, f*q))
ax2 = fig.add_axes((off, off, f, f*(1-q)))
show(field, ax1, slices[0])
show(field, ax2, slices[1])
else:
# rect = l,b,w,h
r = y/w
q = x/w
f = 1 / (1 + 3*off)
fig = pl.figure(figsize=(size*f, size*r))
ax1 = fig.add_axes((off, off, f*q, f))
ax2 = fig.add_axes((2*off+f*q, off, f*(1-q), f))
show(field, ax1, slices[0])
show(field, ax2, slices[2], transpose=True)
return fig, ax1, ax2
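# Hypothetical example (the 3D array is a stand-in, not from the original
# source): plot two orthogonal sections of a random field,
#
#     field = np.random.rand(32, 64, 64)
#     fig, ax1, ax2 = twoslice(field, orientation='vertical')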
def twoslice_overlay(s, zlayer=None, xlayer=None, size=6.0,
cmap='bone_r', vmin=0, vmax=1, showimage=False, solid=False, pad=None):
pad = pad or s.pad
trim = (np.s_[pad:-pad],)*3
field = s.image[trim]
slicez = zlayer or field.shape[0]//2
slicex = xlayer or field.shape[2]//2
slicer1 = np.s_[slicez,:,:]
slicer2 = np.s_[:,:,slicex]
sh = field.shape
q = float(sh[1]) / (sh[0]+sh[1])
r = float(sh[1] + sh[0]) / sh[1]
fig = pl.figure(figsize=(size, size*r*1.05))
ax1 = fig.add_axes((0, 1-q, 1, q))
ax2 = fig.add_axes((0, 0, 1, 1-q))
mu = s.state.copy()
active = np.arange(s.N)[s.state[s.b_typ]==1.]
pos = mu[s.b_pos].reshape(-1,3)[active]
rad = mu[s.b_rad][active]
def show(ax, slicer):
talpha = 0.0 if not showimage else 1.0
ax.imshow(field[slicer], cmap=cmap, interpolation='nearest', vmin=vmin, vmax=vmax, alpha=talpha)
ax.set_xticks([])
ax.set_yticks([])
ax.set_axis_bgcolor('black')
ax.grid('off')
def circles(ax, layer, axis):
# get the index of the particles we want to include
talpha = 1.0 if not showimage else 0.8
cedge = 'white' if not showimage else 'black'
cface = 'white' if solid else 'none'
particles = np.arange(len(pos))[np.abs(pos[:,axis] - layer) < rad]
# for each of these particles display the effective radius
# in the proper place
for i in particles:
p = pos[i].copy()
r = 2*np.sqrt(rad[i]**2 - (p[axis] - layer)**2)
if axis==0:
c = Circle((p[2]-pad,p[1]-pad), radius=r/2, fc=cface, ec=cedge, alpha=talpha)
if axis==2:
c = Circle((p[1]-pad,p[0]-pad), radius=r/2, fc=cface, ec=cedge, alpha=talpha)
ax.add_patch(c)
show(ax1, slicer1)
show(ax2, slicer2)
circles(ax1, slicez+pad, 0)
circles(ax2, slicex+pad, 2)
def deconstruction(s):
s.model_to_true_image()
    # twoslice() takes no ``pad`` argument, so trim the padding off each
    # field before plotting it
    trim = (np.s_[s.pad:-s.pad],)*3
    twoslice(s.image[trim])
    twoslice(s.get_model_image()[trim])
    twoslice((s.ilm.get_field() - s.offset*s.obj.get_field())[trim], vmin=None, vmax=None)
    twoslice((1 - s.offset*s.obj.get_field())[trim])
twoslice_overlay(s)
def circles(st, layer, axis, ax=None, talpha=1.0, cedge='white', cface='white'):
"""
Plots a set of circles corresponding to a slice through the platonic
structure. Copied from twoslice_overlay with comments, standaloneness.
    Inputs
    ------
    st : peri.ImageState; its particle positions and radii are plotted
    layer : Which layer of the slice to use.
    axis : The axis of the image to slice along: 0, 1, or 2.
    ax : plt.axis instance (a new figure is created if None)
    talpha : Alpha of the circles
    cedge : edge color
    cface : face color
"""
pos = st.obj_get_positions()
rad = st.obj_get_radii()
shape = st.ishape.shape.tolist()
shape.pop(axis) #shape is now the shape of the image
if ax is None:
fig = plt.figure()
axisbg = 'white' if cface == 'black' else 'black'
sx, sy = ((1,shape[1]/float(shape[0])) if shape[0] > shape[1] else
(shape[0]/float(shape[1]), 1))
ax = fig.add_axes((0,0, sx, sy), axisbg=axisbg)
# get the index of the particles we want to include
particles = np.arange(len(pos))[np.abs(pos[:,axis] - layer) < rad]
# for each of these particles display the effective radius
# in the proper place
scale = 1.0 #np.max(shape).astype('float')
for i in particles:
p = pos[i].copy()
r = 2*np.sqrt(rad[i]**2 - (p[axis] - layer)**2)
#CIRCLE IS IN FIGURE COORDINATES!!!
if axis==0:
ix = 1; iy = 2
elif axis == 1:
ix = 0; iy = 2
elif axis==2:
ix = 0; iy = 1
c = Circle((p[ix]/scale, p[iy]/scale), radius=r/2/scale, fc=cface,
ec=cedge, alpha=talpha)
ax.add_patch(c)
# plt.axis([0,1,0,1])
plt.axis('equal') #circles not ellipses
return ax
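# Hypothetical usage sketch (the state ``st`` is assumed, as in the docstring
# above): add the particle outlines intersecting z-layer 10 to existing axes,
#
#     fig, ax = plt.subplots()
#     circles(st, layer=10, axis=0, ax=ax, cface='none', cedge='white')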
|
super7ramp/vertaal
|
refs/heads/master
|
projects/views.py
|
2
|
# -*- coding: utf-8 -*-
"""Copyright (c) 2012 Sergio Gabriel Teves
All rights reserved.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import threading
import os
from django.utils.translation import ugettext as _
from django.contrib.auth.decorators import login_required, permission_required
from django.core.urlresolvers import reverse
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from common.decorators import permission_required_with_403
from common.middleware.exceptions import Http403
from projects.forms import ProjectForm
from components.models import Component
from releases.models import Release
from languages.models import Language
from models import Project
from versioncontrol.manager import Manager, POTUpdater
from versioncontrol.models import BuildCache
from common.utils import lock
from django.contrib.auth.models import User
from projects.util import get_build_log_file, check_project
from files.models import POTFile, POFile
from django.contrib import messages
from djangopm.utils import send_pm
from django.views.generic.detail import DetailView
import logging
logger = logging.getLogger('vertaal.projects')
@login_required
def project_create_update(request, slug=None):
res = {}
if request.method == 'POST':
if slug:
p = get_object_or_404(Project, slug=slug)
res['project']=p
form = ProjectForm(request.POST, instance=p)
else:
form = ProjectForm(request.POST)
if form.is_valid():
if not unicode(request.user.id) in form.cleaned_data["maintainers"]:
form.cleaned_data["maintainers"].append(unicode(request.user.id))
p = form.save(True)
if slug:
messages.success(request, _('Updated project %s') % p.name)
else:
messages.success(request, _('Created project %s') % p.name)
return HttpResponseRedirect(p.get_absolute_url())
messages.warning(request, _('Please, correct the following errors and try again.'))
res['form'] = form
else:
if slug:
p = get_object_or_404(Project, slug=slug)
# CHECK IF THE USER CAN EDIT THIS PROJECT
if not p.is_maintainer(request.user):
raise Http403
res['project']=p
form = ProjectForm(instance=p)
else:
if not request.user.has_perm('projects.can_add'):
raise Http403
form = ProjectForm(initial={'maintainers': [ request.user.pk ]})
res['form'] = form
return render_to_response("projects/project_form.html",
res,
context_instance = RequestContext(request))
@permission_required_with_403('projects.can_delete')
def project_delete(request, slug=None):
'''
projects can be deleted by staff members only
'''
p = get_object_or_404(Project, slug=slug)
p.delete()
messages.success(request, _('Removed project %s') % p.name)
return HttpResponseRedirect(reverse('project_list'))
@login_required
def project_build(request, release_slug):
release = get_object_or_404(Release, slug=release_slug)
if not release.project.is_maintainer(request.user):
raise Http403
back = HttpResponseRedirect(reverse('release_detail',kwargs={'slug': release_slug}))
try:
b = BuildCache.objects.get_locked(release)
if b.count()>0:
messages.warning(request, message=_('Build is already running.'))
return back
except:
pass
#__build_repo(release.project, release, component, request.user, b)
t = threading.Thread(target=__build_repo,
name="build_%s" % release.slug,
kwargs={'project': release.project,
'release': release,
'user': request.user})
t.start()
messages.info(request, message=_('Build started.'))
return back
def __build_repo(project, release, user):
logfile = get_build_log_file(project.slug, release.slug)
if os.path.exists(logfile):
try:
os.unlink(logfile)
except Exception, u:
logger.error("Unable to remove current logfile [%s]." % logfile)
# # create a lock for console
# lockfile = logfile + ".lock"
# open(lockfile).close()
#
# the components will remain locked for all the process
locks = []
try:
for component in project.components.all():
canbuild = True
rev = None
try:
b = BuildCache.objects.get(component=component,
release=release)
if b.is_locked:
canbuild = False
send_pm(user, subject=_('Component %s is locked at this moment.') % component.name)
logger.debug('Component %s is locked.' % component.name)
else:
b.lock()
except BuildCache.DoesNotExist:
b = BuildCache.objects.create(component=component,
release=release)
b.lock()
except Exception, e:
send_pm(user, _("Build error"), str(e))
logger.error(e)
raise
if canbuild:
locks.append(b)
for team in project.teams.all():
if (POFile.objects.filter(release=release,
component=component,
language=team.language).count()) == 0:
logger.debug("New language %s" % team.language)
new_team = True
else:
logger.debug("Rebuild language %s" % team.language)
new_team = False
repo = Manager(project, release, component, team.language, logfile)
try:
b.setrev(repo.build())
# send_pm(user, subject=_('Build of %(component)s on release %(release)s for team %(team)s completed.')
# % {'component': component.name,
# 'release': release.name,
# 'team': team.language.name})
except lock.LockException, l:
repo.notify_callback(l)
logger.error(l)
send_pm(user, _("Project locked"), message=_('Project locked for %(team)s. Try again in a few minutes. If the problem persist contact the administrator.') % {
'team': team.language.name})
except Exception, e:
repo.notify_callback(e)
logger.error(e)
send_pm(user, _('Error building team %(team)s.') % {'team': team.language.name}, _('Reason: %(error)s') % {'error': e.args} )
finally:
del repo
send_pm(user, _('Finished build cache for component %(component)s.') %
{'component': component.name})
if component.potlocation:
repo = POTUpdater(project, release, component, logfile)
try:
repo.build()
if new_team:
potfiles = POTFile.objects.filter(release=release,
component=component)
for potfile in potfiles:
repo.add_pofiles(potfile)
except lock.LockException, l:
repo.notify_callback(l)
logger.error(l)
send_pm(user, _("Project locked"), message=_('Project %s locked. Try again in a few minutes. If the problem persist contact the administrator.') % project.name)
except Exception, e:
repo.notify_callback(e)
logger.error(e)
send_pm(user, _("Build error"), message=_('Build error. Reason: %(error)s') % {
'error': e.args})
finally:
del repo
except Exception, e:
logger.error(e)
finally:
# unlock all components
for l in locks:
l.unlock()
try:
del repo
except:
pass
message=_('Finished build cache for release %(release)s.') % {'release': release.name}
send_pm(user, "Build complete", message=message)
user.email_user(_('Build complete'), message)
def project_list(request):
plist = Project.objects.by_authorized(request.user)
return render_to_response('projects/project_list.html',
{'project_list': plist},
context_instance = RequestContext(request))
def project_detail(request, slug):
p = get_object_or_404(Project, slug=slug)
if not p.enabled:
if p.is_maintainer(request.user):
messages.info(request, message=_('This project is disabled. Only maintainers can view it.'))
else:
raise Http403
data = {'project': p}
if p.is_maintainer(request.user):
data['languages'] = Language.objects.get_unused(p)
return render_to_response('projects/project_detail.html',
data,
context_instance = RequestContext(request))
|
mattrobenolt/django
|
refs/heads/master
|
tests/gis_tests/geo3d/views.py
|
6027
|
# Create your views here.
|
s0enke/boto
|
refs/heads/develop
|
tests/integration/datapipeline/test_layer1.py
|
136
|
#!/usr/bin/env python
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import time
from tests.unit import unittest
from boto.datapipeline import layer1
class TestDataPipeline(unittest.TestCase):
datapipeline = True
def setUp(self):
self.connection = layer1.DataPipelineConnection()
self.sample_pipeline_objects = [
{'fields': [
{'key': 'workerGroup', 'stringValue': 'MyworkerGroup'}],
'id': 'Default',
'name': 'Default'},
{'fields': [
{'key': 'startDateTime', 'stringValue': '2012-09-25T17:00:00'},
{'key': 'type', 'stringValue': 'Schedule'},
{'key': 'period', 'stringValue': '1 hour'},
{'key': 'endDateTime', 'stringValue': '2012-09-25T18:00:00'}],
'id': 'Schedule',
'name': 'Schedule'},
{'fields': [
{'key': 'type', 'stringValue': 'ShellCommandActivity'},
{'key': 'command', 'stringValue': 'echo hello'},
{'key': 'parent', 'refValue': 'Default'},
{'key': 'schedule', 'refValue': 'Schedule'}],
'id': 'SayHello',
'name': 'SayHello'}
]
self.connection.auth_service_name = 'datapipeline'
def create_pipeline(self, name, unique_id, description=None):
response = self.connection.create_pipeline(name, unique_id,
description)
pipeline_id = response['pipelineId']
self.addCleanup(self.connection.delete_pipeline, pipeline_id)
return pipeline_id
def get_pipeline_state(self, pipeline_id):
response = self.connection.describe_pipelines([pipeline_id])
for attr in response['pipelineDescriptionList'][0]['fields']:
if attr['key'] == '@pipelineState':
return attr['stringValue']
def test_can_create_and_delete_a_pipeline(self):
response = self.connection.create_pipeline('name', 'unique_id',
'description')
self.connection.delete_pipeline(response['pipelineId'])
def test_validate_pipeline(self):
pipeline_id = self.create_pipeline('name2', 'unique_id2')
self.connection.validate_pipeline_definition(
self.sample_pipeline_objects, pipeline_id)
def test_put_pipeline_definition(self):
pipeline_id = self.create_pipeline('name3', 'unique_id3')
self.connection.put_pipeline_definition(self.sample_pipeline_objects,
pipeline_id)
# We should now be able to get the pipeline definition and see
# that it matches what we put.
response = self.connection.get_pipeline_definition(pipeline_id)
objects = response['pipelineObjects']
self.assertEqual(len(objects), 3)
self.assertEqual(objects[0]['id'], 'Default')
self.assertEqual(objects[0]['name'], 'Default')
self.assertEqual(objects[0]['fields'],
[{'key': 'workerGroup', 'stringValue': 'MyworkerGroup'}])
def test_activate_pipeline(self):
pipeline_id = self.create_pipeline('name4', 'unique_id4')
self.connection.put_pipeline_definition(self.sample_pipeline_objects,
pipeline_id)
self.connection.activate_pipeline(pipeline_id)
attempts = 0
state = self.get_pipeline_state(pipeline_id)
while state != 'SCHEDULED' and attempts < 10:
time.sleep(10)
attempts += 1
state = self.get_pipeline_state(pipeline_id)
if attempts > 10:
self.fail("Pipeline did not become scheduled "
"after 10 attempts.")
objects = self.connection.describe_objects(['Default'], pipeline_id)
field = objects['pipelineObjects'][0]['fields'][0]
self.assertDictEqual(field, {'stringValue': 'COMPONENT', 'key': '@sphere'})
def test_list_pipelines(self):
pipeline_id = self.create_pipeline('name5', 'unique_id5')
pipeline_id_list = [p['id'] for p in
self.connection.list_pipelines()['pipelineIdList']]
self.assertTrue(pipeline_id in pipeline_id_list)
if __name__ == '__main__':
unittest.main()
|
dliessi/frescobaldi
|
refs/heads/master
|
frescobaldi_app/scorewiz/parts/brass.py
|
3
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
Brass part types.
"""
from . import _base
from . import register
class BrassPart(_base.SingleVoicePart):
"""Base class for brass types."""
class HornF(BrassPart):
@staticmethod
def title(_=_base.translate):
return _("Horn in F")
@staticmethod
def short(_=_base.translate):
return _("abbreviation for Horn in F", "Hn.F.")
midiInstrument = 'french horn'
transposition = (-1, 3, 0)
class TrumpetC(BrassPart):
@staticmethod
def title(_=_base.translate):
return _("Trumpet in C")
@staticmethod
def short(_=_base.translate):
return _("abbreviation for Trumpet in C", "Tr.C.")
midiInstrument = 'trumpet'
class TrumpetBb(TrumpetC):
@staticmethod
def title(_=_base.translate):
return _("Trumpet in Bb")
@staticmethod
def short(_=_base.translate):
return _("abbreviation for Trumpet in Bb", "Tr.Bb.")
transposition = (-1, 6, -1)
class CornetBb(BrassPart):
@staticmethod
def title(_=_base.translate):
return _("Cornet in Bb")
@staticmethod
def short(_=_base.translate):
return _("abbreviation for Cornet in Bb", "Crt.Bb.")
transposition = (-1, 6, -1)
class Flugelhorn(BrassPart):
@staticmethod
def title(_=_base.translate):
return _("Flugelhorn")
@staticmethod
def short(_=_base.translate):
return _("abbreviation for Flugelhorn", "Fgh.")
midiInstrument = 'trumpet'
class Mellophone(BrassPart):
@staticmethod
def title(_=_base.translate):
return _("Mellophone")
@staticmethod
def short(_=_base.translate):
return _("abbreviation for Mellophone", "Mph.")
midiInstrument = 'french horn'
transposition = (-1, 3, 0)
class Trombone(BrassPart):
@staticmethod
def title(_=_base.translate):
return _("Trombone")
@staticmethod
def short(_=_base.translate):
return _("abbreviation for Trombone", "Trb.")
midiInstrument = 'trombone'
clef = 'bass'
octave = -1
class Baritone(BrassPart):
@staticmethod
def title(_=_base.translate):
return _("Baritone")
@staticmethod
def short(_=_base.translate):
return _("abbreviation for Baritone", "Bar.")
midiInstrument = 'trombone'
clef = 'bass'
octave = -1
class Euphonium(BrassPart):
@staticmethod
def title(_=_base.translate):
return _("Euphonium")
@staticmethod
def short(_=_base.translate):
return _("abbreviation for Euphonium", "Euph.")
midiInstrument = 'trombone'
clef = 'bass'
octave = -1
class Tuba(BrassPart):
@staticmethod
def title(_=_base.translate):
return _("Tuba")
@staticmethod
def short(_=_base.translate):
return _("abbreviation for Tuba", "Tb.")
midiInstrument = 'tuba'
transposition = (-2, 6, -1)
class BassTuba(BrassPart):
@staticmethod
def title(_=_base.translate):
return _("Bass Tuba")
@staticmethod
def short(_=_base.translate):
return _("abbreviation for Bass Tuba", "B.Tb.")
midiInstrument = 'tuba'
clef = 'bass'
octave = -1
transposition = (-2, 0, 0)
register(
lambda: _("Brass"),
[
HornF,
TrumpetC,
TrumpetBb,
CornetBb,
Flugelhorn,
Mellophone,
Trombone,
Baritone,
Euphonium,
Tuba,
BassTuba,
])
|
aajanki/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/youjizz.py
|
148
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class YouJizzIE(InfoExtractor):
_VALID_URL = r'https?://(?:\w+\.)?youjizz\.com/videos/[^/#?]+-(?P<id>[0-9]+)\.html(?:$|[?#])'
_TEST = {
'url': 'http://www.youjizz.com/videos/zeichentrick-1-2189178.html',
'md5': '07e15fa469ba384c7693fd246905547c',
'info_dict': {
'id': '2189178',
'ext': 'flv',
"title": "Zeichentrick 1",
"age_limit": 18,
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
age_limit = self._rta_search(webpage)
video_title = self._html_search_regex(
r'<title>\s*(.*)\s*</title>', webpage, 'title')
embed_page_url = self._search_regex(
r'(https?://www.youjizz.com/videos/embed/[0-9]+)',
webpage, 'embed page')
webpage = self._download_webpage(
embed_page_url, video_id, note='downloading embed page')
# Get the video URL
m_playlist = re.search(r'so.addVariable\("playlist", ?"(?P<playlist>.+?)"\);', webpage)
if m_playlist is not None:
playlist_url = m_playlist.group('playlist')
playlist_page = self._download_webpage(playlist_url, video_id,
'Downloading playlist page')
m_levels = list(re.finditer(r'<level bitrate="(\d+?)" file="(.*?)"', playlist_page))
if len(m_levels) == 0:
raise ExtractorError('Unable to extract video url')
videos = [(int(m.group(1)), m.group(2)) for m in m_levels]
(_, video_url) = sorted(videos)[0]
video_url = video_url.replace('%252F', '%2F')
else:
video_url = self._search_regex(r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);',
webpage, 'video URL')
return {
'id': video_id,
'url': video_url,
'title': video_title,
'ext': 'flv',
'format': 'flv',
'player_url': embed_page_url,
'age_limit': age_limit,
}
|
cherez/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/rtlnl.py
|
102
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_duration,
)
class RtlNlIE(InfoExtractor):
IE_NAME = 'rtl.nl'
IE_DESC = 'rtl.nl and rtlxl.nl'
_VALID_URL = r'''(?x)
https?://(?:www\.)?
(?:
rtlxl\.nl/\#!/[^/]+/|
rtl\.nl/system/videoplayer/(?:[^/]+/)+(?:video_)?embed\.html\b.+?\buuid=
)
(?P<id>[0-9a-f-]+)'''
_TESTS = [{
'url': 'http://www.rtlxl.nl/#!/rtl-nieuws-132237/6e4203a6-0a5e-3596-8424-c599a59e0677',
'md5': 'cc16baa36a6c169391f0764fa6b16654',
'info_dict': {
'id': '6e4203a6-0a5e-3596-8424-c599a59e0677',
'ext': 'mp4',
'title': 'RTL Nieuws - Laat',
'description': 'md5:6b61f66510c8889923b11f2778c72dc5',
'timestamp': 1408051800,
'upload_date': '20140814',
'duration': 576.880,
},
}, {
'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed/autoplay=false',
'md5': 'dea7474214af1271d91ef332fb8be7ea',
'info_dict': {
'id': '84ae5571-ac25-4225-ae0c-ef8d9efb2aed',
'ext': 'mp4',
'timestamp': 1424039400,
'title': 'RTL Nieuws - Nieuwe beelden Kopenhagen: chaos direct na aanslag',
'thumbnail': 're:^https?://screenshots\.rtl\.nl/system/thumb/sz=[0-9]+x[0-9]+/uuid=84ae5571-ac25-4225-ae0c-ef8d9efb2aed$',
'upload_date': '20150215',
'description': 'Er zijn nieuwe beelden vrijgegeven die vlak na de aanslag in Kopenhagen zijn gemaakt. Op de video is goed te zien hoe omstanders zich bekommeren om één van de slachtoffers, terwijl de eerste agenten ter plaatse komen.',
}
}, {
# empty synopsis and missing episodes (see https://github.com/rg3/youtube-dl/issues/6275)
'url': 'http://www.rtl.nl/system/videoplayer/derden/rtlnieuws/video_embed.html#uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a/autoplay=false',
'info_dict': {
'id': 'f536aac0-1dc3-4314-920e-3bd1c5b3811a',
'ext': 'mp4',
'title': 'RTL Nieuws - Meer beelden van overval juwelier',
'thumbnail': 're:^https?://screenshots\.rtl\.nl/system/thumb/sz=[0-9]+x[0-9]+/uuid=f536aac0-1dc3-4314-920e-3bd1c5b3811a$',
'timestamp': 1437233400,
'upload_date': '20150718',
'duration': 30.474,
},
'params': {
'skip_download': True,
},
}, {
# encrypted m3u8 streams, georestricted
'url': 'http://www.rtlxl.nl/#!/afl-2-257632/52a74543-c504-4cde-8aa8-ec66fe8d68a7',
'only_matching': True,
}, {
'url': 'http://www.rtl.nl/system/videoplayer/derden/embed.html#!/uuid=bb0353b0-d6a4-1dad-90e9-18fe75b8d1f0',
'only_matching': True,
}]
def _real_extract(self, url):
uuid = self._match_id(url)
info = self._download_json(
'http://www.rtl.nl/system/s4m/vfd/version=2/uuid=%s/fmt=adaptive/' % uuid,
uuid)
material = info['material'][0]
title = info['abstracts'][0]['name']
subtitle = material.get('title')
if subtitle:
title += ' - %s' % subtitle
description = material.get('synopsis')
meta = info.get('meta', {})
# m3u8 streams are encrypted and may not be handled properly by older ffmpeg/avconv.
# To workaround this previously adaptive -> flash trick was used to obtain
# unencrypted m3u8 streams (see https://github.com/rg3/youtube-dl/issues/4118)
# and bypass georestrictions as well.
# Currently, unencrypted m3u8 playlists are (intentionally?) invalid and therefore
# unusable albeit can be fixed by simple string replacement (see
# https://github.com/rg3/youtube-dl/pull/6337)
# Since recent ffmpeg and avconv handle encrypted streams just fine encrypted
# streams are used now.
videopath = material['videopath']
m3u8_url = meta.get('videohost', 'http://manifest.us.rtl.nl') + videopath
formats = self._extract_m3u8_formats(m3u8_url, uuid, ext='mp4')
video_urlpart = videopath.split('/adaptive/')[1][:-5]
PG_URL_TEMPLATE = 'http://pg.us.rtl.nl/rtlxl/network/%s/progressive/%s.mp4'
formats.extend([
{
'url': PG_URL_TEMPLATE % ('a2m', video_urlpart),
'format_id': 'pg-sd',
},
{
'url': PG_URL_TEMPLATE % ('a3m', video_urlpart),
'format_id': 'pg-hd',
'quality': 0,
}
])
self._sort_formats(formats)
thumbnails = []
        for p in ('poster_base_url', 'thumb_base_url'):
if not meta.get(p):
continue
thumbnails.append({
'url': self._proto_relative_url(meta[p] + uuid),
'width': int_or_none(self._search_regex(
r'/sz=([0-9]+)', meta[p], 'thumbnail width', fatal=False)),
'height': int_or_none(self._search_regex(
r'/sz=[0-9]+x([0-9]+)',
meta[p], 'thumbnail height', fatal=False))
})
return {
'id': uuid,
'title': title,
'formats': formats,
'timestamp': material['original_date'],
'description': description,
'duration': parse_duration(material.get('duration')),
'thumbnails': thumbnails,
}
|
PennyDreadfulMTG/Penny-Dreadful-Discord-Bot
|
refs/heads/master
|
decksite/data/models/person.py
|
1
|
from typing import List
from decksite.data import deck
from shared.container import Container
class Person(Container):
__decks = None
@property
def decks(self) -> List[deck.Deck]:
if self.__decks is None:
self.__decks = deck.load_decks(f'd.person_id = {self.id}', season_id=self.season_id)
return self.__decks
|
Thraxis/SickRage
|
refs/heads/master
|
lib/unidecode/x04d.py
|
252
|
data = (
'[?] ', # 0x00
'[?] ', # 0x01
'[?] ', # 0x02
'[?] ', # 0x03
'[?] ', # 0x04
'[?] ', # 0x05
'[?] ', # 0x06
'[?] ', # 0x07
'[?] ', # 0x08
'[?] ', # 0x09
'[?] ', # 0x0a
'[?] ', # 0x0b
'[?] ', # 0x0c
'[?] ', # 0x0d
'[?] ', # 0x0e
'[?] ', # 0x0f
'[?] ', # 0x10
'[?] ', # 0x11
'[?] ', # 0x12
'[?] ', # 0x13
'[?] ', # 0x14
'[?] ', # 0x15
'[?] ', # 0x16
'[?] ', # 0x17
'[?] ', # 0x18
'[?] ', # 0x19
'[?] ', # 0x1a
'[?] ', # 0x1b
'[?] ', # 0x1c
'[?] ', # 0x1d
'[?] ', # 0x1e
'[?] ', # 0x1f
'[?] ', # 0x20
'[?] ', # 0x21
'[?] ', # 0x22
'[?] ', # 0x23
'[?] ', # 0x24
'[?] ', # 0x25
'[?] ', # 0x26
'[?] ', # 0x27
'[?] ', # 0x28
'[?] ', # 0x29
'[?] ', # 0x2a
'[?] ', # 0x2b
'[?] ', # 0x2c
'[?] ', # 0x2d
'[?] ', # 0x2e
'[?] ', # 0x2f
'[?] ', # 0x30
'[?] ', # 0x31
'[?] ', # 0x32
'[?] ', # 0x33
'[?] ', # 0x34
'[?] ', # 0x35
'[?] ', # 0x36
'[?] ', # 0x37
'[?] ', # 0x38
'[?] ', # 0x39
'[?] ', # 0x3a
'[?] ', # 0x3b
'[?] ', # 0x3c
'[?] ', # 0x3d
'[?] ', # 0x3e
'[?] ', # 0x3f
'[?] ', # 0x40
'[?] ', # 0x41
'[?] ', # 0x42
'[?] ', # 0x43
'[?] ', # 0x44
'[?] ', # 0x45
'[?] ', # 0x46
'[?] ', # 0x47
'[?] ', # 0x48
'[?] ', # 0x49
'[?] ', # 0x4a
'[?] ', # 0x4b
'[?] ', # 0x4c
'[?] ', # 0x4d
'[?] ', # 0x4e
'[?] ', # 0x4f
'[?] ', # 0x50
'[?] ', # 0x51
'[?] ', # 0x52
'[?] ', # 0x53
'[?] ', # 0x54
'[?] ', # 0x55
'[?] ', # 0x56
'[?] ', # 0x57
'[?] ', # 0x58
'[?] ', # 0x59
'[?] ', # 0x5a
'[?] ', # 0x5b
'[?] ', # 0x5c
'[?] ', # 0x5d
'[?] ', # 0x5e
'[?] ', # 0x5f
'[?] ', # 0x60
'[?] ', # 0x61
'[?] ', # 0x62
'[?] ', # 0x63
'[?] ', # 0x64
'[?] ', # 0x65
'[?] ', # 0x66
'[?] ', # 0x67
'[?] ', # 0x68
'[?] ', # 0x69
'[?] ', # 0x6a
'[?] ', # 0x6b
'[?] ', # 0x6c
'[?] ', # 0x6d
'[?] ', # 0x6e
'[?] ', # 0x6f
'[?] ', # 0x70
'[?] ', # 0x71
'[?] ', # 0x72
'[?] ', # 0x73
'[?] ', # 0x74
'[?] ', # 0x75
'[?] ', # 0x76
'[?] ', # 0x77
'[?] ', # 0x78
'[?] ', # 0x79
'[?] ', # 0x7a
'[?] ', # 0x7b
'[?] ', # 0x7c
'[?] ', # 0x7d
'[?] ', # 0x7e
'[?] ', # 0x7f
'[?] ', # 0x80
'[?] ', # 0x81
'[?] ', # 0x82
'[?] ', # 0x83
'[?] ', # 0x84
'[?] ', # 0x85
'[?] ', # 0x86
'[?] ', # 0x87
'[?] ', # 0x88
'[?] ', # 0x89
'[?] ', # 0x8a
'[?] ', # 0x8b
'[?] ', # 0x8c
'[?] ', # 0x8d
'[?] ', # 0x8e
'[?] ', # 0x8f
'[?] ', # 0x90
'[?] ', # 0x91
'[?] ', # 0x92
'[?] ', # 0x93
'[?] ', # 0x94
'[?] ', # 0x95
'[?] ', # 0x96
'[?] ', # 0x97
'[?] ', # 0x98
'[?] ', # 0x99
'[?] ', # 0x9a
'[?] ', # 0x9b
'[?] ', # 0x9c
'[?] ', # 0x9d
'[?] ', # 0x9e
'[?] ', # 0x9f
'[?] ', # 0xa0
'[?] ', # 0xa1
'[?] ', # 0xa2
'[?] ', # 0xa3
'[?] ', # 0xa4
'[?] ', # 0xa5
'[?] ', # 0xa6
'[?] ', # 0xa7
'[?] ', # 0xa8
'[?] ', # 0xa9
'[?] ', # 0xaa
'[?] ', # 0xab
'[?] ', # 0xac
'[?] ', # 0xad
'[?] ', # 0xae
'[?] ', # 0xaf
'[?] ', # 0xb0
'[?] ', # 0xb1
'[?] ', # 0xb2
'[?] ', # 0xb3
'[?] ', # 0xb4
'[?] ', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
|
tanmaykm/edx-platform
|
refs/heads/master
|
openedx/core/lib/block_structure/tests/test_block_structure.py
|
5
|
"""
Tests for block_structure.py
"""
# pylint: disable=protected-access
from collections import namedtuple
from copy import deepcopy
import ddt
import itertools
from nose.plugins.attrib import attr
from unittest import TestCase
from openedx.core.lib.graph_traversals import traverse_post_order
from ..block_structure import BlockStructure, BlockStructureModulestoreData
from ..exceptions import TransformerException
from .helpers import MockXBlock, MockTransformer, ChildrenMapTestMixin
@attr(shard=2)
@ddt.ddt
class TestBlockStructure(TestCase, ChildrenMapTestMixin):
"""
Tests for BlockStructure
"""
@ddt.data(
[],
ChildrenMapTestMixin.SIMPLE_CHILDREN_MAP,
ChildrenMapTestMixin.LINEAR_CHILDREN_MAP,
ChildrenMapTestMixin.DAG_CHILDREN_MAP,
)
def test_relations(self, children_map):
block_structure = self.create_block_structure(children_map, BlockStructure)
# get_children
for parent, children in enumerate(children_map):
self.assertSetEqual(set(block_structure.get_children(parent)), set(children))
# get_parents
for child, parents in enumerate(self.get_parents_map(children_map)):
self.assertSetEqual(set(block_structure.get_parents(child)), set(parents))
# __contains__
for node in range(len(children_map)):
self.assertIn(node, block_structure)
self.assertNotIn(len(children_map) + 1, block_structure)
@attr(shard=2)
@ddt.ddt
class TestBlockStructureData(TestCase, ChildrenMapTestMixin):
"""
Tests for BlockStructureBlockData and BlockStructureModulestoreData
"""
def test_non_versioned_transformer(self):
class TestNonVersionedTransformer(MockTransformer):
"""
Test transformer with default version number (0).
"""
VERSION = 0
block_structure = BlockStructureModulestoreData(root_block_usage_key=0)
with self.assertRaisesRegexp(TransformerException, "VERSION attribute is not set"):
block_structure._add_transformer(TestNonVersionedTransformer())
def test_transformer_data(self):
# transformer test cases
TransformerInfo = namedtuple("TransformerInfo", "transformer structure_wide_data block_specific_data") # pylint: disable=invalid-name
transformers_info = [
TransformerInfo(
transformer=MockTransformer(),
structure_wide_data=[("t1.global1", "t1.g.val1"), ("t1.global2", "t1.g.val2")],
block_specific_data={
"B1": [("t1.key1", "t1.b1.val1"), ("t1.key2", "t1.b1.val2")],
"B2": [("t1.key1", "t1.b2.val1"), ("t1.key2", "t1.b2.val2")],
"B3": [("t1.key1", True), ("t1.key2", False)],
"B4": [("t1.key1", None), ("t1.key2", False)],
},
),
TransformerInfo(
transformer=MockTransformer(),
structure_wide_data=[("t2.global1", "t2.g.val1"), ("t2.global2", "t2.g.val2")],
block_specific_data={
"B1": [("t2.key1", "t2.b1.val1"), ("t2.key2", "t2.b1.val2")],
"B2": [("t2.key1", "t2.b2.val1"), ("t2.key2", "t2.b2.val2")],
},
),
]
# create block structure
block_structure = BlockStructureModulestoreData(root_block_usage_key=0)
# set transformer data
for t_info in transformers_info:
block_structure._add_transformer(t_info.transformer)
for key, val in t_info.structure_wide_data:
block_structure.set_transformer_data(t_info.transformer, key, val)
for block, block_data in t_info.block_specific_data.iteritems():
for key, val in block_data:
block_structure.set_transformer_block_field(block, t_info.transformer, key, val)
# verify transformer data
for t_info in transformers_info:
self.assertEquals(
block_structure._get_transformer_data_version(t_info.transformer),
MockTransformer.VERSION
)
for key, val in t_info.structure_wide_data:
self.assertEquals(
block_structure.get_transformer_data(t_info.transformer, key),
val,
)
for block, block_data in t_info.block_specific_data.iteritems():
for key, val in block_data:
self.assertEquals(
block_structure.get_transformer_block_field(block, t_info.transformer, key),
val,
)
def test_xblock_data(self):
# block test cases
blocks = [
MockXBlock("A", {}),
MockXBlock("B", {"field1": "B.val1"}),
MockXBlock("C", {"field1": "C.val1", "field2": "C.val2"}),
MockXBlock("D", {"field1": True, "field2": False}),
MockXBlock("E", {"field1": None, "field2": False}),
]
# add each block
block_structure = BlockStructureModulestoreData(root_block_usage_key=0)
for block in blocks:
block_structure._add_xblock(block.location, block)
# request fields
fields = ["field1", "field2", "field3"]
block_structure.request_xblock_fields(*fields)
# verify fields have not been collected yet
for block in blocks:
bs_block = block_structure[block.location]
for field in fields:
self.assertIsNone(getattr(bs_block, field, None))
# collect fields
block_structure._collect_requested_xblock_fields()
# verify values of collected fields
for block in blocks:
bs_block = block_structure[block.location]
for field in fields:
self.assertEquals(
getattr(bs_block, field, None),
block.field_map.get(field),
)
@ddt.data(
*itertools.product(
[True, False],
range(7),
[
ChildrenMapTestMixin.SIMPLE_CHILDREN_MAP,
ChildrenMapTestMixin.LINEAR_CHILDREN_MAP,
ChildrenMapTestMixin.DAG_CHILDREN_MAP,
],
)
)
@ddt.unpack
def test_remove_block(self, keep_descendants, block_to_remove, children_map):
### skip test if invalid
if (block_to_remove >= len(children_map)) or (keep_descendants and block_to_remove == 0):
return
### create structure
block_structure = self.create_block_structure(children_map)
parents_map = self.get_parents_map(children_map)
### verify blocks pre-exist
self.assert_block_structure(block_structure, children_map)
### remove block
block_structure.remove_block(block_to_remove, keep_descendants)
missing_blocks = [block_to_remove]
### compute and verify updated children_map
removed_children_map = deepcopy(children_map)
removed_children_map[block_to_remove] = []
for parent in parents_map[block_to_remove]:
removed_children_map[parent].remove(block_to_remove)
if keep_descendants:
# update the graph connecting the old parents to the old children
for child in children_map[block_to_remove]:
for parent in parents_map[block_to_remove]:
removed_children_map[parent].append(child)
self.assert_block_structure(block_structure, removed_children_map, missing_blocks)
### prune the structure
block_structure._prune_unreachable()
### compute and verify updated children_map
pruned_children_map = deepcopy(removed_children_map)
if not keep_descendants:
pruned_parents_map = self.get_parents_map(pruned_children_map)
# update all descendants
for child in children_map[block_to_remove]:
# if the child has another parent, continue
if pruned_parents_map[child]:
continue
for block in traverse_post_order(child, get_children=lambda block: pruned_children_map[block]):
# add descendant to missing blocks and empty its
# children
missing_blocks.append(block)
pruned_children_map[block] = []
self.assert_block_structure(block_structure, pruned_children_map, missing_blocks)
def test_remove_block_traversal(self):
block_structure = self.create_block_structure(ChildrenMapTestMixin.LINEAR_CHILDREN_MAP)
block_structure.remove_block_traversal(lambda block: block == 2)
self.assert_block_structure(block_structure, [[1], [], [], []], missing_blocks=[2])
def test_copy(self):
def _set_value(structure, value):
"""
Sets a test transformer block field to the given value in the given structure.
"""
structure.set_transformer_block_field(1, 'transformer', 'test_key', value)
def _get_value(structure):
"""
Returns the value of the test transformer block field in the given structure.
"""
return structure[1].transformer_data['transformer'].test_key
# create block structure and verify blocks pre-exist
block_structure = self.create_block_structure(ChildrenMapTestMixin.LINEAR_CHILDREN_MAP)
self.assert_block_structure(block_structure, [[1], [2], [3], []])
_set_value(block_structure, 'original_value')
# create a new copy of the structure and verify they are equivalent
new_copy = block_structure.copy()
self.assertEquals(block_structure.root_block_usage_key, new_copy.root_block_usage_key)
for block in block_structure:
self.assertIn(block, new_copy)
self.assertEquals(block_structure.get_parents(block), new_copy.get_parents(block))
self.assertEquals(block_structure.get_children(block), new_copy.get_children(block))
self.assertEquals(_get_value(block_structure), _get_value(new_copy))
# verify edits to original block structure do not affect the copy
block_structure.remove_block(2, keep_descendants=True)
self.assert_block_structure(block_structure, [[1], [3], [], []], missing_blocks=[2])
self.assert_block_structure(new_copy, [[1], [2], [3], []])
_set_value(block_structure, 'edit1')
self.assertEquals(_get_value(block_structure), 'edit1')
self.assertEquals(_get_value(new_copy), 'original_value')
# verify edits to copy do not affect the original
new_copy.remove_block(3, keep_descendants=True)
self.assert_block_structure(block_structure, [[1], [3], [], []], missing_blocks=[2])
self.assert_block_structure(new_copy, [[1], [2], [], []], missing_blocks=[3])
_set_value(new_copy, 'edit2')
self.assertEquals(_get_value(block_structure), 'edit1')
self.assertEquals(_get_value(new_copy), 'edit2')
|
guru81/test
|
refs/heads/master
|
2.py
|
1
|
import numpy as np
# Write each (a, b, a/b) triple to data.txt; note that with numpy floats,
# a/b for b == 0.0 yields inf or nan (with a RuntimeWarning) instead of raising.
f = open('data.txt', 'w')
for a in np.arange(0.0, 10, 1):
    for b in np.arange(0.0, 10, 1):
        data = (a, b, a / b)
        f.write('%s %s %s\n' % data)
f.close()
|
tecan/xchat-rt
|
refs/heads/master
|
plugins/scripts/Supybot-0.83.4.1-bitcoinotc-bot/src/__init__.py
|
5
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import sys
import os.path
import dynamicScope
import supybot.utils as utils
__builtins__['format'] = utils.str.format
class Author(object):
def __init__(self, name=None, nick=None, email=None, **kwargs):
self.__dict__.update(kwargs)
self.name = name
self.nick = nick
self.email = email
def __str__(self):
return '%s (%s) <%s>' % (self.name, self.nick,
utils.web.mungeEmail(self.email))
class authors(object): # This is basically a bag.
jemfinch = Author('Jeremy Fincher', 'jemfinch', 'jemfinch@users.sf.net')
jamessan = Author('James Vega', 'jamessan', 'jamessan@users.sf.net')
strike = Author('Daniel DiPaolo', 'Strike', 'ddipaolo@users.sf.net')
baggins = Author('William Robinson', 'baggins', 'airbaggins@users.sf.net')
skorobeus = Author('Kevin Murphy', 'Skorobeus', 'skoro@skoroworld.com')
inkedmn = Author('Brett Kelly', 'inkedmn', 'inkedmn@users.sf.net')
bwp = Author('Brett Phipps', 'bwp', 'phippsb@gmail.com')
bear = Author('Mike Taylor', 'bear', 'bear@code-bear.com')
grantbow = Author('Grant Bowman', 'Grantbow', 'grantbow@grantbow.com')
unknown = Author('Unknown author', 'unknown', 'unknown@supybot.org')
# Let's be somewhat safe about this.
def __getattr__(self, attr):
try:
return getattr(super(authors, self), attr.lower())
except AttributeError:
return self.unknown
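    # Illustrative only (not part of the original module): thanks to the
    # fallback above, an unrecognised lookup such as authors().somebody
    # evaluates to the 'unknown' Author rather than raising AttributeError.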
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
lufan82/p2pool
|
refs/heads/master
|
wstools/Namespaces.py
|
292
|
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
"""Namespace module, so you don't need PyXML
"""
ident = "$Id$"
try:
from xml.ns import SOAP, SCHEMA, WSDL, XMLNS, DSIG, ENCRYPTION
DSIG.C14N = "http://www.w3.org/TR/2001/REC-xml-c14n-20010315"
except:
class SOAP:
ENV = "http://schemas.xmlsoap.org/soap/envelope/"
ENC = "http://schemas.xmlsoap.org/soap/encoding/"
ACTOR_NEXT = "http://schemas.xmlsoap.org/soap/actor/next"
class SCHEMA:
XSD1 = "http://www.w3.org/1999/XMLSchema"
XSD2 = "http://www.w3.org/2000/10/XMLSchema"
XSD3 = "http://www.w3.org/2001/XMLSchema"
XSD_LIST = [ XSD1, XSD2, XSD3]
XSI1 = "http://www.w3.org/1999/XMLSchema-instance"
XSI2 = "http://www.w3.org/2000/10/XMLSchema-instance"
XSI3 = "http://www.w3.org/2001/XMLSchema-instance"
XSI_LIST = [ XSI1, XSI2, XSI3 ]
BASE = XSD3
class WSDL:
BASE = "http://schemas.xmlsoap.org/wsdl/"
BIND_HTTP = "http://schemas.xmlsoap.org/wsdl/http/"
BIND_MIME = "http://schemas.xmlsoap.org/wsdl/mime/"
BIND_SOAP = "http://schemas.xmlsoap.org/wsdl/soap/"
BIND_SOAP12 = "http://schemas.xmlsoap.org/wsdl/soap12/"
class XMLNS:
BASE = "http://www.w3.org/2000/xmlns/"
XML = "http://www.w3.org/XML/1998/namespace"
HTML = "http://www.w3.org/TR/REC-html40"
class DSIG:
BASE = "http://www.w3.org/2000/09/xmldsig#"
C14N = "http://www.w3.org/TR/2001/REC-xml-c14n-20010315"
C14N_COMM = "http://www.w3.org/TR/2000/CR-xml-c14n-20010315#WithComments"
C14N_EXCL = "http://www.w3.org/2001/10/xml-exc-c14n#"
DIGEST_MD2 = "http://www.w3.org/2000/09/xmldsig#md2"
DIGEST_MD5 = "http://www.w3.org/2000/09/xmldsig#md5"
DIGEST_SHA1 = "http://www.w3.org/2000/09/xmldsig#sha1"
ENC_BASE64 = "http://www.w3.org/2000/09/xmldsig#base64"
ENVELOPED = "http://www.w3.org/2000/09/xmldsig#enveloped-signature"
HMAC_SHA1 = "http://www.w3.org/2000/09/xmldsig#hmac-sha1"
SIG_DSA_SHA1 = "http://www.w3.org/2000/09/xmldsig#dsa-sha1"
SIG_RSA_SHA1 = "http://www.w3.org/2000/09/xmldsig#rsa-sha1"
XPATH = "http://www.w3.org/TR/1999/REC-xpath-19991116"
XSLT = "http://www.w3.org/TR/1999/REC-xslt-19991116"
class ENCRYPTION:
BASE = "http://www.w3.org/2001/04/xmlenc#"
BLOCK_3DES = "http://www.w3.org/2001/04/xmlenc#des-cbc"
BLOCK_AES128 = "http://www.w3.org/2001/04/xmlenc#aes128-cbc"
BLOCK_AES192 = "http://www.w3.org/2001/04/xmlenc#aes192-cbc"
BLOCK_AES256 = "http://www.w3.org/2001/04/xmlenc#aes256-cbc"
DIGEST_RIPEMD160 = "http://www.w3.org/2001/04/xmlenc#ripemd160"
DIGEST_SHA256 = "http://www.w3.org/2001/04/xmlenc#sha256"
DIGEST_SHA512 = "http://www.w3.org/2001/04/xmlenc#sha512"
KA_DH = "http://www.w3.org/2001/04/xmlenc#dh"
KT_RSA_1_5 = "http://www.w3.org/2001/04/xmlenc#rsa-1_5"
KT_RSA_OAEP = "http://www.w3.org/2001/04/xmlenc#rsa-oaep-mgf1p"
STREAM_ARCFOUR = "http://www.w3.org/2001/04/xmlenc#arcfour"
WRAP_3DES = "http://www.w3.org/2001/04/xmlenc#kw-3des"
WRAP_AES128 = "http://www.w3.org/2001/04/xmlenc#kw-aes128"
WRAP_AES192 = "http://www.w3.org/2001/04/xmlenc#kw-aes192"
WRAP_AES256 = "http://www.w3.org/2001/04/xmlenc#kw-aes256"
class WSRF_V1_2:
'''OASIS WSRF Specifications Version 1.2
'''
class LIFETIME:
XSD_DRAFT1 = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-ResourceLifetime-1.2-draft-01.xsd"
XSD_DRAFT4 = "http://docs.oasis-open.org/wsrf/2004/11/wsrf-WS-ResourceLifetime-1.2-draft-04.xsd"
WSDL_DRAFT1 = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-ResourceLifetime-1.2-draft-01.wsdl"
WSDL_DRAFT4 = "http://docs.oasis-open.org/wsrf/2004/11/wsrf-WS-ResourceLifetime-1.2-draft-04.wsdl"
LATEST = WSDL_DRAFT4
WSDL_LIST = (WSDL_DRAFT1, WSDL_DRAFT4)
XSD_LIST = (XSD_DRAFT1, XSD_DRAFT4)
class PROPERTIES:
XSD_DRAFT1 = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-ResourceProperties-1.2-draft-01.xsd"
XSD_DRAFT5 = "http://docs.oasis-open.org/wsrf/2004/11/wsrf-WS-ResourceProperties-1.2-draft-05.xsd"
WSDL_DRAFT1 = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-ResourceProperties-1.2-draft-01.wsdl"
WSDL_DRAFT5 = "http://docs.oasis-open.org/wsrf/2004/11/wsrf-WS-ResourceProperties-1.2-draft-05.wsdl"
LATEST = WSDL_DRAFT5
WSDL_LIST = (WSDL_DRAFT1, WSDL_DRAFT5)
XSD_LIST = (XSD_DRAFT1, XSD_DRAFT5)
class BASENOTIFICATION:
XSD_DRAFT1 = "http://docs.oasis-open.org/wsn/2004/06/wsn-WS-BaseNotification-1.2-draft-01.xsd"
WSDL_DRAFT1 = "http://docs.oasis-open.org/wsn/2004/06/wsn-WS-BaseNotification-1.2-draft-01.wsdl"
LATEST = WSDL_DRAFT1
WSDL_LIST = (WSDL_DRAFT1,)
XSD_LIST = (XSD_DRAFT1,)
class BASEFAULTS:
XSD_DRAFT1 = "http://docs.oasis-open.org/wsrf/2004/06/wsrf-WS-BaseFaults-1.2-draft-01.xsd"
XSD_DRAFT3 = "http://docs.oasis-open.org/wsrf/2004/11/wsrf-WS-BaseFaults-1.2-draft-03.xsd"
#LATEST = DRAFT3
#WSDL_LIST = (WSDL_DRAFT1, WSDL_DRAFT3)
XSD_LIST = (XSD_DRAFT1, XSD_DRAFT3)
WSRF = WSRF_V1_2
WSRFLIST = (WSRF_V1_2,)
class OASIS:
'''URLs for Oasis specifications
'''
WSSE = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd"
UTILITY = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd"
class X509TOKEN:
Base64Binary = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0#Base64Binary"
STRTransform = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-soap-message-security-1.0"
PKCS7 = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#PKCS7"
X509 = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509"
X509PKIPathv1 = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509PKIPathv1"
X509v3SubjectKeyIdentifier = "http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-x509-token-profile-1.0#X509v3SubjectKeyIdentifier"
LIFETIME = WSRF_V1_2.LIFETIME.XSD_DRAFT1
PROPERTIES = WSRF_V1_2.PROPERTIES.XSD_DRAFT1
BASENOTIFICATION = WSRF_V1_2.BASENOTIFICATION.XSD_DRAFT1
BASEFAULTS = WSRF_V1_2.BASEFAULTS.XSD_DRAFT1
class APACHE:
'''This name space is defined by AXIS and it is used for the TC in TCapache.py,
Map and file attachment (DataHandler)
'''
AXIS_NS = "http://xml.apache.org/xml-soap"
class WSTRUST:
BASE = "http://schemas.xmlsoap.org/ws/2004/04/trust"
ISSUE = "http://schemas.xmlsoap.org/ws/2004/04/trust/Issue"
class WSSE:
BASE = "http://schemas.xmlsoap.org/ws/2002/04/secext"
TRUST = WSTRUST.BASE
class WSU:
BASE = "http://schemas.xmlsoap.org/ws/2002/04/utility"
UTILITY = "http://schemas.xmlsoap.org/ws/2002/07/utility"
class WSR:
PROPERTIES = "http://www.ibm.com/xmlns/stdwip/web-services/WS-ResourceProperties"
LIFETIME = "http://www.ibm.com/xmlns/stdwip/web-services/WS-ResourceLifetime"
class WSA200508:
ADDRESS = "http://www.w3.org/2005/08/addressing"
ANONYMOUS = "%s/anonymous" %ADDRESS
FAULT = "%s/fault" %ADDRESS
class WSA200408:
ADDRESS = "http://schemas.xmlsoap.org/ws/2004/08/addressing"
ANONYMOUS = "%s/role/anonymous" %ADDRESS
FAULT = "%s/fault" %ADDRESS
class WSA200403:
ADDRESS = "http://schemas.xmlsoap.org/ws/2004/03/addressing"
ANONYMOUS = "%s/role/anonymous" %ADDRESS
FAULT = "%s/fault" %ADDRESS
class WSA200303:
ADDRESS = "http://schemas.xmlsoap.org/ws/2003/03/addressing"
ANONYMOUS = "%s/role/anonymous" %ADDRESS
FAULT = None
WSA = WSA200408
WSA_LIST = (WSA200508, WSA200408, WSA200403, WSA200303)
class _WSAW(str):
""" Define ADDRESS attribute to be compatible with WSA* layout """
ADDRESS = property(lambda s: s)
WSAW200605 = _WSAW("http://www.w3.org/2006/05/addressing/wsdl")
WSAW_LIST = (WSAW200605,)
class WSP:
POLICY = "http://schemas.xmlsoap.org/ws/2002/12/policy"
class BEA:
SECCONV = "http://schemas.xmlsoap.org/ws/2004/04/sc"
SCTOKEN = "http://schemas.xmlsoap.org/ws/2004/04/security/sc/sct"
class GLOBUS:
SECCONV = "http://wsrf.globus.org/core/2004/07/security/secconv"
CORE = "http://www.globus.org/namespaces/2004/06/core"
SIG = "http://www.globus.org/2002/04/xmlenc#gssapi-sign"
TOKEN = "http://www.globus.org/ws/2004/09/security/sc#GSSAPI_GSI_TOKEN"
ZSI_SCHEMA_URI = 'http://www.zolera.com/schemas/ZSI/'
|
sbbic/core
|
refs/heads/master
|
writerfilter/source/ooxml/factory_ns.py
|
4
|
#!/usr/bin/env python
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
from __future__ import print_function
from xml.dom import minidom
import sys
def createHeader(model, ns):
nsToken = ns.replace('-', '_')
print("""
#ifndef INCLUDED_OOXML_FACTORY_%s_HXX
#define INCLUDED_OOXML_FACTORY_%s_HXX
#include "ooxml/OOXMLFactory.hxx"
#include "OOXMLFactory_generated.hxx"
#include "oox/token/namespaces.hxx"
#include "ooxml/resourceids.hxx"
namespace writerfilter {
namespace ooxml {
/// @cond GENERATED
""" % (nsToken.upper(), nsToken.upper()))
print("""class OOXMLFactory_%s : public OOXMLFactory_ns
{
public:
typedef std::shared_ptr <OOXMLFactory_ns> Pointer_t;
static Pointer_t getInstance();
virtual const AttributeInfo* getAttributeInfoArray(Id nId);
virtual bool getElementId(Id nDefine, Id nId, ResourceType_t& rOutResource, Id& rOutElement);
virtual bool getListValue(Id nId, const OUString& rValue, sal_uInt32& rOutValue);
virtual Id getResourceId(Id nDefine, sal_Int32 nToken);
""" % nsToken)
actions = []
for nsNode in [i for i in model.getElementsByTagName("namespace") if i.getAttribute("name") == ns]:
for resource in nsNode.getElementsByTagName("resource"):
for action in [i.getAttribute("name") for i in resource.childNodes if i.nodeType == minidom.Node.ELEMENT_NODE and i.tagName == "action"]:
if action != "characters" and action not in actions:
actions.append(action)
for action in actions:
print(" void %sAction(OOXMLFastContextHandler* pHandler);" % action)
print("""virtual void charactersAction(OOXMLFastContextHandler* pHandler, const OUString & sText);
virtual void attributeAction(OOXMLFastContextHandler* pHandler, Token_t nToken, OOXMLValue::Pointer_t pValue);
virtual ~OOXMLFactory_%s();
protected:
static Pointer_t m_pInstance;
OOXMLFactory_%s();
};
""" % (nsToken, nsToken))
print("""/// @endcond
}}
#endif //INCLUDED_OOXML_FACTORY_%s_HXX""" % nsToken.upper())
modelPath = sys.argv[1]
filePath = sys.argv[2]
model = minidom.parse(modelPath)
ns = filePath.split('OOXMLFactory_')[1].split('.hxx')[0]
createHeader(model, ns)
# vim:set shiftwidth=4 softtabstop=4 expandtab:
|
KonstantinRitt/qmqtt
|
refs/heads/master
|
tests/gtest/gtest/googletest/googlemock/scripts/upload_gmock.py
|
770
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gmock.py v0.1.0 -- uploads a Google Mock patch for review.
This simple wrapper passes all command line flags and
--cc=googlemock@googlegroups.com to upload.py.
USAGE: upload_gmock.py [options for upload.py]
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GMOCK_GROUP = 'googlemock@googlegroups.com'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Mock discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GMOCK_GROUP not in cc_list:
cc_list.append(GMOCK_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GMOCK_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
|
SUSE-Cloud/nova
|
refs/heads/stable/havana
|
nova/virt/libvirt/imagecache.py
|
8
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Michael Still and Canonical Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Image cache manager.
The cache manager implements the specification at
http://wiki.openstack.org/nova-image-cache-management.
"""
import hashlib
import json
import os
import re
import time
from oslo.config import cfg
from nova.compute import task_states
from nova.compute import vm_states
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
from nova.virt.libvirt import utils as virtutils
LOG = logging.getLogger(__name__)
imagecache_opts = [
cfg.StrOpt('base_dir_name',
default='_base',
help="Where cached images are stored under $instances_path."
"This is NOT the full path - just a folder name."
"For per-compute-host cached images, set to _base_$my_ip"),
cfg.StrOpt('image_info_filename_pattern',
default='$instances_path/$base_dir_name/%(image)s.info',
help='Allows image information files to be stored in '
'non-standard locations'),
cfg.BoolOpt('remove_unused_base_images',
default=True,
help='Should unused base images be removed?'),
cfg.BoolOpt('remove_unused_kernels',
default=False,
help='Should unused kernel images be removed? This is only '
'safe to enable if all compute nodes have been updated '
                     'to support this option. This will be enabled by default '
                     'in the future.'),
cfg.IntOpt('remove_unused_resized_minimum_age_seconds',
default=3600,
help='Unused resized base images younger than this will not be '
'removed'),
cfg.IntOpt('remove_unused_original_minimum_age_seconds',
default=(24 * 3600),
help='Unused unresized base images younger than this will not '
'be removed'),
cfg.BoolOpt('checksum_base_images',
default=False,
help='Write a checksum for files in _base to disk'),
cfg.IntOpt('checksum_interval_seconds',
default=3600,
help='How frequently to checksum base images'),
]
CONF = cfg.CONF
CONF.register_opts(imagecache_opts)
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('instances_path', 'nova.compute.manager')
def get_cache_fname(images, key):
"""Return a filename based on the SHA1 hash of a given image ID.
Image files stored in the _base directory that match this pattern
are considered for cleanup by the image cache manager. The cache
manager considers the file to be in use if it matches an instance's
image_ref, kernel_id or ramdisk_id property.
However, in grizzly-3 and before, only the image_ref property was
considered. This means that it's unsafe to store kernel and ramdisk
images using this pattern until we're sure that all compute nodes
are running a cache manager newer than grizzly-3. For now, we
require admins to confirm that by setting the remove_unused_kernels
boolean but, at some point in the future, we'll be safely able to
assume this.
"""
image_id = str(images[key])
if not CONF.remove_unused_kernels and key in ['kernel_id', 'ramdisk_id']:
return image_id
else:
return hashlib.sha1(image_id).hexdigest()
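# Illustrative sketch only (not part of the original module): with the default
# remove_unused_kernels=False, a hypothetical kernel_id such as 'aki-1234' is
# cached under its raw id, while an image_ref such as 'ami-1234' is cached
# under hashlib.sha1('ami-1234').hexdigest(), i.e. a 40-character hex filename
# in the _base directory.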
def get_info_filename(base_path):
"""Construct a filename for storing additional information about a base
image.
Returns a filename.
"""
base_file = os.path.basename(base_path)
return (CONF.image_info_filename_pattern
% {'image': base_file})
def is_valid_info_file(path):
"""Test if a given path matches the pattern for info files."""
digest_size = hashlib.sha1().digestsize * 2
regexp = (CONF.image_info_filename_pattern
% {'image': ('([0-9a-f]{%(digest_size)d}|'
'[0-9a-f]{%(digest_size)d}_sm|'
'[0-9a-f]{%(digest_size)d}_[0-9]+)'
% {'digest_size': digest_size})})
m = re.match(regexp, path)
if m:
return True
return False
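# Illustrative only: with the default image_info_filename_pattern this matches
# paths of the form <instances_path>/_base/<40 hex chars>.info, optionally with
# an '_sm' or '_<number>' (resized-image) suffix before the '.info' extension.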
def _read_possible_json(serialized, info_file):
try:
d = jsonutils.loads(serialized)
except ValueError as e:
LOG.error(_('Error reading image info file %(filename)s: '
'%(error)s'),
{'filename': info_file,
'error': e})
d = {}
return d
def read_stored_info(target, field=None, timestamped=False):
"""Read information about an image.
Returns an empty dictionary if there is no info, just the field value if
a field is requested, or the entire dictionary otherwise.
"""
info_file = get_info_filename(target)
if not os.path.exists(info_file):
# NOTE(mikal): Special case to handle essex checksums being converted.
# There is an assumption here that target is a base image filename.
old_filename = target + '.sha1'
if field == 'sha1' and os.path.exists(old_filename):
hash_file = open(old_filename)
hash_value = hash_file.read()
hash_file.close()
write_stored_info(target, field=field, value=hash_value)
os.remove(old_filename)
d = {field: hash_value}
else:
d = {}
else:
lock_name = 'info-%s' % os.path.split(target)[-1]
lock_path = os.path.join(CONF.instances_path, 'locks')
@utils.synchronized(lock_name, external=True, lock_path=lock_path)
def read_file(info_file):
LOG.debug(_('Reading image info file: %s'), info_file)
with open(info_file, 'r') as f:
return f.read().rstrip()
serialized = read_file(info_file)
d = _read_possible_json(serialized, info_file)
if field:
if timestamped:
return (d.get(field, None), d.get('%s-timestamp' % field, None))
else:
return d.get(field, None)
return d
def write_stored_info(target, field=None, value=None):
"""Write information about an image."""
if not field:
return
info_file = get_info_filename(target)
LOG.info(_('Writing stored info to %s'), info_file)
fileutils.ensure_tree(os.path.dirname(info_file))
lock_name = 'info-%s' % os.path.split(target)[-1]
lock_path = os.path.join(CONF.instances_path, 'locks')
@utils.synchronized(lock_name, external=True, lock_path=lock_path)
def write_file(info_file, field, value):
d = {}
if os.path.exists(info_file):
with open(info_file, 'r') as f:
d = _read_possible_json(f.read(), info_file)
d[field] = value
d['%s-timestamp' % field] = time.time()
with open(info_file, 'w') as f:
f.write(json.dumps(d))
write_file(info_file, field, value)
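# Illustrative only (values made up): after
#   write_stored_info('/path/to/_base/<hash>', field='sha1', value='deadbeef')
# the matching .info file would contain JSON along the lines of
#   {"sha1": "deadbeef", "sha1-timestamp": 1500000000.0}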
def read_stored_checksum(target, timestamped=True):
"""Read the checksum.
Returns the checksum (as hex) or None.
"""
return read_stored_info(target, field='sha1', timestamped=timestamped)
def write_stored_checksum(target):
"""Write a checksum to disk for a file in _base."""
with open(target, 'r') as img_file:
checksum = utils.hash_file(img_file)
write_stored_info(target, field='sha1', value=checksum)
class ImageCacheManager(object):
def __init__(self):
self.lock_path = os.path.join(CONF.instances_path, 'locks')
self._reset_state()
def _reset_state(self):
"""Reset state variables used for each pass."""
self.used_images = {}
self.image_popularity = {}
self.instance_names = set()
self.active_base_files = []
self.corrupt_base_files = []
self.originals = []
self.removable_base_files = []
self.unexplained_images = []
def _store_image(self, base_dir, ent, original=False):
"""Store a base image for later examination."""
entpath = os.path.join(base_dir, ent)
if os.path.isfile(entpath):
self.unexplained_images.append(entpath)
if original:
self.originals.append(entpath)
def _list_base_images(self, base_dir):
"""Return a list of the images present in _base.
Determine what images we have on disk. There will be other files in
this directory so we only grab the ones which are the right length
to be disk images.
Note that this does not return a value. It instead populates a class
variable with a list of images that we need to try and explain.
"""
digest_size = hashlib.sha1().digestsize * 2
for ent in os.listdir(base_dir):
if len(ent) == digest_size:
self._store_image(base_dir, ent, original=True)
elif (len(ent) > digest_size + 2 and
ent[digest_size] == '_' and
not is_valid_info_file(os.path.join(base_dir, ent))):
self._store_image(base_dir, ent, original=False)
def _list_running_instances(self, context, all_instances):
"""List running instances (on all compute nodes)."""
self.used_images = {}
self.image_popularity = {}
self.instance_names = set()
for instance in all_instances:
# NOTE(mikal): "instance name" here means "the name of a directory
# which might contain an instance" and therefore needs to include
# historical permutations as well as the current one.
self.instance_names.add(instance['name'])
self.instance_names.add(instance['uuid'])
resize_states = [task_states.RESIZE_PREP,
task_states.RESIZE_MIGRATING,
task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH]
if instance['task_state'] in resize_states or \
instance['vm_state'] == vm_states.RESIZED:
self.instance_names.add(instance['name'] + '_resize')
self.instance_names.add(instance['uuid'] + '_resize')
for image_key in ['image_ref', 'kernel_id', 'ramdisk_id']:
try:
image_ref_str = str(instance[image_key])
except KeyError:
continue
local, remote, insts = self.used_images.get(image_ref_str,
(0, 0, []))
if instance['host'] == CONF.host:
local += 1
else:
remote += 1
insts.append(instance['name'])
self.used_images[image_ref_str] = (local, remote, insts)
self.image_popularity.setdefault(image_ref_str, 0)
self.image_popularity[image_ref_str] += 1
def _list_backing_images(self):
"""List the backing images currently in use."""
inuse_images = []
for ent in os.listdir(CONF.instances_path):
if ent in self.instance_names:
LOG.debug(_('%s is a valid instance name'), ent)
disk_path = os.path.join(CONF.instances_path, ent, 'disk')
if os.path.exists(disk_path):
LOG.debug(_('%s has a disk file'), ent)
backing_file = virtutils.get_disk_backing_file(disk_path)
LOG.debug(_('Instance %(instance)s is backed by '
'%(backing)s'),
{'instance': ent,
'backing': backing_file})
if backing_file:
backing_path = os.path.join(CONF.instances_path,
CONF.base_dir_name,
backing_file)
if backing_path not in inuse_images:
inuse_images.append(backing_path)
if backing_path in self.unexplained_images:
LOG.warning(_('Instance %(instance)s is using a '
'backing file %(backing)s which '
'does not appear in the image '
'service'),
{'instance': ent,
'backing': backing_file})
self.unexplained_images.remove(backing_path)
return inuse_images
def _find_base_file(self, base_dir, fingerprint):
"""Find the base file matching this fingerprint.
Yields the name of the base file, a boolean which is True if the image
is "small", and a boolean which indicates if this is a resized image.
    Note that it is possible for more than one yield to result from this
check.
If no base file is found, then nothing is yielded.
"""
# The original file from glance
base_file = os.path.join(base_dir, fingerprint)
if os.path.exists(base_file):
yield base_file, False, False
# An older naming style which can be removed sometime after Folsom
base_file = os.path.join(base_dir, fingerprint + '_sm')
if os.path.exists(base_file):
yield base_file, True, False
# Resized images
resize_re = re.compile('.*/%s_[0-9]+$' % fingerprint)
for img in self.unexplained_images:
m = resize_re.match(img)
if m:
yield img, False, True
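    # Illustrative only: for a hypothetical fingerprint 'ab...cd' this generator
    # may yield ('<base_dir>/ab...cd', False, False) for the original download,
    # ('<base_dir>/ab...cd_sm', True, False) for a pre-Folsom "small" copy, and
    # ('<base_dir>/ab...cd_10737418240', False, True) for a resized copy found
    # in self.unexplained_images.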
def _verify_checksum(self, img_id, base_file, create_if_missing=True):
"""Compare the checksum stored on disk with the current file.
Note that if the checksum fails to verify this is logged, but no actual
action occurs. This is something sysadmins should monitor for and
handle manually when it occurs.
"""
if not CONF.checksum_base_images:
return None
lock_name = 'hash-%s' % os.path.split(base_file)[-1]
# Protect against other nova-computes performing checksums at the same
# time if we are using shared storage
@utils.synchronized(lock_name, external=True, lock_path=self.lock_path)
def inner_verify_checksum():
(stored_checksum, stored_timestamp) = read_stored_checksum(
base_file, timestamped=True)
if stored_checksum:
# NOTE(mikal): Checksums are timestamped. If we have recently
# checksummed (possibly on another compute node if we are using
# shared storage), then we don't need to checksum again.
if (stored_timestamp and
time.time() - stored_timestamp <
CONF.checksum_interval_seconds):
return True
# NOTE(mikal): If there is no timestamp, then the checksum was
# performed by a previous version of the code.
if not stored_timestamp:
write_stored_info(base_file, field='sha1',
value=stored_checksum)
with open(base_file, 'r') as f:
current_checksum = utils.hash_file(f)
if current_checksum != stored_checksum:
LOG.error(_('image %(id)s at (%(base_file)s): image '
'verification failed'),
{'id': img_id,
'base_file': base_file})
return False
else:
return True
else:
LOG.info(_('image %(id)s at (%(base_file)s): image '
'verification skipped, no hash stored'),
{'id': img_id,
'base_file': base_file})
# NOTE(mikal): If the checksum file is missing, then we should
# create one. We don't create checksums when we download images
# from glance because that would delay VM startup.
if CONF.checksum_base_images and create_if_missing:
LOG.info(_('%(id)s (%(base_file)s): generating checksum'),
{'id': img_id,
'base_file': base_file})
write_stored_checksum(base_file)
return None
return inner_verify_checksum()
def _remove_base_file(self, base_file):
"""Remove a single base file if it is old enough.
Returns nothing.
"""
if not os.path.exists(base_file):
            LOG.debug(_('Cannot remove %(base_file)s, it does not exist'),
                      {'base_file': base_file})
return
mtime = os.path.getmtime(base_file)
age = time.time() - mtime
maxage = CONF.remove_unused_resized_minimum_age_seconds
if base_file in self.originals:
maxage = CONF.remove_unused_original_minimum_age_seconds
if age < maxage:
LOG.info(_('Base file too young to remove: %s'),
base_file)
else:
LOG.info(_('Removing base file: %s'), base_file)
try:
os.remove(base_file)
signature = get_info_filename(base_file)
if os.path.exists(signature):
os.remove(signature)
except OSError as e:
LOG.error(_('Failed to remove %(base_file)s, '
'error was %(error)s'),
{'base_file': base_file,
'error': e})
def _handle_base_image(self, img_id, base_file):
"""Handle the checks for a single base image."""
image_bad = False
image_in_use = False
LOG.info(_('image %(id)s at (%(base_file)s): checking'),
{'id': img_id,
'base_file': base_file})
if base_file in self.unexplained_images:
self.unexplained_images.remove(base_file)
if (base_file and os.path.exists(base_file)
and os.path.isfile(base_file)):
# _verify_checksum returns True if the checksum is ok, and None if
# there is no checksum file
checksum_result = self._verify_checksum(img_id, base_file)
if checksum_result is not None:
image_bad = not checksum_result
# Give other threads a chance to run
time.sleep(0)
instances = []
if img_id in self.used_images:
local, remote, instances = self.used_images[img_id]
if local > 0 or remote > 0:
image_in_use = True
LOG.info(_('image %(id)s at (%(base_file)s): '
'in use: on this node %(local)d local, '
'%(remote)d on other nodes sharing this instance '
'storage'),
{'id': img_id,
'base_file': base_file,
'local': local,
'remote': remote})
self.active_base_files.append(base_file)
if not base_file:
LOG.warning(_('image %(id)s at (%(base_file)s): warning '
'-- an absent base file is in use! '
'instances: %(instance_list)s'),
{'id': img_id,
'base_file': base_file,
'instance_list': ' '.join(instances)})
if image_bad:
self.corrupt_base_files.append(base_file)
if base_file:
if not image_in_use:
LOG.debug(_('image %(id)s at (%(base_file)s): image is not in '
'use'),
{'id': img_id,
'base_file': base_file})
self.removable_base_files.append(base_file)
else:
LOG.debug(_('image %(id)s at (%(base_file)s): image is in '
'use'),
{'id': img_id,
'base_file': base_file})
if os.path.exists(base_file):
virtutils.chown(base_file, os.getuid())
os.utime(base_file, None)
def verify_base_images(self, context, all_instances):
"""Verify that base images are in a reasonable state."""
# NOTE(mikal): The new scheme for base images is as follows -- an
# image is streamed from the image service to _base (filename is the
# sha1 hash of the image id). If CoW is enabled, that file is then
# resized to be the correct size for the instance (filename is the
# same as the original, but with an underscore and the resized size
# in bytes). This second file is then CoW'd to the instance disk. If
# CoW is disabled, the resize occurs as part of the copy from the
# cache to the instance directory. Files ending in _sm are no longer
# created, but may remain from previous versions.
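        # Illustrative only: an image whose id hashes to 'ab...cd' could
        # therefore appear in _base as 'ab...cd' (the original download),
        # 'ab...cd_21474836480' (a hypothetical 20G resize used for CoW), and,
        # left over from older releases, 'ab...cd_sm'.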
self._reset_state()
base_dir = os.path.join(CONF.instances_path, CONF.base_dir_name)
if not os.path.exists(base_dir):
LOG.debug(_('Skipping verification, no base directory at %s'),
base_dir)
return
LOG.debug(_('Verify base images'))
self._list_base_images(base_dir)
self._list_running_instances(context, all_instances)
# Determine what images are on disk because they're in use
for img in self.used_images:
fingerprint = hashlib.sha1(img).hexdigest()
LOG.debug(_('Image id %(id)s yields fingerprint %(fingerprint)s'),
{'id': img,
'fingerprint': fingerprint})
for result in self._find_base_file(base_dir, fingerprint):
base_file, image_small, image_resized = result
self._handle_base_image(img, base_file)
if not image_small and not image_resized:
self.originals.append(base_file)
# Elements remaining in unexplained_images might be in use
inuse_backing_images = self._list_backing_images()
for backing_path in inuse_backing_images:
if backing_path not in self.active_base_files:
self.active_base_files.append(backing_path)
# Anything left is an unknown base image
for img in self.unexplained_images:
LOG.warning(_('Unknown base file: %s'), img)
self.removable_base_files.append(img)
# Dump these lists
if self.active_base_files:
LOG.info(_('Active base files: %s'),
' '.join(self.active_base_files))
if self.corrupt_base_files:
LOG.info(_('Corrupt base files: %s'),
' '.join(self.corrupt_base_files))
if self.removable_base_files:
LOG.info(_('Removable base files: %s'),
' '.join(self.removable_base_files))
if CONF.remove_unused_base_images:
for base_file in self.removable_base_files:
self._remove_base_file(base_file)
# That's it
LOG.debug(_('Verification complete'))
|
axbaretto/beam
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py
|
762
|
import hashlib
import os
from pip._vendor.lockfile import LockFile
from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile
from ..cache import BaseCache
from ..controller import CacheController
def _secure_open_write(filename, fmode):
# We only want to write to this file, so open it in write only mode
flags = os.O_WRONLY
    # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we will
    # only open *new* files.
# We specify this because we want to ensure that the mode we pass is the
# mode of the file.
flags |= os.O_CREAT | os.O_EXCL
# Do not follow symlinks to prevent someone from making a symlink that
# we follow and insecurely open a cache file.
if hasattr(os, "O_NOFOLLOW"):
flags |= os.O_NOFOLLOW
# On Windows we'll mark this file as binary
if hasattr(os, "O_BINARY"):
flags |= os.O_BINARY
# Before we open our file, we want to delete any existing file that is
# there
try:
os.remove(filename)
except (IOError, OSError):
# The file must not exist already, so we can just skip ahead to opening
pass
# Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
# race condition happens between the os.remove and this line, that an
# error will be raised. Because we utilize a lockfile this should only
# happen if someone is attempting to attack us.
fd = os.open(filename, flags, fmode)
try:
return os.fdopen(fd, "wb")
except:
# An error occurred wrapping our FD in a file object
os.close(fd)
raise
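# Illustrative usage only (the path and payload below are made up):
#
#     with _secure_open_write('/tmp/example-cache-entry', 0o600) as fh:
#         fh.write(b"cached response bytes")
#
# The helper returns a write-only binary file object; because the target is
# removed first and then opened with O_CREAT | O_EXCL, an error is raised if
# the file reappears in between (e.g. via a symlink attack).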
class FileCache(BaseCache):
def __init__(self, directory, forever=False, filemode=0o0600,
dirmode=0o0700, use_dir_lock=None, lock_class=None):
if use_dir_lock is not None and lock_class is not None:
raise ValueError("Cannot use use_dir_lock and lock_class together")
if use_dir_lock:
lock_class = MkdirLockFile
if lock_class is None:
lock_class = LockFile
self.directory = directory
self.forever = forever
self.filemode = filemode
self.dirmode = dirmode
self.lock_class = lock_class
@staticmethod
def encode(x):
return hashlib.sha224(x.encode()).hexdigest()
def _fn(self, name):
# NOTE: This method should not change as some may depend on it.
# See: https://github.com/ionrock/cachecontrol/issues/63
hashed = self.encode(name)
parts = list(hashed[:5]) + [hashed]
return os.path.join(self.directory, *parts)
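    # Illustrative only: encode() is the SHA-224 hex digest of the key, and
    # _fn() shards it by its first five characters, so a hypothetical digest
    # starting with 'a94a8' would be stored at
    # <directory>/a/9/4/a/8/a94a8... on disk.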
def get(self, key):
name = self._fn(key)
if not os.path.exists(name):
return None
with open(name, 'rb') as fh:
return fh.read()
def set(self, key, value):
name = self._fn(key)
# Make sure the directory exists
try:
os.makedirs(os.path.dirname(name), self.dirmode)
except (IOError, OSError):
pass
with self.lock_class(name) as lock:
# Write our actual file
with _secure_open_write(lock.path, self.filemode) as fh:
fh.write(value)
def delete(self, key):
name = self._fn(key)
if not self.forever:
os.remove(name)
def url_to_file_path(url, filecache):
"""Return the file cache path based on the URL.
This does not ensure the file exists!
"""
key = CacheController.cache_url(url)
return filecache._fn(key)
|
amenonsen/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/vyos/vyos_l3_interfaces.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for vyos_l3_interfaces
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'
}
DOCUMENTATION = """
---
module: vyos_l3_interfaces
version_added: 2.9
short_description: Manages L3 interface attributes of VyOS network devices.
description: This module manages the L3 interface attributes on VyOS network devices.
notes:
- Tested against VyOS 1.1.8 (helium).
- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html).
author: Nilashish Chakraborty (@NilashishC)
options:
config:
description: The provided L3 interfaces configuration.
type: list
elements: dict
suboptions:
name:
description:
- Full name of the interface, e.g. eth0, eth1.
type: str
required: True
ipv4:
description:
- List of IPv4 addresses of the interface.
type: list
elements: dict
suboptions:
address:
description:
- IPv4 address of the interface.
type: str
ipv6:
description:
- List of IPv6 addresses of the interface.
type: list
elements: dict
suboptions:
address:
description:
- IPv6 address of the interface.
type: str
vifs:
description:
- Virtual sub-interfaces L3 configurations.
elements: dict
type: list
suboptions:
vlan_id:
description:
- Identifier for the virtual sub-interface.
type: int
ipv4:
description:
- List of IPv4 addresses of the virtual interface.
type: list
elements: dict
suboptions:
address:
description:
- IPv4 address of the virtual interface.
type: str
ipv6:
description:
- List of IPv6 addresses of the virtual interface.
type: list
elements: dict
suboptions:
address:
description:
- IPv6 address of the virtual interface.
type: str
state:
description:
- The state the configuration should be left in.
type: str
choices:
- merged
- replaced
- overridden
- deleted
default: merged
"""
EXAMPLES = """
# Using merged
#
# Before state:
# -------------
#
# vyos:~$ show configuration commands | grep -e eth[2,3]
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 vif 101
# set interfaces ethernet eth3 vif 102
- name: Merge provided configuration with device configuration
vyos_l3_interfaces:
config:
- name: eth2
ipv4:
- address: 192.0.2.10/28
- address: 198.51.100.40/27
ipv6:
- address: 2001:db8:100::2/32
- address: 2001:db8:400::10/32
- name: eth3
ipv4:
- address: 203.0.113.65/26
vifs:
- vlan_id: 101
ipv4:
- address: 192.0.2.71/28
- address: 198.51.100.131/25
- vlan_id: 102
ipv6:
- address: 2001:db8:1000::5/38
- address: 2001:db8:1400::3/38
state: merged
# After state:
# -------------
#
# vyos:~$ show configuration commands | grep -e eth[2,3]
# set interfaces ethernet eth2 address '192.0.2.10/28'
# set interfaces ethernet eth2 address '198.51.100.40/27'
# set interfaces ethernet eth2 address '2001:db8:100::2/32'
# set interfaces ethernet eth2 address '2001:db8:400::10/32'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth3 address '203.0.113.65/26'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 vif 101 address '192.0.2.71/28'
# set interfaces ethernet eth3 vif 101 address '198.51.100.131/25'
# set interfaces ethernet eth3 vif 102 address '2001:db8:1000::5/38'
# set interfaces ethernet eth3 vif 102 address '2001:db8:1400::3/38'
# set interfaces ethernet eth3 vif 102 address '2001:db8:4000::2/34'
# Using replaced
#
# Before state:
# -------------
#
# vyos:~$ show configuration commands | grep eth
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:EA:0F:B9'
# set interfaces ethernet eth1 address '192.0.2.14/24'
# set interfaces ethernet eth2 address '192.0.2.10/24'
# set interfaces ethernet eth2 address '192.0.2.11/24'
# set interfaces ethernet eth2 address '2001:db8::10/32'
# set interfaces ethernet eth2 address '2001:db8::11/32'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth3 address '198.51.100.10/24'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 vif 101 address '198.51.100.130/25'
# set interfaces ethernet eth3 vif 101 address '198.51.100.131/25'
# set interfaces ethernet eth3 vif 102 address '2001:db8:4000::3/34'
# set interfaces ethernet eth3 vif 102 address '2001:db8:4000::2/34'
#
- name: Replace device configurations of listed interfaces with provided configurations
vyos_l3_interfaces:
config:
- name: eth2
ipv4:
- address: 192.0.2.10/24
- name: eth3
ipv6:
- address: 2001:db8::11/32
state: replaced
# After state:
# -------------
#
# vyos:~$ show configuration commands | grep eth
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:EA:0F:B9'
# set interfaces ethernet eth1 address '192.0.2.14/24'
# set interfaces ethernet eth2 address '192.0.2.10/24'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 address '2001:db8::11/32'
# set interfaces ethernet eth3 vif 101
# set interfaces ethernet eth3 vif 102
# Using overridden
#
# Before state
# --------------
#
# vyos@vyos-appliance:~$ show configuration commands | grep eth
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:EA:0F:B9'
# set interfaces ethernet eth1 address '192.0.2.14/24'
# set interfaces ethernet eth2 address '192.0.2.10/24'
# set interfaces ethernet eth2 address '192.0.2.11/24'
# set interfaces ethernet eth2 address '2001:db8::10/32'
# set interfaces ethernet eth2 address '2001:db8::11/32'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth3 address '198.51.100.10/24'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 vif 101 address '198.51.100.130/25'
# set interfaces ethernet eth3 vif 101 address '198.51.100.131/25'
# set interfaces ethernet eth3 vif 102 address '2001:db8:4000::3/34'
# set interfaces ethernet eth3 vif 102 address '2001:db8:4000::2/34'
- name: Overrides all device configuration with provided configuration
vyos_l3_interfaces:
config:
- name: eth0
ipv4:
- address: dhcp
ipv6:
- address: dhcpv6
state: overridden
# After state
# ------------
#
# vyos@vyos-appliance:~$ show configuration commands | grep eth
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 address 'dhcpv6'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:EA:0F:B9'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 vif 101
# set interfaces ethernet eth3 vif 102
# Using deleted
#
# Before state
# -------------
# vyos@vyos-appliance:~$ show configuration commands | grep eth
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:30:f0:22'
# set interfaces ethernet eth0 smp-affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:EA:0F:B9'
# set interfaces ethernet eth1 address '192.0.2.14/24'
# set interfaces ethernet eth2 address '192.0.2.10/24'
# set interfaces ethernet eth2 address '192.0.2.11/24'
# set interfaces ethernet eth2 address '2001:db8::10/32'
# set interfaces ethernet eth2 address '2001:db8::11/32'
# set interfaces ethernet eth2 hw-id '08:00:27:c2:98:23'
# set interfaces ethernet eth3 address '198.51.100.10/24'
# set interfaces ethernet eth3 hw-id '08:00:27:43:70:8c'
# set interfaces ethernet eth3 vif 101 address '198.51.100.130/25'
# set interfaces ethernet eth3 vif 101 address '198.51.100.131/25'
# set interfaces ethernet eth3 vif 102 address '2001:db8:4000::3/34'
# set interfaces ethernet eth3 vif 102 address '2001:db8:4000::2/34'
- name: Delete L3 attributes of given interfaces (Note - This won't delete the interface itself)
vyos_l3_interfaces:
config:
- name: eth1
- name: eth2
- name: eth3
state: deleted
# After state
# ------------
# vyos@vyos-appliance:~$ show configuration commands | grep eth
# set interfaces ethernet eth0 address 'dhcp'
# set interfaces ethernet eth0 duplex 'auto'
# set interfaces ethernet eth0 hw-id '08:00:27:f3:6c:b5'
# set interfaces ethernet eth0 smp_affinity 'auto'
# set interfaces ethernet eth0 speed 'auto'
# set interfaces ethernet eth1 hw-id '08:00:27:ad:ef:65'
# set interfaces ethernet eth1 smp_affinity 'auto'
# set interfaces ethernet eth2 hw-id '08:00:27:ab:4e:79'
# set interfaces ethernet eth2 smp_affinity 'auto'
# set interfaces ethernet eth3 hw-id '08:00:27:17:3c:85'
# set interfaces ethernet eth3 smp_affinity 'auto'
"""
RETURN = """
before:
description: The configuration prior to the model invocation.
returned: always
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
after:
description: The resulting configuration after the model invocation.
returned: when changed
type: list
sample: >
The configuration returned will always be in the same format
of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample: ['set interfaces ethernet eth1 192.0.2.14/2', 'set interfaces ethernet eth3 vif 101 address 198.51.100.130/25']
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.vyos.argspec.l3_interfaces.l3_interfaces import L3_interfacesArgs
from ansible.module_utils.network.vyos.config.l3_interfaces.l3_interfaces import L3_interfaces
def main():
"""
Main entry point for module execution
:returns: the result from module invocation
"""
module = AnsibleModule(argument_spec=L3_interfacesArgs.argument_spec,
supports_check_mode=True)
result = L3_interfaces(module).execute_module()
module.exit_json(**result)
if __name__ == '__main__':
main()
|
facelessuser/Pywin32
|
refs/heads/master
|
lib/x32/win32com/universal.py
|
27
|
# Code that packs and unpacks the Univgw structures.
# See if we have a special directory for the binaries (for developers)
import types
import pythoncom
from win32com.client import gencache
com_error = pythoncom.com_error
_univgw = pythoncom._univgw
def RegisterInterfaces(typelibGUID, lcid, major, minor, interface_names = None):
    ret = [] # return a list of (dispid, funcname) for our policy's benefit
# First see if we have makepy support. If so, we can probably satisfy the request without loading the typelib.
try:
mod = gencache.GetModuleForTypelib(typelibGUID, lcid, major, minor)
except ImportError:
mod = None
if mod is None:
import win32com.client.build
# Load up the typelib and build (but don't cache) it now
tlb = pythoncom.LoadRegTypeLib(typelibGUID, major, minor, lcid)
typecomp_lib = tlb.GetTypeComp()
if interface_names is None:
interface_names = []
for i in range(tlb.GetTypeInfoCount()):
info = tlb.GetTypeInfo(i)
doc = tlb.GetDocumentation(i)
attr = info.GetTypeAttr()
if attr.typekind == pythoncom.TKIND_INTERFACE or \
(attr.typekind == pythoncom.TKIND_DISPATCH and attr.wTypeFlags & pythoncom.TYPEFLAG_FDUAL):
interface_names.append(doc[0])
for name in interface_names:
type_info, type_comp = typecomp_lib.BindType(name, )
# Not sure why we don't get an exception here - BindType's C
# impl looks correct..
if type_info is None:
raise ValueError("The interface '%s' can not be located" % (name,))
# If we got back a Dispatch interface, convert to the real interface.
attr = type_info.GetTypeAttr()
if attr.typekind == pythoncom.TKIND_DISPATCH:
refhtype = type_info.GetRefTypeOfImplType(-1)
type_info = type_info.GetRefTypeInfo(refhtype)
attr = type_info.GetTypeAttr()
item = win32com.client.build.VTableItem(type_info, attr, type_info.GetDocumentation(-1))
_doCreateVTable(item.clsid, item.python_name, item.bIsDispatch, item.vtableFuncs)
for info in item.vtableFuncs:
names, dispid, desc = info
invkind = desc[4]
ret.append((dispid, invkind, names[0]))
else:
        # Cool - can use cached info.
if not interface_names:
interface_names = list(mod.VTablesToClassMap.values())
for name in interface_names:
try:
iid = mod.NamesToIIDMap[name]
except KeyError:
raise ValueError("Interface '%s' does not exist in this cached typelib" % (name,))
# print "Processing interface", name
sub_mod = gencache.GetModuleForCLSID(iid)
is_dispatch = getattr(sub_mod, name + "_vtables_dispatch_", None)
method_defs = getattr(sub_mod, name + "_vtables_", None)
if is_dispatch is None or method_defs is None:
raise ValueError("Interface '%s' is IDispatch only" % (name,))
# And create the univgw defn
_doCreateVTable(iid, name, is_dispatch, method_defs)
for info in method_defs:
names, dispid, desc = info
invkind = desc[4]
ret.append((dispid, invkind, names[0]))
return ret
def _doCreateVTable(iid, interface_name, is_dispatch, method_defs):
defn = Definition(iid, is_dispatch, method_defs)
vtbl = _univgw.CreateVTable(defn, is_dispatch)
_univgw.RegisterVTable(vtbl, iid, interface_name)
def _CalcTypeSize(typeTuple):
t = typeTuple[0]
if t & (pythoncom.VT_BYREF | pythoncom.VT_ARRAY):
# Its a pointer.
cb = _univgw.SizeOfVT(pythoncom.VT_PTR)[1]
elif t == pythoncom.VT_RECORD:
# Just because a type library uses records doesn't mean the user
        # is trying to. We need a better place to warn about this, but it
# isn't here.
#try:
# import warnings
# warnings.warn("warning: records are known to not work for vtable interfaces")
#except ImportError:
# print "warning: records are known to not work for vtable interfaces"
cb = _univgw.SizeOfVT(pythoncom.VT_PTR)[1]
#cb = typeInfo.GetTypeAttr().cbSizeInstance
else:
cb = _univgw.SizeOfVT(t)[1]
return cb
class Arg:
def __init__(self, arg_info, name = None):
self.name = name
self.vt, self.inOut, self.default, self.clsid = arg_info
self.size = _CalcTypeSize(arg_info)
# Offset from the beginning of the arguments of the stack.
self.offset = 0
class Method:
def __init__(self, method_info, isEventSink=0):
all_names, dispid, desc = method_info
name = all_names[0]
names = all_names[1:]
invkind = desc[4]
arg_defs = desc[2]
ret_def = desc[8]
self.dispid = dispid
self.invkind = invkind
        # We don't use this ATM.
# self.ret = Arg(ret_def)
if isEventSink and name[:2] != "On":
name = "On%s" % name
self.name = name
cbArgs = 0
self.args = []
for argDesc in arg_defs:
arg = Arg(argDesc)
arg.offset = cbArgs
cbArgs = cbArgs + arg.size
self.args.append(arg)
self.cbArgs = cbArgs
self._gw_in_args = self._GenerateInArgTuple()
self._gw_out_args = self._GenerateOutArgTuple()
def _GenerateInArgTuple(self):
# Given a method, generate the in argument tuple
l = []
for arg in self.args:
if arg.inOut & pythoncom.PARAMFLAG_FIN or \
arg.inOut == 0:
l.append((arg.vt, arg.offset, arg.size))
return tuple(l)
def _GenerateOutArgTuple(self):
# Given a method, generate the out argument tuple
l = []
for arg in self.args:
if arg.inOut & pythoncom.PARAMFLAG_FOUT or \
arg.inOut & pythoncom.PARAMFLAG_FRETVAL or \
arg.inOut == 0:
l.append((arg.vt, arg.offset, arg.size, arg.clsid))
return tuple(l)
class Definition:
def __init__(self, iid, is_dispatch, method_defs):
self._iid = iid
self._methods = []
self._is_dispatch = is_dispatch
for info in method_defs:
entry = Method(info)
self._methods.append(entry)
def iid(self):
return self._iid
def vtbl_argsizes(self):
return [m.cbArgs for m in self._methods]
def vtbl_argcounts(self):
return [len(m.args) for m in self._methods]
def dispatch(self, ob, index, argPtr,
ReadFromInTuple=_univgw.ReadFromInTuple,
WriteFromOutTuple=_univgw.WriteFromOutTuple):
"Dispatch a call to an interface method."
meth = self._methods[index]
# Infer S_OK if they don't return anything bizarre.
hr = 0
args = ReadFromInTuple(meth._gw_in_args, argPtr)
# If ob is a dispatcher, ensure a policy
ob = getattr(ob, "policy", ob)
# Ensure the correct dispid is setup
ob._dispid_to_func_[meth.dispid] = meth.name
retVal = ob._InvokeEx_(meth.dispid, 0, meth.invkind, args, None, None)
# None is an allowed return value stating that
# the code doesn't want to touch any output arguments.
if type(retVal) == tuple: # Like pythoncom, we special case a tuple.
# However, if they want to return a specific HRESULT,
# then they have to return all of the out arguments
# AND the HRESULT.
if len(retVal) == len(meth._gw_out_args) + 1:
hr = retVal[0]
retVal = retVal[1:]
else:
raise TypeError("Expected %s return values, got: %s" % (len(meth._gw_out_args) + 1, len(retVal)))
else:
retVal = [retVal]
retVal.extend([None] * (len(meth._gw_out_args)-1))
retVal = tuple(retVal)
WriteFromOutTuple(retVal, meth._gw_out_args, argPtr)
return hr
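# Illustrative note (not part of the original module): the return-value
# convention handled by dispatch() above means a Python COM server method
# called through this universal gateway can either return a single value,
#
#     def MyMethod(self, in_arg):
#         return out_value              # HRESULT defaults to S_OK (0)
#
# or, to control the HRESULT explicitly, return a tuple whose first element
# is the HRESULT followed by *all* of the method's out arguments:
#
#     def MyMethod(self, in_arg):
#         return (winerror.S_FALSE, out1, out2)
#
# The method and argument names here are hypothetical; only the tuple shape
# is taken from the dispatch() code above.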
|
t-miyamae/teuthology
|
refs/heads/master
|
scripts/run.py
|
10
|
"""
usage: teuthology --help
teuthology --version
teuthology [options] [--] <config>...
Run ceph integration tests
positional arguments:
<config> one or more config files to read
optional arguments:
-h, --help show this help message and exit
-v, --verbose be more verbose
--version the current installed version of teuthology
-a DIR, --archive DIR path to archive results in
--description DESCRIPTION job description
--owner OWNER job owner
--lock lock machines for the duration of the run
--machine-type MACHINE_TYPE Type of machine to lock/run tests on.
--os-type OS_TYPE Distro/OS of machine to run test on.
--os-version OS_VERSION Distro/OS version of machine to run test on.
--block block until locking machines succeeds (use with --lock)
--name NAME name for this teuthology run
--suite-path SUITE_PATH Location of ceph-qa-suite on disk. If not specified,
it will be fetched
"""
import docopt
import teuthology.run
def main():
args = docopt.docopt(__doc__, version=teuthology.__version__)
teuthology.run.main(args)
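# Illustrative note (not part of the original script): docopt parses the usage
# text in the module docstring into a plain dict, so a hypothetical invocation
# such as ``teuthology -v --owner alice job.yaml`` would roughly yield
#
#     {'--verbose': True, '--owner': 'alice', '<config>': ['job.yaml'], ...}
#
# which teuthology.run.main() then consumes. The exact keys follow the usage
# string above; the values shown here are only an assumed example.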
|
smartsheet-platform/smartsheet-python-sdk
|
refs/heads/master
|
smartsheet/models/copy_or_move_row_directive.py
|
1
|
# pylint: disable=C0111,R0902,R0904,R0912,R0913,R0915,E1101
# Smartsheet Python SDK.
#
# Copyright 2018 Smartsheet.com, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import json
from .copy_or_move_row_destination import CopyOrMoveRowDestination
from ..types import *
from ..util import serialize
from ..util import deserialize
class CopyOrMoveRowDirective(object):
"""Smartsheet CopyOrMoveRowDirective data model."""
def __init__(self, props=None, base_obj=None):
"""Initialize the CopyOrMoveRowDirective model."""
self._base = None
if base_obj is not None:
self._base = base_obj
self._row_ids = TypedList(int)
self._to = TypedObject(CopyOrMoveRowDestination)
if props:
deserialize(self, props)
@property
def row_ids(self):
return self._row_ids
@row_ids.setter
def row_ids(self, value):
self._row_ids.load(value)
@property
def to(self):
return self._to.value
@to.setter
def to(self, value):
self._to.value = value
def to_dict(self):
return serialize(self)
def to_json(self):
return json.dumps(self.to_dict())
def __str__(self):
return self.to_json()
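# Minimal usage sketch (an assumption, not part of the SDK source): a directive
# is typically built and serialized before being passed to the rows copy/move
# endpoints, roughly like
#
#     directive = CopyOrMoveRowDirective()
#     directive.row_ids = [145417762563972, 8026717110462340]          # placeholder ids
#     directive.to = CopyOrMoveRowDestination({'sheet_id': 7960873114331012})  # placeholder sheet id
#     payload = directive.to_dict()
#
# The attribute names mirror the properties defined above; the numeric values
# are illustrative only.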
|
avanzosc/avanzosc6.1
|
refs/heads/master
|
avanzosc_tire_management/wizard/wizard_vehicle_locations.py
|
1
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2011-2012 Daniel (Avanzosc) <http://www.avanzosc.com>
# 28/03/2012
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
from tools.translate import _
import wizard
import pooler
import Image
import time
class wizard_create_locations (osv.osv_memory):
"""create locations"""
_name = 'wizard.create.locations'
_description = 'Vehicle location'
_columns = {
'company': fields.many2one ('res.company','Company', required=True),
'tire_stock': fields.many2one ('stock.location','Tire Stock'),
'retread': fields.many2one ('stock.location','Retread'),
'scratch': fields.many2one ('stock.location','Scratch'),
'waste': fields.many2one ('stock.location','Waste'),
'flatire': fields.many2one ('stock.location','Flat Tire'),
'schema4' : fields.binary('4 Tire Schema'),
'schema6' : fields.binary('6 Tire Schema'),
'schema8' : fields.binary('8 Tire Schema'),
}
def onchange_company (self, cr, uid, ids, company_id, context=None):
pool = pooler.get_pool(cr.dbname)
company_obj = pool.get('res.company')
company = company_obj.browse (cr,uid,company_id)
context = {'company' : company.id}
values ={}
if company.tire_stock:
values['tire_stock'] = company.tire_stock.id
if company.retread:
values['retread'] = company.retread.id
if company.scratch:
values['scratch'] = company.scratch.id
if company.waste:
values['waste'] = company.waste.id
if company.flatire:
values['flatire'] = company.flatire.id
if company.schema4:
values['schema4'] = company.schema4
if company.schema6:
values['schema6'] = company.schema6
if company.schema8:
values['schema8'] = company.schema8
values ['context'] = context
return {'value' : values}
def save_config (self,cr,uid,ids,data,context=None):
values ={}
pool = pooler.get_pool(cr.dbname)
company_obj = pool.get('res.company')
company= self.browse(cr,uid,ids[0]).company
values ['tire_stock'] = self.browse(cr,uid,ids[0]).tire_stock.id
values ['retread'] = self.browse(cr,uid,ids[0]).retread.id
values ['scratch'] = self.browse(cr,uid,ids[0]).scratch.id
values ['waste'] = self.browse(cr,uid,ids[0]).waste.id
values ['flatire'] = self.browse(cr,uid,ids[0]).flatire.id
if self.browse(cr,uid,ids[0]).schema4:
            values ['schema4'] = self.browse(cr,uid,ids[0]).schema4
else:
values ['schema4'] = ''
if self.browse(cr,uid,ids[0]).schema6:
            values ['schema6'] = self.browse(cr,uid,ids[0]).schema6
else:
values ['schema6'] = ''
if self.browse(cr,uid,ids[0]).schema8:
values ['schema8'] = self.browse(cr,uid,ids[0]).schema8
else:
values ['schema8'] = ''
company = company_obj.browse(cr,uid,company.id)
company_obj.write(cr,uid,company.id,values)
value = {
            'type': 'ir.actions.act_window_close',
}
return value
def create_locations(self, cr, uid, data, context):
res = {}
pool = pooler.get_pool(cr.dbname)
company_obj = pool.get('res.company')
company_list = company_obj.search(cr,uid,[])
company= company_obj.browse(cr,uid,company_list[0])
vehicle_obj = pool.get('fleet.vehicles')
stloc_obj = pool.get('stock.location')
vehicle_list = vehicle_obj.search(cr, uid, [('buslocat','=', None)])
for vehicle in vehicle_list:
vehi = vehicle_obj.browse(cr, uid, vehicle)
buslo_val={'name': vehi.name,'active':True ,'usage':'internal', 'chained_location_type':'none','chained_auto_packing' :'manual', 'chained_delay':'0'}
bus_location = stloc_obj.create(cr,uid,buslo_val)
if vehi.tires:
if vehi.tires > 6:
vehi_vals = {'axles' : '3axle', 'buslocat':bus_location, 'schema':''}
if company.schema8 :
vehi_vals ['schema'] = company.schema8
elif vehi.tires ==4:
vehi_vals = {'axles' : '2axle', 'buslocat':bus_location, 'schema':''}
if company.schema4 :
vehi_vals ['schema'] = company.schema4
else:
vehi_vals = {'axles' : '2axle', 'buslocat':bus_location, 'schema':''}
if company.schema6 :
vehi_vals ['schema'] = company.schema6
            for i in range (1,vehi.tires+1): # create a stock location for each tire
tire_name= vehi.name.strip('bus ') + '-' + str(i)
tire = {'name': tire_name,'active':True,'usage':'internal','location_id': bus_location, 'chained_location_type':'none','chained_auto_packing' :'manual', 'chained_delay':'0'}
tires = stloc_obj.create(cr,uid,tire)
vehicle_obj.write(cr,uid,vehi.id,vehi_vals)
return res
wizard_create_locations()
|
vnsofthe/odoo-dev
|
refs/heads/master
|
addons/base_action_rule/test_models.py
|
333
|
from openerp.osv import fields, osv
from openerp import api
AVAILABLE_STATES = [
('draft', 'New'),
('cancel', 'Cancelled'),
('open', 'In Progress'),
('pending', 'Pending'),
('done', 'Closed')
]
class lead_test(osv.Model):
_name = "base.action.rule.lead.test"
_columns = {
'name': fields.char('Subject', required=True, select=1),
'user_id': fields.many2one('res.users', 'Responsible'),
'state': fields.selection(AVAILABLE_STATES, string="Status", readonly=True),
'active': fields.boolean('Active', required=False),
'partner_id': fields.many2one('res.partner', 'Partner', ondelete='set null'),
'date_action_last': fields.datetime('Last Action', readonly=1),
}
_defaults = {
'state' : 'draft',
'active' : True,
}
@api.cr_uid_ids_context
def message_post(self, cr, uid, thread_id, body='', subject=None, type='notification', subtype=None, parent_id=False, attachments=None, context=None, **kwargs):
pass
def message_subscribe(self, cr, uid, ids, partner_ids, subtype_ids=None, context=None):
pass
|
xen0l/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/cloudwatchlogs_log_group.py
|
20
|
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cloudwatchlogs_log_group
short_description: create or delete log_group in CloudWatchLogs
notes:
- for details of the parameters and returns see U(http://boto3.readthedocs.io/en/latest/reference/services/logs.html)
description:
- Create or delete log_group in CloudWatchLogs.
version_added: "2.5"
author:
- Willian Ricardo(@willricardo) <willricardo@gmail.com>
requirements: [ json, botocore, boto3 ]
options:
state:
description:
        - Whether the log group should be present or absent.
choices: ["present", "absent"]
default: present
required: false
log_group_name:
description:
- The name of the log group.
required: true
kms_key_id:
description:
- The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
required: false
tags:
description:
- The key-value pairs to use for the tags.
required: false
retention:
description:
- "The number of days to retain the log events in the specified log group.
Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]"
required: false
overwrite:
description:
- Whether an existing log group should be overwritten on create.
default: false
required: false
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- cloudwatchlogs_log_group:
log_group_name: test-log-group
- cloudwatchlogs_log_group:
state: present
log_group_name: test-log-group
tags: { "Name": "test-log-group", "Env" : "QA" }
- cloudwatchlogs_log_group:
state: present
log_group_name: test-log-group
tags: { "Name": "test-log-group", "Env" : "QA" }
kms_key_id: arn:aws:kms:region:account-id:key/key-id
- cloudwatchlogs_log_group:
state: absent
log_group_name: test-log-group
'''
RETURN = '''
log_groups:
    description: Return the list of complex objects representing log groups
returned: success
type: complex
contains:
log_group_name:
description: The name of the log group.
returned: always
type: string
creation_time:
description: The creation time of the log group.
returned: always
type: integer
retention_in_days:
description: The number of days to retain the log events in the specified log group.
returned: always
type: integer
metric_filter_count:
description: The number of metric filters.
returned: always
type: integer
arn:
description: The Amazon Resource Name (ARN) of the log group.
returned: always
type: string
stored_bytes:
description: The number of bytes stored.
returned: always
type: string
kms_key_id:
description: The Amazon Resource Name (ARN) of the CMK to use when encrypting log data.
returned: always
type: string
'''
import traceback
from ansible.module_utils._text import to_native
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO3, camel_dict_to_snake_dict, boto3_conn, ec2_argument_spec, get_aws_connection_info
try:
import botocore
except ImportError:
pass # will be detected by imported HAS_BOTO3
def create_log_group(client, log_group_name, kms_key_id, tags, retention, module):
request = {'logGroupName': log_group_name}
if kms_key_id:
request['kmsKeyId'] = kms_key_id
if tags:
request['tags'] = tags
try:
client.create_log_group(**request)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Unable to create log group: {0}".format(to_native(e)),
exception=traceback.format_exc())
if retention:
input_retention_policy(client=client,
log_group_name=log_group_name,
retention=retention, module=module)
desc_log_group = describe_log_group(client=client,
log_group_name=log_group_name,
module=module)
if 'logGroups' in desc_log_group:
for i in desc_log_group['logGroups']:
if log_group_name == i['logGroupName']:
return i
module.fail_json(msg="The aws CloudWatchLogs log group was not created. \n please try again!")
def input_retention_policy(client, log_group_name, retention, module):
try:
        permitted_values = [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]
        if retention in permitted_values:
response = client.put_retention_policy(logGroupName=log_group_name,
retentionInDays=retention)
else:
delete_log_group(client=client, log_group_name=log_group_name, module=module)
module.fail_json(msg="Invalid retention value. Valid values are: [1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 3653]")
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Unable to put retention policy for log group {0}: {1}".format(log_group_name, to_native(e)),
exception=traceback.format_exc())
def delete_log_group(client, log_group_name, module):
desc_log_group = describe_log_group(client=client,
log_group_name=log_group_name,
module=module)
try:
if 'logGroups' in desc_log_group:
for i in desc_log_group['logGroups']:
if log_group_name == i['logGroupName']:
client.delete_log_group(logGroupName=log_group_name)
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Unable to delete log group {0}: {1}".format(log_group_name, to_native(e)),
exception=traceback.format_exc())
def describe_log_group(client, log_group_name, module):
try:
desc_log_group = client.describe_log_groups(logGroupNamePrefix=log_group_name)
return desc_log_group
except botocore.exceptions.ClientError as e:
module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
exception=traceback.format_exc(), **camel_dict_to_snake_dict(e.response))
except botocore.exceptions.BotoCoreError as e:
module.fail_json(msg="Unable to describe log group {0}: {1}".format(log_group_name, to_native(e)),
exception=traceback.format_exc())
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
log_group_name=dict(required=True, type='str'),
state=dict(choices=['present', 'absent'],
default='present'),
kms_key_id=dict(required=False, type='str'),
tags=dict(required=False, type='dict'),
retention=dict(required=False, type='int'),
overwrite=dict(required=False, type='bool', default=False)
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
logs = boto3_conn(module, conn_type='client', resource='logs', region=region, endpoint=ec2_url, **aws_connect_kwargs)
state = module.params.get('state')
changed = False
# Determine if the log group exists
desc_log_group = describe_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module)
found_log_group = {}
for i in desc_log_group.get('logGroups', []):
if module.params['log_group_name'] == i['logGroupName']:
found_log_group = i
break
if state == 'present':
if found_log_group and module.params['overwrite'] is True:
changed = True
delete_log_group(client=logs, log_group_name=module.params['log_group_name'], module=module)
found_log_group = create_log_group(client=logs,
log_group_name=module.params['log_group_name'],
kms_key_id=module.params['kms_key_id'],
tags=module.params['tags'],
retention=module.params['retention'],
module=module)
elif not found_log_group:
changed = True
found_log_group = create_log_group(client=logs,
log_group_name=module.params['log_group_name'],
kms_key_id=module.params['kms_key_id'],
tags=module.params['tags'],
retention=module.params['retention'],
module=module)
elif found_log_group:
            if module.params['retention'] is not None and module.params['retention'] != found_log_group.get('retentionInDays'):
changed = True
input_retention_policy(client=logs,
log_group_name=module.params['log_group_name'],
retention=module.params['retention'],
module=module)
found_log_group['retentionInDays'] = module.params['retention']
module.exit_json(changed=changed, **camel_dict_to_snake_dict(found_log_group))
elif state == 'absent':
if found_log_group:
changed = True
delete_log_group(client=logs,
log_group_name=module.params['log_group_name'],
module=module)
module.exit_json(changed=changed)
if __name__ == '__main__':
main()
|
dezgeg/debbindiff
|
refs/heads/master
|
debbindiff/presenters/html.py
|
1
|
# -*- coding: utf-8 -*-
#
# debbindiff: highlight differences between two builds of Debian packages
#
# Copyright © 2014-2015 Jérémy Bobbio <lunar@debian.org>
# © 2015 Reiner Herrmann <reiner@reiner-h.de>
# © 2012-2013 Olivier Matz <zer0@droids-corp.org>
# © 2012 Alan De Smet <adesmet@cs.wisc.edu>
# © 2012 Sergey Satskiy <sergey.satskiy@gmail.com>
# © 2012 scito <info@scito.ch>
#
#
# debbindiff is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# debbindiff is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with debbindiff. If not, see <http://www.gnu.org/licenses/>.
#
#
# Most of the code is borrowed from diff2html.py available at:
# http://git.droids-corp.org/?p=diff2html.git
#
# Part of the code is inspired by diff2html.rb from
# Dave Burt <dave (at) burt.id.au> (mainly for html theme)
#
from __future__ import print_function
import os.path
import cgi
import re
import subprocess
import sys
from tempfile import NamedTemporaryFile
from xml.sax.saxutils import escape
from debbindiff import logger, VERSION
from debbindiff.comparators.utils import make_temp_directory
# minimum line size, we add a zero-sized breakable space every
# LINESIZE characters
LINESIZE = 20
MAX_LINE_SIZE = 1024
TABSIZE = 8
# Characters we're willing to word wrap on
WORDBREAK = " \t;.,/):-"
DIFFON = "\x01"
DIFFOFF = "\x02"
HEADER = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="generator" content="debbindiff">
<title>%(title)s</title>
<style>
body {
background: white;
color: black;
}
.footer {
font-size: small;
}
.difference {
border: outset #888 1px;
background-color:rgba(0,0,0,.1);
padding: 0.5em;
margin: 0.5em 0;
}
.difference table {
table-layout: fixed;
width: 100%%;
border: 0;
}
.difference th,
.difference td {
border: 0;
}
table.diff {
border: 0px;
border-collapse:collapse;
font-size:0.75em;
font-family: Lucida Console, monospace;
}
td.line {
color:#8080a0
}
th {
background: black;
color: white
}
tr.diffunmodified td {
background: #D0D0E0
}
tr.diffhunk td {
background: #A0A0A0
}
tr.diffadded td {
background: #CCFFCC
}
tr.diffdeleted td {
background: #FFCCCC
}
tr.diffchanged td {
background: #FFFFA0
}
span.diffchanged2 {
background: #E0C880
}
span.diffponct {
color: #B08080
}
.comment {
font-style: italic;
}
.source {
font-weight: bold;
}
.error {
border: solid black 1px;
background: red;
color: white;
padding: 0.2em;
}
.anchor {
margin-left: 0.5em;
font-size: 80%%;
color: #333;
text-decoration: none;
display: none;
}
.diffheader:hover .anchor {
display: inline;
}
</style>
%(css_link)s
</head>
<body>
"""
FOOTER = """
<div class="footer">Generated by debbindiff %(version)s</div>
</body>
</html>
"""
DEFAULT_MAX_PAGE_SIZE = 2000 * 2 ** 10 # 2000 kB
MAX_DIFF_BLOCK_LINES = 50
class PrintLimitReached(Exception):
pass
def create_limited_print_func(print_func, max_page_size):
def limited_print_func(s, force=False):
if not hasattr(limited_print_func, 'char_count'):
limited_print_func.char_count = 0
print_func(s)
limited_print_func.char_count += len(s)
if not force and limited_print_func.char_count >= max_page_size:
raise PrintLimitReached()
return limited_print_func
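# Hedged usage sketch (not in the original file): output_html() below wires this
# wrapper up roughly as
#
#     print_func = create_limited_print_func(print, max_page_size)
#     try:
#         print_func(some_chunk)            # a non-forced call that pushes the
#     except PrintLimitReached:             # cumulative size past the limit raises
#         print_func(u"...", force=True)    # forced calls always print, never raise
#
# ``some_chunk`` is a placeholder; the chunk that exceeds the limit is still
# written before the exception is raised, as the closure above shows.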
buf = []
add_cpt, del_cpt = 0, 0
line1, line2 = 0, 0
hunk_off1, hunk_size1, hunk_off2, hunk_size2 = 0, 0, 0, 0
def sane(x):
r = ""
for i in x:
j = ord(i)
if i not in ['\t', '\n'] and (j < 32):
r = r + "."
else:
r = r + i
return r
def linediff(s, t):
'''
Original line diff algorithm of diff2html. It's character based.
'''
if len(s):
s = unicode(reduce(lambda x, y:x+y, [ sane(c) for c in s ]))
if len(t):
t = unicode(reduce(lambda x, y:x+y, [ sane(c) for c in t ]))
m, n = len(s), len(t)
d = [[(0, 0) for i in range(n+1)] for i in range(m+1)]
d[0][0] = (0, (0, 0))
for i in range(m+1)[1:]:
d[i][0] = (i,(i-1, 0))
for j in range(n+1)[1:]:
d[0][j] = (j,(0, j-1))
for i in range(m+1)[1:]:
for j in range(n+1)[1:]:
if s[i-1] == t[j-1]:
cost = 0
else:
cost = 1
d[i][j] = min((d[i-1][j][0] + 1, (i-1, j)),
(d[i][j-1][0] + 1, (i, j-1)),
(d[i-1][j-1][0] + cost, (i-1, j-1)))
l = []
coord = (m, n)
while coord != (0, 0):
l.insert(0, coord)
x, y = coord
coord = d[x][y][1]
l1 = []
l2 = []
for coord in l:
cx, cy = coord
child_val = d[cx][cy][0]
father_coord = d[cx][cy][1]
fx, fy = father_coord
father_val = d[fx][fy][0]
diff = (cx-fx, cy-fy)
if diff == (0, 1):
l1.append("")
l2.append(DIFFON + t[fy] + DIFFOFF)
elif diff == (1, 0):
l1.append(DIFFON + s[fx] + DIFFOFF)
l2.append("")
elif child_val-father_val == 1:
l1.append(DIFFON + s[fx] + DIFFOFF)
l2.append(DIFFON + t[fy] + DIFFOFF)
else:
l1.append(s[fx])
l2.append(t[fy])
r1, r2 = (reduce(lambda x, y:x+y, l1), reduce(lambda x, y:x+y, l2))
return r1, r2
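# Worked example (illustrative only, not from the original source): for the
# hypothetical inputs s = u"colour" and t = u"color", the character-based DP
# above finds a single deletion and wraps the differing character in the
# DIFFON/DIFFOFF markers that convert() later turns into
# <span class="diffchanged2"> highlights:
#
#     r1, r2 = linediff(u"colour", u"color")
#     # r1 == u"colo" + DIFFON + u"u" + DIFFOFF + u"r"
#     # r2 == u"color"   (nothing is inserted where the deleted "u" was)
#
# The exact placement of the markers depends on which minimal edit path the
# dynamic programming table selects.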
def convert(s, ponct=0):
i = 0
t = u""
for c in s:
# used by diffs
if c == DIFFON:
t += u'<span class="diffchanged2">'
elif c == DIFFOFF:
t += u"</span>"
# special highlighted chars
elif c == "\t" and ponct == 1:
n = TABSIZE-(i%TABSIZE)
if n == 0:
n = TABSIZE
t += (u'<span class="diffponct">»</span>'+' '*(n-1))
elif c == " " and ponct == 1:
t += u'<span class="diffponct">·</span>'
elif c == "\n" and ponct == 1:
t += u'<br/><span class="diffponct">\</span>'
elif ord(c) < 32:
conv = u"\\x%x" % ord(c)
t += u"<em>%s</em>" % conv
i += len(conv)
else:
t += cgi.escape(c)
i += 1
if WORDBREAK.count(c) == 1:
t += u'​'
i = 0
if i > LINESIZE:
i = 0
t += u"​"
return t
def output_hunk(print_func):
print_func(u'<tr class="diffhunk"><td colspan="2">Offset %d, %d lines modified</td>'%(hunk_off1, hunk_size1))
print_func(u'<td colspan="2">Offset %d, %d lines modified</td></tr>\n'%(hunk_off2, hunk_size2))
def output_line(print_func, s1, s2):
global line1
global line2
orig1 = s1
orig2 = s2
if s1 and len(s1) > MAX_LINE_SIZE:
s1 = s1[:MAX_LINE_SIZE] + u" ✂"
if s2 and len(s2) > MAX_LINE_SIZE:
s2 = s2[:MAX_LINE_SIZE] + u" ✂"
if s1 == None and s2 == None:
type_name = "unmodified"
elif s1 == "" and s2 == "":
type_name = "unmodified"
elif s1 == None or s1 == "":
type_name = "added"
elif s2 == None or s2 == "":
type_name = "deleted"
elif orig1 == orig2 and not s1.endswith('lines removed ]') and not s2.endswith('lines removed ]'):
type_name = "unmodified"
else:
type_name = "changed"
s1, s2 = linediff(s1, s2)
print_func(u'<tr class="diff%s">' % type_name)
try:
if s1 is not None:
print_func(u'<td class="diffline">%d </td>' % line1)
print_func(u'<td class="diffpresent">')
print_func(convert(s1, ponct=1))
print_func(u'</td>')
else:
s1 = ""
print_func(u'<td colspan="2"> </td>')
if s2 is not None:
print_func(u'<td class="diffline">%d </td>' % line2)
print_func(u'<td class="diffpresent">')
print_func(convert(s2, ponct=1))
print_func(u'</td>')
else:
s2 = ""
print_func(u'<td colspan="2"> </td>')
finally:
print_func(u"</tr>\n", force=True)
m = orig1 and re.match(r"^\[ (\d+) lines removed \]$", orig1)
if m:
line1 += int(m.group(1))
elif orig1 is not None:
line1 += 1
m = orig2 and re.match(r"^\[ (\d+) lines removed \]$", orig2)
if m:
line2 += int(m.group(1))
elif orig2 is not None:
line2 += 1
def empty_buffer(print_func):
global buf
global add_cpt
global del_cpt
if del_cpt == 0 or add_cpt == 0:
for l in buf:
output_line(print_func, l[0], l[1])
elif del_cpt != 0 and add_cpt != 0:
l0, l1 = [], []
for l in buf:
if l[0] != None:
l0.append(l[0])
if l[1] != None:
l1.append(l[1])
max_len = (len(l0) > len(l1)) and len(l0) or len(l1)
for i in range(max_len):
s0, s1 = "", ""
if i < len(l0):
s0 = l0[i]
if i < len(l1):
s1 = l1[i]
output_line(print_func, s0, s1)
add_cpt, del_cpt = 0, 0
buf = []
def output_unified_diff(print_func, unified_diff):
global add_cpt, del_cpt
global line1, line2
global hunk_off1, hunk_size1, hunk_off2, hunk_size2
print_func(u'<table class="diff">\n')
try:
        print_func(u'<colgroup><col style="width: 3em;"/><col style="width: 99%;"/>\n')
        print_func(u'<col style="width: 3em;"/><col style="width: 99%;"/></colgroup>\n')
for l in unified_diff.splitlines():
m = re.match(r'^--- ([^\s]*)', l)
if m:
empty_buffer(print_func)
continue
m = re.match(r'^\+\+\+ ([^\s]*)', l)
if m:
empty_buffer(print_func)
continue
m = re.match(r"@@ -(\d+),?(\d*) \+(\d+),?(\d*)", l)
if m:
empty_buffer(print_func)
hunk_data = map(lambda x:x=="" and 1 or int(x), m.groups())
hunk_off1, hunk_size1, hunk_off2, hunk_size2 = hunk_data
line1, line2 = hunk_off1, hunk_off2
output_hunk(print_func)
continue
if re.match(r'^\[', l):
empty_buffer(print_func)
print_func(u'<td colspan="2">%s</td>\n' % l)
if re.match(r"^\\ No newline", l):
if hunk_size2 == 0:
buf[-1] = (buf[-1][0], buf[-1][1] + '\n' + l[2:])
else:
buf[-1] = (buf[-1][0] + '\n' + l[2:], buf[-1][1])
continue
if hunk_size1 <= 0 and hunk_size2 <= 0:
empty_buffer(print_func)
continue
m = re.match(r"^\+\[ (\d+) lines removed \]$", l)
if m:
add_cpt += int(m.group(1))
hunk_size2 -= int(m.group(1))
buf.append((None, l[1:]))
continue
if re.match(r"^\+", l):
add_cpt += 1
hunk_size2 -= 1
buf.append((None, l[1:]))
continue
m = re.match(r"^-\[ (\d+) lines removed \]$", l)
if m:
del_cpt += int(m.group(1))
hunk_size1 -= int(m.group(1))
buf.append((l[1:], None))
continue
if re.match(r"^-", l):
del_cpt += 1
hunk_size1 -= 1
buf.append((l[1:], None))
continue
if re.match(r"^ ", l) and hunk_size1 and hunk_size2:
empty_buffer(print_func)
hunk_size1 -= 1
hunk_size2 -= 1
buf.append((l[1:], l[1:]))
continue
empty_buffer(print_func)
empty_buffer(print_func)
finally:
print_func(u"</table>", force=True)
def output_difference(difference, print_func, parents):
logger.debug('html output for %s', difference.source1)
sources = parents + [difference.source1]
print_func(u"<div class='difference'>")
try:
print_func(u"<div class='diffheader'>")
if difference.source1 == difference.source2:
print_func(u"<div><span class='source'>%s<span>"
% escape(difference.source1))
else:
print_func(u"<div><span class='source'>%s</span> vs.</div>"
% escape(difference.source1))
print_func(u"<div><span class='source'>%s</span>"
% escape(difference.source2))
anchor = '/'.join(sources[1:])
print_func(u" <a class='anchor' href='#%s' name='%s'>¶</a>" % (anchor, anchor))
print_func(u"</div>")
if difference.comment:
print_func(u"<div class='comment'>%s</div>"
% escape(difference.comment).replace('\n', '<br />'))
print_func(u"</div>")
if difference.unified_diff:
output_unified_diff(print_func, difference.unified_diff)
for detail in difference.details:
output_difference(detail, print_func, sources)
except PrintLimitReached:
logger.debug('print limit reached')
raise
finally:
print_func(u"</div>", force=True)
def output_header(css_url, print_func):
if css_url:
css_link = '<link href="%s" type="text/css" rel="stylesheet" />' % css_url
else:
css_link = ''
print_func(HEADER % {'title': escape(' '.join(sys.argv)),
'css_link': css_link,
})
def output_html(difference, css_url=None, print_func=None, max_page_size=None):
if print_func is None:
print_func = print
if max_page_size is None:
max_page_size = DEFAULT_MAX_PAGE_SIZE
print_func = create_limited_print_func(print_func, max_page_size)
try:
output_header(css_url, print_func)
output_difference(difference, print_func, [])
except PrintLimitReached:
logger.debug('print limit reached')
print_func(u"<div class='error'>Max output size reached.</div>",
force=True)
print_func(FOOTER % {'version': VERSION}, force=True)
|
htomeht/euler
|
refs/heads/master
|
util/primes.py
|
1
|
#!/usr/bin/env python
import math
class Sieve:
def __init__(self, size):
self._sieve = [False] * (size)
def flip(self, n):
self._sieve[n-1] = not self._sieve[n-1]
def reset(self, n):
self._sieve[n-1] = False
def isPrime(self, n):
return self._sieve[n-1]
class Primes:
def __init__(self, limit):
self.limit = limit
self.roof = int(math.ceil(math.sqrt(limit)))
self.primes = [2,3,5]
self.sieve = Sieve(limit)
def genPrimes(self):
for i in range(1,self.roof):
for j in range(1, self.roof):
n = 4 * i**2 + j**2
if ( n <= self.limit and (n % 12 == 1 or n % 12 == 5)):
self.sieve.flip(n)
n = 3 * i**2 + j**2
if (n <= self.limit and ( n % 12 == 7)):
self.sieve.flip(n)
n = 3 * i**2 - j**2
if ((i > j) and (n <= self.limit) and (n % 12 == 11)):
self.sieve.flip(n)
# Mark multiples
for r in range(5, self.roof):
if self.sieve.isPrime(r):
for i in range(r**2, self.limit, r**2):
self.sieve.reset(i)
for a in range(7, self.limit):
if (self.sieve.isPrime(a)):
self.primes.append(a)
def getPrimes(self):
return self.primes
def getPrime(self, n):
return self.primes[n-1]
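# Hedged usage sketch (not part of the original module): the two classes above
# implement a sieve-of-Atkin style prime generator, so a typical caller would
# look roughly like
#
#     if __name__ == '__main__':
#         p = Primes(100)         # sieve limit; 100 is an assumed example value
#         p.genPrimes()           # populate the sieve and the primes list
#         print(p.getPrimes())    # -> [2, 3, 5, 7, 11, ...]
#         print(p.getPrime(4))    # 4th prime -> 7
#
# getPrime() is 1-indexed, as its implementation above shows.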
|
Cinntax/home-assistant
|
refs/heads/dev
|
homeassistant/components/upnp/const.py
|
7
|
"""Constants for the IGD component."""
import logging
CONF_ENABLE_PORT_MAPPING = "port_mapping"
CONF_ENABLE_SENSORS = "sensors"
CONF_HASS = "hass"
CONF_LOCAL_IP = "local_ip"
CONF_PORTS = "ports"
DOMAIN = "upnp"
LOGGER = logging.getLogger(__package__)
SIGNAL_REMOVE_SENSOR = "upnp_remove_sensor"
|
jorik041/weevely3
|
refs/heads/master
|
testsuite/test_shell_sh.py
|
14
|
from testsuite.base_test import BaseTest
from core.weexceptions import ArgparseError
from core.vectors import PhpCode
from core.vectors import Os
from core import modules
from core.sessions import SessionURL
from core import messages
import logging
import os
class SystemInfo(BaseTest):
def setUp(self):
self.session = SessionURL(self.url, self.password, volatile = True)
modules.load_modules(self.session)
self.run_argv = modules.loaded['shell_sh'].run_argv
def _spoil_vectors_but(self, vector_safe_name):
# Spoil all the module sessions but the safe one
for i in range(0, len(modules.loaded['shell_sh'].vectors)):
name = modules.loaded['shell_sh'].vectors[i].name
payload = modules.loaded['shell_sh'].vectors[i].arguments[0]
if name != vector_safe_name:
modules.loaded['shell_sh'].vectors[i] = PhpCode('\'"%s' % payload, name)
def test_run_unless(self):
vector_safe_name = 'proc_open'
self._spoil_vectors_but(vector_safe_name)
# Check correctness of execution
self.assertEqual(self.run_argv(["echo -n 1"]), "1");
# Check stored vector
self.assertEqual(self.session['shell_sh']['stored_args']['vector'], vector_safe_name)
def test_param_vector(self):
vector_safe_name = 'proc_open'
# Check correctness of execution
self.assertEqual(self.run_argv(["-vector", vector_safe_name, "echo -n 1"]), "1");
# Check stored vector
self.assertEqual(self.session['shell_sh']['stored_args']['vector'], vector_safe_name)
def test_vector_one_os(self):
bogus_vector = 'bogus_win'
# Add a bogus Os.WIN vector
modules.loaded['shell_sh'].vectors.append(PhpCode("echo(1);", name=bogus_vector, target=Os.WIN))
        # Check that forcing the bogus vector name raises ArgparseError
self.assertRaises(ArgparseError, self.run_argv, ["-vector", bogus_vector, "echo 1"]);
def test_vector_all_os(self):
bogus_vector = 'bogus_win'
# Add a bogus Os.WIN vector
modules.loaded['shell_sh'].vectors.append(PhpCode("echo(1);", name=bogus_vector, target=Os.WIN))
# Spoil all vectors but bogus_win
self._spoil_vectors_but(bogus_vector)
# Check if looping all vectors still returns None
self.assertIsNone(self.run_argv(["echo 1"]), None);
|
MattRijk/django-ecomsite
|
refs/heads/master
|
lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/__main__.py
|
200
|
'''
support 'python -m charade <file1> [file2] ...' package execution syntax (2.7+)
'''
from charade import charade_cli
charade_cli()
|
chbrun/behavui
|
refs/heads/master
|
behavui/features/config.py
|
1
|
FEATURE_NB_MAX=5
|
yjxtogo/horizon
|
refs/heads/master
|
openstack_dashboard/dashboards/project/data_processing/nodegroup_templates/tables.py
|
5
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.template import defaultfilters as filters
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import tables
from openstack_dashboard.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
class NodeGroupTemplatesFilterAction(tables.FilterAction):
filter_type = "server"
filter_choices = (('name', _("Name"), True),
('plugin', _("Plugin"), True),
('hadoop_version', _("Version"), True))
class CreateNodegroupTemplate(tables.LinkAction):
name = "create"
verbose_name = _("Create Template")
url = ("horizon:project:data_processing.nodegroup_templates:"
"create-nodegroup-template")
classes = ("ajax-modal", "create-nodegrouptemplate-btn")
icon = "plus"
class ConfigureNodegroupTemplate(tables.LinkAction):
name = "configure"
verbose_name = _("Configure Template")
url = ("horizon:project:data_processing.nodegroup_templates:"
"configure-nodegroup-template")
classes = ("ajax-modal", "configure-nodegrouptemplate-btn")
icon = "plus"
attrs = {"style": "display: none"}
class CopyTemplate(tables.LinkAction):
name = "copy"
verbose_name = _("Copy Template")
url = "horizon:project:data_processing.nodegroup_templates:copy"
classes = ("ajax-modal", )
class EditTemplate(tables.LinkAction):
name = "edit"
verbose_name = _("Edit Template")
url = "horizon:project:data_processing.nodegroup_templates:edit"
classes = ("ajax-modal", )
class DeleteTemplate(tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Template",
u"Delete Templates",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Deleted Template",
u"Deleted Templates",
count
)
def delete(self, request, template_id):
saharaclient.nodegroup_template_delete(request, template_id)
class NodegroupTemplatesTable(tables.DataTable):
name = tables.Column(
"name",
verbose_name=_("Name"),
link="horizon:project:data_processing.nodegroup_templates:details")
plugin_name = tables.Column("plugin_name",
verbose_name=_("Plugin"))
hadoop_version = tables.Column("hadoop_version",
verbose_name=_("Version"))
node_processes = tables.Column("node_processes",
verbose_name=_("Node Processes"),
wrap_list=True,
filters=(filters.unordered_list,))
class Meta(object):
name = "nodegroup_templates"
verbose_name = _("Node Group Templates")
table_actions = (CreateNodegroupTemplate,
ConfigureNodegroupTemplate,
DeleteTemplate,
NodeGroupTemplatesFilterAction,)
row_actions = (EditTemplate,
CopyTemplate,
DeleteTemplate)
|
rafalo1333/kivy
|
refs/heads/master
|
kivy/tests/test_uix_anchorlayout.py
|
21
|
'''
Anchor layout unit test
=======================
'''
from kivy.tests.common import GraphicUnitTest
class UIXAnchorLayoutTestcase(GraphicUnitTest):
def box(self, r, g, b):
from kivy.uix.widget import Widget
from kivy.graphics import Color, Rectangle
wid = Widget(size_hint=(None, None), size=(100, 100))
with wid.canvas:
Color(r, g, b)
r = Rectangle(pos=wid.pos, size=wid.size)
def linksp(instance, *largs):
r.pos = instance.pos
r.size = instance.size
wid.bind(pos=linksp, size=linksp)
return wid
def test_anchorlayout_default(self):
from kivy.uix.anchorlayout import AnchorLayout
r = self.render
b = self.box
layout = AnchorLayout()
layout.add_widget(b(1, 0, 0))
r(layout)
def test_anchorlayout_x(self):
from kivy.uix.anchorlayout import AnchorLayout
r = self.render
b = self.box
layout = AnchorLayout(anchor_x='left')
layout.add_widget(b(1, 0, 0))
r(layout)
layout = AnchorLayout(anchor_x='center')
layout.add_widget(b(1, 0, 0))
r(layout)
layout = AnchorLayout(anchor_x='right')
layout.add_widget(b(1, 0, 0))
r(layout)
def test_anchorlayout_y(self):
from kivy.uix.anchorlayout import AnchorLayout
r = self.render
b = self.box
layout = AnchorLayout(anchor_y='bottom')
layout.add_widget(b(1, 0, 0))
r(layout)
layout = AnchorLayout(anchor_y='center')
layout.add_widget(b(1, 0, 0))
r(layout)
layout = AnchorLayout(anchor_y='top')
layout.add_widget(b(1, 0, 0))
r(layout)
def test_anchor_layout_xy(self):
from kivy.uix.anchorlayout import AnchorLayout
r = self.render
b = self.box
layout = AnchorLayout(anchor_y='bottom', anchor_x='left')
layout.add_widget(b(1, 0, 0))
r(layout)
layout = AnchorLayout(anchor_y='top', anchor_x='right')
layout.add_widget(b(1, 0, 0))
r(layout)
|
GoogleCloudPlatform/solutions-cloud-orchestrate
|
refs/heads/main
|
cli/src/orchestrate/commands/broker/machines/unassign.py
|
1
|
# python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Unassign machines from users in connection broker.
Usage: orchestrate broker machines unassign <DEPLOYMENT> <MACHINE>
"""
import logging
import optparse
from orchestrate import base
from orchestrate.systems.teradici import camapi
log = logging.getLogger(__name__)
class Command(base.OrchestrateCommand):
"""Unassign machines from users in connection broker.
"""
@property
def description(self):
return """
Unassign all users from the given machines in the connection broker.
Usage: orchestrate broker machines unassign <DEPLOYMENT> <MACHINE1> [ <MACHINE2>[ ...]]
""".lstrip()
@property
def defaults(self):
"""Returns default option values."""
return dict(
deployment=None,
)
@property
def options(self):
"""Returns command parser options."""
options = [
optparse.Option('--deployment', help=(
'Deployment name. Uses project name by default if not explicitly'
' provided')),
]
return options
def run(self, options, arguments):
"""Executes command.
Args:
options: Command-line options.
arguments: Command-line positional arguments
Returns:
True if successful. False, otherwise.
"""
log.debug('broker machines unassign %(options)s %(arguments)s', dict(
options=options, arguments=arguments))
if len(arguments) < 1:
log.error('Expected at least one machine name.')
return False
machine_names = arguments
deployment_name = options.deployment or options.project
    return self.unassign(options.project, deployment_name, machine_names)
def unassign(self, project, deployment_name, machine_names):
"""Unassign all users from given machines.
Args:
project: GCP project.
deployment_name: Deployment.
machine_names: Machine names.
Returns:
True if it succeeded. False otherwise.
"""
log.debug('Locating deployment: %s', deployment_name)
cam = camapi.CloudAccessManager(project=project,
scope=camapi.Scope.DEPLOYMENT)
deployment = cam.deployments.get(deployment_name)
# Get machine ids
all_machines = []
for machine_name in machine_names:
log.debug('Locating machine in CAM: %s', machine_name)
machines = cam.machines.get(deployment, machineName=machine_name)
if machines:
machine = machines[0]
log.debug('Found machine %s with ID %s', machine_name,
machine['machineId'])
all_machines.append(machine)
else:
message = (
'Could not locate machine {machine_name}. Check whether it exists'
' and that it was assigned to users. Skipping for now.'
).format(machine_name=machine_name)
log.warning(message)
# Find all entitlements for all machine ids collected and remove them
for machine in all_machines:
log.info(
'Locating entitlements for machine %(machineName)s %(machineId)s',
machine)
entitlements = cam.machines.entitlements.get(
deployment, machineName=machine['machineName'])
for entitlement in entitlements:
log.info('Removing entitlement %(entitlementId)s', entitlement)
cam.machines.entitlements.delete(entitlement)
return True
|
Work4Labs/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/tests/regressiontests/utils/module_loading.py
|
38
|
import os
import sys
import unittest
from zipimport import zipimporter
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
class DefaultLoader(unittest.TestCase):
def test_loader(self):
"Normal module existence can be tested"
test_module = import_module('regressiontests.utils.test_module')
# An importable child
self.assertTrue(module_has_submodule(test_module, 'good_module'))
mod = import_module('regressiontests.utils.test_module.good_module')
self.assertEqual(mod.content, 'Good Module')
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(test_module, 'bad_module'))
self.assertRaises(ImportError, import_module, 'regressiontests.utils.test_module.bad_module')
# A child that doesn't exist
self.assertFalse(module_has_submodule(test_module, 'no_such_module'))
self.assertRaises(ImportError, import_module, 'regressiontests.utils.test_module.no_such_module')
# Don't be confused by caching of import misses
import types # causes attempted import of regressiontests.utils.types
self.assertFalse(module_has_submodule(sys.modules['regressiontests.utils'], 'types'))
class EggLoader(unittest.TestCase):
def setUp(self):
self.old_path = sys.path[:]
self.egg_dir = '%s/eggs' % os.path.dirname(__file__)
def tearDown(self):
sys.path = self.old_path
sys.path_importer_cache.clear()
sys.modules.pop('egg_module.sub1.sub2.bad_module', None)
sys.modules.pop('egg_module.sub1.sub2.good_module', None)
sys.modules.pop('egg_module.sub1.sub2', None)
sys.modules.pop('egg_module.sub1', None)
sys.modules.pop('egg_module.bad_module', None)
sys.modules.pop('egg_module.good_module', None)
sys.modules.pop('egg_module', None)
def test_shallow_loader(self):
"Module existence can be tested inside eggs"
egg_name = '%s/test_egg.egg' % self.egg_dir
sys.path.append(egg_name)
egg_module = import_module('egg_module')
# An importable child
self.assertTrue(module_has_submodule(egg_module, 'good_module'))
mod = import_module('egg_module.good_module')
self.assertEqual(mod.content, 'Good Module')
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(egg_module, 'bad_module'))
self.assertRaises(ImportError, import_module, 'egg_module.bad_module')
# A child that doesn't exist
self.assertFalse(module_has_submodule(egg_module, 'no_such_module'))
self.assertRaises(ImportError, import_module, 'egg_module.no_such_module')
def test_deep_loader(self):
"Modules deep inside an egg can still be tested for existence"
egg_name = '%s/test_egg.egg' % self.egg_dir
sys.path.append(egg_name)
egg_module = import_module('egg_module.sub1.sub2')
# An importable child
self.assertTrue(module_has_submodule(egg_module, 'good_module'))
mod = import_module('egg_module.sub1.sub2.good_module')
self.assertEqual(mod.content, 'Deep Good Module')
# A child that exists, but will generate an import error if loaded
self.assertTrue(module_has_submodule(egg_module, 'bad_module'))
self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.bad_module')
# A child that doesn't exist
self.assertFalse(module_has_submodule(egg_module, 'no_such_module'))
self.assertRaises(ImportError, import_module, 'egg_module.sub1.sub2.no_such_module')
class TestFinder(object):
def __init__(self, *args, **kwargs):
self.importer = zipimporter(*args, **kwargs)
def find_module(self, path):
importer = self.importer.find_module(path)
if importer is None:
return
return TestLoader(importer)
class TestLoader(object):
def __init__(self, importer):
self.importer = importer
def load_module(self, name):
mod = self.importer.load_module(name)
mod.__loader__ = self
return mod
class CustomLoader(EggLoader):
"""The Custom Loader test is exactly the same as the EggLoader, but
it uses a custom defined Loader and Finder that is intentionally
split into two classes. Although the EggLoader combines both functions
into one class, this isn't required.
"""
def setUp(self):
super(CustomLoader, self).setUp()
sys.path_hooks.insert(0, TestFinder)
sys.path_importer_cache.clear()
def tearDown(self):
super(CustomLoader, self).tearDown()
sys.path_hooks.pop(0)
|
cloudrain21/googletest
|
refs/heads/master
|
scripts/fuse_gtest_files.py
|
2577
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
# Where to find the source seed files.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
"""Verifies that the given file exists; aborts on failure.
relative_path is the file path relative to the given directory.
"""
if not os.path.isfile(os.path.join(directory, relative_path)):
print 'ERROR: Cannot find %s in directory %s.' % (relative_path,
directory)
print ('Please either specify a valid project root directory '
'or omit it on the command line.')
sys.exit(1)
def ValidateGTestRootDir(gtest_root):
"""Makes sure gtest_root points to a valid gtest root directory.
The function aborts the program on failure.
"""
VerifyFileExists(gtest_root, GTEST_H_SEED)
VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
"""Verifies that the given output file path is valid.
relative_path is relative to the output_dir directory.
"""
# Makes sure the output file either doesn't exist or can be overwritten.
output_file = os.path.join(output_dir, relative_path)
if os.path.exists(output_file):
# TODO(wan@google.com): The following user-interaction doesn't
# work with automated processes. We should provide a way for the
# Makefile to force overwriting the files.
print ('%s already exists in directory %s - overwrite it? (y/N) ' %
(relative_path, output_dir))
answer = sys.stdin.readline().strip()
if answer not in ['y', 'Y']:
print 'ABORTED.'
sys.exit(1)
# Makes sure the directory holding the output file exists; creates
# it and all its ancestors if necessary.
parent_directory = os.path.dirname(output_file)
if not os.path.isdir(parent_directory):
os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
processed_files = sets.Set() # Holds all gtest headers we've processed.
def ProcessFile(gtest_header_path):
"""Processes the given gtest header file."""
# We don't process the same header twice.
if gtest_header_path in processed_files:
return
processed_files.add(gtest_header_path)
# Reads each line in the given gtest header.
for line in file(os.path.join(gtest_root, gtest_header_path), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GTEST_H_SEED)
output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
processed_files = sets.Set()
def ProcessFile(gtest_source_file):
"""Processes the given gtest source file."""
# We don't process the same #included file twice.
if gtest_source_file in processed_files:
return
processed_files.add(gtest_source_file)
# Reads each line in the given gtest source file.
for line in file(os.path.join(gtest_root, gtest_source_file), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
# It's '#include "gtest/gtest-spi.h"'. This file is not
# #included by "gtest/gtest.h", so we need to process it.
ProcessFile(GTEST_SPI_H_SEED)
else:
# It's '#include "gtest/foo.h"' where foo is not gtest-spi.
# We treat it as '#include "gtest/gtest.h"', as all other
# gtest headers are being fused into gtest.h and cannot be
# #included directly.
# There is no need to #include "gtest/gtest.h" more than once.
if not GTEST_H_SEED in processed_files:
processed_files.add(GTEST_H_SEED)
output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
else:
m = INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
output_file.write(line)
ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
FuseGTestAllCcToFile(gtest_root, output_file)
output_file.close()
def FuseGTest(gtest_root, output_dir):
"""Fuses gtest.h and gtest-all.cc."""
ValidateGTestRootDir(gtest_root)
ValidateOutputDir(output_dir)
FuseGTestH(gtest_root, output_dir)
FuseGTestAllCc(gtest_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gtest_files.py OUTPUT_DIR
FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
FuseGTest(sys.argv[1], sys.argv[2])
else:
print __doc__
sys.exit(1)
if __name__ == '__main__':
main()
|
zerothi/sids
|
refs/heads/master
|
sisl/io/siesta/fdf.py
|
1
|
import warnings
from datetime import datetime
import numpy as np
import scipy as sp
from os.path import isfile
import itertools as itools
from ..sile import add_sile, get_sile_class, sile_fh_open, sile_raise_write, SileError
from .sile import SileSiesta
from .._help import *
from sisl._internal import set_module
from sisl import constant
from sisl.unit.siesta import units
import sisl._array as _a
from sisl._indices import indices_only
from sisl.utils.ranges import list2str
from sisl.messages import SislError, info, warn
from sisl.utils.mathematics import fnorm
from .binaries import tshsSileSiesta, tsdeSileSiesta
from .binaries import dmSileSiesta, hsxSileSiesta, onlysSileSiesta
from .eig import eigSileSiesta
from .fc import fcSileSiesta
from .fa import faSileSiesta
from .siesta_grid import gridncSileSiesta
from .siesta_nc import ncSileSiesta
from .basis import ionxmlSileSiesta, ionncSileSiesta
from .orb_indx import orbindxSileSiesta
from .xv import xvSileSiesta
from sisl import Geometry, Orbital, Atom, AtomGhost, Atoms, SuperCell, DynamicalMatrix
from sisl.utils.cmd import default_ArgumentParser, default_namespace
from sisl.utils.misc import merge_instances
from sisl.unit.siesta import unit_convert, unit_default, unit_group
__all__ = ['fdfSileSiesta']
_LOGICAL_TRUE = ['.true.', 'true', 'yes', 'y', 't']
_LOGICAL_FALSE = ['.false.', 'false', 'no', 'n', 'f']
_LOGICAL = _LOGICAL_FALSE + _LOGICAL_TRUE
Bohr2Ang = unit_convert('Bohr', 'Ang')
def _listify_str(arg):
if isinstance(arg, str):
return [arg]
return arg
def _track(method, msg):
if method.__self__.track:
info(f"{method.__self__.__class__.__name__}.{method.__name__}: {msg}")
def _track_file(method, f, msg=None):
if msg is None:
if f.is_file():
msg = f"reading file {f}"
else:
msg = f"could not find file {f}"
if method.__self__.track:
info(f"{method.__self__.__class__.__name__}.{method.__name__}: {msg}")
@set_module("sisl.io.siesta")
class fdfSileSiesta(SileSiesta):
""" FDF-input file
By supplying base you can reference files in other directories.
By default the ``base`` is the directory given in the file name.
Parameters
----------
filename: str
fdf file
mode : str, optional
opening mode, default to read-only
base : str, optional
base-directory to read output files from.
Examples
--------
>>> fdf = fdfSileSiesta('tmp/RUN.fdf') # reads output files in 'tmp/' folder
>>> fdf = fdfSileSiesta('tmp/RUN.fdf', base='.') # reads output files in './' folder
"""
def _setup(self, *args, **kwargs):
""" Setup the `fdfSileSiesta` after initialization """
self._comment = ['#', '!', ';']
# List of parent file-handles used while reading
# This is because fdf enables inclusion of other files
self._parent_fh = []
# Public key for printing information about where stuff comes from
self.track = kwargs.get("track", False)
def _pushfile(self, f):
if self.dir_file(f).is_file():
self._parent_fh.append(self.fh)
self.fh = self.dir_file(f).open(self._mode)
else:
            warn(str(self) + f' is trying to include file: {f}, but the file does not seem to exist. It will be disregarded!')
def _popfile(self):
if len(self._parent_fh) > 0:
self.fh.close()
self.fh = self._parent_fh.pop()
return True
return False
def _seek(self):
""" Closes all files, and starts over from beginning """
try:
while self._popfile():
pass
self.fh.seek(0)
except:
pass
@sile_fh_open()
def includes(self):
""" Return a list of all files that are *included* or otherwise necessary for reading the fdf file """
self._seek()
# In FDF files, %include marks files that progress
# down in a tree structure
def add(f):
f = self.dir_file(f)
if f not in includes:
includes.append(f)
# List of includes
includes = []
l = self.readline()
while l != '':
ls = l.split()
if '%include' == ls[0].lower():
add(ls[1])
self._pushfile(ls[1])
elif '<' in ls:
# TODO, in principle the < could contain
# include if this line is not a %block.
add(ls[ls.index('<')+1])
l = self.readline()
while l == '':
# last line of file
if self._popfile():
l = self.readline()
else:
break
return includes
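    # Minimal usage sketch (the file name 'RUN.fdf' is hypothetical):
    #
    #   fdf = fdfSileSiesta('RUN.fdf')
    #   for f in fdf.includes():
    #       print(f)   # every %include'd or piped-in file, resolved relative to the fdf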
@sile_fh_open()
def _read_label(self, label):
""" Try and read the first occurence of a key
This will take care of blocks, labels and piped in labels
Parameters
----------
label : str
label to find in the fdf file
"""
self._seek()
def tolabel(label):
return label.lower().replace('_', '').replace('-', '').replace('.', '')
labell = tolabel(label)
def valid_line(line):
ls = line.strip()
if len(ls) == 0:
return False
return not (ls[0] in self._comment)
def process_line(line):
# Split line by spaces
ls = line.split()
if len(ls) == 0:
return None
# Make a lower equivalent of ls
lsl = list(map(tolabel, ls))
# Check if there is a pipe in the line
if '<' in lsl:
idx = lsl.index('<')
# Now there are two cases
# 1. It is a block, in which case
# the full block is piped into the label
# %block Label < file
if lsl[0] == '%block' and lsl[1] == labell:
# Correct line found
# Read the file content, removing any empty and/or comment lines
lines = self.dir_file(ls[3]).open('r').readlines()
return [l.strip() for l in lines if valid_line(l)]
# 2. There are labels that should be read from a subsequent file
# Label1 Label2 < other.fdf
if labell in lsl[:idx]:
# Valid line, read key from other.fdf
return fdfSileSiesta(self.dir_file(ls[idx+1]), base=self._directory)._read_label(label)
                # The label is not in this line; either it is on the
                # RHS of the <, or the line is a %block for another label.
return None
# The last case is if the label is the first word on the line
# In that case we have found what we are looking for
if lsl[0] == labell:
return (' '.join(ls[1:])).strip()
elif lsl[0] == '%block':
if lsl[1] == labell:
# Read in the block content
lines = []
# Now read lines
l = self.readline().strip()
while not tolabel(l).startswith('%endblock'):
if len(l) > 0:
lines.append(l)
l = self.readline().strip()
return lines
elif lsl[0] == '%include':
# We have to open a new file
self._pushfile(ls[1])
return None
# Perform actual reading of line
l = self.readline().split('#')[0]
if len(l) == 0:
return None
l = process_line(l)
while l is None:
l = self.readline().split('#')[0]
if len(l) == 0:
if not self._popfile():
return None
l = process_line(l)
return l
@classmethod
def _type(cls, value):
""" Determine the type by the value
Parameters
----------
value : str or list or numpy.ndarray
the value to check for fdf-type
"""
if value is None:
return None
if isinstance(value, list):
# A block, %block ...
return 'B'
if isinstance(value, np.ndarray):
# A list, Label [...]
return 'a'
# Grab the entire line (beside the key)
values = value.split()
if len(values) == 1:
fdf = values[0].lower()
if fdf in _LOGICAL:
# logical
return 'b'
try:
float(fdf)
if '.' in fdf:
# a real number (otherwise an integer)
return 'r'
return 'i'
except:
pass
# fall-back to name with everything
elif len(values) == 2:
# possibly a physical value
try:
float(values[0])
return 'p'
except:
pass
return 'n'
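    # Illustrative return values of _type, derived from the checks above:
    #
    #   _type(['line 1', 'line 2'])  -> 'B'   # block content
    #   _type('yes')                 -> 'b'   # logical
    #   _type('0.5')                 -> 'r'   # real
    #   _type('2')                   -> 'i'   # integer
    #   _type('0.5 eV')              -> 'p'   # physical value with unit
    #   _type('hello world')         -> 'n'   # plain name/string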
@sile_fh_open()
def type(self, label):
""" Return the type of the fdf-keyword
Parameters
----------
label : str
the label to look-up
"""
self._seek()
return self._type(self._read_label(label))
@sile_fh_open()
def get(self, label, default=None, unit=None, with_unit=False):
""" Retrieve fdf-keyword from the file
Parameters
----------
label : str
the fdf-label to search for
default : optional
if the label is not found, this will be the returned value (default to ``None``)
unit : str, optional
unit of the physical quantity to return
with_unit : bool, optional
whether the physical quantity gets returned with the found unit in the fdf file.
Returns
-------
        value : the value of the fdf-label. If the label is a block a `list` is returned; for
                a real value a `float` is returned and for an integer an `int` (or the type of
                `default` if a default is supplied).
unit : if `with_unit` is true this will contain the associated unit if it is specified
Examples
--------
>>> print(open(...).readlines())
LabeleV 1. eV
LabelRy 1. Ry
Label name
FakeInt 1
%block Hello
line 1
line2
%endblock
>>> fdf.get('LabeleV') == 1. # default unit is eV
>>> fdf.get('LabelRy') == unit.siesta.unit_convert('Ry', 'eV')
>>> fdf.get('LabelRy', unit='Ry') == 1.
>>> fdf.get('LabelRy', with_unit=True) == (1., 'Ry')
>>> fdf.get('FakeInt', '0') == '1'
>>> fdf.get('LabeleV', with_unit=True) == (1., 'eV')
>>> fdf.get('Label', with_unit=True) == 'name' # no unit present on line
>>> fdf.get('Hello') == ['line 1', 'line2']
"""
# Try and read a line
value = self._read_label(label)
# Simply return the default value if not found
if value is None:
return default
# Figure out what it is
t = self._type(value)
# We will only do something if it is a real, int, or physical.
# Else we simply return, as-is
if t == 'r':
if default is None:
return float(value)
t = type(default)
return t(value)
elif t == 'i':
if default is None:
return int(value)
t = type(default)
return t(value)
elif t == 'p':
value = value.split()
if with_unit:
# Simply return, as is. Let the user do whatever.
return float(value[0]), value[1]
if unit is None:
default = unit_default(unit_group(value[1]))
else:
if unit_group(value[1]) != unit_group(unit):
raise ValueError(f"Requested unit for {label} is not the same type. "
"Found/Requested {value[1]}/{unit}'")
default = unit
return float(value[0]) * unit_convert(value[1], default)
elif t == 'b':
return value.lower() in _LOGICAL_TRUE
return value
def set(self, key, value, keep=True):
""" Add the key and value to the FDF file
Parameters
----------
key : str
the fdf-key value to be set in the fdf file
value : str or list of str
the value of the string. If a `str` is passed a regular
fdf-key is used, if a `list` it will be a %block.
keep : bool, optional
whether old flags will be kept in the fdf file. In this case
a time-stamp will be written to show when the key was overwritten.
"""
# To set a key we first need to figure out if it is
# already present, if so, we will add the new key, just above
# the already present key.
top_file = str(self.file)
# 1. find the old value, and thus the file in which it is found
with self:
try:
self.get(key)
# Get the file of the containing data
top_file = str(self.fh.name)
except:
pass
# Ensure that all files are closed
self._seek()
# Now we should re-read and edit the file
lines = open(top_file, 'r').readlines()
def write(fh, value):
if value is None:
return
if isinstance(value, str):
fh.write(self.print(key, value))
if '\n' not in value:
fh.write('\n')
else:
raise NotImplementedError('Currently blocks are not implemented in set!')
# Now loop, write and edit
do_write = True
lkey = key.lower()
with open(top_file, 'w') as fh:
for line in lines:
if self.line_has_key(line, lkey, case=False) and do_write:
write(fh, value)
if keep:
fh.write('# Old value ({})\n'.format(datetime.today().strftime('%Y-%m-%d %H:%M')))
fh.write(f'{line}')
do_write = False
else:
fh.write(line)
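    # Minimal usage sketch (assumes 'RUN.fdf' exists and is writable):
    #
    #   fdf = fdfSileSiesta('RUN.fdf')
    #   fdf.set('MeshCutoff', '250. Ry')              # new value written; old line kept below a time-stamp comment
    #   fdf.set('MeshCutoff', '300. Ry', keep=False)  # new value written; old line removed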
@staticmethod
def print(key, value):
""" Return a string which is pretty-printing the key+value """
if isinstance(value, list):
s = f'%block {key}'
# if the value has any new-values
has_nl = False
for v in value:
if '\n' in v:
has_nl = True
break
            if has_nl:
                # the entries already contain newlines; strip the trailing one
                # so that %endblock ends up on its own line
                value[-1] = value[-1].replace('\n', '')
                s += '\n{}\n'.format(''.join(value))
            else:
                s += '\n{}\n'.format('\n'.join(value))
            s += f'%endblock {key}'
else:
s = f'{key} {value}'
return s
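    # Illustrative output of print() (values are made up):
    #
    #   fdfSileSiesta.print('MeshCutoff', '250. Ry')
    #     -> 'MeshCutoff 250. Ry'
    #   fdfSileSiesta.print('Example.Block', ['line 1', 'line 2'])
    #     -> a '%block Example.Block ... %endblock Example.Block' string with one entry per line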
@sile_fh_open()
def write_supercell(self, sc, fmt='.8f', *args, **kwargs):
""" Writes the supercell to the contained file """
# Check that we can write to the file
sile_raise_write(self)
fmt_str = ' {{0:{0}}} {{1:{0}}} {{2:{0}}}\n'.format(fmt)
unit = kwargs.get('unit', 'Ang').capitalize()
conv = 1.
if unit in ['Ang', 'Bohr']:
conv = unit_convert('Ang', unit)
else:
unit = 'Ang'
# Write out the cell
self._write(f'LatticeConstant 1.0 {unit}\n')
self._write('%block LatticeVectors\n')
self._write(fmt_str.format(*sc.cell[0, :] * conv))
self._write(fmt_str.format(*sc.cell[1, :] * conv))
self._write(fmt_str.format(*sc.cell[2, :] * conv))
self._write('%endblock LatticeVectors\n')
@sile_fh_open()
def write_geometry(self, geometry, fmt='.8f', *args, **kwargs):
""" Writes the geometry to the contained file """
# Check that we can write to the file
sile_raise_write(self)
self.write_supercell(geometry.sc, fmt, *args, **kwargs)
self._write('\n')
self._write(f'NumberOfAtoms {geometry.na}\n')
unit = kwargs.get('unit', 'Ang').capitalize()
is_fractional = unit in ['Frac', 'Fractional']
if is_fractional:
self._write('AtomicCoordinatesFormat Fractional\n')
else:
conv = unit_convert('Ang', unit)
self._write(f'AtomicCoordinatesFormat {unit}\n')
self._write('%block AtomicCoordinatesAndAtomicSpecies\n')
n_species = len(geometry.atoms.atom)
# Count for the species
if is_fractional:
xyz = geometry.fxyz
else:
xyz = geometry.xyz * conv
if fmt[0] == '.':
# Correct for a "same" length of all coordinates
c_max = len(str((f'{{:{fmt}}}').format(xyz.max())))
c_min = len(str((f'{{:{fmt}}}').format(xyz.min())))
fmt = str(max(c_min, c_max)) + fmt
fmt_str = ' {{3:{0}}} {{4:{0}}} {{5:{0}}} {{0}} # {{1:{1}d}}: {{2}}\n'.format(fmt, len(str(len(geometry))))
for ia, a, isp in geometry.iter_species():
self._write(fmt_str.format(isp + 1, ia + 1, a.tag, *xyz[ia, :]))
self._write('%endblock AtomicCoordinatesAndAtomicSpecies\n\n')
# Write out species
# First swap key and value
self._write(f'NumberOfSpecies {n_species}\n')
self._write('%block ChemicalSpeciesLabel\n')
for i, a in enumerate(geometry.atoms.atom):
if isinstance(a, AtomGhost):
self._write(' {} {} {}\n'.format(i + 1, -a.Z, a.tag))
else:
self._write(' {} {} {}\n'.format(i + 1, a.Z, a.tag))
self._write('%endblock ChemicalSpeciesLabel\n')
_write_block = True
def write_block(atoms, append, write_block):
if write_block:
self._write('\n# Constraints\n%block Geometry.Constraints\n')
write_block = False
self._write(f' atom [{atoms}]{append}\n')
return write_block
for d in range(4):
append = {0: '', 1: ' 1. 0. 0.', 2: ' 0. 1. 0.', 3: ' 0. 0. 1.'}.get(d)
n = 'CONSTRAIN' + {0: '', 1: '-x', 2: '-y', 3: '-z'}.get(d)
if n in geometry.names:
idx = list2str(geometry.names[n] + 1).replace('-', ' -- ')
if len(idx) > 200:
info(f"{str(self)}.write_geometry will not write the constraints for {n} (too long line).")
else:
_write_block = write_block(idx, append, _write_block)
if not _write_block:
self._write('%endblock\n')
@staticmethod
def _SpGeom_replace_geom(spgeom, geometry):
""" Replace all atoms in spgeom with the atom in geometry while retaining the number of orbitals
Currently we need some way of figuring out whether the number of atoms and orbitals are
consistent.
Parameters
----------
spgeom : SparseGeometry
the sparse object with attached geometry
geometry : Geometry
geometry to grab atoms from
full_replace : bool, optional
whether the full geometry may be replaced in case ``spgeom.na != geometry.na && spgeom.no == geometry.no``.
This is required when `spgeom` does not contain information about atoms.
"""
if spgeom.na != geometry.na and spgeom.no == geometry.no:
            # In this case we cannot compare the number of orbitals of individual atoms.
# I.e. we suspect the incoming geometry to be correct.
spgeom._geometry = geometry
return True
elif spgeom.na != geometry.na:
warn('cannot replace geometry due to insufficient information regarding number of '
'atoms and orbitals, ensuring correct geometry failed...')
no_no = spgeom.no == geometry.no
# Loop and make sure the number of orbitals is consistent
for a, idx in geometry.atoms.iter(True):
if len(idx) == 0:
continue
Sa = spgeom.geometry.atoms[idx[0]]
if Sa.no != a.no:
# Make sure the atom we replace with retains the same information
# *except* the number of orbitals.
a = a.__class__(a.Z, Sa.orbital, mass=a.mass, tag=a.tag)
spgeom.geometry.atoms.replace(idx, a)
spgeom.geometry.reduce()
return no_no
def read_supercell_nsc(self, *args, **kwargs):
""" Read supercell size using any method available
Raises
------
SislWarning if none of the files can be read
"""
order = _listify_str(kwargs.pop('order', ['nc', 'ORB_INDX']))
for f in order:
v = getattr(self, '_r_supercell_nsc_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
_track(self.read_supercell_nsc, f"found file {f}")
return v
warn('number of supercells could not be read from output files. Assuming molecule cell '
'(no supercell connections)')
return _a.onesi(3)
def _r_supercell_nsc_nc(self, *args, **kwargs):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_supercell_nsc_nc, f)
if f.is_file():
return ncSileSiesta(f).read_supercell_nsc()
return None
def _r_supercell_nsc_orb_indx(self, *args, **kwargs):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.ORB_INDX')
_track_file(self._r_supercell_nsc_orb_indx, f)
if f.is_file():
return orbindxSileSiesta(f).read_supercell_nsc()
return None
def read_supercell(self, output=False, *args, **kwargs):
""" Returns SuperCell object by reading fdf or Siesta output related files.
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
output: bool, optional
whether to read supercell from output files (default to read from
the fdf file).
order: list of str, optional
the order of which to try and read the supercell.
By default this is ``['XV', 'nc', 'fdf']`` if `output` is true.
If `order` is present `output` is disregarded.
Examples
--------
>>> fdf = get_sile('RUN.fdf')
>>> fdf.read_supercell() # read from fdf
>>> fdf.read_supercell(True) # read from [XV, nc, fdf]
>>> fdf.read_supercell(order=['nc']) # read from [nc]
>>> fdf.read_supercell(True, order=['nc']) # read from [nc]
"""
if output:
order = _listify_str(kwargs.pop('order', ['XV', 'nc', 'fdf']))
else:
order = _listify_str(kwargs.pop('order', ['fdf']))
for f in order:
v = getattr(self, '_r_supercell_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
_track(self.read_supercell, f"found file {f}")
return v
return None
def _r_supercell_fdf(self, *args, **kwargs):
""" Returns `SuperCell` object from the FDF file """
s = self.get('LatticeConstant', unit='Ang')
if s is None:
raise SileError('Could not find LatticeConstant in file')
# Read in cell
cell = _a.emptyd([3, 3])
lc = self.get('LatticeVectors')
if lc:
for i in range(3):
cell[i, :] = [float(k) for k in lc[i].split()[:3]]
else:
lc = self.get('LatticeParameters')
if lc:
tmp = [float(k) for k in lc[0].split()[:6]]
cell = SuperCell.tocell(*tmp)
if lc is None:
            # the fdf file contains neither the LatticeVectors nor the LatticeParameters block
raise SileError('Could not find LatticeVectors or LatticeParameters block in file')
cell *= s
# When reading from the fdf, the warning should be suppressed
with warnings.catch_warnings():
warnings.simplefilter("ignore")
nsc = self.read_supercell_nsc()
return SuperCell(cell, nsc=nsc)
def _r_supercell_nc(self):
# Read supercell from <>.nc file
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_supercell_nc, f)
if f.is_file():
return ncSileSiesta(f).read_supercell()
return None
def _r_supercell_xv(self, *args, **kwargs):
""" Returns `SuperCell` object from the FDF file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.XV')
_track_file(self._r_supercell_xv, f)
if f.is_file():
nsc = self.read_supercell_nsc()
sc = xvSileSiesta(f).read_supercell()
sc.set_nsc(nsc)
return sc
return None
def _r_supercell_tshs(self, *args, **kwargs):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSHS')
_track_file(self._r_supercell_tshs, f)
if f.is_file():
return tshsSileSiesta(f).read_supercell()
return None
def _r_supercell_onlys(self, *args, **kwargs):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.onlyS')
_track_file(self._r_supercell_onlys, f)
if f.is_file():
return onlysSileSiesta(f).read_supercell()
return None
def read_force(self, *args, **kwargs):
""" Read forces from the output of the calculation (forces are not defined in the input)
Parameters
----------
order : list of str, optional
the order of the forces we are trying to read, default to ``['FA', 'nc']``
Returns
-------
(*, 3) : vector with forces for each of the atoms
"""
order = _listify_str(kwargs.pop('order', ['FA', 'nc']))
for f in order:
v = getattr(self, '_r_force_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
if self.track:
info(f"{self.file}(read_force) found in file={f}")
return v
return None
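    # Usage sketch (assumes a finished Siesta run with the default SystemLabel):
    #
    #   forces = fdf.read_force()              # tries the FA file, then the nc file
    #   forces = fdf.read_force(order=['nc'])  # only the NetCDF file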
def _r_force_fa(self, *args, **kwargs):
""" Read forces from the FA file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.FA')
if f.is_file():
return faSileSiesta(f).read_force()
return None
def _r_force_fac(self, *args, **kwargs):
""" Read forces from the FAC file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.FAC')
if f.is_file():
return faSileSiesta(f).read_force()
return None
def _r_force_nc(self, *args, **kwargs):
""" Read forces from the nc file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
if f.is_file():
return ncSileSiesta(f).read_force()
return None
def read_force_constant(self, *args, **kwargs):
""" Read force constant from the output of the calculation
Returns
-------
force_constant : numpy.ndarray
            array of shape [*, 3, 2, *, 3] with the force constant elements for each atomic displacement
"""
order = _listify_str(kwargs.pop('order', ['nc', 'FC']))
for f in order:
v = getattr(self, '_r_force_constant_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
if self.track:
info(f"{self.file}(read_force_constant) found in file={f}")
return v
return None
def _r_force_constant_nc(self, *args, **kwargs):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
if f.is_file():
            nc = ncSileSiesta(f)
            if 'FC' not in nc.groups:
                return None
            return nc.read_force_constant()
return None
def _r_force_constant_fc(self, *args, **kwargs):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.FC')
if f.is_file():
na = self.get('NumberOfAtoms', default=None)
return fcSileSiesta(f).read_force_constant(na=na)
return None
def read_fermi_level(self, *args, **kwargs):
""" Read fermi-level from output of the calculation
Parameters
----------
order: list of str, optional
the order of which to try and read the fermi-level.
By default this is ``['nc', 'TSDE', 'TSHS', 'EIG']``.
Returns
-------
Ef : float
fermi-level
"""
order = _listify_str(kwargs.pop('order', ['nc', 'TSDE', 'TSHS', 'EIG']))
for f in order:
v = getattr(self, '_r_fermi_level_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
if self.track:
info(f"{self.file}(read_fermi_level) found in file={f}")
return v
return None
def _r_fermi_level_nc(self):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
if isfile(f):
return ncSileSiesta(f).read_fermi_level()
return None
def _r_fermi_level_tsde(self):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSDE')
if isfile(f):
return tsdeSileSiesta(f).read_fermi_level()
return None
def _r_fermi_level_tshs(self):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSHS')
if isfile(f):
return tshsSileSiesta(f).read_fermi_level()
return None
def _r_fermi_level_eig(self):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.EIG')
if isfile(f):
return eigSileSiesta(f).read_fermi_level()
return None
def read_dynamical_matrix(self, *args, **kwargs):
""" Read dynamical matrix from output of the calculation
Generally the mass is stored in the basis information output,
but for dynamical matrices it makes sense to let the user control this,
e.g. through the fdf file.
By default the mass will be read from the AtomicMass key in the fdf file
and _not_ from the basis set information.
Parameters
----------
order: list of str, optional
the order of which to try and read the dynamical matrix.
By default this is ``['nc', 'FC']``.
cutoff_dist : float, optional
cutoff value for the distance of the force-constants (everything farther than
`cutoff_dist` will be set to 0 Ang). Default, no cutoff.
cutoff : float, optional
absolute values below the cutoff are considered 0. Defaults to 0. eV/Ang**2.
trans_inv : bool, optional
if true (default), the force-constant matrix will be fixed so that translational
invariance will be enforced
sum0 : bool, optional
if true (default), the sum of forces on atoms for each displacement will be
forced to 0.
hermitian: bool, optional
if true (default), the returned dynamical matrix will be hermitian
Returns
-------
dynamic_matrix : DynamicalMatrix
the dynamical matrix
"""
order = _listify_str(kwargs.pop('order', ['nc', 'FC']))
for f in order:
v = getattr(self, '_r_dynamical_matrix_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
if self.track:
info(f"{self.file}(read_dynamical_matrix) found in file={f}")
return v
return None
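    # Usage sketch (assumes a finished force-constant run, e.g. a .FC file is present):
    #
    #   D = fdf.read_dynamical_matrix()                                     # default order: nc, then FC
    #   D = fdf.read_dynamical_matrix(order=['FC'], cutoff_dist=5., hermitian=True)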
def _r_dynamical_matrix_fc(self, *args, **kwargs):
FC = self.read_force_constant(*args, order="FC", **kwargs)
if FC is None:
return None
geom = self.read_geometry()
basis_fdf = self.read_basis(order="fdf")
for i, atom in enumerate(basis_fdf):
geom.atoms.replace(i, atom)
# Get list of FC atoms
FC_atoms = _a.arangei(self.get('MD.FCFirst', default=1) - 1, self.get('MD.FCLast', default=geom.na))
return self._dynamical_matrix_from_fc(geom, FC, FC_atoms, *args, **kwargs)
def _r_dynamical_matrix_nc(self, *args, **kwargs):
FC = self.read_force_constant(*args, order=['nc'], **kwargs)
if FC is None:
return None
geom = self.read_geometry(order=['nc'])
basis_fdf = self.read_basis(order="fdf")
for i, atom in enumerate(basis_fdf):
geom.atoms.replace(i, atom)
# Get list of FC atoms
# TODO change to read in from the NetCDF file
FC_atoms = _a.arangei(self.get('MD.FCFirst', default=1) - 1, self.get('MD.FCLast', default=geom.na))
return self._dynamical_matrix_from_fc(geom, FC, FC_atoms, *args, **kwargs)
def _dynamical_matrix_from_fc(self, geom, FC, FC_atoms, *args, **kwargs):
# We have the force constant matrix.
# Now handle it...
# FC(OLD) = (n_displ, 3, 2, na, 3)
# FC(NEW) = (n_displ, 3, na, 3)
# In fact, after averaging this becomes the Hessian
FC = FC.sum(axis=2) * 0.5
hermitian = kwargs.get("hermitian", True)
# Figure out the "original" periodic directions
periodic = geom.nsc > 1
# Create conversion from eV/Ang^2 to correct units
# Further down we are multiplying with [1 / amu]
scale = constant.hbar / units('Ang', 'm') / units('eV amu', 'J kg') ** 0.5
# Cut-off too small values
fc_cut = kwargs.get('cutoff', 0.)
FC = np.where(np.abs(FC) > fc_cut, FC, 0.)
# Convert the force constant such that a diagonalization returns eV ^ 2
# FC is in [eV / Ang^2]
# Convert the geometry to contain 3 orbitals per atom (x, y, z)
R = kwargs.get('cutoff_dist', -2.)
orbs = [Orbital(R / 2, tag=tag) for tag in 'xyz']
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for atom, _ in geom.atoms.iter(True):
new_atom = atom.__class__(atom.Z, orbs, mass=atom.mass, tag=atom.tag)
geom.atoms.replace(atom, new_atom)
# Figure out the supercell indices
supercell = kwargs.get('supercell', [1, 1, 1])
if supercell is False:
supercell = [1] * 3
elif supercell is True:
_, supercell = geom.as_primary(FC.shape[0], ret_super=True)
info("{}.read_dynamical_matrix(FC) guessed on a [{}, {}, {}] "
"supercell calculation.".format(str(self), *supercell))
# Convert to integer array
supercell = _a.asarrayi(supercell)
# Reshape to supercell
FC.shape = (FC.shape[0], 3, *supercell, -1, 3)
na_fc = len(FC_atoms)
assert FC.shape[0] == len(FC_atoms)
assert FC.shape[5] == len(geom) // np.prod(supercell)
# Now we are in a problem since the tiling of the geometry
# is not necessarily in x, y, z order.
# Say for users who did:
# geom.tile(*, 2).tile(*, 1).tile(*, 0).write(...)
# then we need to pivot the data to be consistent with the
# supercell information
if np.any(supercell > 1):
            # Re-arrange FC before we use _fc_correct
# Now we need to figure out how the atoms are laid out.
# It *MUST* either be repeated or tiled (preferentially tiled).
# We have an actual supercell. Lets try and fix it.
# First lets recreate the smallest geometry
sc = geom.sc.cell.copy()
sc[0, :] /= supercell[0]
sc[1, :] /= supercell[1]
sc[2, :] /= supercell[2]
# Ensure nsc is at least an odd number, later down we will symmetrize the FC matrix
nsc = supercell + (supercell + 1) % 2
if R > 0:
# Correct for the optional radius
sc_norm = fnorm(sc)
# R is already "twice" the "orbital" range
nsc_R = 1 + 2 * np.ceil(R / sc_norm).astype(np.int32)
for i in range(3):
nsc[i] = min(nsc[i], nsc_R[i])
del nsc_R
# Construct the minimal unit-cell geometry
sc = SuperCell(sc, nsc=nsc)
# TODO check that the coordinates are in the cell
geom_small = Geometry(geom.xyz[FC_atoms], geom.atoms[FC_atoms], sc)
# Convert the big geometry's coordinates to fractional coordinates of the small unit-cell.
isc_xyz = (geom.xyz.dot(geom_small.sc.icell.T) -
np.tile(geom_small.fxyz, (np.product(supercell), 1)))
axis_tiling = []
offset = len(geom_small)
for _ in (supercell > 1).nonzero()[0]:
first_isc = (np.around(isc_xyz[FC_atoms + offset, :]) == 1.).sum(0)
axis_tiling.append(np.argmax(first_isc))
# Fix the offset
offset *= supercell[axis_tiling[-1]]
while len(axis_tiling) < 3:
for i in range(3):
if not i in axis_tiling:
axis_tiling.append(i)
# Now we have the tiling operation, check it sort of matches
geom_tile = geom_small.copy()
for axis in axis_tiling:
geom_tile = geom_tile.tile(supercell[axis], axis)
# Proximity check of 0.01 Ang (TODO add this as an argument)
if not np.allclose(geom_tile.xyz, geom.xyz, rtol=0, atol=0.01):
raise SislError(f"{str(self)}.read_dynamical_matrix(FC) could "
"not figure out the tiling method for the supercell")
# Convert the FC matrix to a "rollable" matrix
# This will make it easier to symmetrize
# 0. displaced atoms
# 1. x, y, z (displacements)
# 2. tile-axis_tiling[0]
# 3. tile-axis_tiling[1]
# 4. tile-axis_tiling[2]
# 5. na
# 6. x, y, z (force components)
FC.shape = (na_fc, 3, *supercell[axis_tiling], na_fc, 3)
# Now swap the [2, 3, 4] dimensions so that we get in order of lattice vectors
swap = np.array([2, 3, 4])[axis_tiling]
swap = (0, 1, *swap, 5, 6)
FC = np.transpose(FC, swap)
del axis_tiling
# Now FC is sorted according to the supercell tiling
# TODO this will probably fail if: FC_atoms.size != FC.shape[5]
from ._help import _fc_correct
FC = _fc_correct(FC, trans_inv=kwargs.get("trans_inv", True),
sum0=kwargs.get("sum0", True),
hermitian=hermitian)
# Remove ghost-atoms or atoms with 0 mass!
# TODO check if ghost-atoms should be taken into account in _fc_correct
idx = (geom.atoms.mass == 0.).nonzero()[0]
if len(idx) > 0:
FC = np.delete(FC, idx, axis=5)
geom = geom.remove(idx)
geom.set_nsc([1] * 3)
raise NotImplementedError(f"{self}.read_dynamical_matrix could not reduce geometry "
"since there are atoms with 0 mass.")
# Now we can build the dynamical matrix (it will always be real)
na = len(geom)
if np.all(supercell <= 1):
# also catches supercell == 0
D = sp.sparse.lil_matrix((geom.no, geom.no), dtype=np.float64)
FC = np.squeeze(FC, axis=(2, 3, 4))
# Instead of doing the sqrt in all D = FC (below) we do it here
m = scale / geom.atoms.mass ** 0.5
FC *= m[FC_atoms].reshape(-1, 1, 1, 1) * m.reshape(1, 1, -1, 1)
j_FC_atoms = FC_atoms
idx = _a.arangei(len(FC_atoms))
for ia, fia in enumerate(FC_atoms):
if R > 0:
# find distances between the other atoms to cut-off the distance
idx = geom.close(fia, R=R, atoms=FC_atoms)
idx = indices_only(FC_atoms, idx)
j_FC_atoms = FC_atoms[idx]
for ja, fja in zip(idx, j_FC_atoms):
D[ia*3:(ia+1)*3, ja*3:(ja+1)*3] = FC[ia, :, fja, :]
else:
geom = geom_small
if np.any(np.diff(FC_atoms) != 1):
raise SislError(f"{self}.read_dynamical_matrix(FC) requires the FC atoms to be consecutive!")
# Re-order FC matrix so the FC-atoms are first
if FC.shape[0] != FC.shape[5]:
ordered = _a.arangei(FC.shape[5])
                ordered = np.concatenate((FC_atoms, np.delete(ordered, FC_atoms)))
FC = FC[:, :, :, :, :, ordered, :]
FC_atoms = _a.arangei(len(FC_atoms))
if FC_atoms[0] != 0:
# TODO we could roll the axis such that the displaced atoms moves into the
# first elements
raise SislError(f"{self}.read_dynamical_matrix(FC) requires the displaced atoms to start from 1!")
# After having done this we can easily mass scale all FC components
m = scale / geom.atoms.mass ** 0.5
FC *= m.reshape(-1, 1, 1, 1, 1, 1, 1) * m.reshape(1, 1, 1, 1, 1, -1, 1)
# Check whether we need to "halve" the equivalent supercell
# This will be present in calculations done on an even number of supercells.
# I.e. for 4 supercells
# [0] [1] [2] [3]
# where in the supercell approach:
# *[2] [3] [0] [1] *[2]
# I.e. since we are double counting [2] we will halve it.
# This is not *exactly* true because depending on the range one should do the symmetry operations.
# However the FC does not contain such symmetry considerations.
for i in range(3):
if supercell[i] % 2 == 1:
# We don't need to do anything
continue
# Figure out the supercell to halve
halve_idx = supercell[i] // 2
if i == 0:
FC[:, :, halve_idx, :, :, :, :] *= 0.5
elif i == 1:
FC[:, :, :, halve_idx, :, :, :] *= 0.5
else:
FC[:, :, :, :, halve_idx, :, :] *= 0.5
# Now create the dynamical matrix
# Currently this will be in lil_matrix (changed in the end)
D = sp.sparse.lil_matrix((geom.no, geom.no_s), dtype=np.float64)
# When x, y, z are negative we simply look-up from the back of the array
# which is exactly what is required
isc_off = geom.sc.isc_off
nxyz, na = geom.no, geom.na
dist = geom.rij
# Now take all positive supercell connections (including inner cell)
nsc = geom.nsc // 2
list_nsc = [range(-x, x + 1) for x in nsc]
iter_FC_atoms = _a.arangei(len(FC_atoms))
iter_j_FC_atoms = iter_FC_atoms
for x, y, z in itools.product(*list_nsc):
isc = isc_off[x, y, z]
aoff = isc * na
joff = isc * nxyz
for ia in iter_FC_atoms:
# Reduce second loop based on radius cutoff
if R > 0:
iter_j_FC_atoms = iter_FC_atoms[dist(ia, aoff + iter_FC_atoms) <= R]
for ja in iter_j_FC_atoms:
D[ia*3:(ia+1)*3, joff+ja*3:joff+(ja+1)*3] = FC[ia, :, x, y, z, ja, :]
D = D.tocsr()
# Remove all zeros
D.eliminate_zeros()
D = DynamicalMatrix.fromsp(geom, D)
if hermitian:
D.finalize()
D = (D + D.transpose()) * 0.5
return D
def read_geometry(self, output=False, *args, **kwargs):
""" Returns Geometry object by reading fdf or Siesta output related files.
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
output: bool, optional
whether to read geometry from output files (default to read from
the fdf file).
order: list of str, optional
the order of which to try and read the geometry.
By default this is ``['XV', 'nc', 'fdf', 'TSHS']`` if `output` is true
If `order` is present `output` is disregarded.
Examples
--------
>>> fdf = get_sile('RUN.fdf')
>>> fdf.read_geometry() # read from fdf
>>> fdf.read_geometry(True) # read from [XV, nc, fdf]
>>> fdf.read_geometry(order=['nc']) # read from [nc]
>>> fdf.read_geometry(True, order=['nc']) # read from [nc]
"""
##
# NOTE
# When adding more capabilities please check the read_geometry(True, order=...) in this
# code to correct.
##
if output:
order = _listify_str(kwargs.pop('order', ['XV', 'nc', 'fdf', 'TSHS']))
else:
order = _listify_str(kwargs.pop('order', ['fdf']))
for f in order:
v = getattr(self, '_r_geometry_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
if self.track:
info(f"{self.file}(read_geometry) found in file={f}")
return v
return None
def _r_geometry_xv(self, *args, **kwargs):
""" Returns `SuperCell` object from the FDF file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.XV')
geom = None
if f.is_file():
basis = self.read_basis()
if basis is None:
geom = xvSileSiesta(f).read_geometry()
else:
geom = xvSileSiesta(f).read_geometry(species_Z=True)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
for atom, _ in geom.atoms.iter(True):
geom.atoms.replace(atom, basis[atom.Z-1])
geom.reduce()
nsc = self.read_supercell_nsc()
geom.set_nsc(nsc)
return geom
def _r_geometry_nc(self):
# Read geometry from <>.nc file
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
if f.is_file():
return ncSileSiesta(f).read_geometry()
return None
def _r_geometry_tshs(self):
# Read geometry from <>.TSHS file
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSHS')
if f.is_file():
# Default to a geometry with the correct atomic numbers etc.
return tshsSileSiesta(f).read_geometry(geometry=self.read_geometry(False))
return None
def _r_geometry_fdf(self, *args, **kwargs):
""" Returns Geometry object from the FDF file
        NOTE: The interaction range of the atoms is currently not read.
"""
sc = self.read_supercell(order='fdf')
# No fractional coordinates
is_frac = False
# Read atom scaling
lc = self.get('AtomicCoordinatesFormat', default='Bohr').lower()
if 'ang' in lc or 'notscaledcartesianang' in lc:
s = 1.
elif 'bohr' in lc or 'notscaledcartesianbohr' in lc:
s = Bohr2Ang
elif 'scaledcartesian' in lc:
# the same scaling as the lattice-vectors
s = self.get('LatticeConstant', unit='Ang')
elif 'fractional' in lc or 'scaledbylatticevectors' in lc:
# no scaling of coordinates as that is entirely
# done by the latticevectors
s = 1.
is_frac = True
# If the user requests a shifted geometry
# we correct for this
origo = _a.zerosd([3])
lor = self.get('AtomicCoordinatesOrigin')
if lor:
if kwargs.get('origin', True):
if isinstance(lor, str):
origo = lor.lower()
else:
origo = _a.asarrayd(list(map(float, lor[0].split()[:3]))) * s
# Origo cannot be interpreted with fractional coordinates
# hence, it is not transformed.
# Read atom block
atms = self.get('AtomicCoordinatesAndAtomicSpecies')
if atms is None:
raise SileError('AtomicCoordinatesAndAtomicSpecies block could not be found')
# Read number of atoms and block
# We default to the number of elements in the
# AtomicCoordinatesAndAtomicSpecies block
na = self.get('NumberOfAtoms', default=len(atms))
# Reduce space if number of atoms specified
if na < len(atms):
# align number of atoms and atms array
atms = atms[:na]
elif na > len(atms):
raise SileError('NumberOfAtoms is larger than the atoms defined in the blocks')
elif na == 0:
raise SileError('NumberOfAtoms has been determined to be zero, no atoms.')
# Create array
xyz = _a.emptyd([na, 3])
species = _a.emptyi([na])
for ia in range(na):
l = atms[ia].split()
xyz[ia, :] = [float(k) for k in l[:3]]
species[ia] = int(l[3]) - 1
if is_frac:
xyz = np.dot(xyz, sc.cell)
xyz *= s
# Read the block (not strictly needed, if so we simply set all atoms to H)
atoms = self.read_basis()
if atoms is None:
            warn('Block ChemicalSpeciesLabel does not exist, cannot determine the basis (all Hydrogen).')
# Default atom (hydrogen)
atoms = Atom(1)
else:
atoms = [atoms[i] for i in species]
atoms = Atoms(atoms, na=len(xyz))
if isinstance(origo, str):
opt = origo
if opt.startswith('cop'):
origo = sc.cell.sum(0) * 0.5 - np.average(xyz, 0)
elif opt.startswith('com'):
# TODO for ghost atoms its mass should not be used
                w = atoms.mass
w /= w.sum()
origo = sc.cell.sum(0) * 0.5 - np.average(xyz, 0, weights=w)
elif opt.startswith('min'):
origo = - np.amin(xyz, 0)
if len(opt) > 4:
opt = opt[4:]
if opt == 'x':
origo[1:] = 0.
elif opt == 'y':
origo[[0, 2]] = 0.
elif opt == 'z':
origo[:2] = 0.
elif opt == 'xy' or opt == 'yx':
origo[2] = 0.
elif opt == 'xz' or opt == 'zx':
origo[1] = 0.
elif opt == 'yz' or opt == 'zy':
origo[0] = 0.
xyz += origo
# Create and return geometry object
return Geometry(xyz, atoms, sc=sc)
def read_grid(self, name, *args, **kwargs):
""" Read grid related information from any of the output files
        The order in which the data is read is shown below.
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
name : str
name of data to read. The list of names correspond to the
Siesta output manual (Rho, TotalPotential, etc.), the strings are
case insensitive.
order: list of str, optional
            the order of which to try and read the grid.
By default this is ``['nc', 'grid.nc', 'bin']`` (bin refers to the binary files)
"""
order = _listify_str(kwargs.pop('order', ['nc', 'grid.nc', 'bin']))
for f in order:
v = getattr(self, '_r_grid_{}'.format(f.lower().replace('.', '_')))(name, *args, **kwargs)
if v is not None:
if self.track:
info(f"{self.file}(read_grid) found in file={f}")
return v
return None
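    # Usage sketch (grid names follow the Siesta manual and are case-insensitive):
    #
    #   rho = fdf.read_grid('Rho')                            # nc, grid.nc, then binary files
    #   vt = fdf.read_grid('TotalPotential', order=['bin'])   # only the binary .VT file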
def _r_grid_nc(self, name, *args, **kwargs):
# Read grid from the <>.nc file
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
if f.is_file():
# Capitalize correctly
name = {'rho': 'Rho',
'rhoinit': 'RhoInit',
'vna': 'Vna',
'ioch': 'Chlocal',
'chlocal': 'Chlocal',
'toch': 'RhoTot',
'totalcharge': 'RhoTot',
'rhotot': 'RhoTot',
'drho': 'RhoDelta',
'deltarho': 'RhoDelta',
'rhodelta': 'RhoDelta',
'vh': 'Vh',
'electrostaticpotential': 'Vh',
'rhoxc': 'RhoXC',
'vt': 'Vt',
'totalpotential': 'Vt',
'bader': 'RhoBader',
'baderrho': 'RhoBader',
'rhobader': 'RhoBader'
}.get(name.lower())
return ncSileSiesta(f).read_grid(name, **kwargs)
return None
def _r_grid_grid_nc(self, name, *args, **kwargs):
# Read grid from the <>.nc file
name = {'rho': 'Rho',
'rhoinit': 'RhoInit',
'vna': 'Vna',
'ioch': 'Chlocal',
'chlocal': 'Chlocal',
'toch': 'TotalCharge',
'totalcharge': 'TotalCharge',
'rhotot': 'TotalCharge',
'drho': 'DeltaRho',
'deltarho': 'DeltaRho',
'rhodelta': 'DeltaRho',
'vh': 'ElectrostaticPotential',
'electrostaticpotential': 'ElectrostaticPotential',
'rhoxc': 'RhoXC',
'vt': 'TotalPotential',
'totalpotential': 'TotalPotential',
'bader': 'BaderCharge',
'baderrho': 'BaderCharge',
'rhobader': 'BaderCharge'
}.get(name.lower()) + '.grid.nc'
f = self.dir_file(name)
if f.is_file():
grid = gridncSileSiesta(f).read_grid(*args, **kwargs)
grid.set_geometry(self.read_geometry(True))
return grid
return None
def _r_grid_bin(self, name, *args, **kwargs):
# Read grid from the <>.VT/... file
name = {'rho': '.RHO',
'rhoinit': '.RHOINIT',
'vna': '.VNA',
'ioch': '.IOCH',
'chlocal': '.IOCH',
'toch': '.TOCH',
'totalcharge': '.TOCH',
'rhotot': '.TOCH',
'drho': '.DRHO',
'deltarho': '.DRHO',
'rhodelta': '.DRHO',
'vh': '.VH',
'electrostaticpotential': '.VH',
'rhoxc': '.RHOXC',
'vt': '.VT',
'totalpotential': '.VT',
'bader': '.BADER',
'baderrho': '.BADER',
'rhobader': '.BADER'
}.get(name.lower())
f = self.dir_file(self.get('SystemLabel', default='siesta') + name)
if f.is_file():
grid = get_sile_class(f)(f).read_grid(*args, **kwargs)
grid.set_geometry(self.read_geometry(True))
return grid
return None
def read_basis(self, *args, **kwargs):
""" Read the atomic species and figure out the number of atomic orbitals in their basis
The order of the read is shown below.
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
order: list of str, optional
the order of which to try and read the basis information.
By default this is ``['nc', 'ion', 'ORB_INDX', 'fdf']``
"""
order = _listify_str(kwargs.pop('order', ['nc', 'ion', 'ORB_INDX', 'fdf']))
for f in order:
v = getattr(self, '_r_basis_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
if self.track:
info(f"{self.file}(read_basis) found in file={f}")
return v
return None
def _r_basis_nc(self):
# Read basis from <>.nc file
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
if f.is_file():
return ncSileSiesta(f).read_basis()
return None
def _r_basis_ion(self):
# Read basis from <>.ion.nc file or <>.ion.xml
spcs = self.get('ChemicalSpeciesLabel')
if spcs is None:
# We haven't found the chemical and species label
# so return nothing
return None
# Now spcs contains the block of the chemicalspecieslabel
atoms = [None] * len(spcs)
found_one = False
found_all = True
for spc in spcs:
idx, Z, lbl = spc.split()[:3]
idx = int(idx) - 1 # F-indexing
Z = int(Z)
lbl = lbl.strip()
f = self.dir_file(lbl + ".ext")
# now try and read the basis
if f.with_suffix('.ion.nc').is_file():
atoms[idx] = ionncSileSiesta(f.with_suffix('.ion.nc')).read_basis()
found_one = True
elif f.with_suffix('.ion.xml').is_file():
atoms[idx] = ionxmlSileSiesta(f.with_suffix('.ion.xml')).read_basis()
found_one = True
else:
# default the atom to not have a range, and no associated orbitals
atoms[idx] = Atom(Z=Z, tag=lbl)
found_all = False
if found_one and not found_all:
warn("Siesta basis information could not read all ion.nc/ion.xml files. "
"Only a subset of the basis information is accessible.")
elif not found_one:
return None
return atoms
def _r_basis_orb_indx(self):
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.ORB_INDX')
if f.is_file():
info(f"Siesta basis information is read from {f}, the radial functions are not accessible.")
return orbindxSileSiesta(f).read_basis(atoms=self._r_basis_fdf())
return None
def _r_basis_fdf(self):
# Read basis from fdf file
spcs = self.get('ChemicalSpeciesLabel')
if spcs is None:
# We haven't found the chemical and species label
# so return nothing
return None
all_mass = self.get('AtomicMass', default=[])
# default mass
mass = None
# Now spcs contains the block of the chemicalspecieslabel
atoms = [None] * len(spcs)
for spc in spcs:
idx, Z, lbl = spc.split()[:3]
idx = int(idx) - 1 # F-indexing
Z = int(Z)
lbl = lbl.strip()
if len(all_mass) > 0:
for mass_line in all_mass:
s, mass = mass_line.split()
if int(s) - 1 == idx:
mass = float(mass)
break
else:
mass = None
atoms[idx] = Atom(Z=Z, mass=mass, tag=lbl)
return atoms
def _r_add_overlap(self, parent_call, M):
""" Internal routine to ensure that the overlap matrix is read and added to the matrix `M` """
try:
S = self.read_overlap()
# Check for the same sparsity pattern
if np.all(M._csr.col == S._csr.col):
M._csr._D[:, -1] = S._csr._D[:, 0]
else:
raise ValueError
except:
            warn(str(self) + f' could not successfully read the overlap matrix in {parent_call}.')
def read_density_matrix(self, *args, **kwargs):
""" Try and read density matrix by reading the <>.nc, <>.TSDE files, <>.DM (in that order)
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
order: list of str, optional
the order of which to try and read the density matrix
By default this is ``['nc', 'TSDE', 'DM']``.
"""
order = _listify_str(kwargs.pop('order', ['nc', 'TSDE', 'DM']))
for f in order:
DM = getattr(self, '_r_density_matrix_{}'.format(f.lower()))(*args, **kwargs)
if DM is not None:
_track(self.read_density_matrix, f"found file {f}")
return DM
return None
def _r_density_matrix_nc(self, *args, **kwargs):
""" Try and read the density matrix by reading the <>.nc """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_density_matrix_nc, f)
DM = None
if f.is_file():
# this *should* also contain the overlap matrix
DM = ncSileSiesta(f).read_density_matrix(*args, **kwargs)
return DM
def _r_density_matrix_tsde(self, *args, **kwargs):
""" Read density matrix from the TSDE file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSDE')
_track_file(self._r_density_matrix_tsde, f)
DM = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS', 'fdf'])
DM = tsdeSileSiesta(f).read_density_matrix(*args, **kwargs)
self._r_add_overlap('_r_density_matrix_tsde', DM)
return DM
def _r_density_matrix_dm(self, *args, **kwargs):
""" Read density matrix from the DM file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.DM')
_track_file(self._r_density_matrix_dm, f)
DM = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS', 'fdf'])
DM = dmSileSiesta(f).read_density_matrix(*args, **kwargs)
self._r_add_overlap('_r_density_matrix_dm', DM)
return DM
def read_energy_density_matrix(self, *args, **kwargs):
""" Try and read energy density matrix by reading the <>.nc or <>.TSDE files (in that order)
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
order: list of str, optional
the order of which to try and read the density matrix
By default this is ``['nc', 'TSDE']``.
"""
order = _listify_str(kwargs.pop('order', ['nc', 'TSDE']))
for f in order:
EDM = getattr(self, '_r_energy_density_matrix_{}'.format(f.lower()))(*args, **kwargs)
if EDM is not None:
_track(self.read_energy_density_matrix, f"found file {f}")
return EDM
return None
def _r_energy_density_matrix_nc(self, *args, **kwargs):
""" Read energy density matrix by reading the <>.nc """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_energy_density_matrix_nc, f)
if f.is_file():
return ncSileSiesta(f).read_energy_density_matrix(*args, **kwargs)
return None
def _r_energy_density_matrix_tsde(self, *args, **kwargs):
""" Read energy density matrix from the TSDE file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSDE')
_track_file(self._r_energy_density_matrix_tsde, f)
EDM = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS'])
EDM = tsdeSileSiesta(f).read_energy_density_matrix(*args, **kwargs)
self._r_add_overlap('_r_energy_density_matrix_tsde', EDM)
return EDM
def read_overlap(self, *args, **kwargs):
""" Try and read the overlap matrix by reading the <>.nc, <>.TSHS files, <>.HSX, <>.onlyS (in that order)
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
order: list of str, optional
the order of which to try and read the overlap matrix
By default this is ``['nc', 'TSHS', 'HSX', 'onlyS']``.
"""
order = _listify_str(kwargs.pop('order', ['nc', 'TSHS', 'HSX', 'onlyS']))
for f in order:
v = getattr(self, '_r_overlap_{}'.format(f.lower()))(*args, **kwargs)
if v is not None:
_track(self.read_overlap, f"found file {f}")
return v
return None
def _r_overlap_nc(self, *args, **kwargs):
""" Read overlap from the nc file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_overlap_nc, f)
if f.is_file():
return ncSileSiesta(f).read_overlap(*args, **kwargs)
return None
def _r_overlap_tshs(self, *args, **kwargs):
""" Read overlap from the TSHS file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSHS')
_track_file(self._r_overlap_tshs, f)
S = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS'])
S = tshsSileSiesta(f).read_overlap(*args, **kwargs)
return S
def _r_overlap_hsx(self, *args, **kwargs):
""" Read overlap from the HSX file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.HSX')
_track_file(self._r_overlap_hsx, f)
S = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS', 'fdf'])
S = hsxSileSiesta(f).read_overlap(*args, **kwargs)
return S
def _r_overlap_onlys(self, *args, **kwargs):
""" Read overlap from the onlyS file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.onlyS')
_track_file(self._r_overlap_onlys, f)
S = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS', 'fdf'])
S = onlysSileSiesta(f).read_overlap(*args, **kwargs)
return S
def read_hamiltonian(self, *args, **kwargs):
""" Try and read the Hamiltonian by reading the <>.nc, <>.TSHS files, <>.HSX (in that order)
One can limit the tried files to only one file by passing
only a single file ending.
Parameters
----------
order: list of str, optional
the order of which to try and read the Hamiltonian.
By default this is ``['nc', 'TSHS', 'HSX']``.
"""
order = _listify_str(kwargs.pop('order', ['nc', 'TSHS', 'HSX']))
for f in order:
H = getattr(self, '_r_hamiltonian_{}'.format(f.lower()))(*args, **kwargs)
if H is not None:
_track(self.read_hamiltonian, f"found file {f}")
return H
return None
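    # Usage sketch:
    #
    #   H = fdf.read_hamiltonian()               # tries nc, TSHS, then HSX
    #   H = fdf.read_hamiltonian(order=['TSHS'])
    #   # when read from HSX the Hamiltonian is shifted by -Ef, provided the Fermi level is found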
def _r_hamiltonian_nc(self, *args, **kwargs):
""" Read Hamiltonian from the nc file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.nc')
_track_file(self._r_hamiltonian_nc, f)
if f.is_file():
return ncSileSiesta(f).read_hamiltonian(*args, **kwargs)
return None
def _r_hamiltonian_tshs(self, *args, **kwargs):
""" Read Hamiltonian from the TSHS file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.TSHS')
_track_file(self._r_hamiltonian_tshs, f)
H = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS'])
H = tshsSileSiesta(f).read_hamiltonian(*args, **kwargs)
return H
def _r_hamiltonian_hsx(self, *args, **kwargs):
""" Read Hamiltonian from the HSX file """
f = self.dir_file(self.get('SystemLabel', default='siesta') + '.HSX')
_track_file(self._r_hamiltonian_hsx, f)
H = None
if f.is_file():
if 'geometry' not in kwargs:
# to ensure we get the correct orbital count
kwargs['geometry'] = self.read_geometry(True, order=['nc', 'TSHS', 'fdf'])
H = hsxSileSiesta(f).read_hamiltonian(*args, **kwargs)
Ef = self.read_fermi_level()
if Ef is None:
info(f"{str(self)}.read_hamiltonian from HSX file failed shifting to the Fermi-level.")
else:
H.shift(-Ef)
return H
@default_ArgumentParser(description="Manipulate a FDF file.")
def ArgumentParser(self, p=None, *args, **kwargs):
""" Returns the arguments that is available for this Sile """
import argparse
# We must by-pass this fdf-file for importing
import sisl.io.siesta as sis
# The fdf parser is more complicated
# It is based on different settings based on the
sp = p.add_subparsers(help="Determine which part of the fdf-file that should be processed.")
# Get the label which retains all the sub-modules
label = self.get('SystemLabel', default='siesta')
f_label = label + ".ext"
def label_file(suffix):
return self.dir_file(f_label).with_suffix(suffix)
# The default on all sub-parsers are the retrieval and setting
d = {
'_fdf': self,
'_fdf_first': True,
}
namespace = default_namespace(**d)
ep = sp.add_parser('edit',
help='Change or read and print data from the fdf file')
# As the fdf may provide additional stuff, we do not add EVERYTHING from
# the Geometry class.
class FDFAdd(argparse.Action):
def __call__(self, parser, ns, values, option_string=None):
key = values[0]
val = values[1]
if ns._fdf_first:
# Append to the end of the file
with ns._fdf as fd:
fd.write('\n\n# SISL added keywords\n')
setattr(ns, '_fdf_first', False)
ns._fdf.set(key, val)
ep.add_argument('--set', '-s', nargs=2, metavar=('KEY', 'VALUE'),
action=FDFAdd,
help='Add a key to the FDF file. If it already exists it will be overwritten')
class FDFGet(argparse.Action):
def __call__(self, parser, ns, value, option_string=None):
# Retrieve the value in standard units
# Currently, we write out the unit "as-is"
val = ns._fdf.get(value[0], with_unit=True)
if val is None:
print(f'# {value[0]} is currently not in the FDF file ')
return
if isinstance(val, tuple):
print(ns._fdf.print(value[0], '{} {}'.format(*val)))
else:
print(ns._fdf.print(value[0], val))
ep.add_argument('--get', '-g', nargs=1, metavar='KEY',
action=FDFGet,
help='Print (to stdout) the value of the key in the FDF file.')
        # If the XV file exists, it takes precedence
        # over the geometry contained in the fdf file
        # (we will issue a warning in that case)
f = label_file('.XV')
try:
geom = self.read_geometry(True)
tmp_p = sp.add_parser('geom',
help="Edit the contained geometry in the file")
tmp_p, tmp_ns = geom.ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
except:
# Allowed pass due to pythonic reading
pass
f = label_file('.bands')
if f.is_file():
tmp_p = sp.add_parser('band',
help="Manipulate bands file from the Siesta simulation")
tmp_p, tmp_ns = sis.bandsSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
f = label_file('.PDOS.xml')
if f.is_file():
tmp_p = sp.add_parser('pdos',
help="Manipulate PDOS.xml file from the Siesta simulation")
tmp_p, tmp_ns = sis.pdosSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
f = label_file('.EIG')
if f.is_file():
tmp_p = sp.add_parser('eig',
help="Manipulate EIG file from the Siesta simulation")
tmp_p, tmp_ns = sis.eigSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
#f = label + '.FA'
#if isfile(f):
# tmp_p = sp.add_parser('force',
# help="Manipulate FA file from the Siesta simulation")
# tmp_p, tmp_ns = sis.faSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
# namespace = merge_instances(namespace, tmp_ns)
f = label_file('.TBT.nc')
if f.is_file():
tmp_p = sp.add_parser('tbt',
help="Manipulate tbtrans output file")
tmp_p, tmp_ns = sis.tbtncSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
f = label_file('.TBT.Proj.nc')
if f.is_file():
tmp_p = sp.add_parser('tbt-proj',
help="Manipulate tbtrans projection output file")
tmp_p, tmp_ns = sis.tbtprojncSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
f = label_file('.PHT.nc')
if f.is_file():
tmp_p = sp.add_parser('pht',
help="Manipulate the phtrans output file")
tmp_p, tmp_ns = sis.phtncSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
f = label_file('.PHT.Proj.nc')
if f.is_file():
tmp_p = sp.add_parser('pht-proj',
help="Manipulate phtrans projection output file")
tmp_p, tmp_ns = sis.phtprojncSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
f = label_file('.nc')
if f.is_file():
tmp_p = sp.add_parser('nc',
help="Manipulate Siesta NetCDF output file")
tmp_p, tmp_ns = sis.ncSileSiesta(f).ArgumentParser(tmp_p, *args, **kwargs)
namespace = merge_instances(namespace, tmp_ns)
return p, namespace
add_sile('fdf', fdfSileSiesta, case=False, gzip=True)
|
tukutela/Kay-Framework
|
refs/heads/master
|
kay/lib/babel/plural.py
|
7
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""CLDR Plural support. See UTS #35. EXPERIMENTAL"""
import re
from babel.util import frozenset, set
__all__ = ['PluralRule', 'RuleError', 'to_gettext', 'to_javascript',
'to_python']
__docformat__ = 'restructuredtext en'
_plural_tags = ('zero', 'one', 'two', 'few', 'many', 'other')
_fallback_tag = 'other'
class PluralRule(object):
"""Represents a set of language pluralization rules. The constructor
accepts a list of (tag, expr) tuples or a dict of CLDR rules. The
    resulting object is callable and accepts one parameter, a positive or
    negative number (integer or float), and returns the tag of the matching
    plural form:
>>> rule = PluralRule({'one': 'n is 1'})
>>> rule(1)
'one'
>>> rule(2)
'other'
Currently the CLDR defines these tags: zero, one, two, few, many and
other where other is an implicit default. Rules should be mutually
exclusive; for a given numeric value, only one rule should apply (i.e.
    the condition should only be true for one of the plural rule elements).
"""
__slots__ = ('abstract', '_func')
def __init__(self, rules):
"""Initialize the rule instance.
        :param rules: a list of ``(tag, expr)`` tuples with the rules
conforming to UTS #35 or a dict with the tags as keys
and expressions as values.
:raise RuleError: if the expression is malformed
"""
if isinstance(rules, dict):
rules = rules.items()
found = set()
self.abstract = []
for key, expr in rules:
if key not in _plural_tags:
raise ValueError('unknown tag %r' % key)
elif key in found:
raise ValueError('tag %r defined twice' % key)
found.add(key)
self.abstract.append((key, _Parser(expr).ast))
def __repr__(self):
rules = self.rules
return '<%s %r>' % (
type(self).__name__,
', '.join(['%s: %s' % (tag, rules[tag]) for tag in _plural_tags
if tag in rules])
)
def parse(cls, rules):
"""Create a `PluralRule` instance for the given rules. If the rules
are a `PluralRule` object, that object is returned.
:param rules: the rules as list or dict, or a `PluralRule` object
:return: a corresponding `PluralRule` object
        :raise RuleError: if the expression is malformed
"""
if isinstance(rules, cls):
return rules
return cls(rules)
parse = classmethod(parse)
def rules(self):
"""The `PluralRule` as a dict of unicode plural rules.
>>> rule = PluralRule({'one': 'n is 1'})
>>> rule.rules
{'one': 'n is 1'}
"""
_compile = _UnicodeCompiler().compile
return dict([(tag, _compile(ast)) for tag, ast in self.abstract])
rules = property(rules, doc=rules.__doc__)
tags = property(lambda x: frozenset([i[0] for i in x.abstract]), doc="""
A set of explicitly defined tags in this rule. The implicit default
    ``'other'`` rule is not part of this set unless there is an explicit
rule for it.""")
def __getstate__(self):
return self.abstract
def __setstate__(self, abstract):
self.abstract = abstract
def __call__(self, n):
if not hasattr(self, '_func'):
self._func = to_python(self)
return self._func(n)
def to_javascript(rule):
"""Convert a list/dict of rules or a `PluralRule` object into a JavaScript
function. This function depends on no external library:
>>> to_javascript({'one': 'n is 1'})
"(function(n) { return (n == 1) ? 'one' : 'other'; })"
Implementation detail: The function generated will probably evaluate
    expressions involved in range operations multiple times. This has the
advantage that external helper functions are not required and is not a
big performance hit for these simple calculations.
:param rule: the rules as list or dict, or a `PluralRule` object
:return: a corresponding JavaScript function as `str`
:raise RuleError: if the expression is malformed
"""
to_js = _JavaScriptCompiler().compile
result = ['(function(n) { return ']
for tag, ast in PluralRule.parse(rule).abstract:
result.append('%s ? %r : ' % (to_js(ast), tag))
result.append('%r; })' % _fallback_tag)
return ''.join(result)
def to_python(rule):
"""Convert a list/dict of rules or a `PluralRule` object into a regular
Python function. This is useful in situations where you need a real
    function and don't care about the actual rule object:
>>> func = to_python({'one': 'n is 1', 'few': 'n in 2..4'})
>>> func(1)
'one'
>>> func(3)
'few'
:param rule: the rules as list or dict, or a `PluralRule` object
:return: a corresponding Python function
:raise RuleError: if the expression is malformed
"""
namespace = {
'IN': in_range,
'WITHIN': within_range,
'MOD': cldr_modulo
}
to_python = _PythonCompiler().compile
result = ['def evaluate(n):']
for tag, ast in PluralRule.parse(rule).abstract:
result.append(' if (%s): return %r' % (to_python(ast), tag))
result.append(' return %r' % _fallback_tag)
exec '\n'.join(result) in namespace
return namespace['evaluate']
def to_gettext(rule):
"""The plural rule as gettext expression. The gettext expression is
technically limited to integers and returns indices rather than tags.
>>> to_gettext({'one': 'n is 1', 'two': 'n is 2'})
'nplurals=3; plural=((n == 2) ? 1 : (n == 1) ? 0 : 2)'
:param rule: the rules as list or dict, or a `PluralRule` object
:return: an equivalent gettext-style plural expression
:raise RuleError: if the expression is malformed
"""
rule = PluralRule.parse(rule)
used_tags = rule.tags | set([_fallback_tag])
_compile = _GettextCompiler().compile
_get_index = [tag for tag in _plural_tags if tag in used_tags].index
result = ['nplurals=%d; plural=(' % len(used_tags)]
for tag, ast in rule.abstract:
result.append('%s ? %d : ' % (_compile(ast), _get_index(tag)))
result.append('%d)' % _get_index(_fallback_tag))
return ''.join(result)
def in_range(num, min, max):
"""Integer range test. This is the callback for the "in" operator
of the UTS #35 pluralization rule language:
>>> in_range(1, 1, 3)
True
>>> in_range(3, 1, 3)
True
>>> in_range(1.2, 1, 4)
False
>>> in_range(10, 1, 4)
False
"""
return num == int(num) and within_range(num, min, max)
def within_range(num, min, max):
"""Float range test. This is the callback for the "within" operator
of the UTS #35 pluralization rule language:
>>> within_range(1, 1, 3)
True
>>> within_range(1.0, 1, 3)
True
>>> within_range(1.2, 1, 4)
True
>>> within_range(10, 1, 4)
False
"""
return num >= min and num <= max
def cldr_modulo(a, b):
"""Javaish modulo. This modulo operator returns the value with the sign
of the dividend rather than the divisor like Python does:
>>> cldr_modulo(-3, 5)
-3
>>> cldr_modulo(-3, -5)
-3
>>> cldr_modulo(3, 5)
3
"""
reverse = 0
if a < 0:
a *= -1
reverse = 1
if b < 0:
b *= -1
rv = a % b
if reverse:
rv *= -1
return rv
class RuleError(Exception):
"""Raised if a rule is malformed."""
class _Parser(object):
"""Internal parser. This class can translate a single rule into an abstract
tree of tuples. It implements the following grammar::
condition = and_condition ('or' and_condition)*
and_condition = relation ('and' relation)*
relation = is_relation | in_relation | within_relation | 'n' <EOL>
is_relation = expr 'is' ('not')? value
in_relation = expr ('not')? 'in' range
within_relation = expr ('not')? 'within' range
expr = 'n' ('mod' value)?
value = digit+
digit = 0|1|2|3|4|5|6|7|8|9
range = value'..'value
- Whitespace can occur between or around any of the above tokens.
- Rules should be mutually exclusive; for a given numeric value, only one
rule should apply (i.e. the condition should only be true for one of
      the plural rule elements).
    The translator parses the expression on instantiation into an attribute
called `ast`.
"""
_rules = [
(None, re.compile(r'\s+(?u)')),
('word', re.compile(r'\b(and|or|is|(?:with)?in|not|mod|n)\b')),
('value', re.compile(r'\d+')),
('ellipsis', re.compile(r'\.\.'))
]
def __init__(self, string):
string = string.lower()
result = []
pos = 0
end = len(string)
while pos < end:
for tok, rule in self._rules:
match = rule.match(string, pos)
if match is not None:
pos = match.end()
if tok:
result.append((tok, match.group()))
break
else:
raise RuleError('malformed CLDR pluralization rule. '
'Got unexpected %r' % string[pos])
self.tokens = result[::-1]
self.ast = self.condition()
if self.tokens:
raise RuleError('Expected end of rule, got %r' %
self.tokens[-1][1])
def test(self, type, value=None):
return self.tokens and self.tokens[-1][0] == type and \
(value is None or self.tokens[-1][1] == value)
def skip(self, type, value=None):
if self.test(type, value):
return self.tokens.pop()
def expect(self, type, value=None, term=None):
token = self.skip(type, value)
if token is not None:
return token
if term is None:
term = repr(value is None and type or value)
if not self.tokens:
raise RuleError('expected %s but end of rule reached' % term)
raise RuleError('expected %s but got %r' % (term, self.tokens[-1][1]))
def condition(self):
op = self.and_condition()
while self.skip('word', 'or'):
op = 'or', (op, self.and_condition())
return op
def and_condition(self):
op = self.relation()
while self.skip('word', 'and'):
op = 'and', (op, self.relation())
return op
def relation(self):
left = self.expr()
if self.skip('word', 'is'):
return self.skip('word', 'not') and 'isnot' or 'is', \
(left, self.value())
negated = self.skip('word', 'not')
method = 'in'
if self.skip('word', 'within'):
method = 'within'
else:
self.expect('word', 'in', term="'within' or 'in'")
rv = 'relation', (method, left, self.range())
if negated:
rv = 'not', (rv,)
return rv
def range(self):
left = self.value()
self.expect('ellipsis')
return 'range', (left, self.value())
def expr(self):
self.expect('word', 'n')
if self.skip('word', 'mod'):
return 'mod', (('n', ()), self.value())
return 'n', ()
def value(self):
return 'value', (int(self.expect('value')[1]),)
def _binary_compiler(tmpl):
"""Compiler factory for the `_Compiler`."""
return lambda self, l, r: tmpl % (self.compile(l), self.compile(r))
def _unary_compiler(tmpl):
"""Compiler factory for the `_Compiler`."""
return lambda self, x: tmpl % self.compile(x)
class _Compiler(object):
"""The compilers are able to transform the expressions into multiple
output formats.
"""
def compile(self, (op, args)):
return getattr(self, 'compile_' + op)(*args)
compile_n = lambda x: 'n'
compile_value = lambda x, v: str(v)
compile_and = _binary_compiler('(%s && %s)')
compile_or = _binary_compiler('(%s || %s)')
compile_not = _unary_compiler('(!%s)')
compile_mod = _binary_compiler('(%s %% %s)')
compile_is = _binary_compiler('(%s == %s)')
compile_isnot = _binary_compiler('(%s != %s)')
def compile_relation(self, method, expr, range):
range = '%s, %s' % tuple(map(self.compile, range[1]))
return '%s(%s, %s)' % (method.upper(), self.compile(expr), range)
class _PythonCompiler(_Compiler):
"""Compiles an expression to Python."""
compile_and = _binary_compiler('(%s and %s)')
compile_or = _binary_compiler('(%s or %s)')
compile_not = _unary_compiler('(not %s)')
compile_mod = _binary_compiler('MOD(%s, %s)')
class _GettextCompiler(_Compiler):
"""Compile into a gettext plural expression."""
def compile_relation(self, method, expr, range):
expr = self.compile(expr)
min, max = map(self.compile, range[1])
return '(%s >= %s && %s <= %s)' % (expr, min, expr, max)
class _JavaScriptCompiler(_GettextCompiler):
"""Compiles the expression to plain of JavaScript."""
def compile_relation(self, method, expr, range):
code = _GettextCompiler.compile_relation(self, method, expr, range)
if method == 'in':
expr = self.compile(expr)
code = '(parseInt(%s) == %s && %s)' % (expr, expr, code)
return code
class _UnicodeCompiler(_Compiler):
"""Returns a unicode pluralization rule again."""
compile_is = _binary_compiler('%s is %s')
compile_isnot = _binary_compiler('%s is not %s')
compile_and = _binary_compiler('%s and %s')
compile_or = _binary_compiler('%s or %s')
compile_mod = _binary_compiler('%s mod %s')
def compile_not(self, relation):
return self.compile_relation(negated=True, *relation[1])
def compile_relation(self, method, expr, range, negated=False):
return '%s%s %s %s' % (
self.compile(expr), negated and ' not' or '',
method, '%s..%s' % tuple(map(self.compile, range[1]))
)
|
arpruss/raspberryjam-pe
|
refs/heads/master
|
p2/scripts3/squarecurve.py
|
2
|
#
# Code by Alexander Pruss and under the MIT license
#
import lsystem
from mineturtle import *
t = Turtle()
t.pendelay(0)
t.turtle(None)
t.penblock(block.BRICK_BLOCK)
t.gridalign()
# http://mathforum.org/advanced/robertd/lsys2d.html
rules = { 'X': 'XF-F+F-XF+F+XF-F+F-X' }
def go():
t.startface()
for i in range(4):
t.go(4)
t.pitch(90)
t.endface()
t.go(4)
dictionary = {
'F': go,
'+': lambda: t.yaw(90),
'-': lambda: t.yaw(-90),
}
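# Expand the axiom 'X' four times using the rule above and execute the result:
# each 'F' traces a 4-block brick square as a face and then advances 4 blocks,
# while '+'/'-' turn the turtle by +/-90 degrees (assumed lsystem semantics).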
lsystem.lsystem('X', rules, dictionary, 4)
|
ocadotechnology/boto
|
refs/heads/develop
|
tests/integration/awslambda/__init__.py
|
586
|
# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
|
romilbhardwaj/OlaDash
|
refs/heads/master
|
OlaDashServer/OlaDash/views.py
|
1
|
from django.shortcuts import render, HttpResponse
from django.conf import settings
def requestcab(request):
Lat = float(getattr(settings, "USER_LAT", None))
Lon = float(getattr(settings, "USER_LON", None))
print Lat
print Lon
#TODO: INSERT OLA API CALL
return HttpResponse("Cab requested at {0}, {1}!".format(Lat,Lon))
def authOla(request):
#TODO: INSERT OLA AUTH PROCESS
return HttpResponse("Move along, nothing to look here.")
|
BT-fgarbely/odoo
|
refs/heads/8.0
|
openerp/report/render/rml2pdf/color.py
|
443
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from reportlab.lib import colors
import re
allcols = colors.getAllNamedColors()
regex_t = re.compile('\(([0-9\.]*),([0-9\.]*),([0-9\.]*)\)')
regex_h = re.compile('#([0-9a-zA-Z][0-9a-zA-Z])([0-9a-zA-Z][0-9a-zA-Z])([0-9a-zA-Z][0-9a-zA-Z])')
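# Illustrative examples (not part of the original module):
#   get('red')      -> the named reportlab color object
#   get('(1,0,0)')  -> (1.0, 0.0, 0.0)
#   get('#ff0000')  -> (1.0, 0.0, 0.0)
# Unrecognised strings fall back to colors.red.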
def get(col_str):
if col_str is None:
col_str = ''
global allcols
if col_str in allcols.keys():
return allcols[col_str]
res = regex_t.search(col_str, 0)
if res:
return float(res.group(1)), float(res.group(2)), float(res.group(3))
res = regex_h.search(col_str, 0)
if res:
return tuple([ float(int(res.group(i),16))/255 for i in range(1,4)])
return colors.red
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
GPflow/GPflow
|
refs/heads/develop
|
doc/source/notebooks/advanced/advanced_many_points.pct.py
|
1
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,.pct.py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# More details on models with many observation points
# --
#
# See SGPR.ipynb. I guess it would also be interesting to cover mini-batching here; a rough sketch follows below.
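# %% [markdown]
# Below is a minimal, illustrative sketch (not part of the original notebook) of
# handling many observation points with a sparse variational GP trained on
# mini-batches, assuming the GPflow 2 `SVGP` API and TensorFlow 2. The data,
# inducing-point count and optimizer settings are placeholders.

# %%
import numpy as np
import tensorflow as tf
import gpflow

# Toy dataset with many observation points (placeholder data).
N = 10000
X = np.random.rand(N, 1)
Y = np.sin(10 * X) + 0.1 * np.random.randn(N, 1)

# Sparse variational GP with M inducing points; `num_data` scales the ELBO
# correctly when it is estimated from mini-batches.
M = 50
model = gpflow.models.SVGP(
    kernel=gpflow.kernels.SquaredExponential(),
    likelihood=gpflow.likelihoods.Gaussian(),
    inducing_variable=X[:M].copy(),
    num_data=N,
)

# Mini-batched training loop driven by a TensorFlow dataset iterator.
batch_iter = iter(
    tf.data.Dataset.from_tensor_slices((X, Y)).repeat().shuffle(N).batch(256)
)
training_loss = model.training_loss_closure(batch_iter, compile=True)
optimizer = tf.optimizers.Adam(learning_rate=0.01)
for _ in range(1000):
    optimizer.minimize(training_loss, model.trainable_variables)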
|
MaxQu/MAVProxy
|
refs/heads/master
|
setup.py
|
2
|
from setuptools import setup
version = "1.4.24"
setup(name='MAVProxy',
version=version,
zip_safe=True,
description='MAVProxy MAVLink ground station',
long_description='''A MAVLink protocol proxy and ground station. MAVProxy
is oriented towards command line operation, and is suitable for embedding in
small autonomous vehicles or for using on ground control stations. It also
features a number of graphical tools such as a slipmap for satellite mapping
view of the vehicle's location, a status console and several useful vehicle
control modules. MAVProxy is extensible via a modules system - see the modules
subdirectory for some example modules. MAVProxy was developed by CanberraUAV
for use in the 2012 Outback Challenge, and includes a module for the
CanberraUAV search and rescue system. See
http://Dronecode.github.io/MAVProxy/ for more information
on how to use MAVProxy.''',
url='https://github.com/Dronecode/MAVProxy',
author='Andrew Tridgell',
author_email='andrew@tridgell.net',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering'],
license='GPLv3',
packages=['MAVProxy',
'MAVProxy.modules',
'MAVProxy.modules.mavproxy_map',
'MAVProxy.modules.mavproxy_misseditor',
'MAVProxy.modules.mavproxy_smartcamera',
'MAVProxy.modules.lib',
'MAVProxy.modules.lib.ANUGA',
'MAVProxy.modules.lib.optparse_gui'],
# note that we do not include all the real dependencies here (like matplotlib etc)
# as that breaks the pip install. It seems that pip is not smart enough to
# use the system versions of these dependencies, so it tries to download and install
# large numbers of modules like numpy etc which may be already installed
install_requires=['pymavlink>=1.1.50',
'pyserial'],
scripts=['MAVProxy/mavproxy.py', 'MAVProxy/tools/mavflightview.py',
'MAVProxy/modules/mavproxy_map/mp_slipmap.py',
'MAVProxy/modules/mavproxy_map/mp_tile.py'],
package_data={'MAVProxy':
['modules/mavproxy_map/data/*.jpg',
'modules/mavproxy_map/data/*.png']}
)
|
VanirAOSP/external_chromium_org
|
refs/heads/kk44
|
tools/telemetry/telemetry/unittest/options_for_unittests.py
|
63
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module provides the global variable options_for_unittests.
This is set to a BrowserOptions object by the test harness, or None
if unit tests are not running.
This allows multiple unit tests to use a specific
browser, in the face of multiple options."""
_options = None
_browser_type = None
def Set(options, browser_type):
global _options
global _browser_type
_options = options
_browser_type = browser_type
def GetCopy():
if not _options:
return None
return _options.Copy()
def AreSet():
if _options:
return True
return False
def GetBrowserType():
return _browser_type
|
davidvon/pipa-pay-server
|
refs/heads/master
|
admin/posplat/services/query_merchant_rsp.py
|
1
|
# coding=utf-8
from posplat.ISO8583.ISO8583 import ISO8583
from posplat.services import PosResponse
from posplat.services.query_merchant_service import QueryMerchantService
class QueryMerchantResponse(PosResponse):
def __init__(self, msg):
super(QueryMerchantResponse, self).__init__(msg)
self.service = QueryMerchantService()
self.trans_datetime = ''
self.deal_time = ''
self.deal_date = ''
self.sys_trace_id = ''
self.merchant_type = ''
self.acquire_institute_code = ''
self.forward_institute_code = ''
self.ack_code = ''
self.term_id = ''
self.merchant_id = ''
self.mac = ''
def load(self):
iso = ISO8583(iso=self.msg)
self.mti = iso.getMTI()
self.precessing_code = iso.getBit(3)
self.trans_datetime = iso.getBit(7)
self.sys_trace_id = iso.getBit(11)
self.deal_time = iso.getBit(12)
self.deal_date = iso.getBit(13)
self.merchant_type = iso.getBit(18)
self.acquire_institute_code = iso.getBit(32)
self.forward_institute_code = iso.getBit(33)
self.ack_code = iso.getBit(39)
self.term_id = iso.getBit(41)
self.merchant_id = iso.getBit(42)
service_data = iso.getBit(62)
self.service.load(service_data)
self.mac = iso.getBit(128)
def query_merchant_response(msg):
response = QueryMerchantResponse(msg)
response.load()
return response
|
yetu/repotools
|
refs/heads/master
|
third_party/boto/mashups/order.py
|
101
|
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
High-level abstraction of an EC2 order for servers
"""
import boto
import boto.ec2
from boto.mashups.server import Server, ServerSet
from boto.mashups.iobject import IObject
from boto.pyami.config import Config
from boto.sdb.persist import get_domain, set_domain
import time, StringIO
InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge']
class Item(IObject):
def __init__(self):
self.region = None
self.name = None
self.instance_type = None
self.quantity = 0
self.zone = None
self.ami = None
self.groups = []
self.key = None
self.ec2 = None
self.config = None
def set_userdata(self, key, value):
self.userdata[key] = value
def get_userdata(self, key):
return self.userdata[key]
def set_region(self, region=None):
if region:
self.region = region
else:
l = [(r, r.name, r.endpoint) for r in boto.ec2.regions()]
self.region = self.choose_from_list(l, prompt='Choose Region')
def set_name(self, name=None):
if name:
self.name = name
else:
self.name = self.get_string('Name')
def set_instance_type(self, instance_type=None):
if instance_type:
self.instance_type = instance_type
else:
self.instance_type = self.choose_from_list(InstanceTypes, 'Instance Type')
def set_quantity(self, n=0):
if n > 0:
self.quantity = n
else:
self.quantity = self.get_int('Quantity')
def set_zone(self, zone=None):
if zone:
self.zone = zone
else:
l = [(z, z.name, z.state) for z in self.ec2.get_all_zones()]
self.zone = self.choose_from_list(l, prompt='Choose Availability Zone')
def set_ami(self, ami=None):
if ami:
self.ami = ami
else:
l = [(a, a.id, a.location) for a in self.ec2.get_all_images()]
self.ami = self.choose_from_list(l, prompt='Choose AMI')
def add_group(self, group=None):
if group:
self.groups.append(group)
else:
l = [(s, s.name, s.description) for s in self.ec2.get_all_security_groups()]
self.groups.append(self.choose_from_list(l, prompt='Choose Security Group'))
def set_key(self, key=None):
if key:
self.key = key
else:
l = [(k, k.name, '') for k in self.ec2.get_all_key_pairs()]
self.key = self.choose_from_list(l, prompt='Choose Keypair')
def update_config(self):
if not self.config.has_section('Credentials'):
self.config.add_section('Credentials')
self.config.set('Credentials', 'aws_access_key_id', self.ec2.aws_access_key_id)
self.config.set('Credentials', 'aws_secret_access_key', self.ec2.aws_secret_access_key)
if not self.config.has_section('Pyami'):
self.config.add_section('Pyami')
sdb_domain = get_domain()
if sdb_domain:
self.config.set('Pyami', 'server_sdb_domain', sdb_domain)
self.config.set('Pyami', 'server_sdb_name', self.name)
def set_config(self, config_path=None):
if not config_path:
config_path = self.get_filename('Specify Config file')
self.config = Config(path=config_path)
def get_userdata_string(self):
s = StringIO.StringIO()
self.config.write(s)
return s.getvalue()
def enter(self, **params):
self.region = params.get('region', self.region)
if not self.region:
self.set_region()
self.ec2 = self.region.connect()
self.name = params.get('name', self.name)
if not self.name:
self.set_name()
self.instance_type = params.get('instance_type', self.instance_type)
if not self.instance_type:
self.set_instance_type()
self.zone = params.get('zone', self.zone)
if not self.zone:
self.set_zone()
self.quantity = params.get('quantity', self.quantity)
if not self.quantity:
self.set_quantity()
self.ami = params.get('ami', self.ami)
if not self.ami:
self.set_ami()
self.groups = params.get('groups', self.groups)
if not self.groups:
self.add_group()
self.key = params.get('key', self.key)
if not self.key:
self.set_key()
self.config = params.get('config', self.config)
if not self.config:
self.set_config()
self.update_config()
class Order(IObject):
def __init__(self):
self.items = []
self.reservation = None
def add_item(self, **params):
item = Item()
item.enter(**params)
self.items.append(item)
def display(self):
print 'This Order consists of the following items'
print
        print 'QTY\tNAME\tTYPE\tAMI\t\tGroups\t\t\tKeyPair'
for item in self.items:
print '%s\t%s\t%s\t%s\t%s\t%s' % (item.quantity, item.name, item.instance_type,
item.ami.id, item.groups, item.key.name)
def place(self, block=True):
if get_domain() == None:
print 'SDB Persistence Domain not set'
domain_name = self.get_string('Specify SDB Domain')
set_domain(domain_name)
s = ServerSet()
for item in self.items:
r = item.ami.run(min_count=1, max_count=item.quantity,
key_name=item.key.name, user_data=item.get_userdata_string(),
security_groups=item.groups, instance_type=item.instance_type,
placement=item.zone.name)
if block:
states = [i.state for i in r.instances]
if states.count('running') != len(states):
print states
time.sleep(15)
states = [i.update() for i in r.instances]
for i in r.instances:
server = Server()
server.name = item.name
server.instance_id = i.id
server.reservation = r
server.save()
s.append(server)
if len(s) == 1:
return s[0]
else:
return s
|
dracos/django
|
refs/heads/master
|
tests/template_tests/templatetags/inclusion.py
|
65
|
import operator
from django.template import Engine, Library
engine = Engine(app_dirs=True)
register = Library()
@register.inclusion_tag('inclusion.html')
def inclusion_no_params():
"""Expected inclusion_no_params __doc__"""
return {"result": "inclusion_no_params - Expected result"}
inclusion_no_params.anything = "Expected inclusion_no_params __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_no_params_from_template():
"""Expected inclusion_no_params_from_template __doc__"""
return {"result": "inclusion_no_params_from_template - Expected result"}
inclusion_no_params_from_template.anything = "Expected inclusion_no_params_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_one_param(arg):
"""Expected inclusion_one_param __doc__"""
return {"result": "inclusion_one_param - Expected result: %s" % arg}
inclusion_one_param.anything = "Expected inclusion_one_param __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_one_param_from_template(arg):
"""Expected inclusion_one_param_from_template __doc__"""
return {"result": "inclusion_one_param_from_template - Expected result: %s" % arg}
inclusion_one_param_from_template.anything = "Expected inclusion_one_param_from_template __dict__"
@register.inclusion_tag('inclusion.html', takes_context=False)
def inclusion_explicit_no_context(arg):
"""Expected inclusion_explicit_no_context __doc__"""
return {"result": "inclusion_explicit_no_context - Expected result: %s" % arg}
inclusion_explicit_no_context.anything = "Expected inclusion_explicit_no_context __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'), takes_context=False)
def inclusion_explicit_no_context_from_template(arg):
"""Expected inclusion_explicit_no_context_from_template __doc__"""
return {"result": "inclusion_explicit_no_context_from_template - Expected result: %s" % arg}
inclusion_explicit_no_context_from_template.anything = "Expected inclusion_explicit_no_context_from_template __dict__"
@register.inclusion_tag('inclusion.html', takes_context=True)
def inclusion_no_params_with_context(context):
"""Expected inclusion_no_params_with_context __doc__"""
return {"result": "inclusion_no_params_with_context - Expected result (context value: %s)" % context['value']}
inclusion_no_params_with_context.anything = "Expected inclusion_no_params_with_context __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'), takes_context=True)
def inclusion_no_params_with_context_from_template(context):
"""Expected inclusion_no_params_with_context_from_template __doc__"""
return {
"result": (
"inclusion_no_params_with_context_from_template - Expected result (context value: %s)" % context['value']
)
}
inclusion_no_params_with_context_from_template.anything = (
"Expected inclusion_no_params_with_context_from_template __dict__"
)
@register.inclusion_tag('inclusion.html', takes_context=True)
def inclusion_params_and_context(context, arg):
"""Expected inclusion_params_and_context __doc__"""
return {
"result": "inclusion_params_and_context - Expected result (context value: %s): %s" % (context['value'], arg)
}
inclusion_params_and_context.anything = "Expected inclusion_params_and_context __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'), takes_context=True)
def inclusion_params_and_context_from_template(context, arg):
"""Expected inclusion_params_and_context_from_template __doc__"""
return {
"result": (
"inclusion_params_and_context_from_template - Expected result "
"(context value: %s): %s" % (context['value'], arg)
)
}
inclusion_params_and_context_from_template.anything = "Expected inclusion_params_and_context_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_two_params(one, two):
"""Expected inclusion_two_params __doc__"""
return {"result": "inclusion_two_params - Expected result: %s, %s" % (one, two)}
inclusion_two_params.anything = "Expected inclusion_two_params __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_two_params_from_template(one, two):
"""Expected inclusion_two_params_from_template __doc__"""
return {"result": "inclusion_two_params_from_template - Expected result: %s, %s" % (one, two)}
inclusion_two_params_from_template.anything = "Expected inclusion_two_params_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_one_default(one, two='hi'):
"""Expected inclusion_one_default __doc__"""
return {"result": "inclusion_one_default - Expected result: %s, %s" % (one, two)}
inclusion_one_default.anything = "Expected inclusion_one_default __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_one_default_from_template(one, two='hi'):
"""Expected inclusion_one_default_from_template __doc__"""
return {"result": "inclusion_one_default_from_template - Expected result: %s, %s" % (one, two)}
inclusion_one_default_from_template.anything = "Expected inclusion_one_default_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_unlimited_args(one, two='hi', *args):
"""Expected inclusion_unlimited_args __doc__"""
return {
"result": (
"inclusion_unlimited_args - Expected result: %s" % (
', '.join(str(arg) for arg in [one, two] + list(args))
)
)
}
inclusion_unlimited_args.anything = "Expected inclusion_unlimited_args __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_unlimited_args_from_template(one, two='hi', *args):
"""Expected inclusion_unlimited_args_from_template __doc__"""
return {
"result": (
"inclusion_unlimited_args_from_template - Expected result: %s" % (
', '.join(str(arg) for arg in [one, two] + list(args))
)
)
}
inclusion_unlimited_args_from_template.anything = "Expected inclusion_unlimited_args_from_template __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_only_unlimited_args(*args):
"""Expected inclusion_only_unlimited_args __doc__"""
return {
"result": "inclusion_only_unlimited_args - Expected result: %s" % (
', '.join(str(arg) for arg in args)
)
}
inclusion_only_unlimited_args.anything = "Expected inclusion_only_unlimited_args __dict__"
@register.inclusion_tag(engine.get_template('inclusion.html'))
def inclusion_only_unlimited_args_from_template(*args):
"""Expected inclusion_only_unlimited_args_from_template __doc__"""
return {
"result": "inclusion_only_unlimited_args_from_template - Expected result: %s" % (
', '.join(str(arg) for arg in args)
)
}
inclusion_only_unlimited_args_from_template.anything = "Expected inclusion_only_unlimited_args_from_template __dict__"
@register.inclusion_tag('test_incl_tag_use_l10n.html', takes_context=True)
def inclusion_tag_use_l10n(context):
"""Expected inclusion_tag_use_l10n __doc__"""
return {}
inclusion_tag_use_l10n.anything = "Expected inclusion_tag_use_l10n __dict__"
@register.inclusion_tag('inclusion.html')
def inclusion_unlimited_args_kwargs(one, two='hi', *args, **kwargs):
"""Expected inclusion_unlimited_args_kwargs __doc__"""
# Sort the dictionary by key to guarantee the order for testing.
sorted_kwarg = sorted(kwargs.items(), key=operator.itemgetter(0))
return {"result": "inclusion_unlimited_args_kwargs - Expected result: %s / %s" % (
', '.join(str(arg) for arg in [one, two] + list(args)),
', '.join('%s=%s' % (k, v) for (k, v) in sorted_kwarg)
)}
inclusion_unlimited_args_kwargs.anything = "Expected inclusion_unlimited_args_kwargs __dict__"
@register.inclusion_tag('inclusion.html', takes_context=True)
def inclusion_tag_without_context_parameter(arg):
"""Expected inclusion_tag_without_context_parameter __doc__"""
return {}
inclusion_tag_without_context_parameter.anything = "Expected inclusion_tag_without_context_parameter __dict__"
@register.inclusion_tag('inclusion_extends1.html')
def inclusion_extends1():
return {}
@register.inclusion_tag('inclusion_extends2.html')
def inclusion_extends2():
return {}
|
hassanshamim/ask-alexa-pykit
|
refs/heads/master
|
tests/test_response_builder.py
|
3
|
from unittest import skip
from nose.tools import assert_equal, assert_dict_equal
from .context import ask
RAW_TEMPLATE = {
"version": "1.0",
"response": {
"outputSpeech": {
"type": "PlainText",
"text": "Some default text goes here."
},
"shouldEndSession": False
}
}
class TestResponseHandler(object):
def setup(self):
self.default_speech = {"outputSpeech": {"type": "PlainText",
"text": None}
}
def test_response_builder_stores_base_response(self):
assert_equal(RAW_TEMPLATE, ask.ResponseBuilder.base_response)
def test_speech_defaults(self):
output = ask.ResponseBuilder.create_speech()
assert_dict_equal(output, self.default_speech)
def test_speech_takes_message(self):
message = 'My New Message'
output = ask.ResponseBuilder.create_speech(message=message)
assert_equal(output['outputSpeech']['text'], message)
def test_speech_can_return_ssml_message(self):
message = 'Yet another message'
output = ask.ResponseBuilder.create_speech(message=message, is_ssml=True)
assert_equal(output['outputSpeech']['type'], 'SSML')
assert_equal(output['outputSpeech']['ssml'], message)
def test_create_card_defaults(self):
card = ask.ResponseBuilder.create_card()
assert_dict_equal(card, {'type': 'Simple'})
def test_create_card_adds_kwargs_when_present(self):
expected = {'type': 'Simple', 'title': 'Welcome'}
output = ask.ResponseBuilder.create_card(title='Welcome')
assert_dict_equal(output, expected)
expected = {'type': 'Simple', 'subtitle': 'some words'}
output = ask.ResponseBuilder.create_card(subtitle='some words')
assert_dict_equal(output, expected)
expected = {'type': 'Simple', 'content': 'interesting info'}
output = ask.ResponseBuilder.create_card(content='interesting info')
assert_dict_equal(output, expected)
expected = {'type': 'Something else'}
output = ask.ResponseBuilder.create_card(card_type='Something else')
assert_dict_equal(output, expected)
@skip('Until it actually tests something useful')
def test_create_response_defaults(self):
output = ask.ResponseBuilder.create_response()
assert_dict_equal(ask.ResponseBuilder.base_response, output)
|
EducationalTestingService/rsmtool
|
refs/heads/stable
|
tests/test_utils_prmse.py
|
1
|
import os
import warnings
from pathlib import Path
import numpy as np
import pandas as pd
from nose.tools import assert_almost_equal, eq_, ok_, raises
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal
from rsmtool.utils.prmse import (get_n_human_scores,
get_true_score_evaluations,
mse_true,
prmse_true,
true_score_variance,
variance_of_errors)
# allow test directory to be set via an environment variable
# which is needed for package testing
TEST_DIR = os.environ.get('TESTDIR', None)
if TEST_DIR:
rsmtool_test_dir = TEST_DIR
else:
from rsmtool.test_utils import rsmtool_test_dir
def test_compute_n_human_scores():
df = pd.DataFrame({'h1': [1, 2, 3, 4],
'h2': [1, None, 2, None],
'h3': [None, None, 1, None]})
expected_n = pd.Series([2, 1, 3, 1])
n_scores = get_n_human_scores(df)
assert_array_equal(expected_n, n_scores)
def test_compute_n_human_scores_zeros():
df = pd.DataFrame({'h1': [1, 2, 3, None],
'h2': [1, None, 2, None],
'h3': [None, None, 1, None]})
expected_n = pd.Series([2, 1, 3, 0])
n_scores = get_n_human_scores(df)
assert_array_equal(expected_n, n_scores)
def test_compute_n_human_scores_array():
df = pd.DataFrame({'h1': [1, 2, 3, None],
'h2': [1, None, 2, None],
'h3': [None, None, 1, None]})
arr = df.to_numpy()
expected_n = pd.Series([2, 1, 3, 0])
n_scores = get_n_human_scores(arr)
assert_array_equal(expected_n, n_scores)
def test_prmse_single_human_ve():
df = pd.DataFrame({'system': [1, 2, 5],
'sc1': [2, 3, 5]})
prmse = prmse_true(df['system'], df['sc1'], 0.5)
eq_(prmse, 0.9090909090909091)
def test_prmse_single_human_ve_array_as_input():
system_scores = np.array([1, 2, 5])
human_scores = np.array([2, 3, 5])
prmse = prmse_true(system_scores, human_scores, 0.5)
eq_(prmse, 0.9090909090909091)
def test_variance_of_errors_all_single_scored():
# this test should raise a UserWarning
sc1 = [1, 2, 3, None, None]
sc2 = [None, None, None, 2, 3]
df = pd.DataFrame({'sc1': sc1,
'sc2': sc2})
with warnings.catch_warnings(record=True) as warning_list:
variance_of_errors_human = variance_of_errors(df)
ok_(variance_of_errors_human is None)
assert issubclass(warning_list[-1].category, UserWarning)
def test_prmse_all_single_scored():
# this test should raise a UserWarning
system_scores = [1, 2, 3, 4, 5]
sc1 = [1, 2, 3, None, None]
sc2 = [None, None, None, 2, 3]
df = pd.DataFrame({'sc1': sc1,
'sc2': sc2,
'system': system_scores})
with warnings.catch_warnings(record=True) as warning_list:
prmse = prmse_true(df['system'], df[['sc1', 'sc2']])
ok_(prmse is None)
assert issubclass(warning_list[-1].category, UserWarning)
@raises(ValueError)
def test_get_true_score_evaluations_single_human_no_ve():
df = pd.DataFrame({'system': [1, 2, 5],
'sc1': [2, 3, 5]})
get_true_score_evaluations(df, 'system', 'sc1')
class TestPrmseJohnsonData():
"""
This class tests the PRMSE functions against the benchmarks
provided by Matt Johnson who did the original derivation and
    implemented the function in R. This test ensures that the Python
    implementation produces the same values.
"""
def setUp(self):
full_matrix_file = Path(rsmtool_test_dir) / 'data' / 'files' / 'prmse_data.csv'
sparse_matrix_file = Path(rsmtool_test_dir) / 'data' / 'files' / 'prmse_data_sparse_matrix.csv'
self.data_full = pd.read_csv(full_matrix_file)
self.data_sparse = pd.read_csv(sparse_matrix_file)
self.human_score_columns = ['h1', 'h2', 'h3', 'h4']
self.system_score_columns = ['system']
def test_variance_of_errors_full_matrix(self):
human_scores = self.human_score_columns
df_humans = self.data_full[human_scores]
variance_errors_human = variance_of_errors(df_humans)
expected_v_e = 0.509375
eq_(variance_errors_human, expected_v_e)
def test_variance_of_errors_sparse_matrix(self):
human_scores = self.human_score_columns
df_humans = self.data_sparse[human_scores]
variance_errors_human = variance_of_errors(df_humans)
expected_v_e = 0.5150882
assert_almost_equal(variance_errors_human, expected_v_e, 7)
def test_variance_of_true_scores_full_matrix(self):
human_scores = self.human_score_columns
df_humans = self.data_full[human_scores]
variance_errors_human = 0.509375
expected_var_true = 0.7765515
var_true = true_score_variance(df_humans,
variance_errors_human)
assert_almost_equal(var_true, expected_var_true, 7)
def test_variance_of_true_scores_sparse_matrix(self):
human_scores = self.human_score_columns
df_humans = self.data_sparse[human_scores]
variance_errors_human = 0.5150882
expected_var_true = 0.769816
var_true = true_score_variance(df_humans,
variance_errors_human)
assert_almost_equal(var_true, expected_var_true, 7)
def test_variance_of_true_scores_sparse_matrix_computed_ve(self):
human_scores = self.human_score_columns
df_humans = self.data_sparse[human_scores]
expected_var_true = 0.769816
var_true = true_score_variance(df_humans)
assert_almost_equal(var_true, expected_var_true, 7)
def test_mse_full_matrix(self):
human_scores = self.human_score_columns
df_humans = self.data_full[human_scores]
system = self.data_full['system']
variance_errors_human = 0.509375
expected_mse_true = 0.3564625
mse = mse_true(system,
df_humans,
variance_errors_human)
assert_almost_equal(mse, expected_mse_true, 7)
def test_mse_sparse_matrix(self):
human_scores = self.human_score_columns
df_humans = self.data_sparse[human_scores]
system = self.data_sparse['system']
variance_errors_human = 0.5150882
expected_mse_true = 0.3550792
mse = mse_true(system,
df_humans,
variance_errors_human)
assert_almost_equal(mse, expected_mse_true, 7)
def test_mse_sparse_matrix_computed_ve(self):
human_scores = self.human_score_columns
df_humans = self.data_sparse[human_scores]
system = self.data_sparse['system']
expected_mse_true = 0.3550792
mse = mse_true(system,
df_humans)
assert_almost_equal(mse, expected_mse_true, 7)
def test_prmse_full_matrix_given_ve(self):
human_scores = self.human_score_columns
df_humans = self.data_full[human_scores]
system = self.data_full['system']
variance_errors_human = 0.509375
expected_prmse_true = 0.5409673
prmse = prmse_true(system,
df_humans,
variance_errors_human)
assert_almost_equal(prmse, expected_prmse_true, 7)
def test_prmse_sparse_matrix_given_ve(self):
human_scores = self.human_score_columns
df_humans = self.data_sparse[human_scores]
system = self.data_sparse['system']
variance_errors_human = 0.5150882
expected_prmse_true = 0.538748
prmse = prmse_true(system,
df_humans,
variance_errors_human)
assert_almost_equal(prmse, expected_prmse_true, 7)
def test_prmse_full_matrix_computed_ve(self):
human_scores = self.human_score_columns
df_humans = self.data_full[human_scores]
system = self.data_full['system']
expected_prmse_true = 0.5409673
prmse = prmse_true(system,
df_humans)
assert_almost_equal(prmse, expected_prmse_true, 7)
def test_prmse_sparse_matrix_computed_ve(self):
human_scores = self.human_score_columns
df_humans = self.data_sparse[human_scores]
system = self.data_sparse['system']
expected_prmse_true = 0.538748
prmse = prmse_true(system,
df_humans)
assert_almost_equal(prmse, expected_prmse_true, 7)
def test_prmse_sparse_matrix_array_as_input(self):
human_scores = self.human_score_columns
df_humans = self.data_sparse[human_scores].to_numpy()
system = np.array(self.data_sparse['system'])
expected_prmse_true = 0.538748
prmse = prmse_true(system,
df_humans)
assert_almost_equal(prmse, expected_prmse_true, 7)
def test_compute_true_score_evaluations_full(self):
expected_df = pd.DataFrame({'N': 10000,
"N raters": 4,
"N single": 0,
"N multiple": 10000,
"Variance of errors": 0.509375,
"True score var": 0.7765515,
'MSE true': 0.3564625,
'PRMSE true': 0.5409673},
index=['system'])
df_prmse = get_true_score_evaluations(self.data_full,
self.system_score_columns,
self.human_score_columns)
assert_frame_equal(df_prmse, expected_df, check_dtype=False)
def test_compute_true_score_evaluations_sparse(self):
expected_df = pd.DataFrame({'N': 10000,
"N raters": 4,
"N single": 3421,
"N multiple": 6579,
"Variance of errors": 0.5150882,
"True score var": 0.769816,
'MSE true': 0.3550792,
'PRMSE true': 0.538748},
index=['system'])
df_prmse = get_true_score_evaluations(self.data_sparse,
self.system_score_columns,
self.human_score_columns)
assert_frame_equal(df_prmse, expected_df, check_dtype=False)
def test_compute_true_score_evaluations_given_ve(self):
expected_df = pd.DataFrame({'N': 10000,
"N raters": 4,
"N single": 3421,
"N multiple": 6579,
"Variance of errors": 0.5150882,
"True score var": 0.769816,
'MSE true': 0.3550792,
'PRMSE true': 0.538748},
index=['system'])
df_prmse = get_true_score_evaluations(self.data_sparse,
self.system_score_columns,
self.human_score_columns,
variance_errors_human=0.5150882)
assert_frame_equal(df_prmse, expected_df, check_dtype=False)
|
a-olszewski/django-s3direct
|
refs/heads/master
|
s3direct/tests.py
|
5
|
import json
from django.test.utils import override_settings
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse, resolve
from django.test import TestCase
from s3direct import widgets
HTML_OUTPUT = (
'<div class="s3direct" data-policy-url="/get_upload_params/">'
' <a class="file-link" target="_blank" href=""></a>'
' <a class="file-remove" href="#remove">Remove</a>'
' <input class="file-url" type="hidden" value="" id="None" name="filename" />'
' <input class="file-dest" type="hidden" value="foo">'
' <input class="file-input" type="file" />'
' <div class="progress progress-striped active">'
' <div class="bar"></div>'
' </div>'
'</div>'
)
FOO_RESPONSE = {
u'AWSAccessKeyId': u'',
u'form_action': u'https://s3.amazonaws.com/test-bucket',
u'success_action_status': u'201',
u'acl': u'public-read',
u'key': u'uploads/imgs/${filename}',
u'Content-Type': u'image/jpeg'
}
class WidgetTest(TestCase):
def setUp(self):
admin = User.objects.create_superuser('admin', 'u@email.com', 'admin')
admin.save()
def test_urls(self):
reversed_url = reverse('s3direct')
resolved_url = resolve('/get_upload_params/')
self.assertEqual(reversed_url, '/get_upload_params/')
self.assertEqual(resolved_url.view_name, 's3direct')
def test_widget_html(self):
widget = widgets.S3DirectWidget(dest='foo')
self.assertEqual(widget.render('filename', None), HTML_OUTPUT)
def test_signing_logged_in(self):
self.client.login(username='admin', password='admin')
data = {'dest': 'files', 'name': 'image.jpg', 'type': 'image/jpeg'}
response = self.client.post(reverse('s3direct'), data)
self.assertEqual(response.status_code, 200)
def test_signing_logged_out(self):
data = {'dest': 'files', 'name': 'image.jpg', 'type': 'image/jpeg'}
response = self.client.post(reverse('s3direct'), data)
self.assertEqual(response.status_code, 403)
def test_allowed_type(self):
data = {'dest': 'imgs', 'name': 'image.jpg', 'type': 'image/jpeg'}
response = self.client.post(reverse('s3direct'), data)
self.assertEqual(response.status_code, 200)
def test_disallowed_type(self):
data = {'dest': 'imgs', 'name': 'image.mp4', 'type': 'video/mp4'}
response = self.client.post(reverse('s3direct'), data)
self.assertEqual(response.status_code, 400)
def test_allowed_type_logged_in(self):
self.client.login(username='admin', password='admin')
data = {'dest': 'vids', 'name': 'video.mp4', 'type': 'video/mp4'}
response = self.client.post(reverse('s3direct'), data)
self.assertEqual(response.status_code, 200)
def test_disallowed_type_logged_out(self):
data = {'dest': 'vids', 'name': 'video.mp4', 'type': 'video/mp4'}
response = self.client.post(reverse('s3direct'), data)
self.assertEqual(response.status_code, 403)
def test_signing_fields(self):
self.client.login(username='admin', password='admin')
data = {'dest': 'imgs', 'name': 'image.jpg', 'type': 'image/jpeg'}
response = self.client.post(reverse('s3direct'), data)
response_dict = json.loads(response.content.decode())
self.assertTrue(u'signature' in response_dict)
self.assertTrue(u'policy' in response_dict)
self.assertDictContainsSubset(FOO_RESPONSE, response_dict)
def test_signing_fields_unique_filename(self):
data = {'dest': 'misc', 'name': 'image.jpg', 'type': 'image/jpeg'}
response = self.client.post(reverse('s3direct'), data)
response_dict = json.loads(response.content.decode())
self.assertTrue(u'signature' in response_dict)
self.assertTrue(u'policy' in response_dict)
FOO_RESPONSE['key'] = 'images/unique.jpg'
self.assertDictContainsSubset(FOO_RESPONSE, response_dict)
|
udxxabp/zulip
|
refs/heads/master
|
zerver/management/commands/realm_emoji.py
|
114
|
from __future__ import absolute_import
from django.core.management.base import BaseCommand
from zerver.models import Realm
from zerver.lib.actions import do_add_realm_emoji, do_remove_realm_emoji
import sys
class Command(BaseCommand):
help = """Manage emoji for the specified realm
Example: python manage.py realm_emoji --realm=zulip.com --op=add robotheart https://humbug-user-avatars.s3.amazonaws.com/95ffa70fe0e7aea3c052ba91b38a28d8779f5705
Example: python manage.py realm_emoji --realm=zulip.com --op=remove robotheart
Example: python manage.py realm_emoji --realm=zulip.com --op=show
"""
def add_arguments(self, parser):
parser.add_argument('-r', '--realm',
dest='domain',
type=str,
required=True,
help='The name of the realm.')
parser.add_argument('--op',
dest='op',
type=str,
default="show",
help='What operation to do (add, show, remove).')
parser.add_argument('name', metavar='<name>', type=str, nargs='?', default=None,
help="name of the emoji")
parser.add_argument('img_url', metavar='<image url>', type=str, nargs='?',
help="URL of image to display for the emoji")
def handle(self, *args, **options):
realm = Realm.objects.get(domain=options["domain"])
if options["op"] == "show":
for name, url in realm.get_emoji().iteritems():
print name, url
sys.exit(0)
name = options['name']
if name is None:
self.print_help("python manage.py", "realm_emoji")
sys.exit(1)
if options["op"] == "add":
img_url = options['img_url']
if img_url is None:
self.print_help("python manage.py", "realm_emoji")
sys.exit(1)
do_add_realm_emoji(realm, name, img_url)
sys.exit(0)
elif options["op"] == "remove":
do_remove_realm_emoji(realm, name)
sys.exit(0)
else:
self.print_help("python manage.py", "realm_emoji")
sys.exit(1)
|
texastribune/tx_salaries
|
refs/heads/master
|
tx_salaries/utils/transformers/ut_health_northeast.py
|
1
|
from . import base
from . import mixins
from datetime import date
class TransformedRecord(
mixins.GenericCompensationMixin,
mixins.GenericDepartmentMixin, mixins.GenericIdentifierMixin,
mixins.GenericJobTitleMixin, mixins.GenericPersonMixin,
mixins.MembershipMixin, mixins.OrganizationMixin, mixins.PostMixin,
mixins.RaceMixin, mixins.LinkMixin, base.BaseTransformedRecord):
MAP = {
'full_name': 'Name',
'department': 'Department',
'job_title': 'Job Title',
'hire_date': 'Hire Dt',
'compensation': 'Annual Rt',
'gender': 'Sex',
'nationality': 'Ethnic Grp',
'employee_type': 'Full/Part',
}
#This organization used to be named 'University of Texas Health Science Center at Tyler'
#CHECK THE OLD ONE BEFORE PUBLISHING!!!
ORGANIZATION_NAME = 'UT Health Northeast'
# What type of organization is this? This MUST match what we use on the site, double check against salaries.texastribune.org
ORGANIZATION_CLASSIFICATION = 'University Hospital'
DATE_PROVIDED = date(2017, 5, 12)
# The URL to find the raw data in our S3 bucket.
URL = ('http://raw.texastribune.org.s3.amazonaws.com/'
'ut_health_northeast/2017-05/uthealth-tyler.xls')
race_map = {
'AMIND': 'American Indian/Alaska Native',
'BLACK': 'Black/African American',
'WHITE': 'White',
'ASIAN': 'Asian',
'UNK': 'Ethnicity Unknown',
'HISPA': 'Hispanic/Latino',
'PACIF': 'Native Hawaiian/Other Pacific Island'
}
# This is how the loader checks for valid people. Defaults to checking to see if `last_name` is empty.
@property
def is_valid(self):
# Adjust to return False on invalid fields. For example:
if self.compensation:
return self.full_name.strip() != ''
@property
def compensation_type(self):
if self.employee_type == 'P':
return 'PT'
else:
return 'FT'
@property
def description(self):
if self.employee_type == 'P':
return "Part-time annual rate"
else:
return "Annual rate"
@property
def race(self):
return {
'name': self.race_map[self.nationality.strip()]
}
@property
def person(self):
name = self.get_name()
r = {
'family_name': name.last,
'given_name': name.first,
'additional_name': name.middle,
'name': unicode(name),
'gender': self.gender.strip()
}
return r
def get_raw_name(self):
split_name = self.full_name.split(',')
last_name = split_name[0]
split_firstname = split_name[1].split(' ')
first_name = split_firstname[0]
if len(split_firstname) == 2 and len(split_firstname[1]) == 1:
middle_name = split_firstname[1]
else:
first_name = split_name[1]
middle_name = ''
return u' '.join([first_name, middle_name, last_name])
transform = base.transform_factory(TransformedRecord)
|
kleientertainment/ds_mod_tools
|
refs/heads/master
|
pkg/win32/Python27/Lib/encodings/cp037.py
|
93
|
""" Python Character Mapping Codec cp037 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP037.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp037',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
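# Illustrative usage (assuming the standard encodings search path registers
# this module): u'hello'.encode('cp037') == '\x88\x85\x93\x93\x96'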
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'\xa2' # 0x4A -> CENT SIGN
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'|' # 0x4F -> VERTICAL LINE
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'!' # 0x5A -> EXCLAMATION MARK
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'\xac' # 0x5F -> NOT SIGN
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
u'\xae' # 0xAF -> REGISTERED SIGN
u'^' # 0xB0 -> CIRCUMFLEX ACCENT
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'[' # 0xBA -> LEFT SQUARE BRACKET
u']' # 0xBB -> RIGHT SQUARE BRACKET
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
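# --- Editor's illustrative usage sketch (not part of cp037.py) ---
# Because the stdlib registers this module under the name 'cp037', the codec
# can be exercised through the normal codecs machinery in Python 2.7:
#
#     >>> data = u'HELLO, WORLD'.encode('cp037')   # unicode -> EBCDIC bytes
#     >>> data.decode('cp037') == u'HELLO, WORLD'
#     True
#
# The same round trip works without registration by using the Codec class
# defined above directly, e.g. Codec().decode(Codec().encode(u'A')[0]).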