| repo_name | ref | path | copies | content |
|---|---|---|---|---|
tntraina/aff4
|
refs/heads/master
|
python2.6/tests/encrypted.py
|
3
|
import pyaff4
import os, time, sys
time.sleep(1)
oracle = pyaff4.Resolver()
CERT_LOCATION = os.getcwd() + "/tests/sign.key"
SOURCE = "/bin/ls"
## For this test make sure there are certs:
try:
data = open(CERT_LOCATION).read()
except IOError:
print "Creating certs on %s" % CERT_LOCATION
os.system("openssl req -x509 -newkey rsa:1024 -keyout %s -out %s -nodes" %(
CERT_LOCATION, CERT_LOCATION))
class SecurityProvider:
""" This is a demonstration security provider object which will be
called by the AFF4 library to get keying material for different
streams.
"""
def passphrase(self, cipher, subject):
print "Setting passphrase for subject %s" % subject.value
return "Hello"
def x509_private_key(self, cert_name, subject):
""" Returns the private key (in pem format) for the certificate name provided. """
print "Certificate for %s" % cert_name
return open(CERT_LOCATION).read()
## This registers the security provider
oracle.register_security_provider(pyaff4.ProxiedSecurityProvider(SecurityProvider()))
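## Hedged aside, not part of the original test: a variant provider that pulls
## the passphrase from a hypothetical AFF4_PASSPHRASE environment variable
## instead of hard-coding it. It reuses only the callback signatures shown
## above and is never registered, so the test's behaviour is unchanged.
class EnvSecurityProvider(SecurityProvider):
    def passphrase(self, cipher, subject):
        ## Fall back to the same literal the demo provider returns.
        return os.environ.get("AFF4_PASSPHRASE", "Hello")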
url = pyaff4.RDFURN()
url.set("/tmp/test.zip")
try:
url.set(sys.argv[1])
fd = oracle.open(url, 'r')
while 1:
data = fd.read(1024*1024)
if not data: break
sys.stdout.write(data)
sys.exit(0)
except IndexError:
pass
try:
os.unlink(url.parser.query)
except: pass
## Make the volume
volume = oracle.create(pyaff4.AFF4_ZIP_VOLUME)
volume.set(pyaff4.AFF4_STORED, url)
volume = volume.finish()
volume_urn = volume.urn
volume.cache_return()
## Make the image
image = oracle.create(pyaff4.AFF4_IMAGE)
image.set(pyaff4.AFF4_STORED, volume_urn)
image = image.finish()
image_urn = image.urn
image.cache_return()
# Make the encrypted stream
encrypted = oracle.create(pyaff4.AFF4_ENCRYTED)
encrypted.urn.set(volume_urn.value)
encrypted.urn.add(SOURCE)
encrypted.set(pyaff4.AFF4_STORED, volume_urn)
encrypted.set(pyaff4.AFF4_TARGET, image_urn)
## Set the certificate:
cipher = oracle.new_rdfvalue(pyaff4.AFF4_AES256_X509)
cert_urn = pyaff4.RDFURN()
cert_urn.set(CERT_LOCATION)
cipher.set_authority(cert_urn)
encrypted.add(pyaff4.AFF4_CIPHER, cipher)
cipher = oracle.new_rdfvalue(pyaff4.AFF4_AES256_PASSWORD)
encrypted.add(pyaff4.AFF4_CIPHER, cipher)
encrypted = encrypted.finish()
encrypted_urn = encrypted.urn
print "Encrypted URN: %s" % encrypted.urn.value
infd = open(SOURCE)
while 1:
data = infd.read(2**24)
if not data: break
encrypted.write(data)
encrypted.close()
image = oracle.open(image_urn, "w")
image.close()
volume = oracle.open(volume_urn, 'w')
volume.close()
## Check the data
fd = oracle.open(encrypted_urn, 'r')
print fd.read(10)
fd.cache_return()
|
wolfier/incubator-airflow
|
refs/heads/master
|
tests/contrib/operators/test_dataproc_operator.py
|
2
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import re
import unittest
from airflow import DAG
from airflow.contrib.operators.dataproc_operator import \
DataprocClusterCreateOperator, \
DataprocClusterDeleteOperator, \
DataProcHadoopOperator, \
DataProcHiveOperator, \
DataProcPySparkOperator, \
DataProcSparkOperator, \
DataprocWorkflowTemplateInstantiateInlineOperator, \
DataprocWorkflowTemplateInstantiateOperator, \
DataprocClusterScaleOperator
from airflow.version import version
from copy import deepcopy
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
from mock import Mock
from mock import patch
TASK_ID = 'test-dataproc-operator'
CLUSTER_NAME = 'test-cluster-name'
PROJECT_ID = 'test-project-id'
NUM_WORKERS = 123
ZONE = 'us-central1-a'
NETWORK_URI = '/projects/project_id/regions/global/net'
SUBNETWORK_URI = '/projects/project_id/regions/global/subnet'
TAGS = ['tag1', 'tag2']
STORAGE_BUCKET = 'gs://airflow-test-bucket/'
IMAGE_VERSION = '1.1'
MASTER_MACHINE_TYPE = 'n1-standard-2'
MASTER_DISK_SIZE = 100
WORKER_MACHINE_TYPE = 'n1-standard-2'
WORKER_DISK_SIZE = 100
NUM_PREEMPTIBLE_WORKERS = 2
GET_INIT_ACTION_TIMEOUT = "600s" # 10m
LABEL1 = {}
LABEL2 = {'application': 'test', 'year': 2017}
SERVICE_ACCOUNT_SCOPES = [
'https://www.googleapis.com/auth/bigquery',
'https://www.googleapis.com/auth/bigtable.data'
]
IDLE_DELETE_TTL = 321
AUTO_DELETE_TIME = datetime.datetime(2017, 6, 7)
AUTO_DELETE_TTL = 654
DEFAULT_DATE = datetime.datetime(2017, 6, 6)
REGION = 'test-region'
MAIN_URI = 'test-uri'
TEMPLATE_ID = 'template-id'
HOOK = 'airflow.contrib.operators.dataproc_operator.DataProcHook'
class DataprocClusterCreateOperatorTest(unittest.TestCase):
# Unit test for the DataprocClusterCreateOperator
def setUp(self):
# instantiate two operators with different label sets.
self.labels = [LABEL1, LABEL2]
self.dataproc_operators = []
self.mock_conn = Mock()
for labels in self.labels:
self.dataproc_operators.append(
DataprocClusterCreateOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
zone=ZONE,
network_uri=NETWORK_URI,
subnetwork_uri=SUBNETWORK_URI,
tags=TAGS,
storage_bucket=STORAGE_BUCKET,
image_version=IMAGE_VERSION,
master_machine_type=MASTER_MACHINE_TYPE,
master_disk_size=MASTER_DISK_SIZE,
worker_machine_type=WORKER_MACHINE_TYPE,
worker_disk_size=WORKER_DISK_SIZE,
num_preemptible_workers=NUM_PREEMPTIBLE_WORKERS,
labels=deepcopy(labels),
service_account_scopes=SERVICE_ACCOUNT_SCOPES,
idle_delete_ttl=IDLE_DELETE_TTL,
auto_delete_time=AUTO_DELETE_TIME,
auto_delete_ttl=AUTO_DELETE_TTL
)
)
self.dag = DAG(
'test_dag',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE,
'end_date': DEFAULT_DATE,
},
schedule_interval='@daily')
def test_init(self):
"""Test DataProcClusterOperator instance is properly initialized."""
for suffix, dataproc_operator in enumerate(self.dataproc_operators):
self.assertEqual(dataproc_operator.cluster_name, CLUSTER_NAME)
self.assertEqual(dataproc_operator.project_id, PROJECT_ID)
self.assertEqual(dataproc_operator.num_workers, NUM_WORKERS)
self.assertEqual(dataproc_operator.zone, ZONE)
self.assertEqual(dataproc_operator.network_uri, NETWORK_URI)
self.assertEqual(dataproc_operator.subnetwork_uri, SUBNETWORK_URI)
self.assertEqual(dataproc_operator.tags, TAGS)
self.assertEqual(dataproc_operator.storage_bucket, STORAGE_BUCKET)
self.assertEqual(dataproc_operator.image_version, IMAGE_VERSION)
self.assertEqual(dataproc_operator.master_machine_type, MASTER_MACHINE_TYPE)
self.assertEqual(dataproc_operator.master_disk_size, MASTER_DISK_SIZE)
self.assertEqual(dataproc_operator.worker_machine_type, WORKER_MACHINE_TYPE)
self.assertEqual(dataproc_operator.worker_disk_size, WORKER_DISK_SIZE)
self.assertEqual(dataproc_operator.num_preemptible_workers,
NUM_PREEMPTIBLE_WORKERS)
self.assertEqual(dataproc_operator.labels, self.labels[suffix])
self.assertEqual(dataproc_operator.service_account_scopes,
SERVICE_ACCOUNT_SCOPES)
self.assertEqual(dataproc_operator.idle_delete_ttl, IDLE_DELETE_TTL)
self.assertEqual(dataproc_operator.auto_delete_time, AUTO_DELETE_TIME)
self.assertEqual(dataproc_operator.auto_delete_ttl, AUTO_DELETE_TTL)
def test_get_init_action_timeout(self):
for suffix, dataproc_operator in enumerate(self.dataproc_operators):
timeout = dataproc_operator._get_init_action_timeout()
self.assertEqual(timeout, "600s")
def test_build_cluster_data(self):
for suffix, dataproc_operator in enumerate(self.dataproc_operators):
cluster_data = dataproc_operator._build_cluster_data()
self.assertEqual(cluster_data['clusterName'], CLUSTER_NAME)
self.assertEqual(cluster_data['projectId'], PROJECT_ID)
self.assertEqual(cluster_data['config']['softwareConfig'], {'imageVersion': IMAGE_VERSION})
self.assertEqual(cluster_data['config']['configBucket'], STORAGE_BUCKET)
self.assertEqual(cluster_data['config']['workerConfig']['numInstances'], NUM_WORKERS)
self.assertEqual(cluster_data['config']['secondaryWorkerConfig']['numInstances'],
NUM_PREEMPTIBLE_WORKERS)
self.assertEqual(cluster_data['config']['gceClusterConfig']['serviceAccountScopes'],
SERVICE_ACCOUNT_SCOPES)
self.assertEqual(cluster_data['config']['gceClusterConfig']['subnetworkUri'],
SUBNETWORK_URI)
self.assertEqual(cluster_data['config']['gceClusterConfig']['networkUri'],
NETWORK_URI)
self.assertEqual(cluster_data['config']['gceClusterConfig']['tags'],
TAGS)
self.assertEqual(cluster_data['config']['lifecycleConfig']['idleDeleteTtl'],
"321s")
self.assertEqual(cluster_data['config']['lifecycleConfig']['autoDeleteTime'],
"2017-06-07T00:00:00.000000Z")
# test whether the default airflow-version label has been properly
# set on the dataproc operator.
merged_labels = {}
merged_labels.update(self.labels[suffix])
merged_labels.update({'airflow-version': 'v' + version.replace('.', '-').replace('+','-')})
self.assertTrue(re.match(r'[a-z]([-a-z0-9]*[a-z0-9])?',
cluster_data['labels']['airflow-version']))
self.assertEqual(cluster_data['labels'], merged_labels)
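# Hedged worked example of the transform asserted above (the version string is
# hypothetical): for version == '1.10.0+composer',
# 'v' + version.replace('.', '-').replace('+', '-') gives 'v1-10-0-composer',
# which matches the GCE label pattern r'[a-z]([-a-z0-9]*[a-z0-9])?'.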
def test_build_cluster_data_with_autoDeleteTime(self):
dataproc_operator = DataprocClusterCreateOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
zone=ZONE,
dag=self.dag,
auto_delete_time=AUTO_DELETE_TIME,
)
cluster_data = dataproc_operator._build_cluster_data()
self.assertEqual(cluster_data['config']['lifecycleConfig']['autoDeleteTime'],
"2017-06-07T00:00:00.000000Z")
def test_build_cluster_data_with_autoDeleteTtl(self):
dataproc_operator = DataprocClusterCreateOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
zone=ZONE,
dag=self.dag,
auto_delete_ttl=AUTO_DELETE_TTL,
)
cluster_data = dataproc_operator._build_cluster_data()
self.assertEqual(cluster_data['config']['lifecycleConfig']['autoDeleteTtl'],
"654s")
def test_build_cluster_data_with_autoDeleteTime_and_autoDeleteTtl(self):
dataproc_operator = DataprocClusterCreateOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
zone=ZONE,
dag=self.dag,
auto_delete_time=AUTO_DELETE_TIME,
auto_delete_ttl=AUTO_DELETE_TTL,
)
cluster_data = dataproc_operator._build_cluster_data()
if 'autoDeleteTtl' in cluster_data['config']['lifecycleConfig']:
self.fail("If 'auto_delete_time' and 'auto_delete_ttl' is set, " +
"only `auto_delete_time` is used")
self.assertEqual(cluster_data['config']['lifecycleConfig']['autoDeleteTime'],
"2017-06-07T00:00:00.000000Z")
def test_cluster_name_log_no_sub(self):
with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') \
as mock_hook:
mock_hook.return_value.get_conn = self.mock_conn
dataproc_task = DataprocClusterCreateOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
zone=ZONE,
dag=self.dag
)
with patch.object(dataproc_task.log, 'info') as mock_info:
with self.assertRaises(TypeError) as _:
dataproc_task.execute(None)
mock_info.assert_called_with('Creating cluster: %s', CLUSTER_NAME)
def test_cluster_name_log_sub(self):
with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') \
as mock_hook:
mock_hook.return_value.get_conn = self.mock_conn
dataproc_task = DataprocClusterCreateOperator(
task_id=TASK_ID,
cluster_name='smoke-cluster-{{ ts_nodash }}',
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
zone=ZONE,
dag=self.dag
)
with patch.object(dataproc_task.log, 'info') as mock_info:
context = {'ts_nodash': 'testnodash'}
rendered = dataproc_task.render_template(
'cluster_name',
getattr(dataproc_task, 'cluster_name'), context)
setattr(dataproc_task, 'cluster_name', rendered)
with self.assertRaises(TypeError):
dataproc_task.execute(None)
mock_info.assert_called_with('Creating cluster: %s',
u'smoke-cluster-testnodash')
class DataprocClusterScaleOperatorTest(unittest.TestCase):
# Unit test for the DataprocClusterScaleOperator
def setUp(self):
self.mock_execute = Mock()
self.mock_execute.execute = Mock(return_value={'done': True})
self.mock_get = Mock()
self.mock_get.get = Mock(return_value=self.mock_execute)
self.mock_operations = Mock()
self.mock_operations.get = Mock(return_value=self.mock_get)
self.mock_regions = Mock()
self.mock_regions.operations = Mock(return_value=self.mock_operations)
self.mock_projects = Mock()
self.mock_projects.regions = Mock(return_value=self.mock_regions)
self.mock_conn = Mock()
self.mock_conn.projects = Mock(return_value=self.mock_projects)
self.dag = DAG(
'test_dag',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE,
'end_date': DEFAULT_DATE,
},
schedule_interval='@daily')
def test_cluster_name_log_no_sub(self):
with patch('airflow.contrib.hooks.gcp_dataproc_hook.DataProcHook') as mock_hook:
mock_hook.return_value.get_conn = self.mock_conn
dataproc_task = DataprocClusterScaleOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
num_preemptible_workers=NUM_PREEMPTIBLE_WORKERS,
dag=self.dag
)
with patch.object(dataproc_task.log, 'info') as mock_info:
with self.assertRaises(TypeError):
dataproc_task.execute(None)
mock_info.assert_called_with('Scaling cluster: %s', CLUSTER_NAME)
def test_cluster_name_log_sub(self):
with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') \
as mock_hook:
mock_hook.return_value.get_conn = self.mock_conn
dataproc_task = DataprocClusterScaleOperator(
task_id=TASK_ID,
cluster_name='smoke-cluster-{{ ts_nodash }}',
project_id=PROJECT_ID,
num_workers=NUM_WORKERS,
num_preemptible_workers=NUM_PREEMPTIBLE_WORKERS,
dag=self.dag
)
with patch.object(dataproc_task.log, 'info') as mock_info:
context = {'ts_nodash': 'testnodash'}
rendered = dataproc_task.render_template(
'cluster_name',
getattr(dataproc_task, 'cluster_name'), context)
setattr(dataproc_task, 'cluster_name', rendered)
with self.assertRaises(TypeError):
dataproc_task.execute(None)
mock_info.assert_called_with('Scaling cluster: %s',
u'smoke-cluster-testnodash')
class DataprocClusterDeleteOperatorTest(unittest.TestCase):
# Unit test for the DataprocClusterDeleteOperator
def setUp(self):
self.mock_execute = Mock()
self.mock_execute.execute = Mock(return_value={'done' : True})
self.mock_get = Mock()
self.mock_get.get = Mock(return_value=self.mock_execute)
self.mock_operations = Mock()
self.mock_operations.get = Mock(return_value=self.mock_get)
self.mock_regions = Mock()
self.mock_regions.operations = Mock(return_value=self.mock_operations)
self.mock_projects=Mock()
self.mock_projects.regions = Mock(return_value=self.mock_regions)
self.mock_conn = Mock()
self.mock_conn.projects = Mock(return_value=self.mock_projects)
self.dag = DAG(
'test_dag',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE,
'end_date': DEFAULT_DATE,
},
schedule_interval='@daily')
def test_cluster_name_log_no_sub(self):
with patch('airflow.contrib.hooks.gcp_dataproc_hook.DataProcHook') as mock_hook:
mock_hook.return_value.get_conn = self.mock_conn
dataproc_task = DataprocClusterDeleteOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
project_id=PROJECT_ID,
dag=self.dag
)
with patch.object(dataproc_task.log, 'info') as mock_info:
with self.assertRaises(TypeError) as _:
dataproc_task.execute(None)
mock_info.assert_called_with('Deleting cluster: %s', CLUSTER_NAME)
def test_cluster_name_log_sub(self):
with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') as mock_hook:
mock_hook.return_value.get_conn = self.mock_conn
dataproc_task = DataprocClusterDeleteOperator(
task_id=TASK_ID,
cluster_name='smoke-cluster-{{ ts_nodash }}',
project_id=PROJECT_ID,
dag=self.dag
)
with patch.object(dataproc_task.log, 'info') as mock_info:
context = {'ts_nodash': 'testnodash'}
rendered = dataproc_task.render_template(
'cluster_name',
getattr(dataproc_task, 'cluster_name'), context)
setattr(dataproc_task, 'cluster_name', rendered)
with self.assertRaises(TypeError):
dataproc_task.execute(None)
mock_info.assert_called_with('Deleting cluster: %s',
u'smoke-cluster-testnodash')
class DataProcHadoopOperatorTest(unittest.TestCase):
# Unit test for the DataProcHadoopOperator
def test_hook_correct_region(self):
with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') as mock_hook:
dataproc_task = DataProcHadoopOperator(
task_id=TASK_ID,
region=REGION
)
dataproc_task.execute(None)
mock_hook.return_value.submit.assert_called_once_with(mock.ANY, mock.ANY, REGION)
class DataProcHiveOperatorTest(unittest.TestCase):
# Unit test for the DataProcHiveOperator
def test_hook_correct_region(self):
with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') as mock_hook:
dataproc_task = DataProcHiveOperator(
task_id=TASK_ID,
region=REGION
)
dataproc_task.execute(None)
mock_hook.return_value.submit.assert_called_once_with(mock.ANY, mock.ANY, REGION)
class DataProcPySparkOperatorTest(unittest.TestCase):
# Unit test for the DataProcPySparkOperator
def test_hook_correct_region(self):
with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') as mock_hook:
dataproc_task = DataProcPySparkOperator(
task_id=TASK_ID,
main=MAIN_URI,
region=REGION
)
dataproc_task.execute(None)
mock_hook.return_value.submit.assert_called_once_with(mock.ANY, mock.ANY, REGION)
class DataProcSparkOperatorTest(unittest.TestCase):
# Unit test for the DataProcSparkOperator
def test_hook_correct_region(self):
with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') as mock_hook:
dataproc_task = DataProcSparkOperator(
task_id=TASK_ID,
region=REGION
)
dataproc_task.execute(None)
mock_hook.return_value.submit.assert_called_once_with(mock.ANY, mock.ANY, REGION)
class DataprocWorkflowTemplateInstantiateOperatorTest(unittest.TestCase):
def setUp(self):
# Setup service.projects().regions().workflowTemplates().instantiate().execute()
self.operation = {'name': 'operation', 'done': True}
self.mock_execute = Mock()
self.mock_execute.execute.return_value = self.operation
self.mock_workflows = Mock()
self.mock_workflows.instantiate.return_value = self.mock_execute
self.mock_regions = Mock()
self.mock_regions.workflowTemplates.return_value = self.mock_workflows
self.mock_projects = Mock()
self.mock_projects.regions.return_value = self.mock_regions
self.mock_conn = Mock()
self.mock_conn.projects.return_value = self.mock_projects
self.dag = DAG(
'test_dag',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE,
'end_date': DEFAULT_DATE,
},
schedule_interval='@daily')
def test_workflow(self):
with patch(HOOK) as MockHook:
hook = MockHook()
hook.get_conn.return_value = self.mock_conn
hook.await.return_value = None
dataproc_task = DataprocWorkflowTemplateInstantiateOperator(
task_id=TASK_ID,
project_id=PROJECT_ID,
region=REGION,
template_id=TEMPLATE_ID,
dag=self.dag
)
dataproc_task.execute(None)
template_name = (
'projects/test-project-id/regions/test-region/'
'workflowTemplates/template-id')
self.mock_workflows.instantiate.assert_called_once_with(
name=template_name,
body=mock.ANY)
hook.await.assert_called_once_with(self.operation)
class DataprocWorkflowTemplateInstantiateInlineOperatorTest(unittest.TestCase):
def setUp(self):
# Setup service.projects().regions().workflowTemplates().instantiateInline()
# .execute()
self.operation = {'name': 'operation', 'done': True}
self.mock_execute = Mock()
self.mock_execute.execute.return_value = self.operation
self.mock_workflows = Mock()
self.mock_workflows.instantiateInline.return_value = self.mock_execute
self.mock_regions = Mock()
self.mock_regions.workflowTemplates.return_value = self.mock_workflows
self.mock_projects = Mock()
self.mock_projects.regions.return_value = self.mock_regions
self.mock_conn = Mock()
self.mock_conn.projects.return_value = self.mock_projects
self.dag = DAG(
'test_dag',
default_args={
'owner': 'airflow',
'start_date': DEFAULT_DATE,
'end_date': DEFAULT_DATE,
},
schedule_interval='@daily')
def test_inline_workflow(self):
with patch(HOOK) as MockHook:
hook = MockHook()
hook.get_conn.return_value = self.mock_conn
hook.await.return_value = None
template = {
"placement": {
"managed_cluster": {
"cluster_name": CLUSTER_NAME,
"config": {
"gce_cluster_config": {
"zone_uri": ZONE,
}
}
}
},
"jobs": [
{
"step_id": "say-hello",
"pig_job": {
"query": "sh echo hello"
}
}],
}
dataproc_task = DataprocWorkflowTemplateInstantiateInlineOperator(
task_id=TASK_ID,
project_id=PROJECT_ID,
region=REGION,
template=template,
dag=self.dag
)
dataproc_task.execute(None)
self.mock_workflows.instantiateInline.assert_called_once_with(
parent='projects/test-project-id/regions/test-region',
instanceId=mock.ANY,
body=template)
hook.await.assert_called_once_with(self.operation)
|
jinnykoo/christmas
|
refs/heads/master
|
sites/demo/tests/test_shipping_methods.py
|
34
|
from decimal import Decimal as D
from django.test import TestCase
import mock
from apps.shipping import methods
class TestStandard(TestCase):
def setUp(self):
self.method = methods.Standard()
self.basket = mock.Mock()
def test_is_free_over_threshold(self):
self.basket.total_incl_tax = D('20.00')
price = self.method.calculate(self.basket)
self.assertEqual(price.incl_tax, D('0.00'))
def test_is_per_item_under_threshold(self):
self.basket.total_incl_tax = D('10.00')
self.basket.num_items = 3
price = self.method.calculate(self.basket)
self.assertEqual(
price.incl_tax, 3 * self.method.charge_per_item)
class TestExpress(TestCase):
def setUp(self):
self.method = methods.Express()
self.basket = mock.Mock()
def test_is_per_item_under_threshold(self):
self.basket.total_incl_tax = D('10.00')
self.basket.num_items = 3
price = self.method.calculate(self.basket)
self.assertEqual(
price.incl_tax, 3 * self.method.charge_per_item)
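# Hedged sketch, not the demo site's actual apps.shipping.methods code: a
# minimal Standard-style method consistent with the assertions above -- free
# once basket.total_incl_tax reaches an assumed threshold, otherwise
# charge_per_item for every item. The threshold/charge values and the price
# holder are assumptions made for illustration only.
class SketchPrice(object):
    def __init__(self, incl_tax):
        self.incl_tax = incl_tax

class SketchStandard(object):
    threshold = D('12.00')       # assumed free-shipping threshold
    charge_per_item = D('1.99')  # assumed per-item charge

    def calculate(self, basket):
        if basket.total_incl_tax >= self.threshold:
            return SketchPrice(D('0.00'))
        return SketchPrice(self.charge_per_item * basket.num_items)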
|
TangHao1987/intellij-community
|
refs/heads/master
|
python/testData/mover/lastComment1.py
|
166
|
def f():
if True:
a = 1
else:
a = 2
#comment <caret>
|
Pure-Aosp/android_external_skia
|
refs/heads/5.1
|
experimental/benchtools/greenify.py
|
85
|
#!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""greenify.py: standalone script to correct flaky bench expectations.
Requires Rietveld credentials on the running machine.
Usage:
Copy script to a separate dir outside Skia repo. The script will create a
skia dir on the first run to host the repo, and will create/delete
temp dirs as needed.
./greenify.py --url <the stdio url from failed CheckForRegressions step>
"""
import argparse
import filecmp
import os
import re
import shutil
import subprocess
import time
import urllib2
# Regular expression for matching exception data.
EXCEPTION_RE = ('Bench (\S+) out of range \[(\d+.\d+), (\d+.\d+)\] \((\d+.\d+) '
'vs (\d+.\d+), ')
EXCEPTION_RE_COMPILED = re.compile(EXCEPTION_RE)
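# Hedged, self-contained illustration of what the pattern above captures; the
# bench name and numbers in this stdio-style line are made up.
_EXAMPLE_LINE = ('Bench desk_example.skp_565 out of range [10.00, 12.00] '
                 '(14.00 vs 11.00, check it)')
assert EXCEPTION_RE_COMPILED.search(_EXAMPLE_LINE).groups() == (
    'desk_example.skp_565', '10.00', '12.00', '14.00', '11.00')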
def clean_dir(d):
if os.path.exists(d):
shutil.rmtree(d)
os.makedirs(d)
def checkout_or_update_skia(repo_dir):
status = True
old_cwd = os.getcwd()
os.chdir(repo_dir)
print 'CHECK SKIA REPO...'
if subprocess.call(['git', 'pull'],
stderr=subprocess.PIPE):
print 'Checking out Skia from git, please be patient...'
os.chdir(old_cwd)
clean_dir(repo_dir)
os.chdir(repo_dir)
if subprocess.call(['git', 'clone', '-q', '--depth=50', '--single-branch',
'https://skia.googlesource.com/skia.git', '.']):
status = False
subprocess.call(['git', 'checkout', 'master'])
subprocess.call(['git', 'pull'])
os.chdir(old_cwd)
return status
def git_commit_expectations(repo_dir, exp_dir, bot, build, commit):
commit_msg = """Greenify bench bot %s at build %s
TBR=bsalomon@google.com
Bypassing trybots:
NOTRY=true""" % (bot, build)
old_cwd = os.getcwd()
os.chdir(repo_dir)
upload = ['git', 'cl', 'upload', '-f', '--bypass-hooks',
'--bypass-watchlists', '-m', commit_msg]
if commit:
upload.append('--use-commit-queue')
branch = exp_dir[exp_dir.rfind('/') + 1:]
filename = 'bench_expectations_%s.txt' % bot
cmds = ([['git', 'checkout', 'master'],
['git', 'pull'],
['git', 'checkout', '-b', branch, '-t', 'origin/master'],
['cp', '%s/%s' % (exp_dir, filename), 'expectations/bench'],
['git', 'add', 'expectations/bench/' + filename],
['git', 'commit', '-m', commit_msg],
upload,
['git', 'checkout', 'master'],
['git', 'branch', '-D', branch],
])
status = True
for cmd in cmds:
print 'Running ' + ' '.join(cmd)
if subprocess.call(cmd):
print 'FAILED. Please check if skia git repo is present.'
subprocess.call(['git', 'checkout', 'master'])
status = False
break
os.chdir(old_cwd)
return status
def delete_dirs(li):
for d in li:
print 'Deleting directory %s' % d
shutil.rmtree(d)
def widen_bench_ranges(url, bot, repo_dir, exp_dir):
fname = 'bench_expectations_%s.txt' % bot
src = os.path.join(repo_dir, 'expectations', 'bench', fname)
if not os.path.isfile(src):
print 'This bot has no expectations! %s' % bot
return False
row_dic = {}
for l in urllib2.urlopen(url).read().split('\n'):
data = EXCEPTION_RE_COMPILED.search(l)
if data:
row = data.group(1)
lb = float(data.group(2))
ub = float(data.group(3))
actual = float(data.group(4))
exp = float(data.group(5))
avg = (actual + exp) / 2
shift = avg - exp
lb = lb + shift
ub = ub + shift
# In case outlier really fluctuates a lot
if actual < lb:
lb = actual - abs(shift) * 0.1 + 0.5
elif actual > ub:
ub = actual + abs(shift) * 0.1 + 0.5
row_dic[row] = '%.2f,%.2f,%.2f' % (avg, lb, ub)
if not row_dic:
print 'NO out-of-range benches found at %s' % url
return False
changed = 0
li = []
for l in open(src).readlines():
parts = l.strip().split(',')
if parts[0].startswith('#') or len(parts) != 5:
li.append(l.strip())
continue
if ','.join(parts[:2]) in row_dic:
li.append(','.join(parts[:2]) + ',' + row_dic[','.join(parts[:2])])
changed += 1
else:
li.append(l.strip())
if not changed:
print 'Not in source file:\n' + '\n'.join(row_dic.keys())
return False
dst = os.path.join(exp_dir, fname)
with open(dst, 'w+') as f:
f.write('\n'.join(li))
return True
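# Worked example of the shifting above with made-up numbers: for an expected
# range [lb, ub] = [10.0, 12.0], actual = 14.0 and exp = 11.0, we get
# avg = 12.5 and shift = 1.5, so the range first moves to [11.5, 13.5];
# because actual still exceeds the new ub, ub widens to
# 14.0 + 0.1 * abs(1.5) + 0.5 = 14.65 and the row becomes "12.50,11.50,14.65".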
def main():
d = os.path.dirname(os.path.abspath(__file__))
os.chdir(d)
if not subprocess.call(['git', 'rev-parse'], stderr=subprocess.PIPE):
print 'Please copy script to a separate dir outside git repos to use.'
return
ts_str = '%s' % time.time()
parser = argparse.ArgumentParser()
parser.add_argument('--url',
help='Broken bench build CheckForRegressions page url.')
parser.add_argument('--commit', action='store_true',
help='Whether to commit changes automatically.')
args = parser.parse_args()
repo_dir = os.path.join(d, 'skia')
if not os.path.exists(repo_dir):
os.makedirs(repo_dir)
if not checkout_or_update_skia(repo_dir):
print 'ERROR setting up Skia repo at %s' % repo_dir
return 1
file_in_repo = os.path.join(d, 'skia/experimental/benchtools/greenify.py')
if not filecmp.cmp(__file__, file_in_repo):
shutil.copy(file_in_repo, __file__)
print 'Updated this script from repo; please run again.'
return
if not args.url:
raise Exception('Please provide a url with broken CheckForRegressions.')
path = args.url.split('/')
if len(path) != 11 or not path[6].isdigit():
raise Exception('Unexpected url format: %s' % args.url)
bot = path[4]
build = path[6]
commit = False
if args.commit:
commit = True
exp_dir = os.path.join(d, 'exp' + ts_str)
clean_dir(exp_dir)
if not widen_bench_ranges(args.url, bot, repo_dir, exp_dir):
print 'NO bench exceptions found! %s' % args.url
elif not git_commit_expectations(
repo_dir, exp_dir, bot, build, commit):
print 'ERROR uploading expectations using git.'
elif not commit:
print 'CL created. Please take a look at the link above.'
else:
print 'New bench baselines should be in CQ now.'
delete_dirs([exp_dir])
if __name__ == "__main__":
main()
|
uw-it-aca/bridge-sis-provisioner
|
refs/heads/master
|
sis_provisioner/tests/csv/__init__.py
|
1
|
from django.test.utils import override_settings
from sis_provisioner.tests import (
fdao_pws_override, fdao_hrp_override, fdao_bridge_override)
from sis_provisioner.tests.account_managers import set_uw_account
user_file_name_override = override_settings(
BRIDGE_IMPORT_USER_FILENAME="users")
def set_db_records():
affiemp = set_uw_account("affiemp")
javerage = set_uw_account("javerage")
ellen = set_uw_account("ellen")
staff = set_uw_account("staff")
staff.set_disable()
retiree = set_uw_account("retiree")
tyler = set_uw_account("faculty")
leftuw = set_uw_account("leftuw")
leftuw.set_terminate_date()
testid = set_uw_account("testid")
|
mseaborn/switch
|
refs/heads/master
|
switch_mod/project/build.py
|
2
|
# Copyright 2015 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
"""
Defines model components to describe generation project build-outs for
the SWITCH-Pyomo model.
SYNOPSIS
>>> from switch_mod.utilities import define_AbstractModel
>>> model = define_AbstractModel(
... 'timescales', 'financials', 'load_zones', 'fuels',
... 'gen_tech', 'project.build')
>>> instance = model.load_inputs(inputs_dir='test_dat')
"""
import os
from pyomo.environ import *
from switch_mod.financials import capital_recovery_factor as crf
def define_components(mod):
"""
Adds components to a Pyomo abstract model object to describe
generation and storage projects. Unless otherwise stated, all power
capacity is specified in units of MW and all sets and parameters
are mandatory.
PROJECTS is the set of generation and storage projects that have
been built or could potentially be built. A project is a combination
of generation technology, load zone and location. A particular
build-out of a project should also include the year in which
construction was complete and additional capacity came online.
Members of this set are abbreviated as proj or prj in parameter
names and indexes. Use of p instead of prj is discouraged because p
is reserved for period.
proj_dbid[prj] is an external database id for each project. This is
an optional parameter that defaults to the project index.
proj_gen_tech[prj] describes what kind of generation technology a
project is using.
proj_load_zone[prj] is the load zone this project is built in.
VARIABLE_PROJECTS is a subset of PROJECTS that only includes
variable generators such as wind or solar that have exogenous
constraints on their energy production.
BASELOAD_PROJECTS is a subset of PROJECTS that only includes
baseload generators such as coal or geothermal.
LZ_PROJECTS[lz in LOAD_ZONES] is an indexed set that lists all
projects within each load zone.
PROJECTS_CAP_LIMITED is the subset of PROJECTS that are capacity
limited. Most of these will be generator types that are resource
limited like wind, solar or geothermal, but this can be specified
for any project. Some existing or proposed projects may have upper
bounds on increasing capacity or replacing capacity as it is retired
based on permits or local air quality regulations.
proj_capacity_limit_mw[proj] is defined for generation technologies
that are resource limited and do not compete for land area. This
describes the maximum possible capacity of a project in units of
megawatts.
proj_full_load_heat_rate[proj] is the full load heat rate in units
of MMBTU/MWh that describes the thermal efficiency of a project when
running at full load. This optional parameter overrides the generic
heat rate of a generation technology. In the future, we may expand
this to be indexed by fuel source as well if we need to support a
multi-fuel generator whose heat rate depends on fuel source.
proj_energy_source[proj] is the primary energy source for a project.
This is derived from the generation technology description and
assumes one energy source per generation technology. This parameter
may be altered in the future to support generators that use multiple
energy sources.
-- CONSTRUCTION --
PROJECT_BUILDYEARS is a two-dimensional set of projects and the
years in which construction or expansion occurred or can occur. You
can think of a project as a physical site that can be built out over
time. BuildYear is the year in which construction is completed and
new capacity comes online, not the year when construction begins.
BuildYear will be in the past for existing projects and will be the
first year of an investment period for new projects. Investment
decisions are made for each project/invest period combination. This
set is derived from other parameters for all new construction. This
set also includes entries for existing projects that have already
been built; information for legacy projects comes from other files
and their build years will usually not correspond to the set of
investment periods. There are two recommended options for
abbreviating this set for denoting indexes: typically this should be
written out as (proj, build_year) for clarity, but when brevity is
more important (prj, b) is acceptable.
NEW_PROJ_BUILDYEARS is a subset of PROJECT_BUILDYEARS that only
includes projects that have not yet been constructed. This is
derived by joining the set of PROJECTS with the set of
NEW_GENERATION_BUILDYEARS using generation technology.
EXISTING_PROJ_BUILDYEARS is a subset of PROJECT_BUILDYEARS that
only includes existing projects.
proj_existing_cap[(proj, build_year) in EXISTING_PROJ_BUILDYEARS] is
a parameter that describes how much capacity was built in the past
for existing projects.
BuildProj[proj, build_year] is a decision variable that describes
how much capacity of a project to install in a given period. This also
stores the amount of capacity that was installed in existing projects
that are still online.
ProjCapacity[proj, period] is an expression that returns the total
capacity online in a given period. This is the sum of installed capacity
minus all retirements.
Max_Build_Potential[proj] is a constraint defined for each project
that enforces maximum capacity limits for resource-limited projects.
ProjCapacity <= proj_capacity_limit_mw
proj_end_year[(proj, build_year) in PROJECT_BUILDYEARS] is the last
investment period in the simulation that a given project build will
be operated. It can either indicate retirement or the end of the
simulation. This is derived from g_max_age.
--- OPERATIONS ---
PROJECT_BUILDS_OPERATIONAL_PERIODS[proj, build_year] is an indexed
set that describes which periods a given project build will be
operational.
PROJECT_PERIOD_ONLINE_BUILD_YRS[proj, period] is a complementary
indexed set that identifies which build years will still be online
for the given project in the given period. For some project-period
combinations, this will be an empty set.
PROJECT_OPERATIONAL_PERIODS describes periods in which projects
could be operational. Unlike the related sets above, it is not
indexed. Instead it is specified as a set of (proj, period)
combinations useful for indexing other model components.
--- COSTS ---
proj_connect_cost_per_mw[prj] is the cost of grid upgrades to support a
new project, in dollars per peak MW. These costs include new
transmission lines to a substation, substation upgrades and any
other grid upgrades that are needed to deliver power from the
interconnect point to the load center or from the load center to the
broader transmission network.
The following cost components are defined for each project and build
year. These parameters will always be available, but will typically
be populated by the generic costs specified in generator costs
inputs file and the load zone cost adjustment multipliers from
load_zones inputs file.
proj_overnight_cost[proj, build_year] is the overnight capital cost per
MW of capacity for building a project in the given period. By
"installed in the given period", I mean that it comes online at the
beginning of the given period and construction starts before that.
proj_fixed_om[proj, build_year] is the annual fixed Operations and
Maintenance costs (O&M) per MW of capacity for given project that
was installed in the given period.
-- Derived cost parameters --
proj_capital_cost_annual[proj, build_year] is the annualized loan
payments for a project's capital and connection costs in units of
$/MW per year. This is specified in non-discounted real dollars in a
future period, not real dollars in net present value.
Proj_Fixed_Costs_Annual[proj, period] is the total annual fixed
costs (capital as well as fixed operations & maintenance) incurred
by a project in a period. This reflects all of the builds that are
operational in the given period. This is an expression that reflects
decision variables.
Total_Proj_Fixed_Costs_Annual[period] is the sum of
Proj_Fixed_Costs_Annual[proj, period] for all projects that could be
online in the target period. This aggregation is performed for the
benefit of the objective function.
--- DELAYED IMPLEMENTATION ---
The following components are not implemented at this time.
proj_energy_capacity_overnight_cost[proj, period] defaults to the
generic costs of the energy component of a storage technology. It
can be overridden if different projects have different cost
components. For new CAES projects, this could easily be overridden
based on whether an empty gas well was nearby that could be reused,
whether the local geological conditions made it easy or difficult to
drill and construct underground storage, or whether an above-ground
pressurized vessel would be needed. For new battery projects, a
generic cost would be completely sufficient.
proj_replacement_id[prj] is defined for projects that could replace
existing generators.
LOCATIONS_WITH_COMPETITION is the set of locations that have limited
land area where multiple projects can compete for space. Members of
this set are abbreviated as either loc or a lowercase L "l" in
parameter names and indexes.
loc_area_km2[l] describes the land area available for development
at a particular location in units of square kilometers.
proj_location[prj] is only defined for projects that compete with each
other for limited land space at a given location. It refers to a
member of the set LOCATIONS_WITH_COMPETITION. For example, if solar
thermal and solar PV projects were competing for the same parcel of
land, they would need the same location id.
proj_land_footprint_mw_km2[prj] describes the land footprint of a project
in units of megawatts per square kilometer.
Max_Build_Location[location] is a constraint defined for each project
that enforces maximum capacity limits for resource-limited locations.
sum(BuildProj/proj_land_footprint_mw_km2) <= loc_area_km2
ccs_pipeline_cost_per_mw[proj, build_year] is the normalized cost of
a ccs pipeline sized relative to a project's emissions intensity.
ProjCommitToMinBuild[proj, build_year] is a binary decision variable
that is only defined for generation technologies that have minimum
build requirements as specified by g_min_build_capacity[g].
Enforce_Min_Build[proj, build_year] is a constraint that forces
project build-outs to meet the minimum build requirements for
generation technologies that have those requirements. This is
defined as a pair of constraints that force BuildProj to be 0 when
ProjCommitToMinBuild is 0, and force BuildProj to be greater than
g_min_build_capacity when ProjCommitToMinBuild is 1. The value used
for max_reasonable_build_capacity can be set to something like three
times the sum of the peak demand of all load zones in a given
period, or just to the maximum possible floating point value. When
ProjCommitToMinBuild is 1, the upper constraint should be non-binding.
ProjCommitToMinBuild * g_min_build_capacity <= BuildProj ...
<= ProjCommitToMinBuild * max_reasonable_build_capacity
Decommission[proj, build_year, period] is a decision variable that
allows early retirement of portions of projects. Any portion of a
project that is decommissioned early will not incur fixed O&M
costs and cannot be brought back into service in later periods.
NameplateCapacity[proj, build_year, period] is an expression that
describes the amount of capacity available from a particular project
build in a given period. This takes into account any decommissioning
that occurred.
NameplateCapacity = BuildProj - sum(Decommission)
"""
mod.PROJECTS = Set()
mod.proj_dbid = Param(mod.PROJECTS, default=lambda m, proj: proj)
mod.proj_gen_tech = Param(mod.PROJECTS, within=mod.GENERATION_TECHNOLOGIES)
mod.proj_load_zone = Param(mod.PROJECTS, within=mod.LOAD_ZONES)
mod.min_data_check('PROJECTS', 'proj_gen_tech', 'proj_load_zone')
mod.VARIABLE_PROJECTS = Set(
initialize=mod.PROJECTS,
filter=lambda m, proj: (
m.g_is_variable[m.proj_gen_tech[proj]]))
mod.BASELOAD_PROJECTS = Set(
initialize=mod.PROJECTS,
filter=lambda m, proj: (
m.g_is_baseload[m.proj_gen_tech[proj]]))
mod.LZ_PROJECTS = Set(
mod.LOAD_ZONES,
initialize=lambda m, lz: set(
p for p in m.PROJECTS if m.proj_load_zone[p] == lz))
mod.PROJECTS_CAP_LIMITED = Set(within=mod.PROJECTS)
mod.proj_capacity_limit_mw = Param(
mod.PROJECTS_CAP_LIMITED,
within=PositiveReals)
# Add PROJECTS_LOCATION_LIMITED & associated stuff later
mod.FUEL_BASED_PROJECTS = Set(
initialize=lambda m: set(
p for p in m.PROJECTS if m.g_uses_fuel[m.proj_gen_tech[p]]))
mod.proj_fuel = Param(
mod.FUEL_BASED_PROJECTS,
within=mod.FUELS,
initialize=lambda m, proj: set(
set(m.G_ENERGY_SOURCES[m.proj_gen_tech[proj]]) &
set(m.FUELS)).pop())
mod.NON_FUEL_BASED_PROJECTS = Set(
initialize=lambda m: set(
p for p in m.PROJECTS if not m.g_uses_fuel[m.proj_gen_tech[p]]))
mod.proj_non_fuel_energy_source = Param(
mod.NON_FUEL_BASED_PROJECTS,
within=mod.NON_FUEL_ENERGY_SOURCES,
initialize=lambda m, proj: set(
set(m.G_ENERGY_SOURCES[m.proj_gen_tech[proj]]) &
set(m.NON_FUEL_ENERGY_SOURCES)).pop())
mod.proj_energy_source = Param(
mod.PROJECTS,
within=mod.ENERGY_SOURCES,
initialize=lambda m, proj: set(
m.G_ENERGY_SOURCES[m.proj_gen_tech[proj]]).pop())
# For now, I've only implemented support for each project having a
# single fuel type. Throw an error if that is not the case, which
# can prompt us to expand the model to support that.
mod.thermal_generators_use_single_fuel = BuildCheck(
mod.FUEL_BASED_PROJECTS,
rule=lambda m, proj: len(set(
set(m.G_ENERGY_SOURCES[m.proj_gen_tech[proj]]) &
set(m.FUELS))) == 1)
# proj_full_load_heat_rate defaults to g_full_load_heat_rate if it
# is available. Throw an error if no data was given for
# proj_full_load_heat_rate or g_full_load_heat_rate.
def proj_full_load_heat_rate_default_rule(m, pr):
g = m.proj_gen_tech[pr]
if g in m.g_full_load_heat_rate:
return m.g_full_load_heat_rate[g]
else:
raise ValueError(
("Project {} uses a fuel, but there is no heat rate " +
"specified for this project or its generation technology " +
"{}.").format(pr, m.proj_gen_tech[pr]))
mod.proj_full_load_heat_rate = Param(
mod.FUEL_BASED_PROJECTS,
within=PositiveReals,
default=proj_full_load_heat_rate_default_rule)
def init_proj_buildyears(m):
project_buildyears = set()
for proj in m.PROJECTS:
g = m.proj_gen_tech[proj]
build_years = [
b for (gen, b) in m.NEW_GENERATION_BUILDYEARS if gen == g]
for b in build_years:
project_buildyears.add((proj, b))
return project_buildyears
mod.NEW_PROJ_BUILDYEARS = Set(
dimen=2,
initialize=init_proj_buildyears)
mod.EXISTING_PROJ_BUILDYEARS = Set(
dimen=2)
mod.proj_existing_cap = Param(
mod.EXISTING_PROJ_BUILDYEARS,
within=PositiveReals)
mod.min_data_check('proj_existing_cap')
mod.PROJECT_BUILDYEARS = Set(
dimen=2,
initialize=lambda m: set(
m.EXISTING_PROJ_BUILDYEARS | m.NEW_PROJ_BUILDYEARS))
def init_proj_end_year(m, proj, build_year):
g = m.proj_gen_tech[proj]
max_age = m.g_max_age[g]
earliest_study_year = m.period_start[m.PERIODS.first()]
if build_year + max_age < earliest_study_year:
return build_year + max_age
for p in m.PERIODS:
if build_year + max_age < m.period_end[p]:
break
return p
mod.proj_end_year = Param(
mod.PROJECT_BUILDYEARS,
initialize=init_proj_end_year)
mod.min_data_check('proj_end_year')
mod.PROJECT_BUILDS_OPERATIONAL_PERIODS = Set(
mod.PROJECT_BUILDYEARS,
within=mod.PERIODS,
ordered=True,
initialize=lambda m, proj, bld_yr: set(
p for p in m.PERIODS
if bld_yr <= p <= m.proj_end_year[proj, bld_yr]))
# The set of build years that could be online in the given period
# for the given project.
mod.PROJECT_PERIOD_ONLINE_BUILD_YRS = Set(
mod.PROJECTS, mod.PERIODS,
initialize=lambda m, proj, p: set(
bld_yr for (prj, bld_yr) in m.PROJECT_BUILDYEARS
if prj == proj and bld_yr <= p <= m.proj_end_year[proj, bld_yr]))
def bounds_BuildProj(model, proj, bld_yr):
if((proj, bld_yr) in model.EXISTING_PROJ_BUILDYEARS):
return (model.proj_existing_cap[proj, bld_yr],
model.proj_existing_cap[proj, bld_yr])
elif(proj in model.PROJECTS_CAP_LIMITED):
# This does not replace Max_Build_Potential because
# Max_Build_Potential applies across all build years.
return (0, model.proj_capacity_limit_mw[proj])
else:
return (0, None)
mod.BuildProj = Var(
mod.PROJECT_BUILDYEARS,
within=NonNegativeReals,
bounds=bounds_BuildProj)
# To Do: Subtract retirements after I write support for that.
mod.ProjCapacity = Expression(
mod.PROJECTS, mod.PERIODS,
initialize=lambda m, proj, period: sum(
m.BuildProj[proj, bld_yr]
for bld_yr in m.PROJECT_PERIOD_ONLINE_BUILD_YRS[proj, period]))
mod.Max_Build_Potential = Constraint(
mod.PROJECTS_CAP_LIMITED, mod.PERIODS,
rule=lambda m, proj, p: (
m.proj_capacity_limit_mw[proj] >= m.ProjCapacity[proj, p]))
# Costs
mod.proj_connect_cost_per_mw = Param(mod.PROJECTS, within=NonNegativeReals)
mod.min_data_check('proj_connect_cost_per_mw')
mod.proj_overnight_cost = Param(
mod.PROJECT_BUILDYEARS,
within=NonNegativeReals,
default=lambda m, proj, bld_yr: (
m.g_overnight_cost[m.proj_gen_tech[proj], bld_yr] *
m.lz_cost_multipliers[m.proj_load_zone[proj]]))
mod.proj_fixed_om = Param(
mod.PROJECT_BUILDYEARS,
within=NonNegativeReals,
default=lambda m, proj, bld_yr: (
m.g_fixed_o_m[m.proj_gen_tech[proj], bld_yr] *
m.lz_cost_multipliers[m.proj_load_zone[proj]]))
# Derived annual costs
mod.proj_capital_cost_annual = Param(
mod.PROJECT_BUILDYEARS,
initialize=lambda m, proj, bld_yr: (
(m.proj_overnight_cost[proj, bld_yr] +
m.proj_connect_cost_per_mw[proj]) *
crf(m.interest_rate, m.g_max_age[m.proj_gen_tech[proj]])))
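    # Hedged aside: crf() is imported from switch_mod.financials above; the
    # standard capital recovery factor it is expected to implement is
    #     CRF(i, n) = i / (1 - (1 + i) ** -n)
    # so, for example, CRF(0.07, 30) ~= 0.0806, i.e. each dollar of overnight
    # plus connection cost becomes roughly 8.06 cents of annual payment.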
mod.PROJECT_OPERATIONAL_PERIODS = Set(
dimen=2,
initialize=lambda m: set(
(proj, p)
for (proj, bld_yr) in m.PROJECT_BUILDYEARS
for p in m.PROJECT_BUILDS_OPERATIONAL_PERIODS[proj, bld_yr]))
mod.Proj_Fixed_Costs_Annual = Expression(
mod.PROJECT_OPERATIONAL_PERIODS,
initialize=lambda m, proj, p: sum(
m.BuildProj[proj, bld_yr] *
(m.proj_capital_cost_annual[proj, bld_yr] +
m.proj_fixed_om[proj, bld_yr])
for (prj, bld_yr) in m.PROJECT_BUILDYEARS
if (p in m.PROJECT_BUILDS_OPERATIONAL_PERIODS[prj, bld_yr] and
proj == prj)))
# Summarize costs for the objective function. Units should be total
# annual future costs in $base_year real dollars. The objective
# function will convert these to base_year Net Present Value in
# $base_year real dollars.
mod.Total_Proj_Fixed_Costs_Annual = Expression(
mod.PERIODS,
initialize=lambda m, p: sum(
m.Proj_Fixed_Costs_Annual[proj, p]
for (proj, period) in m.PROJECT_OPERATIONAL_PERIODS
if p == period))
mod.cost_components_annual.append('Total_Proj_Fixed_Costs_Annual')
def load_inputs(mod, switch_data, inputs_dir):
"""
Import project-specific data. The following files are expected in
the input directory.
all_projects.tab
PROJECT, proj_dbid, proj_gen_tech, proj_load_zone,
proj_connect_cost_per_mw
existing_projects.tab
PROJECT, build_year, proj_existing_cap
cap_limited_projects is optional because some systems will not have
capacity limited projects.
cap_limited_projects.tab
PROJECT, proj_capacity_limit_mw
The following files are optional because they override generic
values given by descriptions of generation technologies.
proj_heat_rate.tab
PROJECT, proj_heat_rate
Note: Load-zone cost adjustments will not be applied to any costs
specified in project_specific_costs.
project_specific_costs.tab
PROJECT, build_year, proj_overnight_cost, proj_fixed_om
"""
switch_data.load_aug(
filename=os.path.join(inputs_dir, 'all_projects.tab'),
select=('PROJECT', 'proj_dbid', 'proj_gen_tech',
'proj_load_zone', 'proj_connect_cost_per_mw'),
index=mod.PROJECTS,
param=(mod.proj_dbid, mod.proj_gen_tech,
mod.proj_load_zone, mod.proj_connect_cost_per_mw))
switch_data.load_aug(
filename=os.path.join(inputs_dir, 'existing_projects.tab'),
select=('PROJECT', 'build_year', 'proj_existing_cap'),
index=mod.EXISTING_PROJ_BUILDYEARS,
param=(mod.proj_existing_cap))
switch_data.load_aug(
optional=True,
filename=os.path.join(inputs_dir, 'cap_limited_projects.tab'),
select=('PROJECT', 'proj_capacity_limit_mw'),
index=mod.PROJECTS_CAP_LIMITED,
param=(mod.proj_capacity_limit_mw))
switch_data.load_aug(
optional=True,
filename=os.path.join(inputs_dir, 'proj_heat_rate.tab'),
select=('PROJECT', 'full_load_heat_rate'),
param=(mod.proj_full_load_heat_rate))
switch_data.load_aug(
optional=True,
filename=os.path.join(inputs_dir, 'project_specific_costs.tab'),
select=('PROJECT', 'build_year',
'proj_overnight_cost', 'proj_fixed_om'),
param=(mod.proj_overnight_cost, mod.proj_fixed_om))
|
Maximilian-Reuter/SickRage
|
refs/heads/master
|
lib/pyasn1/codec/cer/decoder.py
|
261
|
# CER decoder
from pyasn1.type import univ
from pyasn1.codec.ber import decoder
from pyasn1.compat.octets import oct2int
from pyasn1 import error
class BooleanDecoder(decoder.AbstractSimpleDecoder):
protoComponent = univ.Boolean(0)
def valueDecoder(self, fullSubstrate, substrate, asn1Spec, tagSet, length,
state, decodeFun, substrateFun):
head, tail = substrate[:length], substrate[length:]
if not head:
raise error.PyAsn1Error('Empty substrate')
byte = oct2int(head[0])
# CER/DER specifies encoding of TRUE as 0xFF and FALSE as 0x0, while
# BER allows any non-zero value as TRUE; cf. sections 8.2.2. and 11.1
# in http://www.itu.int/ITU-T/studygroups/com17/languages/X.690-0207.pdf
if byte == 0xff:
value = 1
elif byte == 0x00:
value = 0
else:
raise error.PyAsn1Error('Boolean CER violation: %s' % byte)
return self._createComponent(asn1Spec, tagSet, value), tail
tagMap = decoder.tagMap.copy()
tagMap.update({
univ.Boolean.tagSet: BooleanDecoder()
})
typeMap = decoder.typeMap
class Decoder(decoder.Decoder): pass
decode = Decoder(tagMap, decoder.typeMap)
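# Hedged usage sketch, not part of the original module: decoding one
# CER-encoded BOOLEAN with the decode callable defined above. The octets
# 01 01 ff are tag (BOOLEAN), length (1) and value (0xFF, i.e. TRUE); any
# value octet other than 0x00/0xFF would raise PyAsn1Error in BooleanDecoder.
#
#     value, rest = decode(b'\x01\x01\xff')
#     assert int(value) == 1 and not rest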
|
inasafe/inasafe
|
refs/heads/develop
|
safe/report/test/__init__.py
|
44
|
# coding=utf-8
__copyright__ = "Copyright 2016, The InaSAFE Project"
__license__ = "GPL version 3"
__email__ = "info@inasafe.org"
__revision__ = '$Format:%H$'
|
Microsoft/PTVS
|
refs/heads/master
|
Python/Tests/TestData/Grammar/Literals.py
|
7
|
"abc"
r"raw string"
R"raw string"
"""abc"""
r"""raw string"""
R"""raw string"""
'abc'
r'raw string'
R'raw string'
'''abc'''
r'''raw string'''
R'''raw string'''
1000
2147483647
3.14
10.
.001
1e100
3.14e-10
0e0
3.14j
10.j
10j
.001j
1e100j
3.14e-10j
-2147483648
-100
|
minesense/VisTrails
|
refs/heads/master
|
contrib/itk/__init__.py
|
6
|
############################################################################
##
## Copyright (C) 2006-2007 University of Utah. All rights reserved.
##
## This file is part of VisTrails.
##
## This file may be used under the terms of the GNU General Public
## License version 2.0 as published by the Free Software Foundation
## and appearing in the file LICENSE.GPL included in the packaging of
## this file. Please review the following to ensure GNU General Public
## Licensing requirements will be met:
## http://www.opensource.org/licenses/gpl-license.php
##
## If you are unsure which license is appropriate for your use (for
## instance, you are interested in developing a commercial derivative
## of VisTrails), please contact us at contact@vistrails.org.
##
## This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
## WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
##
############################################################################
# ITK package for VisTrails
############################################################################
"""ITK is an open-source, cross-platform system that provides developers with an extensive suite of software tools for image analysis. www.itk.org
For info on installing itk and python-itk on ubuntu see: http://paulnovo.org/node/2
For info on generating wrapper modules for more itk filters see itk/filter_generator.py"""
version = '0.2'
identifier = 'edu.utah.sci.vistrails.itk'
name = 'ITK'
import core.bundles.utils
import core.requirements
from core.modules.vistrails_module import Module, ModuleError
# Ugly, but Carlos doesn't know any better
if core.bundles.utils.guess_system() == 'linux-ubuntu':
import sys
sys.path.append('/usr/local/lib/VisTrailsITK')
try:
from core.bundles import py_import
itk = py_import('itk', {'linux-ubuntu': 'vistrails-itk'})
except ImportError:
raise core.requirements.MissingRequirement("ITK and WrapITK")
import core.modules
import core.modules.module_registry
# ITK Package imports
from PixelType import *
from FeatureExtractionFilters import *
from ITK import *
from Image import Image
from IntensityFilters import *
from SegmentationFilters import *
from SelectionFilters import *
from SmoothingFilters import *
from ThresholdFilters import *
from GradientFilters import *
from NeighborhoodFilters import *
from ImageReader import *
def initialize(*args, **keywords):
reg = core.modules.module_registry
basic = core.modules.basic_modules
########################################################################################
# Misc.
Index2D.register(reg,basic)
Index3D.register(reg,basic)
Size.register(reg,basic)
Region.register(reg,basic)
PixelType.register(reg,basic)
Filter.register(reg,basic)
Kernel.register(reg,basic)
Image.register(reg,basic)
########################################################################################
# Pixel Types
pixeltypes = [PixelTypeFloat,
PixelTypeUnsignedChar,
PixelTypeUnsignedShort,
PixelTypeRGB]
for cls in pixeltypes:
cls.register(reg,basic)
########################################################################################
# Feature Extraction Filters
featurefilters = [GradientMagnitudeRecursiveGaussianImageFilter,
DanielssonDistanceMapImageFilter,
SobelEdgeDetectionImageFilter]
for cls in featurefilters:
cls.register(reg,basic)
########################################################################################
# Intensity Filters
intensityfilters = [RescaleIntensityImageFilter,
SigmoidImageFilter,
ThresholdImageFilter,
ShiftScaleImageFilter,
NormalizeImageFilter]
for cls in intensityfilters:
cls.register(reg,basic)
########################################################################################
# Segmentation Filters
segmentationfilters = [IsolatedWatershedImageFilter,
ConnectedThresholdImageFilter,
ConfidenceConnectedImageFilter,
IsolatedConnectedImageFilter]
for cls in segmentationfilters:
cls.register(reg,basic)
########################################################################################
# Selection Filters
selectionfilters = [RegionOfInterestImageFilter,
CastImageFilter,
ExtractImageFilter]
for cls in selectionfilters:
cls.register(reg,basic)
########################################################################################
# Smoothing Filters
smoothingfilters = [CurvatureAnisotropicDiffusionFilter,
RecursiveGaussianImageFilter,
DiscreteGaussianImageFilter,
GradientAnisotropicDiffusionImageFilter,
MinMaxCurvatureFlowImageFilter,
BinomialBlurImageFilter,
BilateralImageFilter,
CurvatureFlowImageFilter]
for cls in smoothingfilters:
cls.register(reg,basic)
########################################################################################
# Threshold Filters
thresholdfilters = [BinaryThresholdImageFilter]
for cls in thresholdfilters:
cls.register(reg,basic)
########################################################################################
# Gradient Filters
gradientfilters = [GradientMagnitudeImageFilter]
for cls in gradientfilters:
cls.register(reg,basic)
########################################################################################
# Neighborhood Filters
neighborhoodfilters = [MeanImageFilter,
MedianImageFilter,
BinaryErodeImageFilter]
for cls in neighborhoodfilters:
cls.register(reg,basic)
########################################################################################
# Image Reader
imagereader = [ImageReader,
ImageToFile,
GDCMReader,
DICOMReader]
for cls in imagereader:
cls.register(reg,basic)
|
alex/bcrypt
|
refs/heads/master
|
bcrypt/__init__.py
|
4
|
# Author:: Donald Stufft (<donald@stufft.io>)
# Copyright:: Copyright (c) 2013 Donald Stufft
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import hashlib
import os
import sys
from cffi import FFI
from . import __about__
from .__about__ import *
__all__ = ["gensalt", "hashpw"] + __about__.__all__
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
text_type = str
else:
text_type = unicode
_crypt_blowfish_dir = "crypt_blowfish-1.2"
_bundled_dir = os.path.join(os.path.dirname(__file__), _crypt_blowfish_dir)
_ffi = FFI()
_ffi.cdef("""
char *crypt_gensalt_rn(const char *prefix, unsigned long count,
const char *input, int size, char *output, int output_size);
char *crypt_rn(const char *key, const char *setting, void *data, int size);
""")
_bcrypt_lib = _ffi.verify('#include "ow-crypt.h"',
sources=[
str(os.path.join(_bundled_dir, "crypt_blowfish.c")),
str(os.path.join(_bundled_dir, "crypt_gensalt.c")),
str(os.path.join(_bundled_dir, "wrapper.c")),
# How can we get distutils to work with a .S file?
# Set https://github.com/dstufft/bcrypt/blob/4c939e895bd9607301cda6d6f05ef3c1146eb658/bcrypt/crypt_blowfish-1.2/crypt_blowfish.c#L57
# back to 1 if we get ASM loaded.
# str(os.path.join(_bundled_dir, "x86.S")),
],
include_dirs=[str(_bundled_dir)],
modulename=str("_".join([
"_cffi",
hashlib.sha1(
"".join(_ffi._cdefsources).encode("utf-8")
).hexdigest()[:6],
hashlib.sha1(
_crypt_blowfish_dir.encode("utf-8")
).hexdigest()[:6],
])),
)
def gensalt(rounds=12):
salt = os.urandom(16)
output = _ffi.new("unsigned char[]", 30)
retval = _bcrypt_lib.crypt_gensalt_rn(
b"$2a$", rounds, salt, len(salt), output, len(output))
if not retval:
raise ValueError("Invalid rounds")
return _ffi.string(output)
def hashpw(password, salt):
if isinstance(password, text_type) or isinstance(salt, text_type):
raise TypeError("Unicode-objects must be encoded before hashing")
hashed = _ffi.new("unsigned char[]", 128)
retval = _bcrypt_lib.crypt_rn(password, salt, hashed, len(hashed))
if not retval:
raise ValueError("Invalid salt")
return _ffi.string(hashed)
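# Hedged usage sketch (added for illustration; the password value is made up):
# bcrypt verification works by re-hashing the candidate password with the
# stored hash as the salt and comparing the results. Byte strings are required,
# since hashpw() rejects unicode input.
#
# hashed = hashpw(b"secret password", gensalt(rounds=12))
# assert hashpw(b"secret password", hashed) == hashed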
|
ville-k/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/__init__.py
|
959
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
teknolab/django.org.tr
|
refs/heads/develop
|
manage.py
|
404
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
infoscout/python-bootcamp-pv
|
refs/heads/master
|
bootcamp/core.py
|
1
|
def say_hello():
print "\n* Hello InfoScout Team!\n"
def test_helper(got, expected):
if got == expected:
prefix = 'Correct!'
else:
prefix = 'Fail!'
print " {prefix} got: {got} expected: {expected}".format(prefix=prefix, got=got, expected=expected)
|
TB-Budget/T9_XMLRPC
|
refs/heads/master
|
ExtUtil/T9rpc.py
|
1
|
# coding=UTF-8
'''
Remote procedure call module for Turbo9
Based on func_rpc.php
This project is free software: you may redistribute it and/or modify it under the
terms of version 2.1 or, at your option, any later version of the GNU Lesser
General Public License as published by the Free Software Foundation.
This project is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along
with this project. If not, write to the Free Software Foundation, Inc.,
59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
'''
class T9RPCError(Exception):
"""Ошибка протокола или подкулючения T9"""
pass
# Serialization / deserialization helpers
def unserialize(Str, Len):
'''Take Len bytes from the head of the string'''
Res=Str[:Len]
Str=Str[Len:]
return (Res, Str)
def int_serialize(Val,Size=4):
'''Pack a fixed-size integer (little-endian)'''
Str=""
for i in xrange(0,Size):
(Val,Chr)=divmod(Val,256)
Str=Str+chr(Chr)
return Str
def int_unserialize(Str,Size=4):
'''Unpack a fixed-size integer (little-endian)'''
Res=0
for i in xrange(Size-1,-1,-1):
Res=Res*256+ord(Str[i])
Str=Str[Size:]
return (Res, Str)
def int_ex_serialize(Val):
'''Pack an integer together with its size marker'''
if (Val > 0) and (Val <= 253):
return chr(Val)
elif (Val >= -32768) and (Val <= 32767):
return chr(254)+int_serialize(Val,2)
else:
return chr(255)+int_serialize(Val,4)
def int_ex_unserialize(Str):
'''Unpack an integer that was packed together with its size marker'''
(Res, Str)=int_unserialize(Str,1)
if Res<=253:
return (Res, Str)
elif Res==254:
return int_unserialize(Str,2)
else:
return int_unserialize(Str,4)
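# Illustrative round trip (added comment, not in the original module): values in
# 1..253 are packed into a single byte, larger values get a 254/255 marker byte
# followed by a 2- or 4-byte little-endian integer.
# int_ex_serialize(5) == chr(5)
# int_ex_serialize(1000) == chr(254) + int_serialize(1000, 2)
# int_ex_unserialize(int_ex_serialize(1000) + "tail") == (1000, "tail")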
#def str_serialize(Val):
# '''Pack a string (prepend its length) v63'''
# Len=len(Val)
# if Len<255:
# return chr(Len)+Val
# else:
# return chr(255)+int_serialize(Len)+Val
def str_serialize(Val):
'''Pack a string (prepend its length)'''
if not Val:
return chr(0)
else:
return chr(1) + int_ex_serialize(len(Val)) + Val
#def str_unserialize(Str):
# '''Unpack a length-prefixed string from the stream v63'''
# (Len, Str)=int_unserialize(Str,1)
# if Len==255:
# (Len, Str)=int_unserialize(Str,4)
# return unserialize(Str, Len)
def str_unserialize(Str):
(code, Str) = int_unserialize(Str, 1)
if not code:
return ("", Str)
elif code == 1:
(Len, Str) = int_ex_unserialize(Str)
return unserialize(Str, Len)
else:
raise T9RPCError("RPC protocol error: unicode not supported")
import uuid
def guid_serialize(Id):
'''Pack a GUID'''
Res=uuid.UUID(Id).bytes_le
return Res
def guid_unserialize(Str):
'''Unpack a GUID into its text representation (not present in the ДИЦ version, which used plain unpacking)'''
(Bin, Str)=unserialize(Str,16)
Guid=uuid.UUID(bytes_le=Bin).hex
Guid=Guid.upper()
Guid="{"+Guid[0:8]+"-"+Guid[8:12]+"-"+Guid[12:16]+"-"+Guid[16:20]+"-"+Guid[20:]+"}"
return (Guid,Str)
def exception_unserialize(Str):
'''Unpack an error response'''
(Class, Str)=str_unserialize(Str)
(Code, Str)=int_ex_unserialize(Str)
(Mess, Str)=str_unserialize(Str)
return (Mess, Str)
## Network interaction
import socket
NetRetOk=chr(1)
NetRetError=chr(2)
NetRetCallback=chr(3)
def recv(Sock, Len):
'''Receive exactly Len bytes from the socket'''
Res = ""
while (Len > 0):
Str = Sock.recv(Len)
if not Str:
raise T9RPCError("Socket read error")
Len-=len(Str)
Res+=Str
return Res
def recv_packet(Sock):
'''Receive a packet from the socket'''
Header=recv(Sock,12)
if Header[:4] != "TBNP":
raise T9RPCError("RPC packet error")
Header=Header[4:]
(Count1,Header)=int_unserialize(Header)
(Count2,Header)=int_unserialize(Header)
Str1=recv(Sock,Count1)
Str2=recv(Sock,Count2)
return Str1
def send_packet(Sock,Data1,Data2):
'''Send a packet'''
Packet="TBNP"+int_serialize(len(Data1))+int_serialize(len(Data2))+Data1+Data2
Res=Sock.send(Packet)
if not Res:
raise T9RPCError("Socket write error")
return Res
def communicate(Sock,Data1):
'''Exchange data with the server'''
send_packet(Sock,Data1,"")
Res=recv_packet(Sock)
Code=Res[0]
Res=Res[1:]
if Code==NetRetError:
raise T9RPCError("RPC call return error: "+exception_unserialize(Res)[0])
elif Code==NetRetCallback:
raise T9RPCError("RPC callback not supported")
return Res
## Sessions and connections
NetCmdNegotiating=1
NetCmdCall=2
NetCmdRelease=5
NetCmdReconnect=6
NetCmdDisconnect=7
NetProtocolVersion1=7
NetProtocolVersion2=4
diUser=3+9
dispIn=64
dispOut=128
dispString=6
ProxyService="T9WebProxy"
ProxyGUID="{9B4F96CB-39A1-4EA7-B3BB-052203517FD9}"
def connect(Server,Port,Service,Guid,Info,SID,RemoteAddr,UserAgent):
'''Connect to the server.
Returns a tuple of the socket and the connection info.
'''
if Info==None:
Info=["","","",""]
if Guid==None or Guid=="":
Guid=ProxyGUID
if Service==None or Service=="":
Service=ProxyService
Sock=socket.create_connection((Server,Port))
Data=chr(NetCmdNegotiating)+chr(NetProtocolVersion1)+chr(NetProtocolVersion2) \
+str_serialize(Service)+guid_serialize(Guid) \
+str_serialize(SID)+str_serialize("")+str_serialize(RemoteAddr)+str_serialize(UserAgent)+chr(0)+chr(0)+chr(0)
Res=communicate(Sock,Data)
(Info[0],Res)=guid_unserialize(Res)
(Info[1],Res)=int_ex_unserialize(Res)
(Info[2],Res)=int_ex_unserialize(Res)
(Info[3],Res)=guid_unserialize(Res)
return (Sock, Info)
def reconnect(Server,Port,Info):
'''Re-establish a connection from saved connection info.
Returns a tuple of the socket and the connection info.'''
Sock=socket.create_connection((Server,Port))
Data=chr(NetCmdReconnect)+chr(NetProtocolVersion1)+chr(NetProtocolVersion2) \
+guid_serialize(Info[0])+int_ex_serialize(Info[1])+int_ex_serialize(Info[2])+guid_serialize(Info[3])
communicate(Sock,Data)
return (Sock, Info)
def call(Sock,Method_idx,Arg):
'''Call a procedure by index through the built-in dispatcher'''
Data=chr(NetCmdCall)+chr(diUser+Method_idx)+chr(1)+chr(dispOut+dispString)+chr(dispIn+dispString)+str_serialize(Arg)
Res=communicate(Sock,Data)
(N,Res)=int_ex_unserialize(Res)
if N != 1:
raise T9RPCError("RPC protocol error: invalid parameter count: "+str(N))
Res=int_unserialize(Res,1)[1]
# if T<>chr(dispOut+dispString):
# raise AssertionError("RPC protocol error: invalid result returned")
Res=int_unserialize(Res,1)[1]
return str_unserialize(Res)[0]
def disconnect(Sock):
'''Disconnect from the server'''
communicate(Sock,chr(NetCmdDisconnect))
Sock.close()
def standby(Sock):
'''Suspend the data exchange (release the connection)'''
communicate(Sock,chr(NetCmdRelease))
Sock.close()
def login(Sock,ProcServ,DataServ,Infobase,Login,Password,Role):
'''Authorize on the server and attach to the Turbo9 back end'''
try:
call(Sock, 0, ProcServ+"|"+DataServ+"|"+Infobase\
+"|"+Login+"|"+Password+"|"+Role)
except:
disconnect(Sock)
raise
def proc_call(Sock,aClass,aProc,aParam=""):
'''Call a function through the built-in dispatcher.
Only functions that take a single string parameter and return a string are supported'''
return call(Sock,1,aClass+"|"+aProc+"|"+aParam)
import xmlrpclib
def xml_call(Sock,fName,Params):
'''Call a function through the application-level XML-RPC dispatcher'''
Request = xmlrpclib.dumps(Params, fName, encoding="windows-1251")
Resp = call(Sock,1,"XMLRPC.Calling|Caller|"+Request)
Res = xmlrpclib.loads(Resp)
return Res[0]
###
# Session handling
# config
Server="127.0.0.1"
Port=25700
DBServ="localhost"
ProcServ="localhost"
Conns = dict()
#id:{username,password,info,infobase,lastip,lastagent}
def T9exec(*params):
try:
Conn=reconnect(Server, Port, Conns[params[0]]["info"])
except:
info = Conns[params[0]]["info"]
Conn = connect(Server, Port, ProxyService, ProxyGUID,
info, str(params[0]),
Conns[params[0]]["lastip"], Conns[params[0]]["lastagent"])
login(Conn[0], ProcServ, DBServ, Conns[params[0]]["infobase"],
Conns[params[0]]["login"], Conns[params[0]]["password"], "")
Conns[params[0]]["info"] = Conn[1]
Res=xml_call(Conn[0], params[1], params[2:])[0]
standby(Conn[0])
return Res
def T9login(infobase, username, password, RemoteIp, UserAgent):
if isinstance(infobase, unicode):
infobase = infobase.encode('1251')
if isinstance(username, unicode):
username = username.encode('1251')
if isinstance(password, unicode):
password = password.encode('1251')
nextSID = len(Conns) + 1
while nextSID in Conns:
nextSID += 1
(Sock, info) = connect(Server, Port, ProxyService, ProxyGUID, None,
str(nextSID), RemoteIp, UserAgent)
login(Sock, ProcServ, DBServ, infobase, username, password, "")
Conns[nextSID] = {"username": username,
"password": password,
"info": info,
'lastip': RemoteIp,
'lastagent': UserAgent,
'infobase': infobase}
standby(Sock)
return nextSID
def T9drop(logid):
try:
Conn=reconnect(Server, Port, Conns[logid]["info"])
except:
return ""
if Conn is not None:
disconnect(Conn[0])
return ""
class T9disp():
def _dispatch(self,method,params):
if method[:3]=="T9.":
return T9exec(params[0],method[3:], *params[1:])
if __name__ == "__main__":
from SimpleXMLRPCServer import SimpleXMLRPCServer
from SimpleXMLRPCServer import SimpleXMLRPCRequestHandler
# Restrict to a particular path.
class RequestHandler(SimpleXMLRPCRequestHandler):
rpc_paths = ('/RPC2',)
def _dispatch(self, method, params):
if method == 'T9login':
params = list(params)
if len(params) < 4:
params.append(self.client_address[0])
if len(params) < 5:
params.append(self.headers['user-agent'])
return SimpleXMLRPCServer._dispatch(self.server, method, params)
# Create server
server = SimpleXMLRPCServer(("localhost", 8001),
requestHandler=RequestHandler,
encoding="windows-1251")
server.register_function(T9login)
server.register_function(T9exec)
server.register_function(T9drop)
server.register_introspection_functions()
T9disp1 = T9disp()
server.register_instance(T9disp1, True)
server.serve_forever()
|
TalkingCactus/Citadel-Station
|
refs/heads/master
|
tools/mapmerge/dmm2tgm.py
|
24
|
import map_helpers
import sys
import shutil
#main("../../_maps/")
def main(map_folder):
tgm = "1"
maps = map_helpers.prompt_maps(map_folder, "convert", tgm)
print("\nConverting these maps:")
for i in maps.indices:
print(str(maps.files[i])[len(map_folder):])
convert = input("\nPress Enter to convert...\n")
if convert == "abort":
print("\nAborted map convert.")
sys.exit()
else:
for i in maps.indices:
path_str = str(maps.files[i])
path_str_pretty = path_str[len(map_folder):]
error = map_helpers.merge_map(path_str, path_str, tgm)
if error > 1:
print(map_helpers.error[error])
continue
if error == 1:
print(map_helpers.error[1])
print("CONVERTED: {}".format(path_str_pretty))
print(" - ")
print("\nFinished converting.")
def string_to_num(s):
try:
return int(s)
except ValueError:
return -1
main(sys.argv[1])
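# Hedged usage note (added for illustration): the script takes the map folder as
# its only argument, mirroring the commented-out call near the top, e.g.
# python dmm2tgm.py ../../_maps/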
|
rentodev/p2pool-nzs
|
refs/heads/master
|
p2pool/bitcoin/networks/frycoin.py
|
10
|
import os
import platform
from twisted.internet import defer
from .. import data, helper
from p2pool.util import pack
P2P_PREFIX = 'fbc0b6db'.decode('hex')
P2P_PORT = 55901
ADDRESS_VERSION = 35
RPC_PORT = 55900
RPC_CHECK = defer.inlineCallbacks(lambda bitcoind: defer.returnValue(
'Frycoinaddress' in (yield bitcoind.rpc_help()) and
not (yield bitcoind.rpc_getinfo())['testnet']
))
SUBSIDY_FUNC = lambda height: 300*100000000 >> (height + 1)//16666
POW_FUNC = lambda data: pack.IntType(256).unpack(__import__('ltc_scrypt').getPoWHash(data))
BLOCK_PERIOD = 120 # s
SYMBOL = 'FRY'
CONF_FILE_FUNC = lambda: os.path.join(os.path.join(os.environ['APPDATA'], 'Frycoin') if platform.system() == 'Windows' else os.path.expanduser('~/Library/Application Support/Frycoin/') if platform.system() == 'Darwin' else os.path.expanduser('~/.Frycoin'), 'Frycoin.conf')
BLOCK_EXPLORER_URL_PREFIX = 'http://altexplorer.net/block/' #dummy
ADDRESS_EXPLORER_URL_PREFIX = 'http://altexplorer.net/address/'
TX_EXPLORER_URL_PREFIX = 'http://altexplorer.net/tx/'
SANE_TARGET_RANGE = (2**256//1000000000 - 1, 2**256//1000 - 1)
DUMB_SCRYPT_DIFF = 2**16
DUST_THRESHOLD = 0.0001e8
|
skuda/client-python
|
refs/heads/master
|
kubernetes/client/models/v1_resource_quota_spec.py
|
1
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1ResourceQuotaSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, hard=None, scopes=None):
"""
V1ResourceQuotaSpec - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'hard': 'dict(str, ResourceQuantity)',
'scopes': 'list[str]'
}
self.attribute_map = {
'hard': 'hard',
'scopes': 'scopes'
}
self._hard = hard
self._scopes = scopes
@property
def hard(self):
"""
Gets the hard of this V1ResourceQuotaSpec.
Hard is the set of desired hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
:return: The hard of this V1ResourceQuotaSpec.
:rtype: dict(str, ResourceQuantity)
"""
return self._hard
@hard.setter
def hard(self, hard):
"""
Sets the hard of this V1ResourceQuotaSpec.
Hard is the set of desired hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
:param hard: The hard of this V1ResourceQuotaSpec.
:type: dict(str, ResourceQuantity)
"""
self._hard = hard
@property
def scopes(self):
"""
Gets the scopes of this V1ResourceQuotaSpec.
A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.
:return: The scopes of this V1ResourceQuotaSpec.
:rtype: list[str]
"""
return self._scopes
@scopes.setter
def scopes(self, scopes):
"""
Sets the scopes of this V1ResourceQuotaSpec.
A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.
:param scopes: The scopes of this V1ResourceQuotaSpec.
:type: list[str]
"""
self._scopes = scopes
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
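# Hedged usage sketch (added for illustration; the quota values are examples):
# spec = V1ResourceQuotaSpec(hard={"pods": "10"}, scopes=["BestEffort"])
# spec.to_dict() # -> {"hard": {"pods": "10"}, "scopes": ["BestEffort"]}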
|
Softmotions/edx-platform
|
refs/heads/master
|
common/test/acceptance/pages/common/logout.py
|
162
|
"""
Logout Page.
"""
from bok_choy.page_object import PageObject
from . import BASE_URL
class LogoutPage(PageObject):
"""
Logout page to logout current logged in user.
"""
url = BASE_URL + "/logout"
def is_browser_on_page(self):
return self.q(css='.cta-login').present
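# Hedged usage sketch (an assumption, not part of the original page object):
# LogoutPage(browser).visit() navigates to BASE_URL + "/logout" and blocks until
# is_browser_on_page() sees the '.cta-login' call-to-action, i.e. until the
# current user has been logged out.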
|
hanlind/nova
|
refs/heads/master
|
doc/ext/nova_todo.py
|
3
|
# -*- coding: utf-8 -*-
# This is a hack of the builtin todo extension, to make the todo_list
# more user friendly.
import re
from sphinx.ext.todo import depart_todo_node
from sphinx.ext.todo import NoUri
from sphinx.ext.todo import nodes
from sphinx.ext.todo import process_todos
from sphinx.ext.todo import purge_todos
from sphinx.ext.todo import Todo
from sphinx.ext.todo import TodoList
from sphinx.ext.todo import todolist
from sphinx.ext.todo import todo_node
from sphinx.ext.todo import visit_todo_node
def _(s):
return s
def process_todo_nodes(app, doctree, fromdocname):
if not app.config['todo_include_todos']:
for node in doctree.traverse(todo_node):
node.parent.remove(node)
# Replace all todolist nodes with a list of the collected todos.
# Augment each todo with a backlink to the original location.
env = app.builder.env
if not hasattr(env, 'todo_all_todos'):
env.todo_all_todos = []
# remove the item that was added in the constructor, since I'm tired of
# reading through docutils for the proper way to construct an empty list
lists = []
for i in range(5):
lists.append(nodes.bullet_list("", nodes.Text('', '')))
lists[i].remove(lists[i][0])
lists[i]['classes'].append('todo_list')
for node in doctree.traverse(todolist):
if not app.config['todo_include_todos']:
node.replace_self([])
continue
for todo_info in env.todo_all_todos:
para = nodes.paragraph()
# Create a reference
newnode = nodes.reference('', '')
filename = env.doc2path(todo_info['docname'], base=None)
link = (_('%(filename)s, line %(line_info)d') %
{'filename': filename, 'line_info': todo_info['lineno']})
innernode = nodes.emphasis(link, link)
newnode['refdocname'] = todo_info['docname']
try:
newnode['refuri'] = app.builder.get_relative_uri(
fromdocname, todo_info['docname'])
newnode['refuri'] += '#' + todo_info['target']['refid']
except NoUri:
# ignore if no URI can be determined, e.g. for LaTeX output
pass
newnode.append(innernode)
para += newnode
para['classes'].append('todo_link')
todo_entry = todo_info['todo']
env.resolve_references(todo_entry, todo_info['docname'],
app.builder)
item = nodes.list_item('', para)
todo_entry[1]['classes'].append('details')
comment = todo_entry[1]
m = re.match(r"^P(\d)", comment.astext())
priority = 5
if m:
priority = int(m.group(1))
if priority < 0:
priority = 1
if priority > 5:
priority = 5
item['classes'].append('todo_p' + str(priority))
todo_entry['classes'].append('todo_p' + str(priority))
item.append(comment)
lists[priority - 1].insert(0, item)
node.replace_self(lists)
def setup(app):
app.add_config_value('todo_include_todos', False, False)
app.add_node(todolist)
app.add_node(todo_node,
html=(visit_todo_node, depart_todo_node),
latex=(visit_todo_node, depart_todo_node),
text=(visit_todo_node, depart_todo_node))
app.add_directive('todo', Todo)
app.add_directive('todolist', TodoList)
app.connect('doctree-read', process_todos)
app.connect('doctree-resolved', process_todo_nodes)
app.connect('env-purge-doc', purge_todos)
|
madhurrajn/samashthi
|
refs/heads/master
|
lib/django/contrib/gis/db/backends/mysql/base.py
|
444
|
from django.db.backends.mysql.base import \
DatabaseWrapper as MySQLDatabaseWrapper
from .features import DatabaseFeatures
from .introspection import MySQLIntrospection
from .operations import MySQLOperations
from .schema import MySQLGISSchemaEditor
class DatabaseWrapper(MySQLDatabaseWrapper):
SchemaEditorClass = MySQLGISSchemaEditor
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures(self)
self.ops = MySQLOperations(self)
self.introspection = MySQLIntrospection(self)
|
arenadata/ambari
|
refs/heads/branch-adh-1.6
|
ambari-server/src/main/resources/common-services/SQOOP/1.4.4.2.0/package/scripts/params.py
|
4
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ambari_commons import OSCheck
from resource_management.libraries.functions.default import default
if OSCheck.is_windows_family():
from params_windows import *
else:
from params_linux import *
|
isandlaTech/cohorte-3rdparty
|
refs/heads/master
|
sleekxmpp/src/main/python/sleekxmpp/plugins/xep_0047/stream.py
|
12
|
import socket
import threading
import logging
from sleekxmpp.stanza import Iq
from sleekxmpp.util import Queue
from sleekxmpp.exceptions import XMPPError
log = logging.getLogger(__name__)
class IBBytestream(object):
def __init__(self, xmpp, sid, block_size, jid, peer, window_size=1, use_messages=False):
self.xmpp = xmpp
self.sid = sid
self.block_size = block_size
self.window_size = window_size
self.use_messages = use_messages
if jid is None:
jid = xmpp.boundjid
self.self_jid = jid
self.peer_jid = peer
self.send_seq = -1
self.recv_seq = -1
self._send_seq_lock = threading.Lock()
self._recv_seq_lock = threading.Lock()
self.stream_started = threading.Event()
self.stream_in_closed = threading.Event()
self.stream_out_closed = threading.Event()
self.recv_queue = Queue()
self.send_window = threading.BoundedSemaphore(value=self.window_size)
self.window_ids = set()
self.window_empty = threading.Event()
self.window_empty.set()
def send(self, data):
if not self.stream_started.is_set() or \
self.stream_out_closed.is_set():
raise socket.error
data = data[0:self.block_size]
self.send_window.acquire()
with self._send_seq_lock:
self.send_seq = (self.send_seq + 1) % 65535
seq = self.send_seq
if self.use_messages:
msg = self.xmpp.Message()
msg['to'] = self.peer_jid
msg['from'] = self.self_jid
msg['id'] = self.xmpp.new_id()
msg['ibb_data']['sid'] = self.sid
msg['ibb_data']['seq'] = seq
msg['ibb_data']['data'] = data
msg.send()
self.send_window.release()
else:
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['to'] = self.peer_jid
iq['from'] = self.self_jid
iq['ibb_data']['sid'] = self.sid
iq['ibb_data']['seq'] = seq
iq['ibb_data']['data'] = data
self.window_empty.clear()
self.window_ids.add(iq['id'])
iq.send(block=False, callback=self._recv_ack)
return len(data)
def sendall(self, data):
sent_len = 0
while sent_len < len(data):
sent_len += self.send(data[sent_len:])
def _recv_ack(self, iq):
self.window_ids.remove(iq['id'])
if not self.window_ids:
self.window_empty.set()
self.send_window.release()
if iq['type'] == 'error':
self.close()
def _recv_data(self, stanza):
with self._recv_seq_lock:
new_seq = stanza['ibb_data']['seq']
if new_seq != (self.recv_seq + 1) % 65535:
self.close()
raise XMPPError('unexpected-request')
self.recv_seq = new_seq
data = stanza['ibb_data']['data']
if len(data) > self.block_size:
self.close()
raise XMPPError('not-acceptable')
self.recv_queue.put(data)
self.xmpp.event('ibb_stream_data', {'stream': self, 'data': data})
if isinstance(stanza, Iq):
stanza.reply()
stanza.send()
def recv(self, *args, **kwargs):
return self.read(block=True)
def read(self, block=True, timeout=None, **kwargs):
if not self.stream_started.is_set() or \
self.stream_in_closed.is_set():
raise socket.error
if timeout is not None:
block = True
try:
return self.recv_queue.get(block, timeout)
except:
return None
def close(self):
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['to'] = self.peer_jid
iq['from'] = self.self_jid
iq['ibb_close']['sid'] = self.sid
self.stream_out_closed.set()
iq.send(block=False,
callback=lambda x: self.stream_in_closed.set())
self.xmpp.event('ibb_stream_end', self)
def _closed(self, iq):
self.stream_in_closed.set()
self.stream_out_closed.set()
iq.reply()
iq.send()
self.xmpp.event('ibb_stream_end', self)
def makefile(self, *args, **kwargs):
return self
def connect(*args, **kwargs):
return None
def shutdown(self, *args, **kwargs):
return None
|
berkeley-stat159/project-delta
|
refs/heads/master
|
code/scripts/pca.py
|
1
|
"""
Purpose
-------
The script performs principal component analysis on the filtered data set to
determine spatial patterns that account for the greatest amount of variability
in a time series. This requires finding the singular value decomposition of the
data matrix, which also has the advantage of providing a way to simplify the
data and filter out unwanted components.
This script should output, for each run of each subject, two scree plots, a
plaintext file of explained variance ratios, and the dimension-reduced model.
"""
from __future__ import absolute_import, division, print_function
from sklearn.decomposition import PCA
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import numpy.linalg as npl
import os, sys
sys.path.append("code/utils")
from make_class import *
# Create a collection of all subject IDs and all run IDs
run_IDs = [str(i).zfill(3) for i in range(1, 4)]
subject_IDs = [str(i).zfill(3) for i in range(1, 17)]
IDs = list(zip([run_ID for _ in range(16) for run_ID in run_IDs],
[subject_ID for _ in range(3) for subject_ID in subject_IDs]))
IDs.sort()
# We perform the procedure outlined in this script for each run of each subject:
for ID in IDs:
run, subject = ID
# Define results directories to which to save the figures produced
path_result = "results/run%s/pca/sub%s/" % ID
try:
os.makedirs(path_result)
except OSError:
if not os.path.isdir(path_result):
raise
# Extract the data of interest
data = ds005(subject, run).filtered.data
# Define some useful variables for later on
volume_shape, num_volumes = data.shape[:3], data.shape[3]
# Reshape array for easy manipulation of voxels
data2d = np.reshape(data, (-1, num_volumes))
# Subtract the mean of first dimension
data_red = data2d - np.mean(data2d, axis=0)
# Compute covariance matrix
cov_matrix = data_red.T.dot(data_red)
# Find the singular value decomposition of the covariance matrix
U1, S, U2 = npl.svd(cov_matrix)
# We will now produce a graphical display of each dataset component's
# variance to determine how many components should be retained.
# For reference, a Scree plot shows the fraction of the total variance that
# is explained or represented by each principal component (the eigenvalue!)
eigenvalues = S ** 2 / np.cumsum(S)[-1]
fig = plt.figure(figsize = (8, 5))
singular_values = np.arange(num_volumes) + 1
plt.plot(singular_values, eigenvalues, "ro-", linewidth=2)
plt.title("Scree Plot")
plt.xlabel("Principal Component")
plt.ylabel("Eigenvalue")
leg = plt.legend(["Eigenvalues from SVD"], loc="best", borderpad=0.3,
shadow=False, markerscale=0.4,
prop=matplotlib.font_manager.FontProperties(size='small'))
leg.get_frame().set_alpha(0.4)
leg.draggable(state=True)
#plt.show()
plt.savefig(path_result + "scree_plot_1.png")
plt.close()
# We repeat the process a second time, the idea being that some linear
# combination of the loadings from the second dimension will have a square sum
# of 1. This particular linear combination yields the highest variance, and
# we seek to find it.
# Subtract the mean of the second dimension
data_red_x2 = data_red - np.mean(data_red, axis=1).reshape((-1, 1))
# Compute the covariance matrix
cov_matrix_red_x2 = data_red_x2.T.dot(data_red_x2)
# Find the singular value decomposition of the covariance matrix
U1, S, U2 = npl.svd(cov_matrix_red_x2)
# Again, let's produce a figure. This time, you can clearly see the variance
# explained by the first component and then the additional variance for each
# subsequent component. Projection of the observations onto this vector
# yields the highest possible variance among observations.
eigenvalues = S ** 2 / np.cumsum(S)[-1]
fig = plt.figure(figsize=(8, 5))
singular_values = np.arange(num_volumes) + 1
plt.plot(singular_values, eigenvalues, 'ro-', linewidth=2)
plt.title("Scree Plot")
plt.xlabel("Principal Component")
plt.ylabel("Eigenvalue")
leg = plt.legend(["Eigenvalues from SVD"], loc="best", borderpad=0.3,
shadow=False, markerscale=0.4,
prop=matplotlib.font_manager.FontProperties(size="small"))
leg.get_frame().set_alpha(0.4)
leg.draggable(state=True)
#plt.show()
plt.savefig(path_result + "scree_plot_2.png")
plt.close()
# Save the eigenvalues to a plaintext file
pca = PCA(n_components=5)
pca.fit(cov_matrix_red_x2)
np.savetxt(path_result + "eigenvalues.txt", pca.explained_variance_ratio_)
# Fit the original model and apply to it the dimensionality reduction
model = pca.fit_transform(cov_matrix_red_x2)
np.savetxt(path_result + "model.txt", model)
|
itsjeyd/edx-platform
|
refs/heads/master
|
lms/djangoapps/bulk_email/migrations/0004_add_email_targets.py
|
36
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course_groups', '0001_initial'),
('bulk_email', '0003_config_model_feature_flag'),
]
operations = [
migrations.CreateModel(
name='Target',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('target_type', models.CharField(max_length=64, choices=[(b'myself', b'Myself'), (b'staff', b'Staff and instructors'), (b'learners', b'All students'), (b'cohort', b'Specific cohort')])),
],
),
migrations.AlterField(
model_name='courseemail',
name='to_option',
field=models.CharField(max_length=64, choices=[(b'deprecated', b'deprecated')]),
),
migrations.CreateModel(
name='CohortTarget',
fields=[
('target_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='bulk_email.Target')),
('cohort', models.ForeignKey(to='course_groups.CourseUserGroup')),
],
bases=('bulk_email.target',),
),
migrations.AddField(
model_name='courseemail',
name='targets',
field=models.ManyToManyField(to='bulk_email.Target'),
),
]
|
introlab/rtabmap_ros
|
refs/heads/master
|
scripts/point_to_tf.py
|
1
|
#!/usr/bin/env python
import rospy
import tf
from geometry_msgs.msg import PointStamped
def callback(point):
global br
global frame_id
local_frame_id = point.header.frame_id
if not local_frame_id:
local_frame_id = frame_id
br.sendTransform(
(point.point.x, point.point.y, point.point.z),
tf.transformations.quaternion_from_euler(0,0,0),
point.header.stamp,
local_frame_id,
fixed_frame_id)
if __name__ == "__main__":
rospy.init_node("point_to_tf", anonymous=True)
frame_id = rospy.get_param('~frame_id', 'point')
fixed_frame_id = rospy.get_param('~fixed_frame_id', 'world')
br = tf.TransformBroadcaster()
rospy.Subscriber("point", PointStamped, callback, queue_size=1)
rospy.spin()
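# Hedged usage sketch (the command below is an assumption about how the node is
# launched, not part of the original script): the node republishes every
# PointStamped received on the "point" topic as a TF frame, so it is typically
# run with a topic remap, e.g.
# rosrun rtabmap_ros point_to_tf.py point:=/clicked_point _frame_id:=clicked_point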
|
swails/mdtraj
|
refs/heads/master
|
mdtraj/utils/unit/mymatrix.py
|
17
|
"""
Pure python inversion of small matrices, to avoid requiring numpy or similar in SimTK.
This is part of the OpenMM molecular simulation toolkit originating from
Simbios, the NIH National Center for Physics-Based Simulation of
Biological Structures at Stanford, funded under the NIH Roadmap for
Medical Research, grant U54 GM072970. See https://simtk.org.
Portions copyright (c) 2012 Stanford University and the Authors.
Authors: Christopher M. Bruns
Contributors: Peter Eastman
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS, CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
def eye(size):
"""
Returns identity matrix.
>>> print(eye(3))
[[1, 0, 0]
[0, 1, 0]
[0, 0, 1]]
"""
result = []
for row in range(0, size):
r = []
for col in range(0, size):
if row == col:
r.append(1)
else:
r.append(0)
result.append(r)
return MyMatrix(result)
def zeros(m, n=None):
"""
Returns matrix of zeroes
>>> print(zeros(3))
[[0, 0, 0]
[0, 0, 0]
[0, 0, 0]]
"""
if n == None:
n = m
result = []
for row in range(0, m):
r = []
for col in range(0, n):
r.append(0)
result.append(r)
return MyMatrix(result)
class MyVector(object):
"""
Parent class of MyMatrix and type of Matrix Row.
"""
def __init__(self, collection):
if isinstance(collection, MyVector):
self.data = collection.data
else:
self.data = collection
def __str__(self):
return str(self.data)
def __repr__(self):
return self.__class__.__name__ + "(" + repr(self.data) + ")"
def __getitem__(self, key):
return self.data[key]
def __contains__(self, item):
return item in self.data
def __delitem__(self, key):
del self.data[key]
def __iter__(self):
for item in self.data:
yield item
def __len__(self):
return len(self.data)
def __setitem__(self, key, value):
self.data[key] = value
def __rmul__(self, lhs):
try:
len(lhs)
# left side is not scalar, delegate mul to that class
return NotImplemented
except TypeError:
new_vec = []
for element in self:
new_vec.append(lhs * element)
return self.__class__(new_vec)
class MyMatrix(MyVector):
"""
Pure python linear algebra matrix for internal matrix inversion in UnitSystem.
>>> m = MyMatrix([[1,0,],[0,1,]])
>>> print(m)
[[1, 0]
[0, 1]]
>>> print(~m)
[[1.0, 0.0]
[0.0, 1.0]]
>>> print(eye(5))
[[1, 0, 0, 0, 0]
[0, 1, 0, 0, 0]
[0, 0, 1, 0, 0]
[0, 0, 0, 1, 0]
[0, 0, 0, 0, 1]]
>>> m = eye(5)
>>> m[1][1]
1
>>> m[1:4]
MyMatrixTranspose([[0, 0, 0],[1, 0, 0],[0, 1, 0],[0, 0, 1],[0, 0, 0]])
>>> print(m[1:4])
[[0, 0, 0]
[1, 0, 0]
[0, 1, 0]
[0, 0, 1]
[0, 0, 0]]
>>> print(m[1:4][0:2])
[[0, 1]
[0, 0]
[0, 0]]
>>> m[1:4][0:2] = [[9,8],[7,6],[5,4]]
>>> print(m)
[[1, 0, 0, 0, 0]
[9, 8, 0, 0, 0]
[7, 6, 1, 0, 0]
[5, 4, 0, 1, 0]
[0, 0, 0, 0, 1]]
"""
def numRows(self):
return len(self.data)
def numCols(self):
if len(self.data) == 0:
return 0
else:
return len(self.data[0])
def __len__(self):
return self.numRows()
def __str__(self):
result = ""
start_char = "["
for m in range(0, self.numRows()):
result += start_char
result += str(self[m])
if m < self.numRows() - 1:
result += "\n"
start_char = " "
result += "]"
return result
def __repr__(self):
return 'MyMatrix(' + MyVector.__repr__(self) + ')'
def is_square(self):
return self.numRows() == self.numCols()
def __iter__(self):
for item in self.data:
yield MyVector(item)
def __getitem__(self, m):
if isinstance(m, slice):
return MyMatrixTranspose(self.data[m])
else:
return MyVector(self.data[m])
def __setitem__(self, key, rhs):
if isinstance(key, slice):
self.data[key] = rhs
else:
assert len(rhs) == self.numCols()
self.data[key] = MyVector(rhs)
def __mul__(self, rhs):
"""
Matrix multiplication.
>>> a = MyMatrix([[1,2],[3,4]])
>>> b = MyMatrix([[5,6],[7,8]])
>>> print(a)
[[1, 2]
[3, 4]]
>>> print(b)
[[5, 6]
[7, 8]]
>>> print(a*b)
[[19, 22]
[43, 50]]
"""
m = self.numRows()
n = len(rhs[0])
r = len(rhs)
if self.numCols() != r:
raise ArithmeticError("Matrix multplication size mismatch (%d vs %d)" % (self.numCols(), r))
result = zeros(m, n)
for i in range(0, m):
for j in range(0, n):
for k in range(0, r):
result[i][j] += self[i][k]*rhs[k][j]
return result
def __add__(self, rhs):
"""
Matrix addition.
>>> print(MyMatrix([[1, 2],[3, 4]]) + MyMatrix([[5, 6],[7, 8]]))
[[6, 8]
[10, 12]]
"""
m = self.numRows()
n = self.numCols()
assert len(rhs) == m
assert len(rhs[0]) == n
result = zeros(m,n)
for i in range(0,m):
for j in range(0,n):
result[i][j] = self[i][j] + rhs[i][j]
return result
def __sub__(self, rhs):
"""
Matrix subtraction.
>>> print(MyMatrix([[1, 2],[3, 4]]) - MyMatrix([[5, 6],[7, 8]]))
[[-4, -4]
[-4, -4]]
"""
m = self.numRows()
n = self.numCols()
assert len(rhs) == m
assert len(rhs[0]) == n
result = zeros(m,n)
for i in range(0,m):
for j in range(0,n):
result[i][j] = self[i][j] - rhs[i][j]
return result
def __pos__(self):
return self
def __neg__(self):
m = self.numRows()
n = self.numCols()
result = zeros(m, n)
for i in range(0,m):
for j in range(0,n):
result[i][j] = -self[i][j]
return result
def __invert__(self):
"""
>>> m = MyMatrix([[1,1],[0,1]])
>>> print(m)
[[1, 1]
[0, 1]]
>>> print(~m)
[[1.0, -1.0]
[0.0, 1.0]]
>>> print(m*~m)
[[1.0, 0.0]
[0.0, 1.0]]
>>> print(~m*m)
[[1.0, 0.0]
[0.0, 1.0]]
>>> m = MyMatrix([[1,0,0],[0,0,1],[0,-1,0]])
>>> print(m)
[[1, 0, 0]
[0, 0, 1]
[0, -1, 0]]
>>> print(~m)
[[1.0, 0.0, 0.0]
[0.0, 0.0, -1.0]
[0.0, 1.0, 0.0]]
>>> print(m*~m)
[[1.0, 0.0, 0.0]
[0.0, 1.0, 0.0]
[0.0, 0.0, 1.0]]
>>> print(~m*m)
[[1.0, 0.0, 0.0]
[0.0, 1.0, 0.0]
[0.0, 0.0, 1.0]]
"""
assert self.is_square()
if self.numRows() == 0:
return self
elif self.numRows() == 1:
val = self[0][0]
val = 1.0/val
return MyMatrix([[val]])
elif self.numRows() == 2: # 2x2 is analytic
# http://en.wikipedia.org/wiki/Invertible_matrix#Inversion_of_2.C3.972_matrices
a = self[0][0]
b = self[0][1]
c = self[1][0]
d = self[1][1]
determinant = a*d - b*c
if determinant == 0:
raise ArithmeticError("Cannot invert 2x2 matrix with zero determinant")
else:
return 1.0/(a*d - b*c) * MyMatrix([[d, -b],[-c, a]])
else:
# Gauss Jordan elimination from numerical recipes
n = self.numRows()
m1 = self.numCols()
assert n == m1
# Copy initial matrix into result matrix
a = zeros(n, n)
for i in range (0,n):
for j in range (0,n):
a[i][j] = self[i][j]
# These arrays are used for bookkeeping on the pivoting
indxc = [0] * n
indxr = [0] * n
ipiv = [0] * n
for i in range (0,n):
big = 0.0
for j in range (0,n):
if ipiv[j] != 1:
for k in range (0,n):
if ipiv[k] == 0:
if abs(a[j][k]) >= big:
big = abs(a[j][k])
irow = j
icol = k
ipiv[icol] += 1
# We now have the pivot element, so we interchange rows...
if irow != icol:
for l in range(0,n):
temp = a[irow][l]
a[irow][l] = a[icol][l]
a[icol][l] = temp
indxr[i] = irow
indxc[i] = icol
if a[icol][icol] == 0:
raise ArithmeticError("Cannot invert singular matrix")
pivinv = 1.0/a[icol][icol]
a[icol][icol] = 1.0
for l in range(0,n):
a[icol][l] *= pivinv
for ll in range(0,n): # next we reduce the rows
if ll == icol:
continue # except the pivot one, of course
dum = a[ll][icol]
a[ll][icol] = 0.0
for l in range(0,n):
a[ll][l] -= a[icol][l]*dum
# Unscramble the permuted columns
for l in range(n-1, -1, -1):
if indxr[l] == indxc[l]:
continue
for k in range(0,n):
temp = a[k][indxr[l]]
a[k][indxr[l]] = a[k][indxc[l]]
a[k][indxc[l]] = temp
return a
def transpose(self):
return MyMatrixTranspose(self.data)
class MyMatrixTranspose(MyMatrix):
def transpose(self):
return MyMatrix(self.data)
def numRows(self):
if len(self.data) == 0:
return 0
else:
return len(self.data[0])
def numCols(self):
return len(self.data)
def __getitem__(self, key):
result = []
for row in self.data:
result.append(row[key])
if isinstance(key, slice):
return MyMatrix(result)
else:
return MyVector(result)
def __setitem__(self, key, rhs):
for n in range(0, len(self.data)):
self.data[n][key] = rhs[n]
def __str__(self):
if len(self.data) == 0:
return "[[]]"
start_char = "["
result = ""
for m in range(0, len(self.data[0])):
result += start_char
result += "["
sep_char = ""
for n in range(0, len(self.data)):
result += sep_char
result += str(self.data[n][m])
sep_char = ", "
result += "]"
if m < len(self.data[0]) - 1:
result += "\n"
start_char = " "
result += "]"
return result
def __repr__(self):
if len(self.data) == 0:
return "MyMatrixTranspose([[]])"
start_char = "["
result = 'MyMatrixTranspose('
for m in range(0, len(self.data[0])):
result += start_char
result += "["
sep_char = ""
for n in range(0, len(self.data)):
result += sep_char
result += repr(self.data[n][m])
sep_char = ", "
result += "]"
if m < len(self.data[0]) - 1:
result += ","
start_char = ""
result += '])'
return result
# run module directly for testing
if __name__=='__main__':
# Test the examples in the docstrings
import doctest, sys
doctest.testmod(sys.modules[__name__])
|
runjmc/maraschino
|
refs/heads/master
|
lib/transmissionrpc/client.py
|
10
|
# -*- coding: utf-8 -*-
# Copyright (c) 2008-2011 Erik Svensson <erik.public@gmail.com>
# Licensed under the MIT license.
import re, time
import urllib2, urlparse, base64
try:
import json
except ImportError:
import simplejson as json
from transmissionrpc.constants import DEFAULT_PORT, DEFAULT_TIMEOUT
from transmissionrpc.error import TransmissionError, HTTPHandlerError
from transmissionrpc.utils import LOGGER, get_arguments, make_rpc_name, argument_value_convert, rpc_bool
from transmissionrpc.httphandler import DefaultHTTPHandler
from transmissionrpc.torrent import Torrent
from transmissionrpc.session import Session
def debug_httperror(error):
"""
Log the Transmission RPC HTTP error.
"""
try:
data = json.loads(error.data)
except ValueError:
data = error.data
LOGGER.debug(
json.dumps(
{
'response': {
'url': error.url,
'code': error.code,
'msg': error.message,
'headers': error.headers,
'data': data,
}
},
indent=2
)
)
"""
Torrent ids
Many functions in Client takes torrent id. A torrent id can either be id or
hashString. When supplying multiple id's it is possible to use a list mixed
with both id and hashString.
Timeouts
Since most methods results in HTTP requests against Transmission, it is
possible to provide a argument called ``timeout``. Timeout is only effective
when using Python 2.6 or later and the default timeout is 30 seconds.
"""
class Client(object):
"""
Client is the class handling the Transmission JSON-RPC client protocol.
"""
def __init__(self, address='localhost', port=DEFAULT_PORT, user=None, password=None, http_handler=None, timeout=None):
if isinstance(timeout, (int, long, float)):
self._query_timeout = float(timeout)
else:
self._query_timeout = DEFAULT_TIMEOUT
urlo = urlparse.urlparse(address)
if urlo.scheme == '':
base_url = 'http://' + address + ':' + str(port)
self.url = base_url + '/transmission/rpc'
else:
if urlo.port:
self.url = urlo.scheme + '://' + urlo.hostname + ':' + str(urlo.port) + urlo.path
else:
self.url = urlo.scheme + '://' + urlo.hostname + urlo.path
LOGGER.info('Using custom URL "' + self.url + '".')
if urlo.username and urlo.password:
user = urlo.username
password = urlo.password
elif urlo.username or urlo.password:
LOGGER.warning('Either user or password missing, not using authentication.')
if http_handler is None:
self.http_handler = DefaultHTTPHandler()
else:
if hasattr(http_handler, 'set_authentication') and hasattr(http_handler, 'request'):
self.http_handler = http_handler
else:
raise ValueError('Invalid HTTP handler.')
if user and password:
self.http_handler.set_authentication(self.url, user, password)
elif user or password:
LOGGER.warning('Either user or password missing, not using authentication.')
self._sequence = 0
self.session = Session()
self.session_id = 0
self.server_version = None
self.protocol_version = None
self.get_session()
self.torrent_get_arguments = get_arguments('torrent-get'
, self.rpc_version)
def _get_timeout(self):
"""
Get current timeout for HTTP queries.
"""
return self._query_timeout
def _set_timeout(self, value):
"""
Set timeout for HTTP queries.
"""
self._query_timeout = float(value)
def _del_timeout(self):
"""
Reset the HTTP query timeout to the default.
"""
self._query_timeout = DEFAULT_TIMEOUT
timeout = property(_get_timeout, _set_timeout, _del_timeout, doc="HTTP query timeout.")
def _http_query(self, query, timeout=None):
"""
Query Transmission through HTTP.
"""
headers = {'x-transmission-session-id': str(self.session_id)}
result = {}
request_count = 0
if timeout is None:
timeout = self._query_timeout
while True:
LOGGER.debug(json.dumps({'url': self.url, 'headers': headers, 'query': query, 'timeout': timeout}, indent=2))
try:
result = self.http_handler.request(self.url, query, headers, timeout)
break
except HTTPHandlerError, error:
if error.code == 409:
LOGGER.info('Server responded with 409, trying to set session-id.')
if request_count > 1:
raise TransmissionError('Session ID negotiation failed.', error)
if 'x-transmission-session-id' in error.headers:
self.session_id = error.headers['x-transmission-session-id']
headers = {'x-transmission-session-id': str(self.session_id)}
else:
debug_httperror(error)
raise TransmissionError('Unknown conflict.', error)
else:
debug_httperror(error)
raise TransmissionError('Request failed.', error)
request_count += 1
return result
def _request(self, method, arguments=None, ids=None, require_ids=False, timeout=None):
"""
Send json-rpc request to Transmission using http POST
"""
if not isinstance(method, (str, unicode)):
raise ValueError('request takes method as string')
if arguments is None:
arguments = {}
if not isinstance(arguments, dict):
raise ValueError('request takes arguments as dict')
ids = self._format_ids(ids)
if len(ids) > 0:
arguments['ids'] = ids
elif require_ids:
raise ValueError('request require ids')
query = json.dumps({'tag': self._sequence, 'method': method
, 'arguments': arguments})
self._sequence += 1
start = time.time()
http_data = self._http_query(query, timeout)
elapsed = time.time() - start
LOGGER.info('http request took %.3f s' % (elapsed))
try:
data = json.loads(http_data)
except ValueError, error:
LOGGER.error('Error: ' + str(error))
LOGGER.error('Request: \"%s\"' % (query))
LOGGER.error('HTTP data: \"%s\"' % (http_data))
raise
LOGGER.debug(json.dumps(data, indent=2))
if 'result' in data:
if data['result'] != 'success':
raise TransmissionError('Query failed with result \"%s\".' % (data['result']))
else:
raise TransmissionError('Query failed without result.')
results = {}
if method == 'torrent-get':
for item in data['arguments']['torrents']:
results[item['id']] = Torrent(self, item)
if self.protocol_version == 2 and 'peers' not in item:
self.protocol_version = 1
elif method == 'torrent-add':
item = data['arguments']['torrent-added']
results[item['id']] = Torrent(self, item)
elif method == 'session-get':
self._update_session(data['arguments'])
elif method == 'session-stats':
# older versions of Transmission have the return data in "session-stats"
if 'session-stats' in data['arguments']:
self._update_session(data['arguments']['session-stats'])
else:
self._update_session(data['arguments'])
elif method in ('port-test', 'blocklist-update'):
results = data['arguments']
else:
return None
return results
def _format_ids(self, args):
"""
Normalize the given argument(s) into a list of valid torrent identifiers
"""
ids = []
if args is None:
pass
elif isinstance(args, (int, long)):
ids.append(args)
elif isinstance(args, (str, unicode)):
for item in re.split(u'[ ,]+', args):
if len(item) == 0:
continue
addition = None
try:
# handle index
addition = [int(item)]
except ValueError:
pass
if not addition:
# handle hashes
try:
int(item, 16)
addition = [item]
except ValueError:
pass
if not addition:
# handle index ranges i.e. 5:10
match = re.match(u'^(\d+):(\d+)$', item)
if match:
try:
idx_from = int(match.group(1))
idx_to = int(match.group(2))
addition = range(idx_from, idx_to + 1)
except ValueError:
pass
if not addition:
raise ValueError(u'Invalid torrent id, \"%s\"' % item)
ids.extend(addition)
elif isinstance(args, list):
for item in args:
ids.extend(self._format_ids(item))
else:
raise ValueError(u'Invalid torrent id')
return ids
def _update_session(self, data):
"""
Update session data.
"""
self.session.update(data)
def _update_server_version(self):
if self.server_version is None:
version_major = 1
version_minor = 30
version_changeset = 0
version_parser = re.compile('(\d).(\d+) \((\d+)\)')
if hasattr(self.session, 'version'):
match = version_parser.match(self.session.version)
if match:
version_major = int(match.group(1))
version_minor = int(match.group(2))
version_changeset = match.group(3)
self.server_version = (version_major, version_minor, version_changeset)
@property
def rpc_version(self):
"""
Get the Transmission RPC version. Tries to deduce it if the server does not report a version value.
"""
if self.protocol_version is None:
# Ugly fix for 2.20 - 2.22 reporting rpc-version 11, but having new arguments
if self.server_version and (self.server_version[0] == 2 and self.server_version[1] in [20, 21, 22]):
self.protocol_version = 12
# Ugly fix for 2.12 reporting rpc-version 10, but having new arguments
elif self.server_version and (self.server_version[0] == 2 and self.server_version[1] == 12):
self.protocol_version = 11
elif hasattr(self.session, 'rpc_version'):
self.protocol_version = self.session.rpc_version
elif hasattr(self.session, 'version'):
self.protocol_version = 3
else:
self.protocol_version = 2
return self.protocol_version
def _rpc_version_warning(self, version):
"""
Add a warning to the log if the Transmission RPC version is lower than the provided version.
"""
if self.rpc_version < version:
LOGGER.warning('Using feature not supported by server. RPC version for server %d, feature introduced in %d.' % (self.rpc_version, version))
def add(self, data, timeout=None, **kwargs):
"""
Add torrent to transfers list. Takes a base64 encoded .torrent file in data.
Additional arguments are:
===================== ===== =========== =============================================================
Argument RPC Replaced by Description
===================== ===== =========== =============================================================
``bandwidthPriority`` 8 - Priority for this transfer.
``cookies`` 13 - One or more HTTP cookie(s).
``download_dir`` 1 - The directory where the downloaded contents will be saved in.
``files_unwanted`` 1 - A list of file id's that shouldn't be downloaded.
``files_wanted`` 1 - A list of file id's that should be downloaded.
``paused`` 1 - If True, does not start the transfer when added.
``peer_limit`` 1 - Maximum number of peers allowed.
``priority_high`` 1 - A list of file id's that should have high priority.
``priority_low`` 1 - A list of file id's that should have low priority.
``priority_normal`` 1 - A list of file id's that should have normal priority.
===================== ===== =========== =============================================================
"""
args = {}
if data:
args = {'metainfo': data}
elif 'metainfo' not in kwargs and 'filename' not in kwargs:
raise ValueError('No torrent data or torrent uri.')
for key, value in kwargs.iteritems():
argument = make_rpc_name(key)
(arg, val) = argument_value_convert('torrent-add',
argument, value, self.rpc_version)
args[arg] = val
return self._request('torrent-add', args, timeout=timeout)
def add_uri(self, uri, **kwargs):
"""
Add torrent to transfers list. Takes a URI to a torrent, supporting
all URIs supported by Transmission's torrent-add 'filename'
argument. Additional arguments are:
===================== ===== =========== =============================================================
Argument RPC Replaced by Description
===================== ===== =========== =============================================================
``bandwidthPriority`` 8 - Priority for this transfer.
``cookies`` 13 - One or more HTTP cookie(s).
``download_dir`` 1 - The directory where the downloaded contents will be saved in.
``files_unwanted`` 1 - A list of file id's that shouldn't be downloaded.
``files_wanted`` 1 - A list of file id's that should be downloaded.
``paused`` 1 - If True, does not start the transfer when added.
``peer_limit`` 1 - Maximum number of peers allowed.
``priority_high`` 1 - A list of file id's that should have high priority.
``priority_low`` 1 - A list of file id's that should have low priority.
``priority_normal`` 1 - A list of file id's that should have normal priority.
===================== ===== =========== =============================================================
"""
if uri is None:
raise ValueError('add_uri requires a URI.')
# there has been some problem with T's built in torrent fetcher,
# use a python one instead
parsed_uri = urlparse.urlparse(uri)
torrent_data = None
if parsed_uri.scheme in ['file', 'ftp', 'ftps', 'http', 'https']:
torrent_file = urllib2.urlopen(uri)
torrent_data = base64.b64encode(torrent_file.read())
if torrent_data:
return self.add(torrent_data, **kwargs)
else:
return self.add(None, filename=uri, **kwargs)
def remove(self, ids, delete_data=False, timeout=None):
"""
remove torrent(s) with provided id(s). Local data is removed if
delete_data is True, otherwise not.
"""
self._rpc_version_warning(3)
self._request('torrent-remove',
{'delete-local-data':rpc_bool(delete_data)}, ids, True, timeout=timeout)
def start(self, ids, bypass_queue=False, timeout=None):
"""start torrent(s) with provided id(s)"""
method = 'torrent-start'
if bypass_queue and self.rpc_version >= 14:
method = 'torrent-start-now'
self._request(method, {}, ids, True, timeout=timeout)
def stop(self, ids, timeout=None):
"""stop torrent(s) with provided id(s)"""
self._request('torrent-stop', {}, ids, True, timeout=timeout)
def verify(self, ids, timeout=None):
"""verify torrent(s) with provided id(s)"""
self._request('torrent-verify', {}, ids, True, timeout=timeout)
def reannounce(self, ids, timeout=None):
"""Reannounce torrent(s) with provided id(s)"""
self._rpc_version_warning(5)
self._request('torrent-reannounce', {}, ids, True, timeout=timeout)
def info(self, ids=None, arguments=None, timeout=None):
"""Get detailed information for torrent(s) with provided id(s)."""
if not arguments:
arguments = self.torrent_get_arguments
return self._request('torrent-get', {'fields': arguments}, ids, timeout=timeout)
def get_files(self, ids=None, timeout=None):
"""
Get a list of files for the provided torrent id(s). If ids is empty,
information for all torrents is fetched. This function returns a dictionary
keyed by torrent id, each entry holding the information about that torrent's files.
::
{
<torrent id>: {
<file id>: {
'name': <file name>,
'size': <file size in bytes>,
'completed': <bytes completed>,
'priority': <priority ('high'|'normal'|'low')>,
'selected': <selected for download (True|False)>
}
...
}
...
}
"""
fields = ['id', 'name', 'hashString', 'files', 'priorities', 'wanted']
request_result = self._request('torrent-get', {'fields': fields}, ids, timeout=timeout)
result = {}
for tid, torrent in request_result.iteritems():
result[tid] = torrent.files()
return result
def set_files(self, items, timeout=None):
"""
Set file properties. Takes a dictionary with similar contents as the result
of `get_files`.
::
{
<torrent id>: {
<file id>: {
'priority': <priority ('high'|'normal'|'low')>,
'selected': <selected for download (True|False)>
}
...
}
...
}
"""
if not isinstance(items, dict):
raise ValueError('Invalid file description')
for tid, files in items.iteritems():
if not isinstance(files, dict):
continue
wanted = []
unwanted = []
high = []
normal = []
low = []
for fid, file_desc in files.iteritems():
if not isinstance(file_desc, dict):
continue
if 'selected' in file_desc and file_desc['selected']:
wanted.append(fid)
else:
unwanted.append(fid)
if 'priority' in file_desc:
if file_desc['priority'] == 'high':
high.append(fid)
elif file_desc['priority'] == 'normal':
normal.append(fid)
elif file_desc['priority'] == 'low':
low.append(fid)
args = {
'timeout': timeout,
'files_wanted': wanted,
'files_unwanted': unwanted,
}
if len(high) > 0:
args['priority_high'] = high
if len(normal) > 0:
args['priority_normal'] = normal
if len(low) > 0:
args['priority_low'] = low
self.change([tid], **args)
def list(self, timeout=None):
"""list all torrents"""
fields = ['id', 'hashString', 'name', 'sizeWhenDone', 'leftUntilDone'
, 'eta', 'status', 'rateUpload', 'rateDownload', 'uploadedEver'
, 'downloadedEver', 'uploadRatio']
return self._request('torrent-get', {'fields': fields}, timeout=timeout)
def change(self, ids, timeout=None, **kwargs):
"""
Change torrent parameters for the torrent(s) with the supplied id's. The
parameters are:
============================ ===== =============== =======================================================================================
Argument RPC Replaced by Description
============================ ===== =============== =======================================================================================
``bandwidthPriority`` 5 - Priority for this transfer.
``downloadLimit`` 5 - Set the speed limit for download in Kib/s.
``downloadLimited`` 5 - Enable download speed limiter.
``files_unwanted`` 1 - A list of file id's that shouldn't be downloaded.
``files_wanted`` 1 - A list of file id's that should be downloaded.
``honorsSessionLimits``      5     -               Whether the transfer honours the upload limits set in the session.
``location`` 1 - Local download location.
``peer_limit`` 1 - The peer limit for the torrents.
``priority_high`` 1 - A list of file id's that should have high priority.
``priority_low``             1     -               A list of file id's that should have low priority.
``priority_normal``          1     -               A list of file id's that should have normal priority.
``queuePosition`` 14 - Position of this transfer in its queue.
``seedIdleLimit`` 10 - Seed inactivity limit in minutes.
``seedIdleMode`` 10 - Seed inactivity mode. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.
``seedRatioLimit`` 5 - Seeding ratio.
``seedRatioMode`` 5 - Which ratio to use. 0 = Use session limit, 1 = Use transfer limit, 2 = Disable limit.
``speed_limit_down`` 1 - 5 downloadLimit Set the speed limit for download in Kib/s.
``speed_limit_down_enabled`` 1 - 5 downloadLimited Enable download speed limiter.
``speed_limit_up`` 1 - 5 uploadLimit Set the speed limit for upload in Kib/s.
``speed_limit_up_enabled`` 1 - 5 uploadLimited Enable upload speed limiter.
``trackerAdd`` 10 - Array of string with announce URLs to add.
``trackerRemove`` 10 - Array of ids of trackers to remove.
``trackerReplace`` 10 - Array of (id, url) tuples where the announce URL should be replaced.
``uploadLimit`` 5 - Set the speed limit for upload in Kib/s.
``uploadLimited`` 5 - Enable upload speed limiter.
============================ ===== =============== =======================================================================================
.. NOTE::
transmissionrpc will try to automatically fix argument errors.
"""
args = {}
for key, value in kwargs.iteritems():
argument = make_rpc_name(key)
(arg, val) = argument_value_convert('torrent-set'
, argument, value, self.rpc_version)
args[arg] = val
if len(args) > 0:
self._request('torrent-set', args, ids, True, timeout=timeout)
else:
raise ValueError("No arguments to set")
def move(self, ids, location, timeout=None):
"""Move torrent data to the new location."""
self._rpc_version_warning(6)
args = {'location': location, 'move': True}
self._request('torrent-set-location', args, ids, True, timeout=timeout)
def locate(self, ids, location, timeout=None):
"""Locate torrent data at the location."""
self._rpc_version_warning(6)
args = {'location': location, 'move': False}
self._request('torrent-set-location', args, ids, True, timeout=timeout)
def queue_top(self, ids, timeout=None):
"""Move transfer to the top of the queue."""
self._rpc_version_warning(14)
self._request('queue-move-top', ids=ids, require_ids=True, timeout=timeout)
def queue_bottom(self, ids, timeout=None):
"""Move transfer to the bottom of the queue."""
self._rpc_version_warning(14)
self._request('queue-move-bottom', ids=ids, require_ids=True, timeout=timeout)
def queue_up(self, ids, timeout=None):
"""Move transfer up in the queue."""
self._rpc_version_warning(14)
self._request('queue-move-up', ids=ids, require_ids=True, timeout=timeout)
def queue_down(self, ids, timeout=None):
"""Move transfer down in the queue."""
self._rpc_version_warning(14)
self._request('queue-move-down', ids=ids, require_ids=True, timeout=timeout)
def get_session(self, timeout=None):
"""Get session parameters"""
self._request('session-get', timeout=timeout)
self._update_server_version()
return self.session
def set_session(self, timeout=None, **kwargs):
"""
Set session parameters. The parameters are:
================================ ===== ================= ==========================================================================================================================
Argument RPC Replaced by Description
================================ ===== ================= ==========================================================================================================================
``alt_speed_down`` 5 - Alternate session download speed limit (in Kib/s).
``alt_speed_enabled`` 5 - Enables alternate global download speed limiter.
``alt_speed_time_begin`` 5 - Time when alternate speeds should be enabled. Minutes after midnight.
``alt_speed_time_day`` 5 - Enables alternate speeds scheduling these days.
``alt_speed_time_enabled`` 5 - Enables alternate speeds scheduling.
``alt_speed_time_end`` 5 - Time when alternate speeds should be disabled. Minutes after midnight.
``alt_speed_up`` 5 - Alternate session upload speed limit (in Kib/s).
``blocklist_enabled`` 5 - Enables the block list
``blocklist_url`` 11 - Location of the block list. Updated with blocklist-update.
``cache_size_mb`` 10 - The maximum size of the disk cache in MB
``dht_enabled`` 6 - Enables DHT.
``download_dir`` 1 - Set the session download directory.
``download_queue_enabled`` 14 - Enable parallel download restriction.
``download_queue_size`` 14 - Number of parallel downloads.
``encryption`` 1 - Set the session encryption mode, one of ``required``, ``preferred`` or ``tolerated``.
``idle_seeding_limit`` 10 - The default seed inactivity limit in minutes.
``idle_seeding_limit_enabled`` 10 - Enables the default seed inactivity limit
``incomplete_dir`` 7 - The path to the directory of incomplete transfer data.
``incomplete_dir_enabled`` 7 - Enables the incomplete transfer data directory. Otherwise data for incomplete transfers are stored in the download target.
``lpd_enabled`` 9 - Enables local peer discovery for public torrents.
``peer_limit`` 1 - 5 peer-limit-global Maximum number of peers
``peer_limit_global`` 5 - Maximum number of peers
``peer_limit_per_torrent`` 5 - Maximum number of peers per transfer
``peer_port`` 5 - Peer port.
``peer_port_random_on_start`` 5 - Enables randomized peer port on start of Transmission.
``pex_allowed`` 1 - 5 pex-enabled Allowing PEX in public torrents.
``pex_enabled`` 5 - Allowing PEX in public torrents.
``port`` 1 - 5 peer-port Peer port.
``port_forwarding_enabled`` 1 - Enables port forwarding.
``queue_stalled_enabled`` 14 - Enable tracking of stalled transfers.
``queue_stalled_minutes`` 14 - Number of minutes of idle that marks a transfer as stalled.
``rename_partial_files`` 8 - Appends ".part" to incomplete files
``script_torrent_done_enabled`` 9 - Whether or not to call the "done" script.
``script_torrent_done_filename`` 9 - Filename of the script to run when the transfer is done.
``seed_queue_enabled`` 14 - Enable parallel upload restriction.
``seed_queue_size`` 14 - Number of parallel uploads.
``seedRatioLimit`` 5 - Seed ratio limit. 1.0 means 1:1 download and upload ratio.
``seedRatioLimited``             5     -                 Enables seed ratio limit.
``speed_limit_down`` 1 - Download speed limit (in Kib/s).
``speed_limit_down_enabled`` 1 - Enables download speed limiting.
``speed_limit_up`` 1 - Upload speed limit (in Kib/s).
``speed_limit_up_enabled`` 1 - Enables upload speed limiting.
``start_added_torrents`` 9 - Added torrents will be started right away.
``trash_original_torrent_files`` 9 - The .torrent file of added torrents will be deleted.
``utp_enabled`` 13 - Enables Micro Transport Protocol (UTP).
================================ ===== ================= ==========================================================================================================================
.. NOTE::
transmissionrpc will try to automatically fix argument errors.
"""
args = {}
for key, value in kwargs.iteritems():
if key == 'encryption' and value not in ['required', 'preferred', 'tolerated']:
raise ValueError('Invalid encryption value')
argument = make_rpc_name(key)
(arg, val) = argument_value_convert('session-set'
, argument, value, self.rpc_version)
args[arg] = val
if len(args) > 0:
self._request('session-set', args, timeout=timeout)
def blocklist_update(self, timeout=None):
"""Update block list. Returns the size of the block list."""
self._rpc_version_warning(5)
result = self._request('blocklist-update', timeout=timeout)
if 'blocklist-size' in result:
return result['blocklist-size']
return None
def port_test(self, timeout=None):
"""
Tests to see if your incoming peer port is accessible from the
outside world.
"""
self._rpc_version_warning(5)
result = self._request('port-test', timeout=timeout)
if 'port-is-open' in result:
return result['port-is-open']
return None
def session_stats(self, timeout=None):
"""Get session statistics"""
self._request('session-stats', timeout=timeout)
return self.session
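# Illustrative usage sketch, not part of the library itself. It assumes the
# enclosing class is transmissionrpc's ``Client`` with its usual
# ``Client(address, port=...)`` constructor; the host, port, URL and torrent
# id below are made-up example values.
if __name__ == '__main__':
    client = Client('localhost', port=9091)
    client.add_uri('http://example.com/example.torrent', paused=True)
    client.change([1], uploadLimit=50, uploadLimited=True)
    for tid, files in client.get_files([1]).iteritems():
        print('torrent %s has %d files' % (tid, len(files)))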
|
crankyadmin/shadowsocks
|
refs/heads/master
|
tests/test.py
|
1016
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import signal
import select
import time
import argparse
from subprocess import Popen, PIPE
python = ['python']
default_url = 'http://localhost/'
parser = argparse.ArgumentParser(description='test Shadowsocks')
parser.add_argument('-c', '--client-conf', type=str, default=None)
parser.add_argument('-s', '--server-conf', type=str, default=None)
parser.add_argument('-a', '--client-args', type=str, default=None)
parser.add_argument('-b', '--server-args', type=str, default=None)
parser.add_argument('--with-coverage', action='store_true', default=None)
parser.add_argument('--should-fail', action='store_true', default=None)
parser.add_argument('--tcp-only', action='store_true', default=None)
parser.add_argument('--url', type=str, default=default_url)
parser.add_argument('--dns', type=str, default='8.8.8.8')
config = parser.parse_args()
if config.with_coverage:
python = ['coverage', 'run', '-p', '-a']
client_args = python + ['shadowsocks/local.py', '-v']
server_args = python + ['shadowsocks/server.py', '-v']
if config.client_conf:
client_args.extend(['-c', config.client_conf])
if config.server_conf:
server_args.extend(['-c', config.server_conf])
else:
server_args.extend(['-c', config.client_conf])
if config.client_args:
client_args.extend(config.client_args.split())
if config.server_args:
server_args.extend(config.server_args.split())
else:
server_args.extend(config.client_args.split())
if config.url == default_url:
server_args.extend(['--forbidden-ip', ''])
p1 = Popen(server_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p2 = Popen(client_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
p3 = None
p4 = None
p3_fin = False
p4_fin = False
# 1 shadowsocks started
# 2 curl started
# 3 curl finished
# 4 dig started
# 5 dig finished
stage = 1
try:
local_ready = False
server_ready = False
fdset = [p1.stdout, p2.stdout, p1.stderr, p2.stderr]
while True:
r, w, e = select.select(fdset, [], fdset)
if e:
break
for fd in r:
line = fd.readline()
if not line:
if stage == 2 and fd == p3.stdout:
stage = 3
if stage == 4 and fd == p4.stdout:
stage = 5
if bytes != str:
line = str(line, 'utf8')
sys.stderr.write(line)
if line.find('starting local') >= 0:
local_ready = True
if line.find('starting server') >= 0:
server_ready = True
if stage == 1:
time.sleep(2)
p3 = Popen(['curl', config.url, '-v', '-L',
'--socks5-hostname', '127.0.0.1:1081',
'-m', '15', '--connect-timeout', '10'],
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
if p3 is not None:
fdset.append(p3.stdout)
fdset.append(p3.stderr)
stage = 2
else:
sys.exit(1)
if stage == 3 and p3 is not None:
fdset.remove(p3.stdout)
fdset.remove(p3.stderr)
r = p3.wait()
if config.should_fail:
if r == 0:
sys.exit(1)
else:
if r != 0:
sys.exit(1)
if config.tcp_only:
break
p4 = Popen(['socksify', 'dig', '@%s' % config.dns,
'www.google.com'],
stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)
if p4 is not None:
fdset.append(p4.stdout)
fdset.append(p4.stderr)
stage = 4
else:
sys.exit(1)
if stage == 5:
r = p4.wait()
if config.should_fail:
if r == 0:
sys.exit(1)
print('test passed (expecting failure)')
else:
if r != 0:
sys.exit(1)
print('test passed')
break
finally:
for p in [p1, p2]:
try:
os.kill(p.pid, signal.SIGINT)
os.waitpid(p.pid, 0)
except OSError:
pass
|
cstipkovic/spidermonkey-research
|
refs/heads/master
|
js/src/build_OPT.OBJ/_virtualenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/__init__.py
|
1777
|
######################## BEGIN LICENSE BLOCK ########################
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
__version__ = "2.3.0"
from sys import version_info
def detect(aBuf):
if ((version_info < (3, 0) and isinstance(aBuf, unicode)) or
(version_info >= (3, 0) and not isinstance(aBuf, bytes))):
raise ValueError('Expected a bytes object, not a unicode object')
from . import universaldetector
u = universaldetector.UniversalDetector()
u.reset()
u.feed(aBuf)
u.close()
return u.result
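# Illustrative usage sketch. This file is normally imported as the ``chardet``
# package (detect() uses a package-relative import, so it is not meant to be
# run standalone); a caller would do something like the following, where the
# byte string and the reported confidence are example data:
#
#     import chardet
#     chardet.detect(b'\xc3\xa9l\xc3\xa8ve')
#     # -> a dict such as {'encoding': 'utf-8', 'confidence': 0.87}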
|
garbled1/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/panos/panos_object.py
|
42
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# limitations under the License.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_object
short_description: create/read/update/delete object in PAN-OS or Panorama
description: >
- Policy objects form the match criteria for policy rules and many other functions in PAN-OS. These may include
address objects, address groups, service objects, service groups, and tags.
author: "Bob Hagen (@rnh556)"
version_added: "2.4"
requirements:
- pan-python can be obtained from PyPi U(https://pypi.python.org/pypi/pan-python)
- pandevice can be obtained from PyPi U(https://pypi.python.org/pypi/pandevice)
notes:
- Checkmode is not supported.
- Panorama is supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device or Panorama management console being configured.
required: true
username:
description:
- Username credentials to use for authentication.
required: false
default: "admin"
password:
description:
- Password credentials to use for authentication.
required: true
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
operation:
description:
- The operation to be performed. Supported values are I(add)/I(update)/I(delete)/I(find).
required: true
addressobject:
description:
- The name of the address object.
address:
description:
- The IP address of the host or network in CIDR notation.
address_type:
description:
- The type of address object definition. Valid types are I(ip-netmask), I(ip-range) and I(fqdn).
addressgroup:
description:
- A static group of address objects or dynamic address group.
static_value:
description:
- A group of address objects to be used in an addressgroup definition.
dynamic_value:
description:
- The filter match criteria to be used in a dynamic addressgroup definition.
serviceobject:
description:
- The name of the service object.
source_port:
description:
- The source port to be used in a service object definition.
destination_port:
description:
- The destination port to be used in a service object definition.
protocol:
description:
- The IP protocol to be used in a service object definition. Valid values are I(tcp) or I(udp).
servicegroup:
description:
- A group of service objects.
services:
description:
- The group of service objects used in a servicegroup definition.
description:
description:
- The description of the object.
tag_name:
description:
- The name of an object or rule tag.
color:
description: >
- The color of the tag object. Valid values are I(red, green, blue, yellow, copper, orange, purple, gray,
light green, cyan, light gray, blue gray, lime, black, gold, and brown).
devicegroup:
description: >
- The name of the Panorama device group. The group must exist on Panorama. If device group is not defined it
is assumed that we are contacting a firewall.
required: false
default: None
'''
EXAMPLES = '''
- name: search for shared address object
panos_object:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
operation: 'find'
address: 'DevNet'
- name: create an address group in devicegroup using API key
panos_object:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
operation: 'add'
addressgroup: 'Prod_DB_Svrs'
static_value: ['prod-db1', 'prod-db2', 'prod-db3']
description: 'Production DMZ database servers'
tag_name: 'DMZ'
devicegroup: 'DMZ Firewalls'
- name: create a global service for TCP 3306
panos_object:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
operation: 'add'
serviceobject: 'mysql-3306'
destination_port: '3306'
protocol: 'tcp'
description: 'MySQL on tcp/3306'
- name: create a global tag
panos_object:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
operation: 'add'
tag_name: 'ProjectX'
color: 'yellow'
description: 'Associated with Project X'
- name: delete an address object from a devicegroup using API key
panos_object:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
operation: 'delete'
addressobject: 'Win2K test'
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
try:
import pan.xapi
from pan.xapi import PanXapiError
import pandevice
from pandevice import base
from pandevice import firewall
from pandevice import panorama
from pandevice import objects
import xmltodict
import json
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_devicegroup(device, devicegroup):
dg_list = device.refresh_devices()
for group in dg_list:
if isinstance(group, pandevice.panorama.DeviceGroup):
if group.name == devicegroup:
return group
return False
def find_object(device, dev_group, obj_name, obj_type):
# Get the firewall objects
obj_type.refreshall(device)
if isinstance(device, pandevice.firewall.Firewall):
addr = device.find(obj_name, obj_type)
return addr
elif isinstance(device, pandevice.panorama.Panorama):
addr = device.find(obj_name, obj_type)
if addr is None:
if dev_group:
device.add(dev_group)
obj_type.refreshall(dev_group)
addr = dev_group.find(obj_name, obj_type)
return addr
else:
return False
def create_object(**kwargs):
if kwargs['addressobject']:
newobject = objects.AddressObject(
name=kwargs['addressobject'],
value=kwargs['address'],
type=kwargs['address_type'],
description=kwargs['description'],
tag=kwargs['tag_name']
)
if newobject.type and newobject.value:
return newobject
else:
return False
elif kwargs['addressgroup']:
newobject = objects.AddressGroup(
name=kwargs['addressgroup'],
static_value=kwargs['static_value'],
dynamic_value=kwargs['dynamic_value'],
description=kwargs['description'],
tag=kwargs['tag_name']
)
if newobject.static_value or newobject.dynamic_value:
return newobject
else:
return False
elif kwargs['serviceobject']:
newobject = objects.ServiceObject(
name=kwargs['serviceobject'],
protocol=kwargs['protocol'],
source_port=kwargs['source_port'],
destination_port=kwargs['destination_port'],
tag=kwargs['tag_name']
)
if newobject.protocol and newobject.destination_port:
return newobject
else:
return False
elif kwargs['servicegroup']:
newobject = objects.ServiceGroup(
name=kwargs['servicegroup'],
value=kwargs['services'],
tag=kwargs['tag_name']
)
if newobject.value:
return newobject
else:
return False
elif kwargs['tag_name']:
newobject = objects.Tag(
name=kwargs['tag_name'],
color=kwargs['color'],
comments=kwargs['description']
)
if newobject.name:
return newobject
else:
return False
else:
return False
def add_object(device, dev_group, new_object):
if dev_group:
dev_group.add(new_object)
else:
device.add(new_object)
new_object.create()
return True
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(no_log=True),
username=dict(default='admin'),
api_key=dict(no_log=True),
operation=dict(required=True, choices=['add', 'update', 'delete', 'find']),
addressobject=dict(default=None),
addressgroup=dict(default=None),
serviceobject=dict(default=None),
servicegroup=dict(default=None),
address=dict(default=None),
address_type=dict(default='ip-netmask', choices=['ip-netmask', 'ip-range', 'fqdn']),
static_value=dict(type='list', default=None),
dynamic_value=dict(default=None),
protocol=dict(default=None, choices=['tcp', 'udp']),
source_port=dict(default=None),
destination_port=dict(default=None),
services=dict(type='list', default=None),
description=dict(default=None),
tag_name=dict(default=None),
color=dict(default=None, choices=['red', 'green', 'blue', 'yellow', 'copper', 'orange', 'purple',
'gray', 'light green', 'cyan', 'light gray', 'blue gray',
'lime', 'black', 'gold', 'brown']),
devicegroup=dict(default=None)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
required_one_of=[['api_key', 'password']],
mutually_exclusive=[['addressobject', 'addressgroup',
'serviceobject', 'servicegroup',
'tag_name']]
)
if not HAS_LIB:
module.fail_json(msg='Missing required libraries.')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
api_key = module.params['api_key']
operation = module.params['operation']
addressobject = module.params['addressobject']
addressgroup = module.params['addressgroup']
serviceobject = module.params['serviceobject']
servicegroup = module.params['servicegroup']
address = module.params['address']
address_type = module.params['address_type']
static_value = module.params['static_value']
dynamic_value = module.params['dynamic_value']
protocol = module.params['protocol']
source_port = module.params['source_port']
destination_port = module.params['destination_port']
services = module.params['services']
description = module.params['description']
tag_name = module.params['tag_name']
color = module.params['color']
devicegroup = module.params['devicegroup']
# Create the device with the appropriate pandevice type
device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
# If Panorama, validate the devicegroup
dev_group = None
if devicegroup and isinstance(device, panorama.Panorama):
dev_group = get_devicegroup(device, devicegroup)
if dev_group:
device.add(dev_group)
else:
module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup)
# What type of object are we talking about?
if addressobject:
obj_name = addressobject
obj_type = objects.AddressObject
elif addressgroup:
obj_name = addressgroup
obj_type = objects.AddressGroup
elif serviceobject:
obj_name = serviceobject
obj_type = objects.ServiceObject
elif servicegroup:
obj_name = servicegroup
obj_type = objects.ServiceGroup
elif tag_name:
obj_name = tag_name
obj_type = objects.Tag
else:
module.fail_json(msg='No object type defined!')
# Which operation shall we perform on the object?
if operation == "find":
# Search for the object
match = find_object(device, dev_group, obj_name, obj_type)
# If found, format and return the result
if match:
match_dict = xmltodict.parse(match.element_str())
module.exit_json(
stdout_lines=json.dumps(match_dict, indent=2),
msg='Object matched'
)
else:
module.fail_json(msg='Object \'%s\' not found. Is the name correct?' % obj_name)
elif operation == "delete":
# Search for the object
match = find_object(device, dev_group, obj_name, obj_type)
# If found, delete it
if match:
try:
match.delete()
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
module.exit_json(changed=True, msg='Object \'%s\' successfully deleted' % obj_name)
else:
module.fail_json(msg='Object \'%s\' not found. Is the name correct?' % obj_name)
elif operation == "add":
# Search for the object. Fail if found.
match = find_object(device, dev_group, obj_name, obj_type)
if match:
module.fail_json(msg='Object \'%s\' already exists. Use operation: \'update\' to change it.' % obj_name)
else:
try:
new_object = create_object(
addressobject=addressobject,
addressgroup=addressgroup,
serviceobject=serviceobject,
servicegroup=servicegroup,
address=address,
address_type=address_type,
static_value=static_value,
dynamic_value=dynamic_value,
protocol=protocol,
source_port=source_port,
destination_port=destination_port,
services=services,
description=description,
tag_name=tag_name,
color=color
)
changed = add_object(device, dev_group, new_object)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
module.exit_json(changed=changed, msg='Object \'%s\' successfully added' % obj_name)
elif operation == "update":
# Search for the object. Update if found.
match = find_object(device, dev_group, obj_name, obj_type)
if match:
try:
new_object = create_object(
addressobject=addressobject,
addressgroup=addressgroup,
serviceobject=serviceobject,
servicegroup=servicegroup,
address=address,
address_type=address_type,
static_value=static_value,
dynamic_value=dynamic_value,
protocol=protocol,
source_port=source_port,
destination_port=destination_port,
services=services,
description=description,
tag_name=tag_name,
color=color
)
changed = add_object(device, dev_group, new_object)
except PanXapiError:
exc = get_exception()
module.fail_json(msg=exc.message)
module.exit_json(changed=changed, msg='Object \'%s\' successfully updated.' % obj_name)
else:
module.fail_json(msg='Object \'%s\' does not exist. Use operation: \'add\' to add it.' % obj_name)
if __name__ == '__main__':
main()
|
crpalmer/android_kernel_samsung_mondrianwifi
|
refs/heads/cm-11.0
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
|
12527
|
# Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
|
python-dirbtuves/Misago
|
refs/heads/ubuntu-lt
|
misago/management/commands/updateranking.py
|
3
|
from django.core.management.base import BaseCommand, CommandError
from django.db.models import F
from misago.conf import settings
from misago.models import Rank, User
class Command(BaseCommand):
"""
This command is intended to run as a CRON job, fired once per day, or less often if you have many users to include in the ranking.
"""
help = 'Updates users ranking'
def handle(self, *args, **options):
# Find special ranks
special_ranks = []
for rank in Rank.objects.filter(special=1):
special_ranks.append(str(rank.pk))
# Count users that are in ranking
users_total = User.objects.exclude(rank__in=special_ranks).count()
# Update Ranking
defaulted_ranks = False
for rank in Rank.objects.filter(special=0).order_by('-order'):
if defaulted_ranks:
# Set ranks according to ranking
rank.assign_rank(users_total, special_ranks)
else:
# Set default rank first
User.objects.exclude(rank__in=special_ranks).update(rank=rank)
defaulted_ranks = True
# Inflate scores
if settings.ranking_inflation:
inflation = float(100 - settings.ranking_inflation) / 100
User.objects.all().update(acl_key=None, score=F('score') * inflation, ranking=0)
else:
User.objects.all().update(acl_key=None)
self.stdout.write('Users ranking has been updated.\n')
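# Example invocation (the interpreter and project paths are assumptions): the
# command is meant to be scheduled with cron, e.g. a daily crontab entry such as
#
#     0 4 * * * /usr/bin/python /path/to/project/manage.py updateranking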
|
probablytom/tomwallis.net
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/latin1prober.py
|
1777
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
FREQ_CAT_NUM = 4
UDF = 0 # undefined
OTH = 1 # other
ASC = 2 # ascii capital letter
ASS = 3 # ascii small letter
ACV = 4 # accent capital vowel
ACO = 5 # accent capital other
ASV = 6 # accent small vowel
ASO = 7 # accent small other
CLASS_NUM = 8 # total classes
Latin1_CharToClass = (
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
Latin1ClassModel = (
# UDF OTH ASC ASS ACV ACO ASV ASO
0, 0, 0, 0, 0, 0, 0, 0, # UDF
0, 3, 3, 3, 3, 3, 3, 3, # OTH
0, 3, 3, 3, 3, 3, 3, 3, # ASC
0, 3, 3, 3, 1, 1, 3, 3, # ASS
0, 3, 3, 3, 1, 2, 1, 2, # ACV
0, 3, 3, 3, 3, 3, 3, 3, # ACO
0, 3, 1, 3, 1, 1, 1, 3, # ASV
0, 3, 1, 3, 1, 1, 3, 3, # ASO
)
class Latin1Prober(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self.reset()
def reset(self):
self._mLastCharClass = OTH
self._mFreqCounter = [0] * FREQ_CAT_NUM
CharSetProber.reset(self)
def get_charset_name(self):
return "windows-1252"
def feed(self, aBuf):
aBuf = self.filter_with_english_letters(aBuf)
for c in aBuf:
charClass = Latin1_CharToClass[wrap_ord(c)]
freq = Latin1ClassModel[(self._mLastCharClass * CLASS_NUM)
+ charClass]
if freq == 0:
self._mState = eNotMe
break
self._mFreqCounter[freq] += 1
self._mLastCharClass = charClass
return self.get_state()
def get_confidence(self):
if self.get_state() == eNotMe:
return 0.01
total = sum(self._mFreqCounter)
if total < 0.01:
confidence = 0.0
else:
confidence = ((self._mFreqCounter[3] - self._mFreqCounter[1] * 20.0)
/ total)
if confidence < 0.0:
confidence = 0.0
# lower the confidence of latin1 so that other more accurate
# detector can take priority.
confidence = confidence * 0.73
return confidence
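# Illustrative usage sketch (assumes the chardet package is importable; the
# byte string is made-up windows-1252 sample data):
#
#     from chardet.latin1prober import Latin1Prober
#     prober = Latin1Prober()
#     prober.feed(b'caf\xe9 cr\xe8me')
#     prober.get_confidence()   # kept deliberately modest, scaled down by 0.73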
|
wso2-dev/device-cloud-appliances
|
refs/heads/master
|
IU_workshop/Step 7/python_client/ControlAndPublish/BAMPythonPublisher/thrift/transport/TTransport.py
|
105
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from cStringIO import StringIO
from struct import pack, unpack
from thrift.Thrift import TException
class TTransportException(TException):
"""Custom Transport Exception class"""
UNKNOWN = 0
NOT_OPEN = 1
ALREADY_OPEN = 2
TIMED_OUT = 3
END_OF_FILE = 4
def __init__(self, type=UNKNOWN, message=None):
TException.__init__(self, message)
self.type = type
class TTransportBase:
"""Base class for Thrift transport layer."""
def isOpen(self):
pass
def open(self):
pass
def close(self):
pass
def read(self, sz):
pass
def readAll(self, sz):
buff = ''
have = 0
while (have < sz):
chunk = self.read(sz - have)
have += len(chunk)
buff += chunk
if len(chunk) == 0:
raise EOFError()
return buff
def write(self, buf):
pass
def flush(self):
pass
# This class should be thought of as an interface.
class CReadableTransport:
"""base class for transports that are readable from C"""
# TODO(dreiss): Think about changing this interface to allow us to use
# a (Python, not c) StringIO instead, because it allows
# you to write after reading.
# NOTE: This is a classic class, so properties will NOT work
# correctly for setting.
@property
def cstringio_buf(self):
"""A cStringIO buffer that contains the current chunk we are reading."""
pass
def cstringio_refill(self, partialread, reqlen):
"""Refills cstringio_buf.
Returns the currently used buffer (which can but need not be the same as
the old cstringio_buf). partialread is what the C code has read from the
buffer, and should be inserted into the buffer before any more reads. The
return value must be a new, not borrowed reference. Something along the
lines of self._buf should be fine.
If reqlen bytes can't be read, throw EOFError.
"""
pass
class TServerTransportBase:
"""Base class for Thrift server transports."""
def listen(self):
pass
def accept(self):
pass
def close(self):
pass
class TTransportFactoryBase:
"""Base class for a Transport Factory"""
def getTransport(self, trans):
return trans
class TBufferedTransportFactory:
"""Factory transport that builds buffered transports"""
def getTransport(self, trans):
buffered = TBufferedTransport(trans)
return buffered
class TBufferedTransport(TTransportBase, CReadableTransport):
"""Class that wraps another transport and buffers its I/O.
The implementation uses a (configurable) fixed-size read buffer
but buffers all writes until a flush is performed.
"""
DEFAULT_BUFFER = 4096
def __init__(self, trans, rbuf_size=DEFAULT_BUFFER):
self.__trans = trans
self.__wbuf = StringIO()
self.__rbuf = StringIO("")
self.__rbuf_size = rbuf_size
def isOpen(self):
return self.__trans.isOpen()
def open(self):
return self.__trans.open()
def close(self):
return self.__trans.close()
def read(self, sz):
ret = self.__rbuf.read(sz)
if len(ret) != 0:
return ret
self.__rbuf = StringIO(self.__trans.read(max(sz, self.__rbuf_size)))
return self.__rbuf.read(sz)
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
out = self.__wbuf.getvalue()
# reset wbuf before write/flush to preserve state on underlying failure
self.__wbuf = StringIO()
self.__trans.write(out)
self.__trans.flush()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self.__rbuf
def cstringio_refill(self, partialread, reqlen):
retstring = partialread
if reqlen < self.__rbuf_size:
# try to make a read of as much as we can.
retstring += self.__trans.read(self.__rbuf_size)
# but make sure we do read reqlen bytes.
if len(retstring) < reqlen:
retstring += self.__trans.readAll(reqlen - len(retstring))
self.__rbuf = StringIO(retstring)
return self.__rbuf
class TMemoryBuffer(TTransportBase, CReadableTransport):
"""Wraps a cStringIO object as a TTransport.
NOTE: Unlike the C++ version of this class, you cannot write to it
then immediately read from it. If you want to read from a
TMemoryBuffer, you must pass a string to the constructor.
TODO(dreiss): Make this work like the C++ version.
"""
def __init__(self, value=None):
"""value -- a value to read from for stringio
If value is set, this will be a transport for reading,
otherwise, it is for writing"""
if value is not None:
self._buffer = StringIO(value)
else:
self._buffer = StringIO()
def isOpen(self):
return not self._buffer.closed
def open(self):
pass
def close(self):
self._buffer.close()
def read(self, sz):
return self._buffer.read(sz)
def write(self, buf):
self._buffer.write(buf)
def flush(self):
pass
def getvalue(self):
return self._buffer.getvalue()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self._buffer
def cstringio_refill(self, partialread, reqlen):
# only one shot at reading...
raise EOFError()
class TFramedTransportFactory:
"""Factory transport that builds framed transports"""
def getTransport(self, trans):
framed = TFramedTransport(trans)
return framed
class TFramedTransport(TTransportBase, CReadableTransport):
"""Class that wraps another transport and frames its I/O when writing."""
def __init__(self, trans,):
self.__trans = trans
self.__rbuf = StringIO()
self.__wbuf = StringIO()
def isOpen(self):
return self.__trans.isOpen()
def open(self):
return self.__trans.open()
def close(self):
return self.__trans.close()
def read(self, sz):
ret = self.__rbuf.read(sz)
if len(ret) != 0:
return ret
self.readFrame()
return self.__rbuf.read(sz)
def readFrame(self):
buff = self.__trans.readAll(4)
sz, = unpack('!i', buff)
self.__rbuf = StringIO(self.__trans.readAll(sz))
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
wout = self.__wbuf.getvalue()
wsz = len(wout)
# reset wbuf before write/flush to preserve state on underlying failure
self.__wbuf = StringIO()
# N.B.: Doing this string concatenation is WAY cheaper than making
# two separate calls to the underlying socket object. Socket writes in
# Python turn out to be REALLY expensive, but it seems to do a pretty
# good job of managing string buffer operations without excessive copies
buf = pack("!i", wsz) + wout
self.__trans.write(buf)
self.__trans.flush()
# Implement the CReadableTransport interface.
@property
def cstringio_buf(self):
return self.__rbuf
def cstringio_refill(self, prefix, reqlen):
# self.__rbuf will already be empty here because fastbinary doesn't
# ask for a refill until the previous buffer is empty. Therefore,
# we can start reading new frames immediately.
while len(prefix) < reqlen:
self.readFrame()
prefix += self.__rbuf.getvalue()
self.__rbuf = StringIO(prefix)
return self.__rbuf
class TFileObjectTransport(TTransportBase):
"""Wraps a file-like object to make it work as a Thrift transport."""
def __init__(self, fileobj):
self.fileobj = fileobj
def isOpen(self):
return True
def close(self):
self.fileobj.close()
def read(self, sz):
return self.fileobj.read(sz)
def write(self, buf):
self.fileobj.write(buf)
def flush(self):
self.fileobj.flush()
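# Illustrative sketch, not part of Thrift itself: using TMemoryBuffer as an
# in-memory transport. The payload string is made-up example data.
if __name__ == '__main__':
    out = TMemoryBuffer()                    # no initial value, so write mode
    out.write('hello')
    inp = TMemoryBuffer(out.getvalue())      # constructed from a string, so read mode
    print(inp.read(5))                       # prints 'hello'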
|
chrsrds/scikit-learn
|
refs/heads/master
|
sklearn/utils/optimize.py
|
1
|
"""
Our own implementation of the Newton algorithm
Unlike the scipy.optimize version, this version of the Newton conjugate
gradient solver uses only one function call to retrieve the
func value, the gradient value and a callable for the Hessian matvec
product. If the function call is very expensive (e.g. for logistic
regression with large design matrix), this approach gives very
significant speedups.
"""
# This is a modified file from scipy.optimize
# Original authors: Travis Oliphant, Eric Jones
# Modifications by Gael Varoquaux, Mathieu Blondel and Tom Dupre la Tour
# License: BSD
import numpy as np
import warnings
from scipy.optimize.linesearch import line_search_wolfe2, line_search_wolfe1
from ..exceptions import ConvergenceWarning
class _LineSearchError(RuntimeError):
pass
def _line_search_wolfe12(f, fprime, xk, pk, gfk, old_fval, old_old_fval,
**kwargs):
"""
Same as line_search_wolfe1, but falls back to line_search_wolfe2 if
no suitable step length is found there, and raises an exception if
neither line search finds a suitable step length.
Raises
------
_LineSearchError
If no suitable step size is found
"""
ret = line_search_wolfe1(f, fprime, xk, pk, gfk,
old_fval, old_old_fval,
**kwargs)
if ret[0] is None:
# line search failed: try different one.
ret = line_search_wolfe2(f, fprime, xk, pk, gfk,
old_fval, old_old_fval, **kwargs)
if ret[0] is None:
raise _LineSearchError()
return ret
def _cg(fhess_p, fgrad, maxiter, tol):
"""
Solve iteratively the linear system 'fhess_p . xsupi = - fgrad'
with a conjugate gradient descent.
Parameters
----------
fhess_p : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient
fgrad : ndarray, shape (n_features,) or (n_features + 1,)
Gradient vector
maxiter : int
Number of CG iterations.
tol : float
Stopping criterion.
Returns
-------
xsupi : ndarray, shape (n_features,) or (n_features + 1,)
Estimated solution
"""
xsupi = np.zeros(len(fgrad), dtype=fgrad.dtype)
ri = fgrad
psupi = -ri
i = 0
dri0 = np.dot(ri, ri)
while i <= maxiter:
if np.sum(np.abs(ri)) <= tol:
break
Ap = fhess_p(psupi)
# check curvature
curv = np.dot(psupi, Ap)
if 0 <= curv <= 3 * np.finfo(np.float64).eps:
break
elif curv < 0:
if i > 0:
break
else:
# fall back to steepest descent direction
xsupi += dri0 / curv * psupi
break
alphai = dri0 / curv
xsupi += alphai * psupi
ri = ri + alphai * Ap
dri1 = np.dot(ri, ri)
betai = dri1 / dri0
psupi = -ri + betai * psupi
i = i + 1
dri0 = dri1 # update np.dot(ri,ri) for next time.
return xsupi
def newton_cg(grad_hess, func, grad, x0, args=(), tol=1e-4,
maxiter=100, maxinner=200, line_search=True, warn=True):
"""
Minimization of scalar function of one or more variables using the
Newton-CG algorithm.
Parameters
----------
grad_hess : callable
Should return the gradient and a callable returning the matvec product
of the Hessian.
func : callable
Should return the value of the function.
grad : callable
Should return the gradient of the function. This is used
by the linesearch functions.
x0 : array of float
Initial guess.
args : tuple, optional
Arguments passed to func_grad_hess, func and grad.
tol : float
Stopping criterion. The iteration will stop when
``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
maxiter : int
Number of Newton iterations.
maxinner : int
Number of CG iterations.
line_search : boolean
Whether to use a line search or not.
warn : boolean
Whether to warn when the algorithm did not converge.
Returns
-------
xk : ndarray of float
Estimated minimum.
"""
x0 = np.asarray(x0).flatten()
xk = x0
k = 0
if line_search:
old_fval = func(x0, *args)
old_old_fval = None
# Outer loop: our Newton iteration
while k < maxiter:
# Compute a search direction pk by applying the CG method to
# del2 f(xk) p = - fgrad f(xk) starting from 0.
fgrad, fhess_p = grad_hess(xk, *args)
absgrad = np.abs(fgrad)
if np.max(absgrad) < tol:
break
maggrad = np.sum(absgrad)
eta = min([0.5, np.sqrt(maggrad)])
termcond = eta * maggrad
# Inner loop: solve the Newton update by conjugate gradient, to
# avoid inverting the Hessian
xsupi = _cg(fhess_p, fgrad, maxiter=maxinner, tol=termcond)
alphak = 1.0
if line_search:
try:
alphak, fc, gc, old_fval, old_old_fval, gfkp1 = \
_line_search_wolfe12(func, grad, xk, xsupi, fgrad,
old_fval, old_old_fval, args=args)
except _LineSearchError:
warnings.warn('Line Search failed')
break
xk = xk + alphak * xsupi # upcast if necessary
k += 1
if warn and k >= maxiter:
warnings.warn("newton-cg failed to converge. Increase the "
"number of iterations.", ConvergenceWarning)
return xk, k
def _check_optimize_result(solver, result, max_iter=None):
"""Check the OptimizeResult for successful convergence
Parameters
----------
solver: str
solver name. Currently only `lbfgs` is supported.
result: OptimizeResult
result of the scipy.optimize.minimize function
max_iter: {int, None}
expected maximum number of iterations
Returns
-------
n_iter: int
number of iterations
"""
# handle both scipy and scikit-learn solver names
if solver == "lbfgs":
if result.status != 0:
warnings.warn("{} failed to converge (status={}): {}. "
"Increase the number of iterations."
.format(solver, result.status, result.message),
ConvergenceWarning)
if max_iter is not None:
# In scipy <= 1.0.0, nit may exceed maxiter for lbfgs.
# See https://github.com/scipy/scipy/issues/7854
n_iter_i = min(result.nit, max_iter)
else:
n_iter_i = result.nit
else:
raise NotImplementedError
return n_iter_i
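# Illustrative sketch, not part of scikit-learn: minimizing the quadratic
# f(x) = 0.5 * x.T A x - b.T x with newton_cg. A, b and the expected minimizer
# are made-up example data; the module is normally imported as
# sklearn.utils.optimize, so this guarded block is only a usage sketch.
if __name__ == "__main__":
    A = np.array([[3.0, 1.0], [1.0, 2.0]])
    b = np.array([1.0, 1.0])
    def example_func(x):
        return 0.5 * x.dot(A).dot(x) - b.dot(x)
    def example_grad(x):
        return A.dot(x) - b
    def example_grad_hess(x):
        # gradient plus a callable returning the Hessian-vector product A.dot(p)
        return example_grad(x), lambda p: A.dot(p)
    xk, n_iter = newton_cg(example_grad_hess, example_func, example_grad,
                           np.zeros(2))
    # the minimizer solves A x = b, i.e. approximately [0.2, 0.4]
    print(xk, n_iter)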
|
Dentosal/python-sc2
|
refs/heads/master
|
setup.py
|
1
|
from setuptools import setup, find_packages
from pipenv.project import Project
from pipenv.utils import convert_deps_to_pip
pfile = Project(chdir=False).parsed_pipfile
requirements = convert_deps_to_pip(pfile['packages'], r=False)
test_requirements = convert_deps_to_pip(pfile['dev-packages'], r=False)
setup(
name = "sc2",
packages = find_packages(),
version = "0.11.1",
description = "A StarCraft II API Client for Python 3",
license="MIT",
author = "Hannes Karppila",
author_email = "hannes.karppila@gmail.com",
url = "https://github.com/Dentosal/python-sc2",
keywords = ["StarCraft", "StarCraft 2", "StarCraft II", "AI", "Bot"],
setup_requires=["pipenv"],
install_requires=requirements,
classifiers = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Topic :: Games/Entertainment",
"Topic :: Games/Entertainment :: Real Time Strategy",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
]
)
|
thaim/ansible
|
refs/heads/fix-broken-link
|
test/units/modules/network/fortios/test_fortios_switch_controller_qos_qos_policy.py
|
21
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_switch_controller_qos_qos_policy
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_switch_controller_qos_qos_policy.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_switch_controller_qos_qos_policy_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_qos_qos_policy': {
'default_cos': '3',
'name': 'default_name_4',
'queue_policy': 'test_value_5',
'trust_dot1p_map': 'test_value_6',
'trust_ip_dscp_map': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_qos_qos_policy.fortios_switch_controller_qos(input_data, fos_instance)
expected_data = {
'default-cos': '3',
'name': 'default_name_4',
'queue-policy': 'test_value_5',
'trust-dot1p-map': 'test_value_6',
'trust-ip-dscp-map': 'test_value_7'
}
set_method_mock.assert_called_with('switch-controller.qos', 'qos-policy', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_switch_controller_qos_qos_policy_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_qos_qos_policy': {
'default_cos': '3',
'name': 'default_name_4',
'queue_policy': 'test_value_5',
'trust_dot1p_map': 'test_value_6',
'trust_ip_dscp_map': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_qos_qos_policy.fortios_switch_controller_qos(input_data, fos_instance)
expected_data = {
'default-cos': '3',
'name': 'default_name_4',
'queue-policy': 'test_value_5',
'trust-dot1p-map': 'test_value_6',
'trust-ip-dscp-map': 'test_value_7'
}
set_method_mock.assert_called_with('switch-controller.qos', 'qos-policy', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_switch_controller_qos_qos_policy_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'switch_controller_qos_qos_policy': {
'default_cos': '3',
'name': 'default_name_4',
'queue_policy': 'test_value_5',
'trust_dot1p_map': 'test_value_6',
'trust_ip_dscp_map': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_qos_qos_policy.fortios_switch_controller_qos(input_data, fos_instance)
delete_method_mock.assert_called_with('switch-controller.qos', 'qos-policy', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_switch_controller_qos_qos_policy_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'switch_controller_qos_qos_policy': {
'default_cos': '3',
'name': 'default_name_4',
'queue_policy': 'test_value_5',
'trust_dot1p_map': 'test_value_6',
'trust_ip_dscp_map': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_qos_qos_policy.fortios_switch_controller_qos(input_data, fos_instance)
delete_method_mock.assert_called_with('switch-controller.qos', 'qos-policy', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_switch_controller_qos_qos_policy_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_qos_qos_policy': {
'default_cos': '3',
'name': 'default_name_4',
'queue_policy': 'test_value_5',
'trust_dot1p_map': 'test_value_6',
'trust_ip_dscp_map': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_qos_qos_policy.fortios_switch_controller_qos(input_data, fos_instance)
expected_data = {
'default-cos': '3',
'name': 'default_name_4',
'queue-policy': 'test_value_5',
'trust-dot1p-map': 'test_value_6',
'trust-ip-dscp-map': 'test_value_7'
}
set_method_mock.assert_called_with('switch-controller.qos', 'qos-policy', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_switch_controller_qos_qos_policy_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'switch_controller_qos_qos_policy': {
'random_attribute_not_valid': 'tag',
'default_cos': '3',
'name': 'default_name_4',
'queue_policy': 'test_value_5',
'trust_dot1p_map': 'test_value_6',
'trust_ip_dscp_map': 'test_value_7'
},
'vdom': 'root'}
is_error, changed, response = fortios_switch_controller_qos_qos_policy.fortios_switch_controller_qos(input_data, fos_instance)
expected_data = {
'default-cos': '3',
'name': 'default_name_4',
'queue-policy': 'test_value_5',
'trust-dot1p-map': 'test_value_6',
'trust-ip-dscp-map': 'test_value_7'
}
set_method_mock.assert_called_with('switch-controller.qos', 'qos-policy', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
novel/ecru
|
refs/heads/master
|
python/setup.py
|
1
|
from distutils.core import setup
setup(
name = 'ecru',
version = '0.1',
description = 'Helper classes for writing ecru hooks in python',
author = 'Roman Bogorodskiy',
maintainer = 'Roman Bogorodskiy',
maintainer_email = 'bogorodskiy@gmail.com',
py_modules = ["ecru"],
)
|
jm-begon/montef-events
|
refs/heads/master
|
montefevents/data.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import datetime
try:
# Python 2
from urllib2 import URLError, HTTPError
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import URLError, HTTPError
from urllib.request import urlopen
from bs4 import BeautifulSoup
import json
from abc import ABCMeta, abstractmethod
import re
try:
# Python 2
from HTMLParser import HTMLParser
except ImportError:
# Python 3
from html.parser import HTMLParser
import codecs
from .event import Seminar
from .log import get_warn_log
__BASE_URL__ = "http://www.montefiore.ulg.ac.be"
__SEMINAR_SUFFIX__ = "/seminars"
class JSONParser(object):
def _parse_date(self, date_as_text):
return datetime.strptime(date_as_text, "%B %d, %Y (%A) %H:%M")
def _remove_duplicate_eol(self, abstract):
tmp = re.sub("( *(\r\n)+ *)+", "\r\n", abstract)
return tmp.replace("\r\n", "\r\n\r\n")
def _parse_contact(self, contact):
return contact.replace("\\", "")
def _html2str(self, dictionary):
parser = HTMLParser()
return {k: parser.unescape(v) for k,v in dictionary.items()}
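# The dict passed to __call__ below is assumed to come from a seminar page's
# JSON endpoint; the expected keys ("Seminar", "Speaker", "Time", "Location",
# "Contact", "Abstract") are inferred from the lookups performed below.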
def __call__(self, jdict):
if 'Seminar' not in jdict:
get_warn_log(__name__).warn("Not a seminar: %s" % str(jdict))
jdict = self._html2str(jdict)
# Parse date
date = self._parse_date(jdict["Time"])
# Remove duplicate end of lines
abstract = self._remove_duplicate_eol(jdict["Abstract"])
# Parse contact
contact = self._parse_contact(jdict["Contact"])
return Seminar(name=jdict["Seminar"], speaker=jdict["Speaker"],
date=date, location=jdict["Location"],
contact=contact, abstract=abstract)
class DataSource(object):
__metaclass__ = ABCMeta
@abstractmethod
def get_next_seminars(self):
pass
def __iter__(self):
return iter(self.get_next_seminars())
class MontefioreGetter(DataSource):
def __init__(self, parser=JSONParser(), base_url=__BASE_URL__,
seminar_suffix=__SEMINAR_SUFFIX__, fail_fast=False):
self.base_url = base_url
self.seminar_suffix = seminar_suffix
self.parser = parser
self.fail_fast = fail_fast
self.reader = codecs.getreader("utf-8")
def _get_link_list(self, page):
soup = BeautifulSoup(page, 'html.parser')
title_div = [x for x in soup.find_all("div")
if x.has_attr("seminar-title")]
links = [x.find("a") for x in title_div]
return [x.attrs["href"] for x in links if x is not None and
x.has_attr("href")]
def _get_json(self, link):
try:
url = self.base_url+link+"?json"
response = urlopen(url)
return json.load(self.reader(response))
except Exception as e:
if self.fail_fast:
raise
get_warn_log().warn("Could not load json at {link}. "
"Got Exception {error})".format(link=link,
error=repr(e)))
return None
def _parse(self, jdict):
try:
return self.parser(jdict)
except Exception as ex:
if self.fail_fast:
raise
errcls = ex.__class__.__name__
get_warn_log().warn("Could not parse event {jdict}. "
"Got Exception {error})".format(jdict=str(jdict),
error=repr(ex)))
return None
def get_next_seminars(self):
url = self.base_url+self.seminar_suffix
# No need to close: http://stackoverflow.com/questions/1522636/should-i-call-close-after-urllib-urlopen
# No need to catch exception. In this case the program should crash
page = urlopen(url).read()
links = self._get_link_list(page)
jsons = [self._get_json(link) for link in links]
seminars = [self._parse(jdict) for jdict in jsons if jdict is not None]
return [x for x in seminars if x is not None]
|
YangSongzhou/django
|
refs/heads/master
|
django/contrib/admin/checks.py
|
186
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from itertools import chain
from django.contrib.admin.utils import (
NotRelationField, flatten, get_fields_from_path,
)
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.db import models
from django.forms.models import (
BaseModelForm, BaseModelFormSet, _get_foreign_key,
)
def check_admin_app(**kwargs):
from django.contrib.admin.sites import system_check_errors
return system_check_errors
class BaseModelAdminChecks(object):
def check(self, admin_obj, **kwargs):
errors = []
errors.extend(self._check_raw_id_fields(admin_obj))
errors.extend(self._check_fields(admin_obj))
errors.extend(self._check_fieldsets(admin_obj))
errors.extend(self._check_exclude(admin_obj))
errors.extend(self._check_form(admin_obj))
errors.extend(self._check_filter_vertical(admin_obj))
errors.extend(self._check_filter_horizontal(admin_obj))
errors.extend(self._check_radio_fields(admin_obj))
errors.extend(self._check_prepopulated_fields(admin_obj))
errors.extend(self._check_view_on_site_url(admin_obj))
errors.extend(self._check_ordering(admin_obj))
errors.extend(self._check_readonly_fields(admin_obj))
return errors
def _check_raw_id_fields(self, obj):
""" Check that `raw_id_fields` only contains field names that are listed
on the model. """
if not isinstance(obj.raw_id_fields, (list, tuple)):
return must_be('a list or tuple', option='raw_id_fields', obj=obj, id='admin.E001')
else:
return list(chain(*[
self._check_raw_id_fields_item(obj, obj.model, field_name, 'raw_id_fields[%d]' % index)
for index, field_name in enumerate(obj.raw_id_fields)
]))
def _check_raw_id_fields_item(self, obj, model, field_name, label):
""" Check an item of `raw_id_fields`, i.e. check that field named
`field_name` exists in model `model` and is a ForeignKey or a
ManyToManyField. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E002')
else:
if not isinstance(field, (models.ForeignKey, models.ManyToManyField)):
return must_be('a ForeignKey or ManyToManyField',
option=label, obj=obj, id='admin.E003')
else:
return []
def _check_fields(self, obj):
""" Check that `fields` only refer to existing fields, doesn't contain
duplicates. Check if at most one of `fields` and `fieldsets` is defined.
"""
if obj.fields is None:
return []
elif not isinstance(obj.fields, (list, tuple)):
return must_be('a list or tuple', option='fields', obj=obj, id='admin.E004')
elif obj.fieldsets:
return [
checks.Error(
"Both 'fieldsets' and 'fields' are specified.",
hint=None,
obj=obj.__class__,
id='admin.E005',
)
]
fields = flatten(obj.fields)
if len(fields) != len(set(fields)):
return [
checks.Error(
"The value of 'fields' contains duplicate field(s).",
hint=None,
obj=obj.__class__,
id='admin.E006',
)
]
return list(chain(*[
self._check_field_spec(obj, obj.model, field_name, 'fields')
for field_name in obj.fields
]))
def _check_fieldsets(self, obj):
""" Check that fieldsets is properly formatted and doesn't contain
duplicates. """
if obj.fieldsets is None:
return []
elif not isinstance(obj.fieldsets, (list, tuple)):
return must_be('a list or tuple', option='fieldsets', obj=obj, id='admin.E007')
else:
return list(chain(*[
self._check_fieldsets_item(obj, obj.model, fieldset, 'fieldsets[%d]' % index)
for index, fieldset in enumerate(obj.fieldsets)
]))
def _check_fieldsets_item(self, obj, model, fieldset, label):
""" Check an item of `fieldsets`, i.e. check that this is a pair of a
set name and a dictionary containing "fields" key. """
if not isinstance(fieldset, (list, tuple)):
return must_be('a list or tuple', option=label, obj=obj, id='admin.E008')
elif len(fieldset) != 2:
return must_be('of length 2', option=label, obj=obj, id='admin.E009')
elif not isinstance(fieldset[1], dict):
return must_be('a dictionary', option='%s[1]' % label, obj=obj, id='admin.E010')
elif 'fields' not in fieldset[1]:
return [
checks.Error(
"The value of '%s[1]' must contain the key 'fields'." % label,
hint=None,
obj=obj.__class__,
id='admin.E011',
)
]
elif not isinstance(fieldset[1]['fields'], (list, tuple)):
return must_be('a list or tuple', option="%s[1]['fields']" % label, obj=obj, id='admin.E008')
fields = flatten(fieldset[1]['fields'])
if len(fields) != len(set(fields)):
return [
checks.Error(
"There are duplicate field(s) in '%s[1]'." % label,
hint=None,
obj=obj.__class__,
id='admin.E012',
)
]
return list(chain(*[
self._check_field_spec(obj, model, fieldset_fields, '%s[1]["fields"]' % label)
for fieldset_fields in fieldset[1]['fields']
]))
def _check_field_spec(self, obj, model, fields, label):
""" `fields` should be an item of `fields` or an item of
fieldset[1]['fields'] for any `fieldset` in `fieldsets`. It should be a
field name or a tuple of field names. """
if isinstance(fields, tuple):
return list(chain(*[
self._check_field_spec_item(obj, model, field_name, "%s[%d]" % (label, index))
for index, field_name in enumerate(fields)
]))
else:
return self._check_field_spec_item(obj, model, fields, label)
def _check_field_spec_item(self, obj, model, field_name, label):
if field_name in obj.readonly_fields:
# Stuff can be put in fields that isn't actually a model field if
# it's in readonly_fields, readonly_fields will handle the
# validation of such things.
return []
else:
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
# If we can't find a field on the model that matches, it could
# be an extra field on the form.
return []
else:
if (isinstance(field, models.ManyToManyField) and
not field.remote_field.through._meta.auto_created):
return [
checks.Error(
("The value of '%s' cannot include the ManyToManyField '%s', "
"because that field manually specifies a relationship model.")
% (label, field_name),
hint=None,
obj=obj.__class__,
id='admin.E013',
)
]
else:
return []
def _check_exclude(self, obj):
""" Check that exclude is a sequence without duplicates. """
if obj.exclude is None: # default value is None
return []
elif not isinstance(obj.exclude, (list, tuple)):
return must_be('a list or tuple', option='exclude', obj=obj, id='admin.E014')
elif len(obj.exclude) > len(set(obj.exclude)):
return [
checks.Error(
"The value of 'exclude' contains duplicate field(s).",
hint=None,
obj=obj.__class__,
id='admin.E015',
)
]
else:
return []
def _check_form(self, obj):
""" Check that form subclasses BaseModelForm. """
if hasattr(obj, 'form') and not issubclass(obj.form, BaseModelForm):
return must_inherit_from(parent='BaseModelForm', option='form',
obj=obj, id='admin.E016')
else:
return []
def _check_filter_vertical(self, obj):
""" Check that filter_vertical is a sequence of field names. """
if not hasattr(obj, 'filter_vertical'):
return []
elif not isinstance(obj.filter_vertical, (list, tuple)):
return must_be('a list or tuple', option='filter_vertical', obj=obj, id='admin.E017')
else:
return list(chain(*[
self._check_filter_item(obj, obj.model, field_name, "filter_vertical[%d]" % index)
for index, field_name in enumerate(obj.filter_vertical)
]))
def _check_filter_horizontal(self, obj):
""" Check that filter_horizontal is a sequence of field names. """
if not hasattr(obj, 'filter_horizontal'):
return []
elif not isinstance(obj.filter_horizontal, (list, tuple)):
return must_be('a list or tuple', option='filter_horizontal', obj=obj, id='admin.E018')
else:
return list(chain(*[
self._check_filter_item(obj, obj.model, field_name, "filter_horizontal[%d]" % index)
for index, field_name in enumerate(obj.filter_horizontal)
]))
def _check_filter_item(self, obj, model, field_name, label):
""" Check one item of `filter_vertical` or `filter_horizontal`, i.e.
check that given field exists and is a ManyToManyField. """
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E019')
else:
if not isinstance(field, models.ManyToManyField):
return must_be('a ManyToManyField', option=label, obj=obj, id='admin.E020')
else:
return []
def _check_radio_fields(self, obj):
""" Check that `radio_fields` is a dictionary. """
if not hasattr(obj, 'radio_fields'):
return []
elif not isinstance(obj.radio_fields, dict):
return must_be('a dictionary', option='radio_fields', obj=obj, id='admin.E021')
else:
return list(chain(*[
self._check_radio_fields_key(obj, obj.model, field_name, 'radio_fields') +
self._check_radio_fields_value(obj, val, 'radio_fields["%s"]' % field_name)
for field_name, val in obj.radio_fields.items()
]))
def _check_radio_fields_key(self, obj, model, field_name, label):
""" Check that a key of `radio_fields` dictionary is name of existing
field and that the field is a ForeignKey or has `choices` defined. """
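# A typical (hypothetical) declaration that this check accepts:
#   radio_fields = {"group": admin.VERTICAL}   # where "group" is a ForeignKey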
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E022')
else:
if not (isinstance(field, models.ForeignKey) or field.choices):
return [
checks.Error(
"The value of '%s' refers to '%s', which is not an "
"instance of ForeignKey, and does not have a 'choices' definition." % (
label, field_name
),
hint=None,
obj=obj.__class__,
id='admin.E023',
)
]
else:
return []
def _check_radio_fields_value(self, obj, val, label):
""" Check type of a value of `radio_fields` dictionary. """
from django.contrib.admin.options import HORIZONTAL, VERTICAL
if val not in (HORIZONTAL, VERTICAL):
return [
checks.Error(
"The value of '%s' must be either admin.HORIZONTAL or admin.VERTICAL." % label,
hint=None,
obj=obj.__class__,
id='admin.E024',
)
]
else:
return []
def _check_view_on_site_url(self, obj):
if hasattr(obj, 'view_on_site'):
if not callable(obj.view_on_site) and not isinstance(obj.view_on_site, bool):
return [
checks.Error(
"The value of 'view_on_site' must be a callable or a boolean value.",
hint=None,
obj=obj.__class__,
id='admin.E025',
)
]
else:
return []
else:
return []
def _check_prepopulated_fields(self, obj):
""" Check that `prepopulated_fields` is a dictionary containing allowed
field types. """
if not hasattr(obj, 'prepopulated_fields'):
return []
elif not isinstance(obj.prepopulated_fields, dict):
return must_be('a dictionary', option='prepopulated_fields', obj=obj, id='admin.E026')
else:
return list(chain(*[
self._check_prepopulated_fields_key(obj, obj.model, field_name, 'prepopulated_fields') +
self._check_prepopulated_fields_value(obj, obj.model, val, 'prepopulated_fields["%s"]' % field_name)
for field_name, val in obj.prepopulated_fields.items()
]))
def _check_prepopulated_fields_key(self, obj, model, field_name, label):
""" Check a key of `prepopulated_fields` dictionary, i.e. check that it
is a name of existing field and the field is one of the allowed types.
"""
forbidden_field_types = (
models.DateTimeField,
models.ForeignKey,
models.ManyToManyField
)
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E027')
else:
if isinstance(field, forbidden_field_types):
return [
checks.Error(
"The value of '%s' refers to '%s', which must not be a DateTimeField, "
"ForeignKey or ManyToManyField." % (
label, field_name
),
hint=None,
obj=obj.__class__,
id='admin.E028',
)
]
else:
return []
def _check_prepopulated_fields_value(self, obj, model, val, label):
""" Check a value of `prepopulated_fields` dictionary, i.e. it's an
iterable of existing fields. """
if not isinstance(val, (list, tuple)):
return must_be('a list or tuple', option=label, obj=obj, id='admin.E029')
else:
return list(chain(*[
self._check_prepopulated_fields_value_item(obj, model, subfield_name, "%s[%r]" % (label, index))
for index, subfield_name in enumerate(val)
]))
def _check_prepopulated_fields_value_item(self, obj, model, field_name, label):
""" For `prepopulated_fields` equal to {"slug": ("title",)},
`field_name` is "title". """
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E030')
else:
return []
def _check_ordering(self, obj):
""" Check that ordering refers to existing fields or is random. """
# ordering = None
if obj.ordering is None: # The default value is None
return []
elif not isinstance(obj.ordering, (list, tuple)):
return must_be('a list or tuple', option='ordering', obj=obj, id='admin.E031')
else:
return list(chain(*[
self._check_ordering_item(obj, obj.model, field_name, 'ordering[%d]' % index)
for index, field_name in enumerate(obj.ordering)
]))
def _check_ordering_item(self, obj, model, field_name, label):
""" Check that `ordering` refers to existing fields. """
if field_name == '?' and len(obj.ordering) != 1:
return [
checks.Error(
("The value of 'ordering' has the random ordering marker '?', "
"but contains other fields as well."),
hint='Either remove the "?", or remove the other fields.',
obj=obj.__class__,
id='admin.E032',
)
]
elif field_name == '?':
return []
elif '__' in field_name:
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
return []
else:
if field_name.startswith('-'):
field_name = field_name[1:]
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E033')
else:
return []
def _check_readonly_fields(self, obj):
""" Check that readonly_fields refers to proper attribute or field. """
if obj.readonly_fields == ():
return []
elif not isinstance(obj.readonly_fields, (list, tuple)):
return must_be('a list or tuple', option='readonly_fields', obj=obj, id='admin.E034')
else:
return list(chain(*[
self._check_readonly_fields_item(obj, obj.model, field_name, "readonly_fields[%d]" % index)
for index, field_name in enumerate(obj.readonly_fields)
]))
def _check_readonly_fields_item(self, obj, model, field_name, label):
if callable(field_name):
return []
elif hasattr(obj, field_name):
return []
elif hasattr(model, field_name):
return []
else:
try:
model._meta.get_field(field_name)
except FieldDoesNotExist:
return [
checks.Error(
"The value of '%s' is not a callable, an attribute of '%s', or an attribute of '%s.%s'." % (
label, obj.__class__.__name__, model._meta.app_label, model._meta.object_name
),
hint=None,
obj=obj.__class__,
id='admin.E035',
)
]
else:
return []
class ModelAdminChecks(BaseModelAdminChecks):
def check(self, admin_obj, **kwargs):
errors = super(ModelAdminChecks, self).check(admin_obj)
errors.extend(self._check_save_as(admin_obj))
errors.extend(self._check_save_on_top(admin_obj))
errors.extend(self._check_inlines(admin_obj))
errors.extend(self._check_list_display(admin_obj))
errors.extend(self._check_list_display_links(admin_obj))
errors.extend(self._check_list_filter(admin_obj))
errors.extend(self._check_list_select_related(admin_obj))
errors.extend(self._check_list_per_page(admin_obj))
errors.extend(self._check_list_max_show_all(admin_obj))
errors.extend(self._check_list_editable(admin_obj))
errors.extend(self._check_search_fields(admin_obj))
errors.extend(self._check_date_hierarchy(admin_obj))
return errors
def _check_save_as(self, obj):
""" Check save_as is a boolean. """
if not isinstance(obj.save_as, bool):
return must_be('a boolean', option='save_as',
obj=obj, id='admin.E101')
else:
return []
def _check_save_on_top(self, obj):
""" Check save_on_top is a boolean. """
if not isinstance(obj.save_on_top, bool):
return must_be('a boolean', option='save_on_top',
obj=obj, id='admin.E102')
else:
return []
def _check_inlines(self, obj):
""" Check all inline model admin classes. """
if not isinstance(obj.inlines, (list, tuple)):
return must_be('a list or tuple', option='inlines', obj=obj, id='admin.E103')
else:
return list(chain(*[
self._check_inlines_item(obj, obj.model, item, "inlines[%d]" % index)
for index, item in enumerate(obj.inlines)
]))
def _check_inlines_item(self, obj, model, inline, label):
""" Check one inline model admin. """
inline_label = '.'.join([inline.__module__, inline.__name__])
from django.contrib.admin.options import BaseModelAdmin
if not issubclass(inline, BaseModelAdmin):
return [
checks.Error(
"'%s' must inherit from 'BaseModelAdmin'." % inline_label,
hint=None,
obj=obj.__class__,
id='admin.E104',
)
]
elif not inline.model:
return [
checks.Error(
"'%s' must have a 'model' attribute." % inline_label,
hint=None,
obj=obj.__class__,
id='admin.E105',
)
]
elif not issubclass(inline.model, models.Model):
return must_be('a Model', option='%s.model' % inline_label,
obj=obj, id='admin.E106')
else:
return inline(model, obj.admin_site).check()
def _check_list_display(self, obj):
""" Check that list_display only contains fields or usable attributes.
"""
if not isinstance(obj.list_display, (list, tuple)):
return must_be('a list or tuple', option='list_display', obj=obj, id='admin.E107')
else:
return list(chain(*[
self._check_list_display_item(obj, obj.model, item, "list_display[%d]" % index)
for index, item in enumerate(obj.list_display)
]))
def _check_list_display_item(self, obj, model, item, label):
if callable(item):
return []
elif hasattr(obj, item):
return []
elif hasattr(model, item):
# getattr(model, item) could be an X_RelatedObjectsDescriptor
try:
field = model._meta.get_field(item)
except FieldDoesNotExist:
try:
field = getattr(model, item)
except AttributeError:
field = None
if field is None:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not a "
"callable, an attribute of '%s', or an attribute or method on '%s.%s'." % (
label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name
),
hint=None,
obj=obj.__class__,
id='admin.E108',
)
]
elif isinstance(field, models.ManyToManyField):
return [
checks.Error(
"The value of '%s' must not be a ManyToManyField." % label,
hint=None,
obj=obj.__class__,
id='admin.E109',
)
]
else:
return []
else:
try:
model._meta.get_field(item)
except FieldDoesNotExist:
return [
# This is a deliberate repeat of E108; there's more than one path
# required to test this condition.
checks.Error(
"The value of '%s' refers to '%s', which is not a callable, "
"an attribute of '%s', or an attribute or method on '%s.%s'." % (
label, item, obj.__class__.__name__, model._meta.app_label, model._meta.object_name
),
hint=None,
obj=obj.__class__,
id='admin.E108',
)
]
else:
return []
def _check_list_display_links(self, obj):
""" Check that list_display_links is a unique subset of list_display.
"""
if obj.list_display_links is None:
return []
elif not isinstance(obj.list_display_links, (list, tuple)):
return must_be('a list, a tuple, or None', option='list_display_links', obj=obj, id='admin.E110')
else:
return list(chain(*[
self._check_list_display_links_item(obj, field_name, "list_display_links[%d]" % index)
for index, field_name in enumerate(obj.list_display_links)
]))
def _check_list_display_links_item(self, obj, field_name, label):
if field_name not in obj.list_display:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not defined in 'list_display'." % (
label, field_name
),
hint=None,
obj=obj.__class__,
id='admin.E111',
)
]
else:
return []
def _check_list_filter(self, obj):
if not isinstance(obj.list_filter, (list, tuple)):
return must_be('a list or tuple', option='list_filter', obj=obj, id='admin.E112')
else:
return list(chain(*[
self._check_list_filter_item(obj, obj.model, item, "list_filter[%d]" % index)
for index, item in enumerate(obj.list_filter)
]))
def _check_list_filter_item(self, obj, model, item, label):
"""
Check one item of `list_filter`, i.e. check if it is one of three options:
1. 'field' -- a basic field filter, possibly w/ relationships (e.g.
'field__rel')
2. ('field', SomeFieldListFilter) - a field-based list filter class
3. SomeListFilter - a non-field list filter class
"""
from django.contrib.admin import ListFilter, FieldListFilter
if callable(item) and not isinstance(item, models.Field):
# If item is option 3, it should be a ListFilter...
if not issubclass(item, ListFilter):
return must_inherit_from(parent='ListFilter', option=label,
obj=obj, id='admin.E113')
# ... but not a FieldListFilter.
elif issubclass(item, FieldListFilter):
return [
checks.Error(
"The value of '%s' must not inherit from 'FieldListFilter'." % label,
hint=None,
obj=obj.__class__,
id='admin.E114',
)
]
else:
return []
elif isinstance(item, (tuple, list)):
# item is option #2
field, list_filter_class = item
if not issubclass(list_filter_class, FieldListFilter):
return must_inherit_from(parent='FieldListFilter', option='%s[1]' % label,
obj=obj, id='admin.E115')
else:
return []
else:
# item is option #1
field = item
# Validate the field string
try:
get_fields_from_path(model, field)
except (NotRelationField, FieldDoesNotExist):
return [
checks.Error(
"The value of '%s' refers to '%s', which does not refer to a Field." % (label, field),
hint=None,
obj=obj.__class__,
id='admin.E116',
)
]
else:
return []
def _check_list_select_related(self, obj):
""" Check that list_select_related is a boolean, a list or a tuple. """
if not isinstance(obj.list_select_related, (bool, list, tuple)):
return must_be('a boolean, tuple or list', option='list_select_related',
obj=obj, id='admin.E117')
else:
return []
def _check_list_per_page(self, obj):
""" Check that list_per_page is an integer. """
if not isinstance(obj.list_per_page, int):
return must_be('an integer', option='list_per_page', obj=obj, id='admin.E118')
else:
return []
def _check_list_max_show_all(self, obj):
""" Check that list_max_show_all is an integer. """
if not isinstance(obj.list_max_show_all, int):
return must_be('an integer', option='list_max_show_all', obj=obj, id='admin.E119')
else:
return []
def _check_list_editable(self, obj):
""" Check that list_editable is a sequence of editable fields from
list_display without the first element. """
if not isinstance(obj.list_editable, (list, tuple)):
return must_be('a list or tuple', option='list_editable', obj=obj, id='admin.E120')
else:
return list(chain(*[
self._check_list_editable_item(obj, obj.model, item, "list_editable[%d]" % index)
for index, item in enumerate(obj.list_editable)
]))
def _check_list_editable_item(self, obj, model, field_name, label):
try:
field = model._meta.get_field(field_name)
except FieldDoesNotExist:
return refer_to_missing_field(field=field_name, option=label,
model=model, obj=obj, id='admin.E121')
else:
if field_name not in obj.list_display:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not "
"contained in 'list_display'." % (label, field_name),
hint=None,
obj=obj.__class__,
id='admin.E122',
)
]
elif obj.list_display_links and field_name in obj.list_display_links:
return [
checks.Error(
"The value of '%s' cannot be in both 'list_editable' and 'list_display_links'." % field_name,
hint=None,
obj=obj.__class__,
id='admin.E123',
)
]
# Check that list_display_links is set, and that the first values of list_editable and list_display are
# not the same. See ticket #22792 for the use case relating to this.
elif (obj.list_display[0] in obj.list_editable and obj.list_display[0] != obj.list_editable[0] and
obj.list_display_links is not None):
return [
checks.Error(
"The value of '%s' refers to the first field in 'list_display' ('%s'), "
"which cannot be used unless 'list_display_links' is set." % (
label, obj.list_display[0]
),
hint=None,
obj=obj.__class__,
id='admin.E124',
)
]
elif not field.editable:
return [
checks.Error(
"The value of '%s' refers to '%s', which is not editable through the admin." % (
label, field_name
),
hint=None,
obj=obj.__class__,
id='admin.E125',
)
]
else:
return []
def _check_search_fields(self, obj):
""" Check search_fields is a sequence. """
if not isinstance(obj.search_fields, (list, tuple)):
return must_be('a list or tuple', option='search_fields', obj=obj, id='admin.E126')
else:
return []
def _check_date_hierarchy(self, obj):
""" Check that date_hierarchy refers to DateField or DateTimeField. """
if obj.date_hierarchy is None:
return []
else:
try:
field = obj.model._meta.get_field(obj.date_hierarchy)
except FieldDoesNotExist:
return refer_to_missing_field(option='date_hierarchy',
field=obj.date_hierarchy,
model=obj.model, obj=obj, id='admin.E127')
else:
if not isinstance(field, (models.DateField, models.DateTimeField)):
return must_be('a DateField or DateTimeField', option='date_hierarchy',
obj=obj, id='admin.E128')
else:
return []
class InlineModelAdminChecks(BaseModelAdminChecks):
def check(self, inline_obj, **kwargs):
errors = super(InlineModelAdminChecks, self).check(inline_obj)
parent_model = inline_obj.parent_model
errors.extend(self._check_relation(inline_obj, parent_model))
errors.extend(self._check_exclude_of_parent_model(inline_obj, parent_model))
errors.extend(self._check_extra(inline_obj))
errors.extend(self._check_max_num(inline_obj))
errors.extend(self._check_min_num(inline_obj))
errors.extend(self._check_formset(inline_obj))
return errors
def _check_exclude_of_parent_model(self, obj, parent_model):
# Do not perform more specific checks if the base checks result in an
# error.
errors = super(InlineModelAdminChecks, self)._check_exclude(obj)
if errors:
return []
# Skip if `fk_name` is invalid.
if self._check_relation(obj, parent_model):
return []
if obj.exclude is None:
return []
fk = _get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name)
if fk.name in obj.exclude:
return [
checks.Error(
"Cannot exclude the field '%s', because it is the foreign key "
"to the parent model '%s.%s'." % (
fk.name, parent_model._meta.app_label, parent_model._meta.object_name
),
hint=None,
obj=obj.__class__,
id='admin.E201',
)
]
else:
return []
def _check_relation(self, obj, parent_model):
try:
_get_foreign_key(parent_model, obj.model, fk_name=obj.fk_name)
except ValueError as e:
return [checks.Error(e.args[0], hint=None, obj=obj.__class__, id='admin.E202')]
else:
return []
def _check_extra(self, obj):
""" Check that extra is an integer. """
if not isinstance(obj.extra, int):
return must_be('an integer', option='extra', obj=obj, id='admin.E203')
else:
return []
def _check_max_num(self, obj):
""" Check that max_num is an integer. """
if obj.max_num is None:
return []
elif not isinstance(obj.max_num, int):
return must_be('an integer', option='max_num', obj=obj, id='admin.E204')
else:
return []
def _check_min_num(self, obj):
""" Check that min_num is an integer. """
if obj.min_num is None:
return []
elif not isinstance(obj.min_num, int):
return must_be('an integer', option='min_num', obj=obj, id='admin.E205')
else:
return []
def _check_formset(self, obj):
""" Check formset is a subclass of BaseModelFormSet. """
if not issubclass(obj.formset, BaseModelFormSet):
return must_inherit_from(parent='BaseModelFormSet', option='formset',
obj=obj, id='admin.E206')
else:
return []
def must_be(type, option, obj, id):
return [
checks.Error(
"The value of '%s' must be %s." % (option, type),
hint=None,
obj=obj.__class__,
id=id,
),
]
def must_inherit_from(parent, option, obj, id):
return [
checks.Error(
"The value of '%s' must inherit from '%s'." % (option, parent),
hint=None,
obj=obj.__class__,
id=id,
),
]
def refer_to_missing_field(field, option, model, obj, id):
return [
checks.Error(
"The value of '%s' refers to '%s', which is not an attribute of '%s.%s'." % (
option, field, model._meta.app_label, model._meta.object_name
),
hint=None,
obj=obj.__class__,
id=id,
),
]
|
alrusdi/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/django/conf/urls/i18n.py
|
383
|
from django.conf.urls.defaults import *
urlpatterns = patterns('',
(r'^setlang/$', 'django.views.i18n.set_language'),
)
|
boudewijnrempt/HyvesDesktop
|
refs/heads/master
|
3rdparty/socorro/scripts/startMonitor.py
|
1
|
#! /usr/bin/env python
import sys
import logging
import logging.handlers
try:
import config.monitorconfig as config
except ImportError:
import monitorconfig as config
import socorro.monitor.monitor as monitor
import socorro.lib.ConfigurationManager as configurationManager
try:
configurationContext = configurationManager.newConfiguration(configurationModule=config, applicationName="Socorro Monitor 2.0")
except configurationManager.NotAnOptionError, x:
print >>sys.stderr, x
print >>sys.stderr, "for usage, try --help"
sys.exit()
logger = logging.getLogger("monitor")
logger.setLevel(logging.DEBUG)
stderrLog = logging.StreamHandler()
stderrLog.setLevel(configurationContext.stderrErrorLoggingLevel)
stderrLogFormatter = logging.Formatter(configurationContext.stderrLineFormatString)
stderrLog.setFormatter(stderrLogFormatter)
logger.addHandler(stderrLog)
rotatingFileLog = logging.handlers.RotatingFileHandler(configurationContext.logFilePathname, "a", configurationContext.logFileMaximumSize, configurationContext.logFileMaximumBackupHistory)
rotatingFileLog.setLevel(configurationContext.logFileErrorLoggingLevel)
rotatingFileLogFormatter = logging.Formatter(configurationContext.logFileLineFormatString)
rotatingFileLog.setFormatter(rotatingFileLogFormatter)
logger.addHandler(rotatingFileLog)
logger.info("current configuration\n%s", str(configurationContext))
try:
while True:
m = monitor.Monitor(configurationContext)
m.start()
finally:
logger.info("done.")
|
willprice/weboob
|
refs/heads/master
|
modules/bred/test.py
|
7
|
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
class BredTest(BackendTest):
MODULE = 'bred'
def test_bred(self):
l = list(self.backend.iter_accounts())
a = l[0]
list(self.backend.iter_history(a))
list(self.backend.iter_coming(a))
|
tectronics/madcow
|
refs/heads/master
|
madcow/protocol/cli.py
|
8
|
# Copyright (C) 2007, 2008 Christopher Jones
#
# This file is part of Madcow.
#
# Madcow is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Madcow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Madcow. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import re
from madcow.util.shell import Shell
from madcow import Madcow
from madcow.util import Request
from madcow.util.text import *
COLOR_SCHEME = 'ansi'
class ConsoleProtocol(Madcow):
_new_nick = re.compile(r'^\s*nick\s+(\S+)\s*$', re.I)
_prompt = '\x1b[1;31m>>>\x1b[0m '
_clear = u'\x1b[H\x1b[J'
_cli_usage = [u'quit - quit madcow',
u'history - show history',
u'nick <nick> - change your nick',
u'clear - clear screen']
def __init__(self, base):
super(ConsoleProtocol, self).__init__(base)
self.user_nick = os.environ.get('USER', 'UNKNOWN')
self.shell = Shell(polls=[self.check_response_queue])
self.usage_lines += self._cli_usage
def run(self):
self.output(u"type 'help' for a list of commands")
while self.running:
self.check_response_queue()
try:
input = self.shell.readline(self._prompt)
except IOError:
# this happens when you get EINTR from SIGHUP handling
continue
input = decode(input, sys.stdin.encoding)
if input.lower() == u'quit':
break
if input.lower() == u'history':
print u'history: %s' % repr(self.shell.history)
continue
if input.lower() == u'clear':
sys.stdout.write(self._clear)
continue
if len(input) > 0:
req = Request(message=input)
req.nick = self.user_nick
req.channel = u'cli'
req.private = True
req.addressed = True
self.check_addressing(req)
if req.message.startswith(u'^'):
req.colorize = True
req.message = req.message[1:]
try:
self.user_nick = self._new_nick.search(req.message).group(1)
self.output(u'nick changed to: %s' % self.user_nick, req)
continue
except:
pass
self.process_message(req)
def protocol_output(self, message, req=None):
if req is not None and req.colorize is True:
message = self.colorlib.rainbow(message)
print encode(message, sys.stdout.encoding)
class ProtocolHandler(ConsoleProtocol):
allow_detach = False
|
jerli/sympy
|
refs/heads/master
|
sympy/simplify/trigsimp.py
|
3
|
from __future__ import print_function, division
from collections import defaultdict
from sympy.core.cache import cacheit
from sympy.core import (sympify, Basic, S, Expr, expand_mul, factor_terms,
Mul, Dummy, igcd, FunctionClass, Add, symbols, Wild, expand)
from sympy.core.compatibility import reduce, iterable
from sympy.core.numbers import I, Integer
from sympy.core.function import count_ops, _mexpand
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.functions.elementary.hyperbolic import HyperbolicFunction
from sympy.functions import sin, cos, exp, cosh, tanh, sinh, tan, cot, coth
from sympy.strategies.core import identity
from sympy.strategies.tree import greedy
from sympy.polys.polyerrors import PolificationFailed
from sympy.polys.polytools import groebner
from sympy.polys.domains import ZZ
from sympy.polys import Poly, factor, cancel, parallel_poly_from_expr
from sympy.utilities.misc import debug
def trigsimp_groebner(expr, hints=[], quick=False, order="grlex",
polynomial=False):
"""
Simplify trigonometric expressions using a groebner basis algorithm.
This routine takes a fraction involving trigonometric or hyperbolic
expressions, and tries to simplify it. The primary metric is the
total degree. Some attempts are made to choose the simplest possible
expression of the minimal degree, but this is non-rigorous, and also
very slow (see the ``quick=True`` option).
If ``polynomial`` is set to True, instead of simplifying numerator and
denominator together, this function just brings numerator and denominator
into a canonical form. This is much faster, but has potentially worse
results. However, if the input is a polynomial, then the result is
guaranteed to be an equivalent polynomial of minimal degree.
The most important option is hints. Its entries can be any of the
following:
- a natural number
- a function
- an iterable of the form (func, var1, var2, ...)
- anything else, interpreted as a generator
A number is used to indicate that the search space should be increased.
A function is used to indicate that said function is likely to occur in a
simplified expression.
An iterable is used to indicate that func(var1 + var2 + ...) is likely to
occur in a simplified expression.
An additional generator also indicates that it is likely to occur.
(See examples below).
This routine carries out various computationally intensive algorithms.
The option ``quick=True`` can be used to suppress one particularly slow
step (at the expense of potentially more complicated results, but never at
the expense of increased total degree).
Examples
========
>>> from sympy.abc import x, y
>>> from sympy import sin, tan, cos, sinh, cosh, tanh
>>> from sympy.simplify.trigsimp import trigsimp_groebner
Suppose you want to simplify ``sin(x)*cos(x)``. Naively, nothing happens:
>>> ex = sin(x)*cos(x)
>>> trigsimp_groebner(ex)
sin(x)*cos(x)
This is because ``trigsimp_groebner`` only looks for a simplification
involving just ``sin(x)`` and ``cos(x)``. You can tell it to also try
``2*x`` by passing ``hints=[2]``:
>>> trigsimp_groebner(ex, hints=[2])
sin(2*x)/2
>>> trigsimp_groebner(sin(x)**2 - cos(x)**2, hints=[2])
-cos(2*x)
Increasing the search space this way can quickly become expensive. A much
faster way is to give a specific expression that is likely to occur:
>>> trigsimp_groebner(ex, hints=[sin(2*x)])
sin(2*x)/2
Hyperbolic expressions are similarly supported:
>>> trigsimp_groebner(sinh(2*x)/sinh(x))
2*cosh(x)
Note how no hints had to be passed, since the expression already involved
``2*x``.
The tangent function is also supported. You can either pass ``tan`` in the
hints, to indicate that tan should be tried whenever cosine or sine are,
or you can pass a specific generator:
>>> trigsimp_groebner(sin(x)/cos(x), hints=[tan])
tan(x)
>>> trigsimp_groebner(sinh(x)/cosh(x), hints=[tanh(x)])
tanh(x)
Finally, you can use the iterable form to suggest that angle sum formulae
should be tried:
>>> ex = (tan(x) + tan(y))/(1 - tan(x)*tan(y))
>>> trigsimp_groebner(ex, hints=[(tan, x, y)])
tan(x + y)
"""
# TODO
# - preprocess by replacing everything by funcs we can handle
# - optionally use cot instead of tan
# - more intelligent hinting.
# For example, if the ideal is small, and we have sin(x), sin(y),
# add sin(x + y) automatically... ?
# - algebraic numbers ...
# - expressions of lowest degree are not distinguished properly
# e.g. 1 - sin(x)**2
# - we could try to order the generators intelligently, so as to influence
# which monomials appear in the quotient basis
# THEORY
# ------
# Ratsimpmodprime above can be used to "simplify" a rational function
# modulo a prime ideal. "Simplify" mainly means finding an equivalent
# expression of lower total degree.
#
# We intend to use this to simplify trigonometric functions. To do that,
# we need to decide (a) which ring to use, and (b) modulo which ideal to
# simplify. In practice, (a) means settling on a list of "generators"
# a, b, c, ..., such that the fraction we want to simplify is a rational
# function in a, b, c, ..., with coefficients in ZZ (integers).
# (b) means that we have to decide what relations to impose on the
# generators. There are two practical problems:
# (1) The ideal has to be *prime* (a technical term).
# (2) The relations have to be polynomials in the generators.
#
# We typically have two kinds of generators:
# - trigonometric expressions, like sin(x), cos(5*x), etc
# - "everything else", like gamma(x), pi, etc.
#
# Since this function is trigsimp, we will concentrate on what to do with
# trigonometric expressions. We can also simplify hyperbolic expressions,
# but the extensions should be clear.
#
# One crucial point is that all *other* generators really should behave
# like indeterminates. In particular if (say) "I" is one of them, then
# in fact I**2 + 1 = 0 and we may and will compute non-sensical
# expressions. However, we can work with a dummy and add the relation
# I**2 + 1 = 0 to our ideal, then substitute back in the end.
#
# Now regarding trigonometric generators. We split them into groups,
# according to the argument of the trigonometric functions. We want to
# organise this in such a way that most trigonometric identities apply in
# the same group. For example, given sin(x), cos(2*x) and cos(y), we would
# group as [sin(x), cos(2*x)] and [cos(y)].
#
# Our prime ideal will be built in three steps:
# (1) For each group, compute a "geometrically prime" ideal of relations.
# Geometrically prime means that it generates a prime ideal in
# CC[gens], not just ZZ[gens].
# (2) Take the union of all the generators of the ideals for all groups.
# By the geometric primality condition, this is still prime.
# (3) Add further inter-group relations which preserve primality.
#
# Step (1) works as follows. We will isolate common factors in the
# argument, so that all our generators are of the form sin(n*x), cos(n*x)
# or tan(n*x), with n an integer. Suppose first there are no tan terms.
# The ideal [sin(x)**2 + cos(x)**2 - 1] is geometrically prime, since
# X**2 + Y**2 - 1 is irreducible over CC.
# Now, if we have a generator sin(n*x), then we can, using trig identities,
# express sin(n*x) as a polynomial in sin(x) and cos(x). We can add this
# relation to the ideal, preserving geometric primality, since the quotient
# ring is unchanged.
# Thus we have treated all sin and cos terms.
# For tan(n*x), we add a relation tan(n*x)*cos(n*x) - sin(n*x) = 0.
# (This requires of course that we already have relations for cos(n*x) and
# sin(n*x).) It is not obvious, but it seems that this preserves geometric
# primality.
# XXX A real proof would be nice. HELP!
# Sketch that <S**2 + C**2 - 1, C*T - S> is a prime ideal of
# CC[S, C, T]:
# - it suffices to show that the projective closure in CP**3 is
# irreducible
# - using the half-angle substitutions, we can express sin(x), tan(x),
# cos(x) as rational functions in tan(x/2)
# - from this, we get a rational map from CP**1 to our curve
# - this is a morphism, hence the curve is prime
#
# Step (2) is trivial.
#
# Step (3) works by adding selected relations of the form
# sin(x + y) - sin(x)*cos(y) - sin(y)*cos(x), etc. Geometric primality is
# preserved by the same argument as before.
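# As a small illustration of steps (1)-(3): for generators sin(x) and cos(2*x)
# the group for x contributes sin(x)**2 + cos(x)**2 - 1 together with
# cos(2*x) - (2*cos(x)**2 - 1), since cos(2*x) expands to a polynomial in
# sin(x) and cos(x).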
def parse_hints(hints):
"""Split hints into (n, funcs, iterables, gens)."""
n = 1
funcs, iterables, gens = [], [], []
for e in hints:
if isinstance(e, (int, Integer)):
n = e
elif isinstance(e, FunctionClass):
funcs.append(e)
elif iterable(e):
iterables.append((e[0], e[1:]))
# XXX sin(x+2y)?
# Note: we go through polys so e.g.
# sin(-x) -> -sin(x) -> sin(x)
gens.extend(parallel_poly_from_expr(
[e[0](x) for x in e[1:]] + [e[0](Add(*e[1:]))])[1].gens)
else:
gens.append(e)
return n, funcs, iterables, gens
def build_ideal(x, terms):
"""
Build generators for our ideal. Terms is an iterable with elements of
the form (fn, coeff), indicating that we have a generator fn(coeff*x).
If any of the terms is trigonometric, sin(x) and cos(x) are guaranteed
to appear in terms. Similarly for hyperbolic functions. For tan(n*x),
sin(n*x) and cos(n*x) are guaranteed.
"""
gens = []
I = []
y = Dummy('y')
for fn, coeff in terms:
for c, s, t, rel in (
[cos, sin, tan, cos(x)**2 + sin(x)**2 - 1],
[cosh, sinh, tanh, cosh(x)**2 - sinh(x)**2 - 1]):
if coeff == 1 and fn in [c, s]:
I.append(rel)
elif fn == t:
I.append(t(coeff*x)*c(coeff*x) - s(coeff*x))
elif fn in [c, s]:
cn = fn(coeff*y).expand(trig=True).subs(y, x)
I.append(fn(coeff*x) - cn)
return list(set(I))
def analyse_gens(gens, hints):
"""
Analyse the generators ``gens``, using the hints ``hints``.
The meaning of ``hints`` is described in the main docstring.
Return a new list of generators, and also the ideal we should
work with.
"""
# First parse the hints
n, funcs, iterables, extragens = parse_hints(hints)
debug('n=%s' % n, 'funcs:', funcs, 'iterables:',
iterables, 'extragens:', extragens)
# We just add the extragens to gens and analyse them as before
gens = list(gens)
gens.extend(extragens)
# remove duplicates
funcs = list(set(funcs))
iterables = list(set(iterables))
gens = list(set(gens))
# all the functions we can do anything with
allfuncs = set([sin, cos, tan, sinh, cosh, tanh])
# sin(3*x) -> ((3, x), sin)
trigterms = [(g.args[0].as_coeff_mul(), g.func) for g in gens
if g.func in allfuncs]
# Our list of new generators - start with anything that we cannot
# work with (i.e. is not a trigonometric term)
freegens = [g for g in gens if g.func not in allfuncs]
newgens = []
trigdict = {}
for (coeff, var), fn in trigterms:
trigdict.setdefault(var, []).append((coeff, fn))
res = [] # the ideal
for key, val in trigdict.items():
# We have now assembled a dictionary. Its keys are common
# arguments in trigonometric expressions, and values are lists of
# pairs (fn, coeff). x0, (fn, coeff) in trigdict means that we
# need to deal with fn(coeff*x0). We take the rational gcd of the
# coeffs, call it ``gcd``. We then use x = x0/gcd as "base symbol",
# all other arguments are integral multiples thereof.
# We will build an ideal which works with sin(x), cos(x).
# If hint tan is provided, also work with tan(x). Moreover, if
# n > 1, also work with sin(k*x) for k <= n, and similarly for cos
# (and tan if the hint is provided). Finally, any generators which
# the ideal does not work with but we need to accommodate (either
# because it was in expr or because it was provided as a hint)
# we also build into the ideal.
# This selection process is expressed in the list ``terms``.
# build_ideal then generates the actual relations in our ideal,
# from this list.
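# e.g. for val = [(3, sin), (6, cos)] with common argument x0: gcd is 3, the
# base symbol becomes x = 3*x0 and terms starts out as [(sin, 1), (cos, 2)].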
fns = [x[1] for x in val]
val = [x[0] for x in val]
gcd = reduce(igcd, val)
terms = [(fn, v/gcd) for (fn, v) in zip(fns, val)]
fs = set(funcs + fns)
for c, s, t in ([cos, sin, tan], [cosh, sinh, tanh]):
if any(x in fs for x in (c, s, t)):
fs.add(c)
fs.add(s)
for fn in fs:
for k in range(1, n + 1):
terms.append((fn, k))
extra = []
for fn, v in terms:
if fn == tan:
extra.append((sin, v))
extra.append((cos, v))
if fn in [sin, cos] and tan in fs:
extra.append((tan, v))
if fn == tanh:
extra.append((sinh, v))
extra.append((cosh, v))
if fn in [sinh, cosh] and tanh in fs:
extra.append((tanh, v))
terms.extend(extra)
x = gcd*Mul(*key)
r = build_ideal(x, terms)
res.extend(r)
newgens.extend(set(fn(v*x) for fn, v in terms))
# Add generators for compound expressions from iterables
for fn, args in iterables:
if fn == tan:
# Tan expressions are recovered from sin and cos.
iterables.extend([(sin, args), (cos, args)])
elif fn == tanh:
# Tanh expressions are recovered from sinh and cosh.
iterables.extend([(sinh, args), (cosh, args)])
else:
dummys = symbols('d:%i' % len(args), cls=Dummy)
expr = fn(Add(*dummys)).expand(trig=True).subs(list(zip(dummys, args)))
res.append(fn(Add(*args)) - expr)
if myI in gens:
res.append(myI**2 + 1)
freegens.remove(myI)
newgens.append(myI)
return res, freegens, newgens
myI = Dummy('I')
expr = expr.subs(S.ImaginaryUnit, myI)
subs = [(myI, S.ImaginaryUnit)]
num, denom = cancel(expr).as_numer_denom()
try:
(pnum, pdenom), opt = parallel_poly_from_expr([num, denom])
except PolificationFailed:
return expr
debug('initial gens:', opt.gens)
ideal, freegens, gens = analyse_gens(opt.gens, hints)
debug('ideal:', ideal)
debug('new gens:', gens, " -- len", len(gens))
debug('free gens:', freegens, " -- len", len(freegens))
# NOTE we force the domain to be ZZ to stop polys from injecting generators
# (which is usually a sign of a bug in the way we build the ideal)
if not gens:
return expr
G = groebner(ideal, order=order, gens=gens, domain=ZZ)
debug('groebner basis:', list(G), " -- len", len(G))
# If our fraction is a polynomial in the free generators, simplify all
# coefficients separately:
from sympy.simplify.ratsimp import ratsimpmodprime
if freegens and pdenom.has_only_gens(*set(gens).intersection(pdenom.gens)):
num = Poly(num, gens=gens+freegens).eject(*gens)
res = []
for monom, coeff in num.terms():
ourgens = set(parallel_poly_from_expr([coeff, denom])[1].gens)
# We compute the transitive closure of all generators that can
# be reached from our generators through relations in the ideal.
changed = True
while changed:
changed = False
for p in ideal:
p = Poly(p)
if not ourgens.issuperset(p.gens) and \
not p.has_only_gens(*set(p.gens).difference(ourgens)):
changed = True
ourgens.update(p.exclude().gens)
# NOTE preserve order!
realgens = [x for x in gens if x in ourgens]
# The generators of the ideal have now been (implicitly) split
# into two groups: those involving ourgens and those that don't.
# Since we took the transitive closure above, these two groups
# live in subrings generated by a *disjoint* set of variables.
# Any sensible groebner basis algorithm will preserve this disjoint
# structure (i.e. the elements of the groebner basis can be split
# similarly), and the two subsets of the groebner basis then
# form groebner bases by themselves. (For the smaller generating
# sets, of course.)
ourG = [g.as_expr() for g in G.polys if
g.has_only_gens(*ourgens.intersection(g.gens))]
res.append(Mul(*[a**b for a, b in zip(freegens, monom)]) * \
ratsimpmodprime(coeff/denom, ourG, order=order,
gens=realgens, quick=quick, domain=ZZ,
polynomial=polynomial).subs(subs))
return Add(*res)
# NOTE The following is simpler and has less assumptions on the
# groebner basis algorithm. If the above turns out to be broken,
# use this.
return Add(*[Mul(*[a**b for a, b in zip(freegens, monom)]) * \
ratsimpmodprime(coeff/denom, list(G), order=order,
gens=gens, quick=quick, domain=ZZ)
for monom, coeff in num.terms()])
else:
return ratsimpmodprime(
expr, list(G), order=order, gens=freegens+gens,
quick=quick, domain=ZZ, polynomial=polynomial).subs(subs)
_trigs = (TrigonometricFunction, HyperbolicFunction)
def trigsimp(expr, **opts):
"""
reduces expression by using known trig identities
Notes
=====
method:
- Determine the method to use. Valid choices are 'matching' (default),
'groebner', 'combined', and 'fu'. If 'matching', simplify the
expression recursively by targeting common patterns. If 'groebner', apply
an experimental groebner basis algorithm. In this case further options
are forwarded to ``trigsimp_groebner``, please refer to its docstring.
If 'combined', first run the groebner basis algorithm with small
default parameters, then run the 'matching' algorithm. 'fu' runs the
collection of trigonometric transformations described by Fu, et al.
(see the `fu` docstring).
Examples
========
>>> from sympy import trigsimp, sin, cos, log
>>> from sympy.abc import x, y
>>> e = 2*sin(x)**2 + 2*cos(x)**2
>>> trigsimp(e)
2
Simplification occurs wherever trigonometric functions are located.
>>> trigsimp(log(e))
log(2)
Using `method="groebner"` (or `"combined"`) might lead to greater
simplification.
    The old trigsimp routine can be accessed with method='old'.
>>> from sympy import coth, tanh
>>> t = 3*tanh(x)**7 - 2/coth(x)**7
>>> trigsimp(t, method='old') == t
True
>>> trigsimp(t)
tanh(x)**7
"""
from sympy.simplify.fu import fu
expr = sympify(expr)
try:
return expr._eval_trigsimp(**opts)
except AttributeError:
pass
old = opts.pop('old', False)
if not old:
opts.pop('deep', None)
recursive = opts.pop('recursive', None)
method = opts.pop('method', 'matching')
else:
method = 'old'
def groebnersimp(ex, **opts):
def traverse(e):
if e.is_Atom:
return e
args = [traverse(x) for x in e.args]
if e.is_Function or e.is_Pow:
args = [trigsimp_groebner(x, **opts) for x in args]
return e.func(*args)
new = traverse(ex)
if not isinstance(new, Expr):
return new
return trigsimp_groebner(new, **opts)
trigsimpfunc = {
'fu': (lambda x: fu(x, **opts)),
'matching': (lambda x: futrig(x)),
'groebner': (lambda x: groebnersimp(x, **opts)),
'combined': (lambda x: futrig(groebnersimp(x,
polynomial=True, hints=[2, tan]))),
'old': lambda x: trigsimp_old(x, **opts),
}[method]
return trigsimpfunc(expr)
def exptrigsimp(expr, simplify=True):
"""
Simplifies exponential / trigonometric / hyperbolic functions.
When ``simplify`` is True (default) the expression obtained after the
    simplification step will then be passed through simplify to
precondition it so the final transformations will be applied.
Examples
========
>>> from sympy import exptrigsimp, exp, cosh, sinh
>>> from sympy.abc import z
>>> exptrigsimp(exp(z) + exp(-z))
2*cosh(z)
>>> exptrigsimp(cosh(z) - sinh(z))
exp(-z)
"""
from sympy.simplify.fu import hyper_as_trig, TR2i
from sympy.simplify.simplify import bottom_up
def exp_trig(e):
# select the better of e, and e rewritten in terms of exp or trig
# functions
choices = [e]
if e.has(*_trigs):
choices.append(e.rewrite(exp))
choices.append(e.rewrite(cos))
return min(*choices, key=count_ops)
newexpr = bottom_up(expr, exp_trig)
if simplify:
newexpr = newexpr.simplify()
# conversion from exp to hyperbolic
ex = newexpr.atoms(exp, S.Exp1)
ex = [ei for ei in ex if 1/ei not in ex]
## sinh and cosh
for ei in ex:
e2 = ei**-2
if e2 in ex:
            a = e2.args[0]/2 if e2 is not S.Exp1 else S.Half
newexpr = newexpr.subs((e2 + 1)*ei, 2*cosh(a))
newexpr = newexpr.subs((e2 - 1)*ei, 2*sinh(a))
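            # e.g. (illustrative) with ei = exp(-z) and e2 = exp(2*z), these two
            # substitutions rewrite (exp(2*z) + 1)*exp(-z) as 2*cosh(z) and
            # (exp(2*z) - 1)*exp(-z) as 2*sinh(z)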
## exp ratios to tan and tanh
for ei in ex:
n, d = ei - 1, ei + 1
et = n/d
etinv = d/n # not 1/et or else recursion errors arise
a = ei.args[0] if ei.func is exp else S.One
if a.is_Mul or a is S.ImaginaryUnit:
c = a.as_coefficient(I)
if c:
t = S.ImaginaryUnit*tan(c/2)
newexpr = newexpr.subs(etinv, 1/t)
newexpr = newexpr.subs(et, t)
continue
t = tanh(a/2)
newexpr = newexpr.subs(etinv, 1/t)
newexpr = newexpr.subs(et, t)
# sin/cos and sinh/cosh ratios to tan and tanh, respectively
if newexpr.has(HyperbolicFunction):
e, f = hyper_as_trig(newexpr)
newexpr = f(TR2i(e))
if newexpr.has(TrigonometricFunction):
newexpr = TR2i(newexpr)
# can we ever generate an I where there was none previously?
if not (newexpr.has(I) and not expr.has(I)):
expr = newexpr
return expr
#-------------------- the old trigsimp routines ---------------------
def trigsimp_old(expr, **opts):
"""
reduces expression by using known trig identities
Notes
=====
deep:
- Apply trigsimp inside all objects with arguments
recursive:
- Use common subexpression elimination (cse()) and apply
trigsimp recursively (this is quite expensive if the
expression is large)
method:
- Determine the method to use. Valid choices are 'matching' (default),
'groebner', 'combined', 'fu' and 'futrig'. If 'matching', simplify the
expression recursively by pattern matching. If 'groebner', apply an
experimental groebner basis algorithm. In this case further options
are forwarded to ``trigsimp_groebner``, please refer to its docstring.
If 'combined', first run the groebner basis algorithm with small
default parameters, then run the 'matching' algorithm. 'fu' runs the
collection of trigonometric transformations described by Fu, et al.
(see the `fu` docstring) while `futrig` runs a subset of Fu-transforms
that mimic the behavior of `trigsimp`.
compare:
- show input and output from `trigsimp` and `futrig` when different,
but returns the `trigsimp` value.
Examples
========
>>> from sympy import trigsimp, sin, cos, log, cosh, sinh, tan, cot
>>> from sympy.abc import x, y
>>> e = 2*sin(x)**2 + 2*cos(x)**2
>>> trigsimp(e, old=True)
2
>>> trigsimp(log(e), old=True)
log(2*sin(x)**2 + 2*cos(x)**2)
>>> trigsimp(log(e), deep=True, old=True)
log(2)
Using `method="groebner"` (or `"combined"`) can sometimes lead to a lot
more simplification:
>>> e = (-sin(x) + 1)/cos(x) + cos(x)/(-sin(x) + 1)
>>> trigsimp(e, old=True)
(-sin(x) + 1)/cos(x) + cos(x)/(-sin(x) + 1)
>>> trigsimp(e, method="groebner", old=True)
2/cos(x)
>>> trigsimp(1/cot(x)**2, compare=True, old=True)
futrig: tan(x)**2
cot(x)**(-2)
"""
old = expr
first = opts.pop('first', True)
if first:
if not expr.has(*_trigs):
return expr
trigsyms = set().union(*[t.free_symbols for t in expr.atoms(*_trigs)])
if len(trigsyms) > 1:
d = separatevars(expr)
if d.is_Mul:
d = separatevars(d, dict=True) or d
if isinstance(d, dict):
expr = 1
for k, v in d.items():
# remove hollow factoring
was = v
v = expand_mul(v)
opts['first'] = False
vnew = trigsimp(v, **opts)
if vnew == v:
vnew = was
expr *= vnew
old = expr
else:
if d.is_Add:
for s in trigsyms:
r, e = expr.as_independent(s)
if r:
opts['first'] = False
expr = r + trigsimp(e, **opts)
if not expr.is_Add:
break
old = expr
recursive = opts.pop('recursive', False)
deep = opts.pop('deep', False)
method = opts.pop('method', 'matching')
def groebnersimp(ex, deep, **opts):
def traverse(e):
if e.is_Atom:
return e
args = [traverse(x) for x in e.args]
if e.is_Function or e.is_Pow:
args = [trigsimp_groebner(x, **opts) for x in args]
return e.func(*args)
if deep:
ex = traverse(ex)
return trigsimp_groebner(ex, **opts)
trigsimpfunc = {
'matching': (lambda x, d: _trigsimp(x, d)),
'groebner': (lambda x, d: groebnersimp(x, d, **opts)),
'combined': (lambda x, d: _trigsimp(groebnersimp(x,
d, polynomial=True, hints=[2, tan]),
d))
}[method]
if recursive:
w, g = cse(expr)
g = trigsimpfunc(g[0], deep)
for sub in reversed(w):
g = g.subs(sub[0], sub[1])
g = trigsimpfunc(g, deep)
result = g
else:
result = trigsimpfunc(expr, deep)
if opts.get('compare', False):
f = futrig(old)
if f != result:
print('\tfutrig:', f)
return result
def _dotrig(a, b):
"""Helper to tell whether ``a`` and ``b`` have the same sorts
of symbols in them -- no need to test hyperbolic patterns against
expressions that have no hyperbolics in them."""
return a.func == b.func and (
a.has(TrigonometricFunction) and b.has(TrigonometricFunction) or
a.has(HyperbolicFunction) and b.has(HyperbolicFunction))
_trigpat = None
def _trigpats():
global _trigpat
a, b, c = symbols('a b c', cls=Wild)
d = Wild('d', commutative=False)
# for the simplifications like sinh/cosh -> tanh:
# DO NOT REORDER THE FIRST 14 since these are assumed to be in this
# order in _match_div_rewrite.
matchers_division = (
(a*sin(b)**c/cos(b)**c, a*tan(b)**c, sin(b), cos(b)),
(a*tan(b)**c*cos(b)**c, a*sin(b)**c, sin(b), cos(b)),
(a*cot(b)**c*sin(b)**c, a*cos(b)**c, sin(b), cos(b)),
(a*tan(b)**c/sin(b)**c, a/cos(b)**c, sin(b), cos(b)),
(a*cot(b)**c/cos(b)**c, a/sin(b)**c, sin(b), cos(b)),
(a*cot(b)**c*tan(b)**c, a, sin(b), cos(b)),
(a*(cos(b) + 1)**c*(cos(b) - 1)**c,
a*(-sin(b)**2)**c, cos(b) + 1, cos(b) - 1),
(a*(sin(b) + 1)**c*(sin(b) - 1)**c,
a*(-cos(b)**2)**c, sin(b) + 1, sin(b) - 1),
(a*sinh(b)**c/cosh(b)**c, a*tanh(b)**c, S.One, S.One),
(a*tanh(b)**c*cosh(b)**c, a*sinh(b)**c, S.One, S.One),
(a*coth(b)**c*sinh(b)**c, a*cosh(b)**c, S.One, S.One),
(a*tanh(b)**c/sinh(b)**c, a/cosh(b)**c, S.One, S.One),
(a*coth(b)**c/cosh(b)**c, a/sinh(b)**c, S.One, S.One),
(a*coth(b)**c*tanh(b)**c, a, S.One, S.One),
(c*(tanh(a) + tanh(b))/(1 + tanh(a)*tanh(b)),
tanh(a + b)*c, S.One, S.One),
)
matchers_add = (
(c*sin(a)*cos(b) + c*cos(a)*sin(b) + d, sin(a + b)*c + d),
(c*cos(a)*cos(b) - c*sin(a)*sin(b) + d, cos(a + b)*c + d),
(c*sin(a)*cos(b) - c*cos(a)*sin(b) + d, sin(a - b)*c + d),
(c*cos(a)*cos(b) + c*sin(a)*sin(b) + d, cos(a - b)*c + d),
(c*sinh(a)*cosh(b) + c*sinh(b)*cosh(a) + d, sinh(a + b)*c + d),
(c*cosh(a)*cosh(b) + c*sinh(a)*sinh(b) + d, cosh(a + b)*c + d),
)
# for cos(x)**2 + sin(x)**2 -> 1
matchers_identity = (
(a*sin(b)**2, a - a*cos(b)**2),
(a*tan(b)**2, a*(1/cos(b))**2 - a),
(a*cot(b)**2, a*(1/sin(b))**2 - a),
(a*sin(b + c), a*(sin(b)*cos(c) + sin(c)*cos(b))),
(a*cos(b + c), a*(cos(b)*cos(c) - sin(b)*sin(c))),
(a*tan(b + c), a*((tan(b) + tan(c))/(1 - tan(b)*tan(c)))),
(a*sinh(b)**2, a*cosh(b)**2 - a),
(a*tanh(b)**2, a - a*(1/cosh(b))**2),
(a*coth(b)**2, a + a*(1/sinh(b))**2),
(a*sinh(b + c), a*(sinh(b)*cosh(c) + sinh(c)*cosh(b))),
(a*cosh(b + c), a*(cosh(b)*cosh(c) + sinh(b)*sinh(c))),
(a*tanh(b + c), a*((tanh(b) + tanh(c))/(1 + tanh(b)*tanh(c)))),
)
# Reduce any lingering artifacts, such as sin(x)**2 changing
# to 1-cos(x)**2 when sin(x)**2 was "simpler"
artifacts = (
(a - a*cos(b)**2 + c, a*sin(b)**2 + c, cos),
(a - a*(1/cos(b))**2 + c, -a*tan(b)**2 + c, cos),
(a - a*(1/sin(b))**2 + c, -a*cot(b)**2 + c, sin),
(a - a*cosh(b)**2 + c, -a*sinh(b)**2 + c, cosh),
(a - a*(1/cosh(b))**2 + c, a*tanh(b)**2 + c, cosh),
(a + a*(1/sinh(b))**2 + c, a*coth(b)**2 + c, sinh),
# same as above but with noncommutative prefactor
(a*d - a*d*cos(b)**2 + c, a*d*sin(b)**2 + c, cos),
(a*d - a*d*(1/cos(b))**2 + c, -a*d*tan(b)**2 + c, cos),
(a*d - a*d*(1/sin(b))**2 + c, -a*d*cot(b)**2 + c, sin),
(a*d - a*d*cosh(b)**2 + c, -a*d*sinh(b)**2 + c, cosh),
(a*d - a*d*(1/cosh(b))**2 + c, a*d*tanh(b)**2 + c, cosh),
(a*d + a*d*(1/sinh(b))**2 + c, a*d*coth(b)**2 + c, sinh),
)
_trigpat = (a, b, c, d, matchers_division, matchers_add,
matchers_identity, artifacts)
return _trigpat
def _replace_mul_fpowxgpow(expr, f, g, rexp, h, rexph):
"""Helper for _match_div_rewrite.
Replace f(b_)**c_*g(b_)**(rexp(c_)) with h(b)**rexph(c) if f(b_)
and g(b_) are both positive or if c_ is an integer.
"""
# assert expr.is_Mul and expr.is_commutative and f != g
fargs = defaultdict(int)
gargs = defaultdict(int)
args = []
for x in expr.args:
if x.is_Pow or x.func in (f, g):
b, e = x.as_base_exp()
if b.is_positive or e.is_integer:
if b.func == f:
fargs[b.args[0]] += e
continue
elif b.func == g:
gargs[b.args[0]] += e
continue
args.append(x)
common = set(fargs) & set(gargs)
hit = False
while common:
key = common.pop()
fe = fargs.pop(key)
ge = gargs.pop(key)
if fe == rexp(ge):
args.append(h(key)**rexph(fe))
hit = True
else:
fargs[key] = fe
gargs[key] = ge
if not hit:
return expr
while fargs:
key, e = fargs.popitem()
args.append(f(key)**e)
while gargs:
key, e = gargs.popitem()
args.append(g(key)**e)
return Mul(*args)
_idn = lambda x: x
_midn = lambda x: -x
_one = lambda x: S.One
def _match_div_rewrite(expr, i):
"""helper for __trigsimp"""
if i == 0:
expr = _replace_mul_fpowxgpow(expr, sin, cos,
_midn, tan, _idn)
elif i == 1:
expr = _replace_mul_fpowxgpow(expr, tan, cos,
_idn, sin, _idn)
elif i == 2:
expr = _replace_mul_fpowxgpow(expr, cot, sin,
_idn, cos, _idn)
elif i == 3:
expr = _replace_mul_fpowxgpow(expr, tan, sin,
_midn, cos, _midn)
elif i == 4:
expr = _replace_mul_fpowxgpow(expr, cot, cos,
_midn, sin, _midn)
elif i == 5:
expr = _replace_mul_fpowxgpow(expr, cot, tan,
_idn, _one, _idn)
# i in (6, 7) is skipped
elif i == 8:
expr = _replace_mul_fpowxgpow(expr, sinh, cosh,
_midn, tanh, _idn)
elif i == 9:
expr = _replace_mul_fpowxgpow(expr, tanh, cosh,
_idn, sinh, _idn)
elif i == 10:
expr = _replace_mul_fpowxgpow(expr, coth, sinh,
_idn, cosh, _idn)
elif i == 11:
expr = _replace_mul_fpowxgpow(expr, tanh, sinh,
_midn, cosh, _midn)
elif i == 12:
expr = _replace_mul_fpowxgpow(expr, coth, cosh,
_midn, sinh, _midn)
elif i == 13:
expr = _replace_mul_fpowxgpow(expr, coth, tanh,
_idn, _one, _idn)
else:
return None
return expr
def _trigsimp(expr, deep=False):
# protect the cache from non-trig patterns; we only allow
# trig patterns to enter the cache
if expr.has(*_trigs):
return __trigsimp(expr, deep)
return expr
@cacheit
def __trigsimp(expr, deep=False):
"""recursive helper for trigsimp"""
from sympy.simplify.fu import TR10i
if _trigpat is None:
_trigpats()
a, b, c, d, matchers_division, matchers_add, \
matchers_identity, artifacts = _trigpat
if expr.is_Mul:
# do some simplifications like sin/cos -> tan:
if not expr.is_commutative:
com, nc = expr.args_cnc()
expr = _trigsimp(Mul._from_args(com), deep)*Mul._from_args(nc)
else:
for i, (pattern, simp, ok1, ok2) in enumerate(matchers_division):
if not _dotrig(expr, pattern):
continue
newexpr = _match_div_rewrite(expr, i)
if newexpr is not None:
if newexpr != expr:
expr = newexpr
break
else:
continue
# use SymPy matching instead
res = expr.match(pattern)
if res and res.get(c, 0):
if not res[c].is_integer:
ok = ok1.subs(res)
if not ok.is_positive:
continue
ok = ok2.subs(res)
if not ok.is_positive:
continue
# if "a" contains any of trig or hyperbolic funcs with
# argument "b" then skip the simplification
if any(w.args[0] == res[b] for w in res[a].atoms(
TrigonometricFunction, HyperbolicFunction)):
continue
# simplify and finish:
expr = simp.subs(res)
break # process below
if expr.is_Add:
args = []
for term in expr.args:
if not term.is_commutative:
com, nc = term.args_cnc()
nc = Mul._from_args(nc)
term = Mul._from_args(com)
else:
nc = S.One
term = _trigsimp(term, deep)
for pattern, result in matchers_identity:
res = term.match(pattern)
if res is not None:
term = result.subs(res)
break
args.append(term*nc)
if args != expr.args:
expr = Add(*args)
expr = min(expr, expand(expr), key=count_ops)
if expr.is_Add:
for pattern, result in matchers_add:
if not _dotrig(expr, pattern):
continue
expr = TR10i(expr)
if expr.has(HyperbolicFunction):
res = expr.match(pattern)
# if "d" contains any trig or hyperbolic funcs with
# argument "a" or "b" then skip the simplification;
# this isn't perfect -- see tests
if res is None or not (a in res and b in res) or any(
w.args[0] in (res[a], res[b]) for w in res[d].atoms(
TrigonometricFunction, HyperbolicFunction)):
continue
expr = result.subs(res)
break
# Reduce any lingering artifacts, such as sin(x)**2 changing
# to 1 - cos(x)**2 when sin(x)**2 was "simpler"
for pattern, result, ex in artifacts:
if not _dotrig(expr, pattern):
continue
# Substitute a new wild that excludes some function(s)
# to help influence a better match. This is because
# sometimes, for example, 'a' would match sec(x)**2
a_t = Wild('a', exclude=[ex])
pattern = pattern.subs(a, a_t)
result = result.subs(a, a_t)
m = expr.match(pattern)
was = None
while m and was != expr:
was = expr
if m[a_t] == 0 or \
-m[a_t] in m[c].args or m[a_t] + m[c] == 0:
break
if d in m and m[a_t]*m[d] + m[c] == 0:
break
expr = result.subs(m)
m = expr.match(pattern)
m.setdefault(c, S.Zero)
elif expr.is_Mul or expr.is_Pow or deep and expr.args:
expr = expr.func(*[_trigsimp(a, deep) for a in expr.args])
try:
if not expr.has(*_trigs):
raise TypeError
e = expr.atoms(exp)
new = expr.rewrite(exp, deep=deep)
if new == e:
raise TypeError
fnew = factor(new)
if fnew != new:
            new = sorted([new, fnew], key=count_ops)[0]
# if all exp that were introduced disappeared then accept it
if not (new.atoms(exp) - e):
expr = new
except TypeError:
pass
return expr
#------------------- end of old trigsimp routines --------------------
def futrig(e, **kwargs):
"""Return simplified ``e`` using Fu-like transformations.
This is not the "Fu" algorithm. This is called by default
from ``trigsimp``. By default, hyperbolics subexpressions
will be simplified, but this can be disabled by setting
``hyper=False``.
Examples
========
>>> from sympy import trigsimp, tan, sinh, tanh
>>> from sympy.simplify.trigsimp import futrig
>>> from sympy.abc import x
>>> trigsimp(1/tan(x)**2)
tan(x)**(-2)
>>> futrig(sinh(x)/tanh(x))
cosh(x)
"""
from sympy.simplify.fu import hyper_as_trig
from sympy.simplify.simplify import bottom_up
e = sympify(e)
if not isinstance(e, Basic):
return e
if not e.args:
return e
old = e
e = bottom_up(e, lambda x: _futrig(x, **kwargs))
if kwargs.pop('hyper', True) and e.has(HyperbolicFunction):
e, f = hyper_as_trig(e)
e = f(_futrig(e))
if e != old and e.is_Mul and e.args[0].is_Rational:
# redistribute leading coeff on 2-arg Add
e = Mul(*e.as_coeff_Mul())
return e
def _futrig(e, **kwargs):
"""Helper for futrig."""
from sympy.simplify.fu import (
TR1, TR2, TR3, TR2i, TR10, L, TR10i,
TR8, TR6, TR15, TR16, TR111, TR5, TRmorrie, TR11, TR14, TR22,
TR12)
from sympy.core.compatibility import _nodes
if not e.has(TrigonometricFunction):
return e
if e.is_Mul:
coeff, e = e.as_independent(TrigonometricFunction)
else:
coeff = S.One
Lops = lambda x: (L(x), x.count_ops(), _nodes(x), len(x.args), x.is_Add)
trigs = lambda x: x.has(TrigonometricFunction)
tree = [identity,
(
TR3, # canonical angles
TR1, # sec-csc -> cos-sin
TR12, # expand tan of sum
lambda x: _eapply(factor, x, trigs),
TR2, # tan-cot -> sin-cos
[identity, lambda x: _eapply(_mexpand, x, trigs)],
TR2i, # sin-cos ratio -> tan
lambda x: _eapply(lambda i: factor(i.normal()), x, trigs),
TR14, # factored identities
TR5, # sin-pow -> cos_pow
TR10, # sin-cos of sums -> sin-cos prod
TR11, TR6, # reduce double angles and rewrite cos pows
lambda x: _eapply(factor, x, trigs),
TR14, # factored powers of identities
[identity, lambda x: _eapply(_mexpand, x, trigs)],
TRmorrie,
        TR10i, # sin-cos products -> sin-cos of sums
[identity, TR8], # sin-cos products -> sin-cos of sums
[identity, lambda x: TR2i(TR2(x))], # tan -> sin-cos -> tan
[
lambda x: _eapply(expand_mul, TR5(x), trigs),
lambda x: _eapply(
expand_mul, TR15(x), trigs)], # pos/neg powers of sin
[
lambda x: _eapply(expand_mul, TR6(x), trigs),
lambda x: _eapply(
expand_mul, TR16(x), trigs)], # pos/neg powers of cos
TR111, # tan, sin, cos to neg power -> cot, csc, sec
[identity, TR2i], # sin-cos ratio to tan
[identity, lambda x: _eapply(
expand_mul, TR22(x), trigs)], # tan-cot to sec-csc
TR1, TR2, TR2i,
[identity, lambda x: _eapply(
factor_terms, TR12(x), trigs)], # expand tan of sum
)]
e = greedy(tree, objective=Lops)(e)
return coeff*e
def _is_Expr(e):
"""_eapply helper to tell whether ``e`` and all its args
are Exprs."""
if not isinstance(e, Expr):
return False
return all(_is_Expr(i) for i in e.args)
def _eapply(func, e, cond=None):
"""Apply ``func`` to ``e`` if all args are Exprs else only
apply it to those args that *are* Exprs."""
if not isinstance(e, Expr):
return e
if _is_Expr(e) or not e.args:
return func(e)
return e.func(*[
_eapply(func, ei) if (cond is None or cond(ei)) else ei
for ei in e.args])
|
ad-m/django-mailbox
|
refs/heads/master
|
django_mailbox/south_migrations/0010_auto__add_field_mailbox_from_email.py
|
2
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Mailbox.from_email'
db.add_column('django_mailbox_mailbox', 'from_email',
self.gf('django.db.models.fields.CharField')(default=None, max_length=255, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Mailbox.from_email'
db.delete_column('django_mailbox_mailbox', 'from_email')
models = {
'django_mailbox.mailbox': {
'Meta': {'object_name': 'Mailbox'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'from_email': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uri': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'null': 'True', 'blank': 'True'})
},
'django_mailbox.message': {
'Meta': {'object_name': 'Message'},
'body': ('django.db.models.fields.TextField', [], {}),
'from_header': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_reply_to': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'replies'", 'null': 'True', 'to': "orm['django_mailbox.Message']"}),
'mailbox': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'messages'", 'to': "orm['django_mailbox.Mailbox']"}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'outgoing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'processed': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'to_header': ('django.db.models.fields.TextField', [], {})
}
}
complete_apps = ['django_mailbox']
|
nnethercote/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/third_party/py/testing/io_/test_terminalwriter.py
|
54
|
import py
import os, sys
from py._io import terminalwriter
import codecs
import pytest
def test_get_terminal_width():
x = py.io.get_terminal_width
assert x == terminalwriter.get_terminal_width
def test_getdimensions(monkeypatch):
fcntl = py.test.importorskip("fcntl")
import struct
l = []
monkeypatch.setattr(fcntl, 'ioctl', lambda *args: l.append(args))
try:
terminalwriter._getdimensions()
except (TypeError, struct.error):
pass
assert len(l) == 1
assert l[0][0] == 1
def test_terminal_width_COLUMNS(monkeypatch):
""" Dummy test for get_terminal_width
"""
fcntl = py.test.importorskip("fcntl")
monkeypatch.setattr(fcntl, 'ioctl', lambda *args: int('x'))
monkeypatch.setenv('COLUMNS', '42')
assert terminalwriter.get_terminal_width() == 42
monkeypatch.delenv('COLUMNS', raising=False)
def test_terminalwriter_defaultwidth_80(monkeypatch):
monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: 0/0)
monkeypatch.delenv('COLUMNS', raising=False)
tw = py.io.TerminalWriter()
assert tw.fullwidth == 80
def test_terminalwriter_getdimensions_bogus(monkeypatch):
monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: (10,10))
monkeypatch.delenv('COLUMNS', raising=False)
tw = py.io.TerminalWriter()
assert tw.fullwidth == 80
def test_terminalwriter_getdimensions_emacs(monkeypatch):
# emacs terminal returns (0,0) but set COLUMNS properly
monkeypatch.setattr(terminalwriter, '_getdimensions', lambda: (0,0))
monkeypatch.setenv('COLUMNS', '42')
tw = py.io.TerminalWriter()
assert tw.fullwidth == 42
def test_terminalwriter_computes_width(monkeypatch):
monkeypatch.setattr(terminalwriter, 'get_terminal_width', lambda: 42)
tw = py.io.TerminalWriter()
assert tw.fullwidth == 42
def test_terminalwriter_default_instantiation():
tw = py.io.TerminalWriter(stringio=True)
assert hasattr(tw, 'stringio')
def test_terminalwriter_dumb_term_no_markup(monkeypatch):
monkeypatch.setattr(os, 'environ', {'TERM': 'dumb', 'PATH': ''})
class MyFile:
closed = False
def isatty(self):
return True
monkeypatch.setattr(sys, 'stdout', MyFile())
try:
assert sys.stdout.isatty()
tw = py.io.TerminalWriter()
assert not tw.hasmarkup
finally:
monkeypatch.undo()
def test_terminalwriter_file_unicode(tmpdir):
f = codecs.open(str(tmpdir.join("xyz")), "wb", "utf8")
tw = py.io.TerminalWriter(file=f)
assert tw.encoding == "utf8"
def test_unicode_encoding():
msg = py.builtin._totext('b\u00f6y', 'utf8')
for encoding in 'utf8', 'latin1':
l = []
tw = py.io.TerminalWriter(l.append, encoding=encoding)
tw.line(msg)
assert l[0].strip() == msg.encode(encoding)
@pytest.mark.parametrize("encoding", ["ascii"])
def test_unicode_on_file_with_ascii_encoding(tmpdir, monkeypatch, encoding):
msg = py.builtin._totext('hell\xf6', "latin1")
#pytest.raises(UnicodeEncodeError, lambda: bytes(msg))
f = codecs.open(str(tmpdir.join("x")), "w", encoding)
tw = py.io.TerminalWriter(f)
tw.line(msg)
f.close()
s = tmpdir.join("x").open("rb").read().strip()
assert encoding == "ascii"
assert s == msg.encode("unicode-escape")
win32 = int(sys.platform == "win32")
class TestTerminalWriter:
def pytest_generate_tests(self, metafunc):
if "tw" in metafunc.funcargnames:
metafunc.addcall(id="path", param="path")
metafunc.addcall(id="stringio", param="stringio")
metafunc.addcall(id="callable", param="callable")
def pytest_funcarg__tw(self, request):
if request.param == "path":
tmpdir = request.getfuncargvalue("tmpdir")
p = tmpdir.join("tmpfile")
f = codecs.open(str(p), 'w+', encoding='utf8')
tw = py.io.TerminalWriter(f)
def getlines():
tw._file.flush()
return codecs.open(str(p), 'r',
encoding='utf8').readlines()
elif request.param == "stringio":
tw = py.io.TerminalWriter(stringio=True)
def getlines():
tw.stringio.seek(0)
return tw.stringio.readlines()
elif request.param == "callable":
writes = []
tw = py.io.TerminalWriter(writes.append)
def getlines():
io = py.io.TextIO()
io.write("".join(writes))
io.seek(0)
return io.readlines()
tw.getlines = getlines
tw.getvalue = lambda: "".join(getlines())
return tw
def test_line(self, tw):
tw.line("hello")
l = tw.getlines()
assert len(l) == 1
assert l[0] == "hello\n"
def test_line_unicode(self, tw):
for encoding in 'utf8', 'latin1':
tw._encoding = encoding
msg = py.builtin._totext('b\u00f6y', 'utf8')
tw.line(msg)
l = tw.getlines()
assert l[0] == msg + "\n"
def test_sep_no_title(self, tw):
tw.sep("-", fullwidth=60)
l = tw.getlines()
assert len(l) == 1
assert l[0] == "-" * (60-win32) + "\n"
def test_sep_with_title(self, tw):
tw.sep("-", "hello", fullwidth=60)
l = tw.getlines()
assert len(l) == 1
assert l[0] == "-" * 26 + " hello " + "-" * (27-win32) + "\n"
@py.test.mark.skipif("sys.platform == 'win32'")
def test__escaped(self, tw):
text2 = tw._escaped("hello", (31))
assert text2.find("hello") != -1
@py.test.mark.skipif("sys.platform == 'win32'")
def test_markup(self, tw):
for bold in (True, False):
for color in ("red", "green"):
text2 = tw.markup("hello", **{color: True, 'bold': bold})
assert text2.find("hello") != -1
py.test.raises(ValueError, "tw.markup('x', wronkw=3)")
py.test.raises(ValueError, "tw.markup('x', wronkw=0)")
def test_line_write_markup(self, tw):
tw.hasmarkup = True
tw.line("x", bold=True)
tw.write("x\n", red=True)
l = tw.getlines()
if sys.platform != "win32":
assert len(l[0]) >= 2, l
assert len(l[1]) >= 2, l
def test_attr_fullwidth(self, tw):
tw.sep("-", "hello", fullwidth=70)
tw.fullwidth = 70
tw.sep("-", "hello")
l = tw.getlines()
assert len(l[0]) == len(l[1])
def test_reline(self, tw):
tw.line("hello")
tw.hasmarkup = False
pytest.raises(ValueError, lambda: tw.reline("x"))
tw.hasmarkup = True
tw.reline("0 1 2")
tw.getlines()
l = tw.getvalue().split("\n")
assert len(l) == 2
tw.reline("0 1 3")
l = tw.getvalue().split("\n")
assert len(l) == 2
assert l[1].endswith("0 1 3\r")
tw.line("so")
l = tw.getvalue().split("\n")
assert len(l) == 3
assert l[-1] == ""
assert l[1] == ("0 1 2\r0 1 3\rso ")
assert l[0] == "hello"
def test_terminal_with_callable_write_and_flush():
l = set()
class fil:
flush = lambda self: l.add("1")
write = lambda self, x: l.add("1")
__call__ = lambda self, x: l.add("2")
tw = py.io.TerminalWriter(fil())
tw.line("hello")
assert l == set(["1"])
del fil.flush
l.clear()
tw = py.io.TerminalWriter(fil())
tw.line("hello")
assert l == set(["2"])
def test_chars_on_current_line():
tw = py.io.TerminalWriter(stringio=True)
written = []
def write_and_check(s, expected):
tw.write(s, bold=True)
written.append(s)
assert tw.chars_on_current_line == expected
assert tw.stringio.getvalue() == ''.join(written)
write_and_check('foo', 3)
write_and_check('bar', 6)
write_and_check('\n', 0)
write_and_check('\n', 0)
write_and_check('\n\n\n', 0)
write_and_check('\nfoo', 3)
write_and_check('\nfbar\nhello', 5)
write_and_check('10', 7)
@pytest.mark.skipif(sys.platform == "win32", reason="win32 has no native ansi")
def test_attr_hasmarkup():
tw = py.io.TerminalWriter(stringio=True)
assert not tw.hasmarkup
tw.hasmarkup = True
tw.line("hello", bold=True)
s = tw.stringio.getvalue()
assert len(s) > len("hello\n")
assert '\x1b[1m' in s
assert '\x1b[0m' in s
@pytest.mark.skipif(sys.platform == "win32", reason="win32 has no native ansi")
def test_ansi_print():
# we have no easy way to construct a file that
# represents a terminal
f = py.io.TextIO()
f.isatty = lambda: True
py.io.ansi_print("hello", 0x32, file=f)
text2 = f.getvalue()
assert text2.find("hello") != -1
assert len(text2) >= len("hello\n")
assert '\x1b[50m' in text2
assert '\x1b[0m' in text2
def test_should_do_markup_PY_COLORS_eq_1(monkeypatch):
monkeypatch.setitem(os.environ, 'PY_COLORS', '1')
tw = py.io.TerminalWriter(stringio=True)
assert tw.hasmarkup
tw.line("hello", bold=True)
s = tw.stringio.getvalue()
assert len(s) > len("hello\n")
assert '\x1b[1m' in s
assert '\x1b[0m' in s
def test_should_do_markup_PY_COLORS_eq_0(monkeypatch):
monkeypatch.setitem(os.environ, 'PY_COLORS', '0')
f = py.io.TextIO()
f.isatty = lambda: True
tw = py.io.TerminalWriter(file=f)
assert not tw.hasmarkup
tw.line("hello", bold=True)
s = f.getvalue()
assert s == "hello\n"
|
opentrials/collectors
|
refs/heads/master
|
collectors/nct/parser.py
|
2
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import xmltodict
try:
from lxml import etree
except ImportError:
import xml.etree.ElementTree as etree
from .record import Record
logger = logging.getLogger(__name__)
# Module API
def parse_record(res):
# Init data
data = {}
res = etree.parse(res)
# General
key = 'download_date'
path = 'required_header/download_date'
value = _parse_text(res, path)
data[key] = value
key = 'link_text'
path = 'required_header/link_text'
value = _parse_text(res, path)
data[key] = value
key = 'url'
path = 'required_header/url'
value = _parse_text(res, path)
data[key] = value
key = 'org_study_id'
path = 'id_info/org_study_id'
value = _parse_text(res, path)
data[key] = value
key = 'nct_id'
path = 'id_info/nct_id'
value = _parse_text(res, path)
data[key] = value
key = 'secondary_ids'
path = 'id_info/secondary_id'
value = _parse_list(res, path, expand='secondary_id')
data[key] = value
key = 'nct_aliases'
path = 'id_info/nct_alias'
value = _parse_list(res, path, expand='nct_alias')
data[key] = value
key = 'brief_title'
path = 'brief_title'
value = _parse_text(res, path)
data[key] = value
key = 'acronym'
path = 'acronym'
value = _parse_text(res, path)
data[key] = value
key = 'official_title'
path = 'official_title'
value = _parse_text(res, path)
data[key] = value
key = 'sponsors'
path = 'sponsors/*'
value = _parse_list(res, path)
data[key] = value
key = 'source'
path = 'source'
value = _parse_text(res, path)
data[key] = value
key = 'oversight_info'
path = 'oversight_info'
value = _parse_dict(res, path, expand=key)
data[key] = value
key = 'brief_summary'
path = 'brief_summary/textblock'
value = _parse_text(res, path)
data[key] = value
key = 'detailed_description'
path = 'detailed_description/textblock'
value = _parse_text(res, path)
data[key] = value
key = 'overall_status'
path = 'overall_status'
value = _parse_text(res, path)
data[key] = value
key = 'why_stopped'
path = 'why_stopped'
value = _parse_text(res, path)
data[key] = value
key = 'start_date'
path = 'start_date'
value = _parse_text(res, path)
data[key] = value
key = 'completion_date_actual'
path = 'completion_date[@type="Actual"]'
value = _parse_text(res, path)
data[key] = value
key = 'completion_date_anticipated'
path = 'completion_date[@type="Actual"]'
value = _parse_text(res, path)
data[key] = value
key = 'primary_completion_date_actual'
path = 'primary_completion_date[@type="Actual"]'
value = _parse_text(res, path)
data[key] = value
key = 'primary_completion_date_anticipated'
path = 'primary_completion_date[@type="Actual"]'
value = _parse_text(res, path)
data[key] = value
key = 'phase'
path = 'phase'
value = _parse_text(res, path)
data[key] = value
key = 'study_type'
path = 'study_type'
value = _parse_text(res, path)
data[key] = value
key = 'study_design'
path = 'study_design'
value = _parse_text(res, path)
data[key] = value
key = 'target_duration'
path = 'target_duration'
value = _parse_text(res, path)
data[key] = value
key = 'primary_outcomes'
path = 'primary_outcome'
value = _parse_list(res, path, expand='primary_outcome')
data[key] = value
key = 'secondary_outcomes'
path = 'secondary_outcome'
value = _parse_list(res, path, expand='secondary_outcome')
data[key] = value
key = 'other_outcomes'
path = 'other_outcome'
value = _parse_list(res, path, expand='other_outcome')
data[key] = value
key = 'number_of_arms'
path = 'number_of_arms'
value = _parse_text(res, path)
data[key] = value
key = 'number_of_groups'
path = 'number_of_groups'
value = _parse_text(res, path)
data[key] = value
key = 'enrollment_actual'
path = 'enrollment[@type="Actual"]'
value = _parse_text(res, path)
data[key] = value
key = 'enrollment_anticipated'
path = 'enrollment[@type="Anticipated"]'
value = _parse_text(res, path)
data[key] = value
key = 'conditions'
path = 'condition'
value = _parse_list(res, path, expand='condition')
data[key] = value
key = 'arm_groups'
path = 'arm_group'
value = _parse_list(res, path, expand='arm_group')
data[key] = value
key = 'interventions'
path = 'intervention'
value = _parse_list(res, path, expand='intervention')
data[key] = value
key = 'biospec_retention'
path = 'biospec_retention'
value = _parse_text(res, path)
data[key] = value
key = 'biospec_desrc'
path = 'biospec_desrc/textblock'
value = _parse_text(res, path)
data[key] = value
key = 'eligibility'
path = 'eligibility'
value = _parse_dict(res, path, expand=key)
data[key] = value
key = 'overall_officials'
path = 'overall_official'
value = _parse_list(res, path, expand='overall_official')
data[key] = value
key = 'overall_contact'
path = 'overall_contact'
value = _parse_dict(res, path, expand=key)
data[key] = value
key = 'overall_contact_backup'
path = 'overall_contact_backup'
value = _parse_dict(res, path, expand=key)
data[key] = value
key = 'locations'
path = 'location'
value = _parse_list(res, path, expand='location')
data[key] = value
key = 'location_countries'
path = 'location_countries/*'
value = _parse_list(res, path, expand='country')
data[key] = value
key = 'removed_countries'
path = 'removed_countries/*'
value = _parse_list(res, path, expand='country')
data[key] = value
key = 'links'
path = 'link'
value = _parse_list(res, path, expand='link')
data[key] = value
key = 'references'
path = 'reference'
value = _parse_list(res, path, expand='reference')
data[key] = value
key = 'results_references'
path = 'results_reference'
value = _parse_list(res, path, expand='results_reference')
data[key] = value
key = 'verification_date'
path = 'verification_date'
value = _parse_text(res, path)
data[key] = value
key = 'lastchanged_date'
path = 'lastchanged_date'
value = _parse_text(res, path)
data[key] = value
key = 'firstreceived_date'
path = 'firstreceived_date'
value = _parse_text(res, path)
data[key] = value
key = 'firstreceived_results_date'
path = 'firstreceived_results_date'
value = _parse_text(res, path)
data[key] = value
key = 'responsible_party'
path = 'responsible_party'
value = _parse_dict(res, path, expand=key)
data[key] = value
key = 'keywords'
path = 'keyword'
value = _parse_list(res, path, expand='keyword')
data[key] = value
key = 'is_fda_regulated'
path = 'is_fda_regulated'
value = _parse_text(res, path)
data[key] = value
key = 'is_section_801'
path = 'is_section_801'
value = _parse_text(res, path)
data[key] = value
key = 'has_expanded_access'
path = 'has_expanded_access'
value = _parse_text(res, path)
data[key] = value
key = 'condition_browse'
path = 'condition_browse'
value = _parse_dict(res, path, expand=key)
data[key] = value
key = 'intervention_browse'
path = 'intervention_browse'
value = _parse_dict(res, path, expand=key)
data[key] = value
key = 'clinical_results'
path = 'clinical_results'
value = _parse_dict(res, path, expand=key)
data[key] = value
key = 'results_exemption_date'
path = 'firstreceived_results_disposition_date'
value = _parse_text(res, path)
data[key] = value
# Create record
url = 'https://clinicaltrials.gov/ct2/show/%s' % data['nct_id']
record = Record.create(url, data)
return record
# Internal
def _parse_text(res, path):
"""Parsing text from response by path.
"""
value = None
node = res.find(path)
if node is not None:
value = node.text
value = value.strip()
return value
def _parse_dict(res, path, expand=None):
"""Parse dict from response by path.
"""
value = None
node = res.find(path)
if node is not None:
text = etree.tostring(node, encoding='utf-8', method='xml')
node_dict = xmltodict.parse(text)
if expand:
node_dict = node_dict[expand]
value = node_dict
return value
def _parse_list(res, path, expand=None):
"""Parse list from response by path.
"""
value = None
nodes = res.findall(path)
if len(nodes) > 0:
hashs = []
for node in nodes:
text = etree.tostring(node, encoding='utf-8', method='xml')
node_dict = xmltodict.parse(text)
if expand:
node_dict = node_dict[expand]
hashs.append(node_dict)
value = hashs
return value
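# Illustrative behaviour (assumed sample input, not from the original source):
# for XML containing <condition>Asthma</condition><condition>Flu</condition>,
# _parse_list(res, 'condition', expand='condition') returns ['Asthma', 'Flu'].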
|
heynemann/mememe
|
refs/heads/master
|
urls.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import *
from django.conf import settings
PLUGINS = {'document_root': settings.PLUGINS_DIRECTORY}
MEDIA = {'document_root': settings.LOCAL_FILE('media')}
urlpatterns = patterns('',
url(r'^plugins/(?P<path>.*)$', 'django.views.static.serve', PLUGINS,
name='plugins-url'),
url(r'^media/(?P<path>.*)', 'django.views.static.serve', MEDIA),
url(r'', 'engine.views.index', name='index'),
)
|
gbwebdev/PFE-VA_Dev
|
refs/heads/master
|
components/ecl/vsc.py
|
2
|
"""
vsc.py
Execution Control Level module : Velocity/Steering Controler
Controls the PWM module’s state in order to get desired velocity
(in % of Vmax) and steering radius (in % of Rmax)
"""
#!/usr/bin/python3.5
#-*- coding: utf-8 -*-
###Standard imports :
import atexit
import time
from os import path
###Specific imports :
##robotBasics:
#Constants:
from robotBasics.constants.connectionSettings import MOT as MOT_CS
from robotBasics.constants.connectionSettings import VSC as VSC_CS
#Classes & Methods:
from robotBasics.sockets.tcp.Server import Server as Server
from robotBasics.sockets.tcp.Client import Client as Client
from robotBasics.logger import robotLogger
###########################################################################
# Environment Setup : #
###########################################################################
#If we are on an actual robot :
if path.isdir("/home/robot"):
ROBOT_ROOT = '/home/robot/'
elif path.isfile(path.expanduser('~/.robotConf')):
#If we're not on an actual robot, check if we have
#a working environment set for robot debugging:
CONFIG_FILE = open(path.expanduser('~/.robotConf'), 'r')
ROBOT_ROOT = CONFIG_FILE.read().strip()
CONFIG_FILE.close()
else:
ROBOT_ROOT = ''
print('It seems like you are NOT working on an actual robot. \
You should set up a debugging environment before running any code (see documentation)')
#Logging Initialization :
LOGGER = robotLogger("ECL > vsc", ROBOT_ROOT+'logs/ecl/')
###########################################################################
# Functions/Callbacks definition : #
###########################################################################
def velocity_control_cb(data, args):
"""
Velocity control callback method
Apply requested velocity while making sure not to perform strong accelerations.
"""
deltaVelocity = data[0] - args["currentState"]["velocity"]
#If we're accelerating, we make sure to do so increasingly (if we're braking, we don't care) :
if abs(data[0]) > abs(args["currentState"]["velocity"]):
#If we're still under 30% of Vmax, we only allow 5% increments
if abs(args["currentState"]["velocity"]) < 30:
if deltaVelocity < -5:
deltaVelocity = -5
elif deltaVelocity > 5:
deltaVelocity = 5
        #Else, we allow 10% increments
else:
if deltaVelocity < -10:
deltaVelocity = -10
elif deltaVelocity > 10:
deltaVelocity = 10
#We apply the change to the program's velocity variable
args["currentState"]["velocity"] += deltaVelocity
#We apply the changes to the robot :
apply_modifications(args)
#We send the velocity actually applied to the client :
args["velocityConnection"].send([args["currentState"]["velocity"]])
def steering_control_cb(data, args):
"""
Steering control callback method
Apply requested steering
"""
print('REQUEST RECEIVED !!!!')
#We apply the change to the program's velocity variable
args["currentState"]["steeringRatio"] = data[0]
print('Received steering : ', data[0])
#We apply the changes to the robot :
apply_modifications(args)
#We send the velocity actually applied to the client :
args["steeringConnection"].send([data[0]])
def apply_modifications(args):
#Wait for the semaphore to be cleared (so we don't apply two modifications at the same time)
while args["currentState"]["busy"]:
time.sleep(0.0001)
args["currentState"]["busy"] = True
velocity = args["currentState"]["velocity"]
steering = args["currentState"]["steeringRatio"]
#We make sure not to go over 100% velocity on each wheel.
#If so, we have to reduce the mean velocity :
factor = 0.01
if velocity*(1+factor*abs(steering)) > 100:
velocity = 100/(1+abs(steering)*factor)
elif velocity*(1+factor*abs(steering)) < -100:
velocity = -100/(1+abs(steering)*factor)
leftVelocity = round(velocity*(1+factor*steering))
rightVelocity = round(velocity*(1-factor*steering))
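    # Worked example (illustrative): velocity=95, steering=100 gives
    # 95*(1 + 0.01*100) = 190 > 100, so the mean velocity is rescaled to
    # 100/2 = 50 and the wheels get leftVelocity=100, rightVelocity=0.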
args["currentState"]["velocity"] = velocity
#We apply the changes to the robot :
try:
args["leftMotorConnection"].send([leftVelocity])
args["rightMotorConnection"].send([rightVelocity])
except:
        print('error while sending')
args["currentState"]["busy"] = False
###########################################################################
# CONNECTIONS SET UP AND SETTINGS : #
###########################################################################
#### CLIENTS CONNECTION :
#Creating the connection object
CLIENT_LEFT = Client(MOT_CS["LEFT"], LOGGER)
CLIENT_RIGHT = Client(MOT_CS["RIGHT"], LOGGER)
#Opening the connection
CLIENT_LEFT.connect()
CLIENT_RIGHT.connect()
#### SERVER CONNECTION :
## Velocity Server :
#Creating the connection object
VELOCITY_SERVER = Server(VSC_CS["velocity"], LOGGER)
#Registering the close method to be executed at exit (clean deconnection)
atexit.register(VELOCITY_SERVER.close)
#Opening the connection
VELOCITY_SERVER.connect()
## Steering Server :
#Creating the connection object
STEERING_SERVER = Server(VSC_CS["steering"], LOGGER)
#Registering the close method to be executed at exit (clean deconnection)
atexit.register(STEERING_SERVER.close)
#Opening the connection
STEERING_SERVER.connect()
#### CALLBACKS' ARGUMENTS SETUP:
#Shared robot's state :
CURRENT_STATE = {
"busy": False,
"velocity": 0,
"steeringRatio" : 0
}
#Argument to be passed to the velocity callback method
ARGUMENTS_VELOCITY = {
"currentState" : CURRENT_STATE, #To "Mix" the
"velocityConnection": VELOCITY_SERVER, #To respond to the request
"leftMotorConnection": CLIENT_LEFT,
"rightMotorConnection": CLIENT_RIGHT
}
#Argument to be passed to the steering callback method
ARGUMENTS_STEERING = {
"currentState" : CURRENT_STATE,
"steeringConnection": STEERING_SERVER,
"leftMotorConnection": CLIENT_LEFT,
"rightMotorConnection": CLIENT_RIGHT
}
###########################################################################
# RUNNING : #
###########################################################################
while not VELOCITY_SERVER.connected or not STEERING_SERVER.connected:
time.sleep(0.05)
#Waiting for requests and redirecting them to the callback methods
VELOCITY_SERVER.listen_to_clients(velocity_control_cb, ARGUMENTS_VELOCITY)
STEERING_SERVER.listen_to_clients(steering_control_cb, ARGUMENTS_STEERING)
VELOCITY_SERVER.join_clients()
STEERING_SERVER.join_clients()
stopped = False
|
habibmasuro/bitcoinxt
|
refs/heads/master
|
qa/rpc-tests/test_framework/bignum.py
|
230
|
#
#
# bignum.py
#
# This file is copied from python-bitcoinlib.
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
"""Bignum routines"""
from __future__ import absolute_import, division, print_function, unicode_literals
import struct
# generic big endian MPI format
def bn_bytes(v, have_ext=False):
ext = 0
if have_ext:
ext = 1
return ((v.bit_length()+7)//8) + ext
def bn2bin(v):
s = bytearray()
i = bn_bytes(v)
while i > 0:
s.append((v >> ((i-1) * 8)) & 0xff)
i -= 1
return s
def bin2bn(s):
l = 0
for ch in s:
l = (l << 8) | ch
return l
def bn2mpi(v):
have_ext = False
if v.bit_length() > 0:
have_ext = (v.bit_length() & 0x07) == 0
neg = False
if v < 0:
neg = True
v = -v
s = struct.pack(b">I", bn_bytes(v, have_ext))
ext = bytearray()
if have_ext:
ext.append(0)
v_bin = bn2bin(v)
if neg:
if have_ext:
ext[0] |= 0x80
else:
v_bin[0] |= 0x80
return s + ext + v_bin
def mpi2bn(s):
if len(s) < 4:
return None
s_size = bytes(s[:4])
v_len = struct.unpack(b">I", s_size)[0]
if len(s) != (v_len + 4):
return None
if v_len == 0:
return 0
v_str = bytearray(s[4:])
neg = False
i = v_str[0]
if i & 0x80:
neg = True
i &= ~0x80
v_str[0] = i
v = bin2bn(v_str)
if neg:
return -v
return v
# bitcoin-specific little endian format, with implicit size
def mpi2vch(s):
r = s[4:] # strip size
r = r[::-1] # reverse string, converting BE->LE
return r
def bn2vch(v):
return bytes(mpi2vch(bn2mpi(v)))
def vch2mpi(s):
r = struct.pack(b">I", len(s)) # size
r += s[::-1] # reverse string, converting LE->BE
return r
def vch2bn(s):
return mpi2bn(vch2mpi(s))
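# Illustrative round trip (not part of the original module): bn2vch(0x1234)
# yields the little-endian bytes b'\x34\x12', and vch2bn(bn2vch(0x1234))
# returns 0x1234 again.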
|
ptonner/GPy
|
refs/heads/master
|
GPy/plotting/matplot_dep/mapping_plots.py
|
8
|
# Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
try:
import Tango
from matplotlib import pyplot as pb
except:
pass
from base_plots import x_frame1D, x_frame2D
def plot_mapping(self, plot_limits=None, which_data='all', which_parts='all', resolution=None, levels=20, samples=0, fignum=None, ax=None, fixed_inputs=[], linecol=Tango.colorsHex['darkBlue']):
"""
Plots the mapping associated with the model.
- In one dimension, the function is plotted.
    - In two dimensions, a contour-plot shows the function
- In higher dimensions, we've not implemented this yet !TODO!
Can plot only part of the data and part of the posterior functions
using which_data and which_functions
    :param plot_limits: The limits of the plot. If 1D [xmin,xmax], if 2D [[xmin,ymin],[xmax,ymax]]. Defaults to data limits
:type plot_limits: np.array
:param which_data: which if the training data to plot (default all)
:type which_data: 'all' or a slice object to slice self.X, self.Y
:param which_parts: which of the kernel functions to plot (additively)
:type which_parts: 'all', or list of bools
:param resolution: the number of intervals to sample the GP on. Defaults to 200 in 1D and 50 (a 50x50 grid) in 2D
:type resolution: int
:param levels: number of levels to plot in a contour plot.
:type levels: int
:param samples: the number of a posteriori samples to plot
:type samples: int
:param fignum: figure to plot on.
:type fignum: figure number
:param ax: axes to plot on.
:type ax: axes handle
:param fixed_inputs: a list of tuple [(i,v), (i,v)...], specifying that input index i should be set to value v.
:type fixed_inputs: a list of tuples
:param linecol: color of line to plot.
:type linecol:
    :param levels: for 2D plotting, the number of contour levels to use.
        If ax is None, a new figure is created.
"""
# TODO include samples
if which_data == 'all':
which_data = slice(None)
if ax is None:
fig = pb.figure(num=fignum)
ax = fig.add_subplot(111)
plotdims = self.input_dim - len(fixed_inputs)
if plotdims == 1:
Xu = self.X * self._Xscale + self._Xoffset # NOTE self.X are the normalized values now
fixed_dims = np.array([i for i,v in fixed_inputs])
freedim = np.setdiff1d(np.arange(self.input_dim),fixed_dims)
Xnew, xmin, xmax = x_frame1D(Xu[:,freedim], plot_limits=plot_limits)
Xgrid = np.empty((Xnew.shape[0],self.input_dim))
Xgrid[:,freedim] = Xnew
for i,v in fixed_inputs:
Xgrid[:,i] = v
f = self.predict(Xgrid, which_parts=which_parts)
        for d in range(f.shape[1]):
            ax.plot(Xnew, f[:, d], color=linecol)
elif self.X.shape[1] == 2:
resolution = resolution or 50
Xnew, _, _, xmin, xmax = x_frame2D(self.X, plot_limits, resolution)
x, y = np.linspace(xmin[0], xmax[0], resolution), np.linspace(xmin[1], xmax[1], resolution)
f = self.predict(Xnew, which_parts=which_parts)
            f = f.reshape(resolution, resolution).T
            ax.contour(x, y, f, levels, vmin=f.min(), vmax=f.max(), cmap=pb.cm.jet) # @UndefinedVariable
ax.set_xlim(xmin[0], xmax[0])
ax.set_ylim(xmin[1], xmax[1])
else:
raise NotImplementedError("Cannot define a frame with more than two input dimensions")
|
Event38/MissionPlanner
|
refs/heads/master
|
Lib/distutils/extension.py
|
59
|
"""distutils.extension
Provides the Extension class, used to describe C/C++ extension
modules in setup scripts."""
__revision__ = "$Id$"
import os, string, sys
from types import *
try:
import warnings
except ImportError:
warnings = None
# This class is really only used by the "build_ext" command, so it might
# make sense to put it in distutils.command.build_ext. However, that
# module is already big enough, and I want to make this class a bit more
# complex to simplify some common cases ("foo" module in "foo.c") and do
# better error-checking ("foo.c" actually exists).
#
# Also, putting this in build_ext.py means every setup script would have to
# import that large-ish module (indirectly, through distutils.core) in
# order to do anything.
class Extension:
"""Just a collection of attributes that describes an extension
module and everything needed to build it (hopefully in a portable
way, but there are hooks that let you be as unportable as you need).
Instance attributes:
name : string
the full name of the extension, including any packages -- ie.
*not* a filename or pathname, but Python dotted name
sources : [string]
list of source filenames, relative to the distribution root
(where the setup script lives), in Unix form (slash-separated)
for portability. Source files may be C, C++, SWIG (.i),
platform-specific resource files, or whatever else is recognized
by the "build_ext" command as source for a Python extension.
include_dirs : [string]
list of directories to search for C/C++ header files (in Unix
form for portability)
define_macros : [(name : string, value : string|None)]
list of macros to define; each macro is defined using a 2-tuple,
where 'value' is either the string to define it to or None to
define it without a particular value (equivalent of "#define
FOO" in source or -DFOO on Unix C compiler command line)
undef_macros : [string]
list of macros to undefine explicitly
library_dirs : [string]
list of directories to search for C/C++ libraries at link time
libraries : [string]
list of library names (not filenames or paths) to link against
runtime_library_dirs : [string]
list of directories to search for C/C++ libraries at run time
(for shared extensions, this is when the extension is loaded)
extra_objects : [string]
list of extra files to link with (eg. object files not implied
by 'sources', static library that must be explicitly specified,
binary resource files, etc.)
extra_compile_args : [string]
any extra platform- and compiler-specific information to use
when compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could
be anything.
extra_link_args : [string]
any extra platform- and compiler-specific information to use
when linking object files together to create the extension (or
to create a new static Python interpreter). Similar
interpretation as for 'extra_compile_args'.
export_symbols : [string]
list of symbols to be exported from a shared extension. Not
used on all platforms, and not generally necessary for Python
extensions, which typically export exactly one symbol: "init" +
extension_name.
swig_opts : [string]
any extra options to pass to SWIG if a source file has the .i
extension.
depends : [string]
list of files that the extension depends on
language : string
extension language (i.e. "c", "c++", "objc"). Will be detected
from the source extensions if not provided.
"""
# When adding arguments to this constructor, be sure to update
# setup_keywords in core.py.
def __init__ (self, name, sources,
include_dirs=None,
define_macros=None,
undef_macros=None,
library_dirs=None,
libraries=None,
runtime_library_dirs=None,
extra_objects=None,
extra_compile_args=None,
extra_link_args=None,
export_symbols=None,
swig_opts = None,
depends=None,
language=None,
**kw # To catch unknown keywords
):
assert type(name) is StringType, "'name' must be a string"
assert (type(sources) is ListType and
map(type, sources) == [StringType]*len(sources)), \
"'sources' must be a list of strings"
self.name = name
self.sources = sources
self.include_dirs = include_dirs or []
self.define_macros = define_macros or []
self.undef_macros = undef_macros or []
self.library_dirs = library_dirs or []
self.libraries = libraries or []
self.runtime_library_dirs = runtime_library_dirs or []
self.extra_objects = extra_objects or []
self.extra_compile_args = extra_compile_args or []
self.extra_link_args = extra_link_args or []
self.export_symbols = export_symbols or []
self.swig_opts = swig_opts or []
self.depends = depends or []
self.language = language
# If there are unknown keyword options, warn about them
if len(kw):
L = kw.keys() ; L.sort()
L = map(repr, L)
msg = "Unknown Extension options: " + string.join(L, ', ')
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + '\n')
# class Extension
def read_setup_file (filename):
from distutils.sysconfig import \
parse_makefile, expand_makefile_vars, _variable_rx
from distutils.text_file import TextFile
from distutils.util import split_quoted
# First pass over the file to gather "VAR = VALUE" assignments.
vars = parse_makefile(filename)
# Second pass to gobble up the real content: lines of the form
# <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
file = TextFile(filename,
strip_comments=1, skip_blanks=1, join_lines=1,
lstrip_ws=1, rstrip_ws=1)
try:
extensions = []
while 1:
line = file.readline()
if line is None: # eof
break
if _variable_rx.match(line): # VAR=VALUE, handled in first pass
continue
if line[0] == line[-1] == "*":
file.warn("'%s' lines not handled yet" % line)
continue
#print "original line: " + line
line = expand_makefile_vars(line, vars)
words = split_quoted(line)
#print "expanded line: " + line
# NB. this parses a slightly different syntax than the old
# makesetup script: here, there must be exactly one extension per
# line, and it must be the first word of the line. I have no idea
# why the old syntax supported multiple extensions per line, as
# they all wind up being the same.
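            # An illustrative (assumed) Setup line of the kind parsed below:
            #   spam spammodule.c -DDEBUG=1 -I/usr/local/include -lspam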
module = words[0]
ext = Extension(module, [])
append_next_word = None
for word in words[1:]:
if append_next_word is not None:
append_next_word.append(word)
append_next_word = None
continue
suffix = os.path.splitext(word)[1]
switch = word[0:2] ; value = word[2:]
if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
# hmm, should we do something about C vs. C++ sources?
# or leave it up to the CCompiler implementation to
# worry about?
ext.sources.append(word)
elif switch == "-I":
ext.include_dirs.append(value)
elif switch == "-D":
equals = string.find(value, "=")
if equals == -1: # bare "-DFOO" -- no value
ext.define_macros.append((value, None))
else: # "-DFOO=blah"
ext.define_macros.append((value[0:equals],
                                                  value[equals+1:]))
elif switch == "-U":
ext.undef_macros.append(value)
elif switch == "-C": # only here 'cause makesetup has it!
ext.extra_compile_args.append(word)
elif switch == "-l":
ext.libraries.append(value)
elif switch == "-L":
ext.library_dirs.append(value)
elif switch == "-R":
ext.runtime_library_dirs.append(value)
elif word == "-rpath":
append_next_word = ext.runtime_library_dirs
elif word == "-Xlinker":
append_next_word = ext.extra_link_args
elif word == "-Xcompiler":
append_next_word = ext.extra_compile_args
elif switch == "-u":
ext.extra_link_args.append(word)
if not value:
append_next_word = ext.extra_link_args
elif word == "-Xcompiler":
append_next_word = ext.extra_compile_args
elif switch == "-u":
ext.extra_link_args.append(word)
if not value:
append_next_word = ext.extra_link_args
elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
# NB. a really faithful emulation of makesetup would
# append a .o file to extra_objects only if it
# had a slash in it; otherwise, it would s/.o/.c/
# and append it to sources. Hmmmm.
ext.extra_objects.append(word)
else:
file.warn("unrecognized argument '%s'" % word)
extensions.append(ext)
finally:
file.close()
#print "module:", module
#print "source files:", source_files
#print "cpp args:", cpp_args
#print "lib args:", library_args
#extensions[module] = { 'sources': source_files,
# 'cpp_args': cpp_args,
# 'lib_args': library_args }
return extensions
# read_setup_file ()
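# --- Illustrative sketch (editor's addition, not part of distutils) ---
# A minimal example of how the Extension class above is typically used in a
# setup script; the module name, source file and macro below are hypothetical
# placeholders.
#
#   from distutils.core import setup
#   from distutils.extension import Extension
#
#   setup(name='demo',
#         ext_modules=[Extension('demo',
#                                sources=['demo.c'],
#                                include_dirs=['include'],
#                                define_macros=[('NDEBUG', '1')])])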
|
ywang007/odo
|
refs/heads/master
|
odo/backends/tests/test_sas.py
|
9
|
from __future__ import absolute_import, division, print_function
import pytest
sas7bdat = pytest.importorskip('sas7bdat')
pytest.importorskip('odo.backends.sas')
import os
import pandas as pd
from collections import Iterator
from sas7bdat import SAS7BDAT
from odo.backends.sas import discover, sas_to_iterator
from odo.utils import tmpfile, into_path
from odo import append, convert, resource, dshape
test_path = into_path('backends', 'tests', 'airline.sas7bdat')
sasfile = SAS7BDAT(test_path)
columns = ("DATE", "AIR", "mon1", "mon2", "mon3", "mon4", "mon5", "mon6",
"mon7", "mon8", "mon9", "mon10", "mon11", "mon12", "t", "Lair")
ds = dshape('''var * {DATE: date, AIR: float64, mon1: float64, mon2: float64,
mon3: float64, mon4: float64, mon5: float64,
mon6: float64, mon7: float64, mon8: float64,
mon9: float64, mon10: float64, mon11: float64,
mon12: float64, t: float64, Lair: float64}''')
def test_resource_sas7bdat():
assert isinstance(resource(test_path), SAS7BDAT)
def test_discover_sas():
assert discover(sasfile) == ds
def test_convert_sas_to_dataframe():
df = convert(pd.DataFrame, sasfile)
assert isinstance(df, pd.DataFrame)
# pandas doesn't support date
expected = str(ds.measure).replace('date', 'datetime')
assert str(discover(df).measure).replace('?', '') == expected
def test_convert_sas_to_list():
out = convert(list, sasfile)
assert isinstance(out, list)
assert not any(isinstance(item, str) for item in out[0]) # No header
assert all(isinstance(ln, list) for ln in out)
def test_convert_sas_to_iterator():
itr = sas_to_iterator(sasfile)
assert isinstance(itr, Iterator)
def test_append_sas_to_sqlite_round_trip():
expected = convert(set, sasfile)
with tmpfile('db') as fn:
r = resource('sqlite:///%s::SAS' % fn, dshape=discover(sasfile))
append(r, sasfile)
result = convert(set, r)
assert expected == result
|
halfwit/qutebrowser
|
refs/heads/master
|
scripts/dev/pylint_checkers/qute_pylint/modeline.py
|
6
|
# Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Checker for vim modelines in files."""
import os.path
import contextlib
from pylint import interfaces, checkers
class ModelineChecker(checkers.BaseChecker):
"""Check for vim modelines in files."""
__implements__ = interfaces.IRawChecker
name = 'modeline'
msgs = {'W9002': ('Does not have vim modeline', 'modeline-missing', None),
'W9003': ('Modeline is invalid', 'invalid-modeline', None),
'W9004': ('Modeline position is wrong', 'modeline-position', None)}
options = ()
priority = -1
def process_module(self, node):
"""Process the module."""
if os.path.basename(os.path.splitext(node.file)[0]) == '__init__':
return
max_lineno = 1
with contextlib.closing(node.stream()) as stream:
            for (lineno, line) in enumerate(stream, start=1):
if lineno == 1 and line.startswith(b'#!'):
max_lineno += 1
continue
elif line.startswith(b'# vim:'):
if lineno > max_lineno:
self.add_message('modeline-position', line=lineno)
if (line.rstrip() != b'# vim: ft=python '
b'fileencoding=utf-8 sts=4 sw=4 et:'):
self.add_message('invalid-modeline', line=lineno)
break
else:
self.add_message('modeline-missing', line=1)
def register(linter):
"""Register the checker."""
linter.register_checker(ModelineChecker(linter))
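# --- Illustrative note (editor's addition, not part of qutebrowser) ---
# A checker module like this is normally enabled by telling pylint to load it
# as a plugin (the target file name is hypothetical); pylint then calls
# register() above and runs process_module() on every checked file:
#
#   pylint --load-plugins=qute_pylint.modeline somefile.py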
|
NoUsername/PrivateNotesExperimental
|
refs/heads/master
|
lib/south/migration.py
|
4
|
import datetime
import os
import sys
import traceback
import inspect
from django.conf import settings
from django.db import models
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from models import MigrationHistory
from south.db import db
from south.orm import FakeORM
def get_app(app):
"""
Returns the migrations module for the given app model name/module, or None
if it does not use migrations.
"""
if isinstance(app, (str, unicode)):
# If it's a string, use the models module
app = models.get_app(app)
mod = __import__(app.__name__[:-7], {}, {}, ['migrations'])
if hasattr(mod, 'migrations'):
return getattr(mod, 'migrations')
def get_migrated_apps():
"""
Returns all apps with migrations.
"""
for mapp in models.get_apps():
app = get_app(mapp)
if app:
yield app
def get_app_name(app):
"""
Returns the _internal_ app name for the given app module.
i.e. for <module django.contrib.auth.models> will return 'auth'
"""
return app.__name__.split('.')[-2]
def get_app_fullname(app):
"""
Returns the full python name of an app - e.g. django.contrib.auth
"""
return app.__name__[:-11]
def short_from_long(app_name):
return app_name.split(".")[-1]
def get_migration_names(app):
"""
Returns a list of migration file names for the given app.
"""
return sorted([
filename[:-3]
for filename in os.listdir(os.path.dirname(app.__file__))
if filename.endswith(".py") and filename != "__init__.py" and not filename.startswith(".")
])
def get_migration_classes(app):
"""
Returns a list of migration classes (one for each migration) for the app.
"""
for name in get_migration_names(app):
yield get_migration(app, name)
def get_migration(app, name):
"""
Returns the migration class implied by 'name'.
"""
try:
module = __import__(app.__name__ + "." + name, '', '', ['Migration'])
migclass = module.Migration
migclass.orm = FakeORM(migclass, get_app_name(app))
module._ = lambda x: x # Fake i18n
return migclass
except ImportError:
print " ! Migration %s:%s probably doesn't exist." % (get_app_name(app), name)
print " - Traceback:"
raise
except Exception, e:
print "While loading migration '%s.%s':" % (get_app_name(app), name)
raise
def all_migrations():
return dict([
(app, dict([(name, get_migration(app, name)) for name in get_migration_names(app)]))
for app in get_migrated_apps()
])
def dependency_tree():
tree = all_migrations()
# Annotate tree with 'backwards edges'
for app, classes in tree.items():
for name, cls in classes.items():
cls.needs = []
if not hasattr(cls, "needed_by"):
cls.needed_by = []
if hasattr(cls, "depends_on"):
for dapp, dname in cls.depends_on:
dapp = get_app(dapp)
if dapp not in tree:
print "Migration %s in app %s depends on unmigrated app %s." % (
name,
get_app_name(app),
dapp,
)
sys.exit(1)
if dname not in tree[dapp]:
print "Migration %s in app %s depends on nonexistent migration %s in app %s." % (
name,
get_app_name(app),
dname,
get_app_name(dapp),
)
sys.exit(1)
cls.needs.append((dapp, dname))
if not hasattr(tree[dapp][dname], "needed_by"):
tree[dapp][dname].needed_by = []
tree[dapp][dname].needed_by.append((app, name))
# Sanity check whole tree
for app, classes in tree.items():
for name, cls in classes.items():
cls.dependencies = dependencies(tree, app, name)
return tree
def nice_trace(trace):
return " -> ".join([str((get_app_name(a), n)) for a, n in trace])
def dependencies(tree, app, name, trace=[]):
# Copy trace to stop pass-by-ref problems
trace = trace[:]
# Sanity check
for papp, pname in trace:
if app == papp:
if pname == name:
print "Found circular dependency: %s" % nice_trace(trace + [(app,name)])
sys.exit(1)
else:
# See if they depend in the same app the wrong way
migrations = get_migration_names(app)
if migrations.index(name) > migrations.index(pname):
print "Found a lower migration (%s) depending on a higher migration (%s) in the same app (%s)." % (pname, name, get_app_name(app))
print "Path: %s" % nice_trace(trace + [(app,name)])
sys.exit(1)
# Get the dependencies of a migration
deps = []
migration = tree[app][name]
for dapp, dname in migration.needs:
deps.extend(
dependencies(tree, dapp, dname, trace+[(app,name)])
)
return deps
def remove_duplicates(l):
m = []
for x in l:
if x not in m:
m.append(x)
return m
def needed_before_forwards(tree, app, name, sameapp=True):
"""
Returns a list of migrations that must be applied before (app, name),
in the order they should be applied.
Used to make sure a migration can be applied (and to help apply up to it).
"""
app_migrations = get_migration_names(app)
needed = []
if sameapp:
for aname in app_migrations[:app_migrations.index(name)]:
needed += needed_before_forwards(tree, app, aname, False)
needed += [(app, aname)]
for dapp, dname in tree[app][name].needs:
needed += needed_before_forwards(tree, dapp, dname)
needed += [(dapp, dname)]
return remove_duplicates(needed)
def needed_before_backwards(tree, app, name, sameapp=True):
"""
Returns a list of migrations that must be unapplied before (app, name) is,
in the order they should be unapplied.
Used to make sure a migration can be unapplied (and to help unapply up to it).
"""
app_migrations = get_migration_names(app)
needed = []
if sameapp:
for aname in reversed(app_migrations[app_migrations.index(name)+1:]):
needed += needed_before_backwards(tree, app, aname, False)
needed += [(app, aname)]
for dapp, dname in tree[app][name].needed_by:
needed += needed_before_backwards(tree, dapp, dname)
needed += [(dapp, dname)]
return remove_duplicates(needed)
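# --- Illustrative sketch (editor's addition, not part of South) ---
# Toy example of the ordering these helpers produce; the app and migration
# names are hypothetical.  If app_a's migration "0002" declares
# depends_on = [("app_b", "0001_initial")], then
# needed_before_forwards(tree, app_a, "0002") returns roughly
#
#   [(app_a, "0001_initial"), (app_b, "0001_initial")]
#
# i.e. every migration that must already be applied, in application order,
# before (app_a, "0002") itself can run.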
def run_migrations(toprint, torun, recorder, app, migrations, fake=False, db_dry_run=False, silent=False):
"""
Runs the specified migrations forwards/backwards, in order.
"""
for migration in migrations:
app_name = get_app_name(app)
if not silent:
print toprint % (app_name, migration)
klass = get_migration(app, migration)
if fake:
if not silent:
print " (faked)"
else:
runfunc = getattr(klass(), torun)
args = inspect.getargspec(runfunc)
# If the database doesn't support running DDL inside a transaction
# *cough*MySQL*cough* then do a dry run first.
if not db.has_ddl_transactions or db_dry_run:
if not (hasattr(klass, "no_dry_run") and klass.no_dry_run):
db.dry_run = True
db.debug, old_debug = False, db.debug
pending_creates = db.get_pending_creates()
try:
if len(args[0]) == 1: # They don't want an ORM param
runfunc()
else:
runfunc(klass.orm)
except:
traceback.print_exc()
print " ! Error found during dry run of migration! Aborting."
return False
db.debug = old_debug
db.clear_run_data(pending_creates)
db.dry_run = False
elif db_dry_run:
print " - Migration '%s' is marked for no-dry-run."
# If they really wanted to dry-run, then quit!
if db_dry_run:
return
if db.has_ddl_transactions:
db.start_transaction()
try:
if len(args[0]) == 1: # They don't want an ORM param
runfunc()
else:
runfunc(klass.orm)
db.execute_deferred_sql()
except:
if db.has_ddl_transactions:
db.rollback_transaction()
raise
else:
traceback.print_exc()
print " ! Error found during real run of migration! Aborting."
print
print " ! Since you have a database that does not support running"
print " ! schema-altering statements in transactions, we have had to"
print " ! leave it in an interim state between migrations."
if torun == "forwards":
print
print " ! You *might* be able to recover with:"
db.debug = db.dry_run = True
if len(args[0]) == 1:
klass().backwards()
else:
klass().backwards(klass.orm)
print
print " ! The South developers regret this has happened, and would"
print " ! like to gently persuade you to consider a slightly"
print " ! easier-to-deal-with DBMS."
return False
else:
if db.has_ddl_transactions:
db.commit_transaction()
if not db_dry_run:
# Record us as having done this
recorder(app_name, migration)
def run_forwards(app, migrations, fake=False, db_dry_run=False, silent=False):
"""
Runs the specified migrations forwards, in order.
"""
def record(app_name, migration):
# Record us as having done this
record = MigrationHistory.for_migration(app_name, migration)
record.applied = datetime.datetime.utcnow()
record.save()
return run_migrations(
toprint = " > %s: %s",
torun = "forwards",
recorder = record,
app = app,
migrations = migrations,
fake = fake,
db_dry_run = db_dry_run,
silent = silent,
)
def run_backwards(app, migrations, ignore=[], fake=False, db_dry_run=False, silent=False):
"""
Runs the specified migrations backwards, in order, skipping those
migrations in 'ignore'.
"""
def record(app_name, migration):
# Record us as having not done this
record = MigrationHistory.for_migration(app_name, migration)
record.delete()
return run_migrations(
toprint = " < %s: %s",
torun = "backwards",
recorder = record,
app = app,
migrations = [x for x in migrations if x not in ignore],
fake = fake,
db_dry_run = db_dry_run,
silent = silent,
)
def right_side_of(x, y):
return left_side_of(reversed(x), reversed(y))
def left_side_of(x, y):
return list(y)[:len(x)] == list(x)
def forwards_problems(tree, forwards, done, silent=False):
problems = []
for app, name in forwards:
if (app, name) not in done:
for dapp, dname in needed_before_backwards(tree, app, name):
if (dapp, dname) in done:
if not silent:
print " ! Migration (%s, %s) should not have been applied before (%s, %s) but was." % (get_app_name(dapp), dname, get_app_name(app), name)
problems.append(((app, name), (dapp, dname)))
return problems
def backwards_problems(tree, backwards, done, silent=False):
problems = []
for app, name in backwards:
if (app, name) in done:
for dapp, dname in needed_before_forwards(tree, app, name):
if (dapp, dname) not in done:
if not silent:
print " ! Migration (%s, %s) should have been applied before (%s, %s) but wasn't." % (get_app_name(dapp), dname, get_app_name(app), name)
problems.append(((app, name), (dapp, dname)))
return problems
def migrate_app(app, target_name=None, resolve_mode=None, fake=False, db_dry_run=False, yes=False, silent=False, load_inital_data=False, skip=False):
app_name = get_app_name(app)
db.debug = not silent
# If any of their app names in the DB contain a ., they're 0.2 or below, so migrate em
longuns = MigrationHistory.objects.filter(app_name__contains=".")
if longuns:
for mh in longuns:
mh.app_name = short_from_long(mh.app_name)
mh.save()
if not silent:
print "- Updated your South 0.2 database."
# Find out what delightful migrations we have
tree = dependency_tree()
migrations = get_migration_names(app)
    # If there aren't any, quit quizzically
if not migrations:
if not silent:
print "? You have no migrations for the '%s' app. You might want some." % app_name
return
if target_name not in migrations and target_name not in ["zero", None]:
matches = [x for x in migrations if x.startswith(target_name)]
if len(matches) == 1:
target = migrations.index(matches[0]) + 1
if not silent:
print " - Soft matched migration %s to %s." % (
target_name,
matches[0]
)
target_name = matches[0]
elif len(matches) > 1:
if not silent:
print " - Prefix %s matches more than one migration:" % target_name
print " " + "\n ".join(matches)
return
else:
if not silent:
print " ! '%s' is not a migration." % target_name
return
# Check there's no strange ones in the database
ghost_migrations = []
for m in MigrationHistory.objects.filter(applied__isnull = False):
try:
if get_app(m.app_name) not in tree or m.migration not in tree[get_app(m.app_name)]:
ghost_migrations.append(m)
except ImproperlyConfigured:
pass
if ghost_migrations:
if not silent:
print " ! These migrations are in the database but not on disk:"
print " - " + "\n - ".join(["%s: %s" % (x.app_name, x.migration) for x in ghost_migrations])
print " ! I'm not trusting myself; fix this yourself by fiddling"
print " ! with the south_migrationhistory table."
return
# Say what we're doing
if not silent:
print "Running migrations for %s:" % app_name
# Get the forwards and reverse dependencies for this target
if target_name == None:
target_name = migrations[-1]
if target_name == "zero":
forwards = []
backwards = needed_before_backwards(tree, app, migrations[0]) + [(app, migrations[0])]
else:
forwards = needed_before_forwards(tree, app, target_name) + [(app, target_name)]
# When migrating backwards we want to remove up to and including
# the next migration up in this app (not the next one, that includes other apps)
try:
migration_before_here = migrations[migrations.index(target_name)+1]
backwards = needed_before_backwards(tree, app, migration_before_here) + [(app, migration_before_here)]
except IndexError:
backwards = []
# Get the list of currently applied migrations from the db
current_migrations = []
for m in MigrationHistory.objects.filter(applied__isnull = False):
try:
current_migrations.append((get_app(m.app_name), m.migration))
except ImproperlyConfigured:
pass
direction = None
bad = False
# Work out the direction
applied_for_this_app = list(MigrationHistory.objects.filter(app_name=app_name, applied__isnull=False).order_by("migration"))
if target_name == "zero":
direction = -1
elif not applied_for_this_app:
direction = 1
elif migrations.index(target_name) > migrations.index(applied_for_this_app[-1].migration):
direction = 1
elif migrations.index(target_name) < migrations.index(applied_for_this_app[-1].migration):
direction = -1
else:
direction = None
# Is the whole forward branch applied?
missing = [step for step in forwards if step not in current_migrations]
# If they're all applied, we only know it's not backwards
if not missing:
direction = None
# If the remaining migrations are strictly a right segment of the forwards
# trace, we just need to go forwards to our target (and check for badness)
else:
problems = forwards_problems(tree, forwards, current_migrations, silent=silent)
if problems:
bad = True
direction = 1
# What about the whole backward trace then?
if not bad:
missing = [step for step in backwards if step not in current_migrations]
# If they're all missing, stick with the forwards decision
if missing == backwards:
pass
# If what's missing is a strict left segment of backwards (i.e.
# all the higher migrations) then we need to go backwards
else:
problems = backwards_problems(tree, backwards, current_migrations, silent=silent)
if problems:
bad = True
direction = -1
if bad and resolve_mode not in ['merge'] and not skip:
if not silent:
print " ! Inconsistent migration history"
print " ! The following options are available:"
print " --merge: will just attempt the migration ignoring any potential dependency conflicts."
sys.exit(1)
if direction == 1:
if not silent:
print " - Migrating forwards to %s." % target_name
try:
for mapp, mname in forwards:
if (mapp, mname) not in current_migrations:
result = run_forwards(mapp, [mname], fake=fake, db_dry_run=db_dry_run, silent=silent)
if result is False: # The migrations errored, but nicely.
return False
finally:
# Call any pending post_syncdb signals
db.send_pending_create_signals()
# Now load initial data, only if we're really doing things and ended up at current
if not fake and not db_dry_run and load_inital_data and target_name == migrations[-1]:
print " - Loading initial data for %s." % app_name
# Override Django's get_apps call temporarily to only load from the
# current app
old_get_apps, models.get_apps = (
models.get_apps,
lambda: [models.get_app(get_app_name(app))],
)
# Load the initial fixture
call_command('loaddata', 'initial_data', verbosity=1)
# Un-override
models.get_apps = old_get_apps
elif direction == -1:
if not silent:
print " - Migrating backwards to just after %s." % target_name
for mapp, mname in backwards:
if (mapp, mname) in current_migrations:
run_backwards(mapp, [mname], fake=fake, db_dry_run=db_dry_run, silent=silent)
else:
if not silent:
print "- Nothing to migrate."
|
syci/ingadhoc-odoo-addons
|
refs/heads/8.0
|
product_salesman_group/sale.py
|
2
|
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
class salesman_group(osv.osv):
_name = "sale.salesman.group"
_description = "Salesman Group"
_order = "name"
_constraints = [
(osv.osv._check_recursion,
'Error ! You cannot create recursive categories.', ['parent_id'])
]
def name_get(self, cr, uid, ids, context=None):
if not len(ids):
return []
reads = self.read(cr, uid, ids, ['name', 'parent_id'], context=context)
res = []
for record in reads:
name = record['name']
if record['parent_id']:
name = record['parent_id'][1] + ' / ' + name
res.append((record['id'], name))
return res
def _name_get_fnc(self, cr, uid, ids, prop, unknow_none, context=None):
res = self.name_get(cr, uid, ids, context=context)
return dict(res)
_columns = {
'name': fields.char('Name', required=True, translate=True),
'complete_name': fields.function(
_name_get_fnc, type="char", string='Name'),
'parent_id': fields.many2one(
'sale.salesman.group', 'Parent Group', select=True),
'child_id': fields.one2many(
'sale.salesman.group', 'parent_id', string='Children Groups'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mwiebe/numpy
|
refs/heads/master
|
doc/source/conf.py
|
63
|
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
import sys, os, re
# Check Sphinx version
import sphinx
if sphinx.__version__ < "1.0.1":
raise RuntimeError("Sphinx 1.0.1 or newer required")
needs_sphinx = '1.0'
# -----------------------------------------------------------------------------
# General configuration
# -----------------------------------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
sys.path.insert(0, os.path.abspath('../sphinxext'))
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.pngmath', 'numpydoc',
'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
'sphinx.ext.doctest', 'sphinx.ext.autosummary',
'matplotlib.sphinxext.plot_directive']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# General substitutions.
project = 'NumPy'
copyright = '2008-2009, The Scipy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
import numpy
# The short X.Y version (including .devXXXX, rcX, b1 suffixes if present)
version = re.sub(r'(\d+\.\d+)\.\d+(.*)', r'\1\2', numpy.__version__)
version = re.sub(r'(\.dev\d+).*?$', r'\1', version)
# The full version, including alpha/beta/rc tags.
release = numpy.__version__
print("%s %s" % (version, release))
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = "autolink"
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -----------------------------------------------------------------------------
# HTML output
# -----------------------------------------------------------------------------
themedir = os.path.join(os.pardir, 'scipy-sphinx-theme', '_theme')
if not os.path.isdir(themedir):
raise RuntimeError("Get the scipy-sphinx-theme first, "
"via git submodule init && git submodule update")
html_theme = 'scipy'
html_theme_path = [themedir]
if 'scipyorg' in tags:
# Build for the scipy.org website
html_theme_options = {
"edit_link": True,
"sidebar": "right",
"scipy_org_logo": True,
"rootlinks": [("http://scipy.org/", "Scipy.org"),
("http://docs.scipy.org/", "Docs")]
}
else:
# Default build
html_theme_options = {
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
"rootlinks": []
}
html_sidebars = {'index': 'indexsidebar.html'}
html_additional_pages = {
'index': 'indexcontent.html',
}
html_title = "%s v%s Manual" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
html_use_modindex = True
html_copy_source = False
html_domain_indices = False
html_file_suffix = '.html'
htmlhelp_basename = 'numpy'
pngmath_use_preview = True
pngmath_dvipng_args = ['-gamma', '1.5', '-D', '96', '-bg', 'Transparent']
# -----------------------------------------------------------------------------
# LaTeX output
# -----------------------------------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
_stdauthor = 'Written by the NumPy community'
latex_documents = [
('reference/index', 'numpy-ref.tex', 'NumPy Reference',
_stdauthor, 'manual'),
('user/index', 'numpy-user.tex', 'NumPy User Guide',
_stdauthor, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
\DeclareUnicodeCharacter{00A0}{\nobreakspace}
% In the parameters section, place a newline after the Parameters
% header
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% Make Examples/etc section headers smaller and more compact
\makeatletter
\titleformat{\paragraph}{\normalsize\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
\titlespacing*{\paragraph}{0pt}{1ex}{0pt}
\makeatother
% Fix footer/header
\renewcommand{\chaptermark}[1]{\markboth{\MakeUppercase{\thechapter.\ #1}}{}}
\renewcommand{\sectionmark}[1]{\markright{\MakeUppercase{\thesection.\ #1}}}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
# -----------------------------------------------------------------------------
# Texinfo output
# -----------------------------------------------------------------------------
texinfo_documents = [
("contents", 'numpy', 'Numpy Documentation', _stdauthor, 'Numpy',
"NumPy: array processing for numbers, strings, records, and objects.",
'Programming',
1),
]
# -----------------------------------------------------------------------------
# Intersphinx configuration
# -----------------------------------------------------------------------------
intersphinx_mapping = {'http://docs.python.org/dev': None}
# -----------------------------------------------------------------------------
# Numpy extensions
# -----------------------------------------------------------------------------
# If we want to do a phantom import from an XML file for all autodocs
phantom_import_file = 'dump.xml'
# Make numpydoc to generate plots for example sections
numpydoc_use_plots = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
import glob
autosummary_generate = glob.glob("reference/*.rst")
# -----------------------------------------------------------------------------
# Coverage checker
# -----------------------------------------------------------------------------
coverage_ignore_modules = r"""
""".split()
coverage_ignore_functions = r"""
test($|_) (some|all)true bitwise_not cumproduct pkgload
generic\.
""".split()
coverage_ignore_classes = r"""
""".split()
coverage_c_path = []
coverage_c_regexes = {}
coverage_ignore_c_items = {}
# -----------------------------------------------------------------------------
# Plots
# -----------------------------------------------------------------------------
plot_pre_code = """
import numpy as np
np.random.seed(0)
"""
plot_include_source = True
plot_formats = [('png', 100), 'pdf']
import math
phi = (math.sqrt(5) + 1)/2
plot_rcparams = {
'font.size': 8,
'axes.titlesize': 8,
'axes.labelsize': 8,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 8,
'figure.figsize': (3*phi, 3),
'figure.subplot.bottom': 0.2,
'figure.subplot.left': 0.2,
'figure.subplot.right': 0.9,
'figure.subplot.top': 0.85,
'figure.subplot.wspace': 0.4,
'text.usetex': False,
}
# -----------------------------------------------------------------------------
# Source code links
# -----------------------------------------------------------------------------
import inspect
from os.path import relpath, dirname
for name in ['sphinx.ext.linkcode', 'numpydoc.linkcode']:
try:
__import__(name)
extensions.append(name)
break
except ImportError:
pass
else:
print("NOTE: linkcode extension not found -- no links to source generated")
def linkcode_resolve(domain, info):
"""
Determine the URL corresponding to Python object
"""
if domain != 'py':
return None
modname = info['module']
fullname = info['fullname']
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split('.'):
try:
obj = getattr(obj, part)
except:
return None
try:
fn = inspect.getsourcefile(obj)
except:
fn = None
if not fn:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except:
lineno = None
if lineno:
linespec = "#L%d-L%d" % (lineno, lineno + len(source) - 1)
else:
linespec = ""
fn = relpath(fn, start=dirname(numpy.__file__))
if 'dev' in numpy.__version__:
return "http://github.com/numpy/numpy/blob/master/numpy/%s%s" % (
fn, linespec)
else:
return "http://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % (
numpy.__version__, fn, linespec)
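# --- Illustrative note (editor's addition) ---
# sphinx.ext.linkcode calls linkcode_resolve() for each documented object and
# attaches the returned URL as a "[source]" link, so a released build produces
# links of roughly this form (path and line numbers hypothetical):
#
#   http://github.com/numpy/numpy/blob/v1.9.0/numpy/core/numeric.py#L100-L120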
|
torchmed/torchmed
|
refs/heads/master
|
contact/models.py
|
10644
|
from django.db import models
# Create your models here.
|
eworm-de/pacman
|
refs/heads/master
|
test/pacman/tests/remove-recursive-cycle.py
|
5
|
self.description = "Recursively remove a package with cyclical dependencies"
lpkg1 = pmpkg('pkg1')
self.addpkg2db('local', lpkg1)
lpkg1.depends = [ 'dep1' ]
lpkg2 = pmpkg('pkg2')
self.addpkg2db('local', lpkg2)
lpkg2.depends = [ 'dep3' ]
# cyclic dependency 1
ldep1 = pmpkg('dep1')
self.addpkg2db('local', ldep1)
ldep1.depends = [ 'dep2', 'dep3', 'dep4' ]
ldep1.reason = 1
# cyclic dependency 2
ldep2 = pmpkg('dep2')
self.addpkg2db('local', ldep2)
ldep2.depends = [ 'dep1' ]
ldep2.reason = 1
# dependency required by another package
ldep3 = pmpkg('dep3')
self.addpkg2db('local', ldep3)
ldep3.reason = 1
# explicitly installed dependency
ldep4 = pmpkg('dep4')
self.addpkg2db('local', ldep4)
ldep4.reason = 0
self.args = "-Rs pkg1"
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_EXIST=pkg2")
self.addrule("PKG_EXIST=dep3")
self.addrule("PKG_EXIST=dep4")
self.addrule("!PKG_EXIST=pkg1")
self.addrule("!PKG_EXIST=dep1")
self.addrule("!PKG_EXIST=dep2")
|
bmander/dancecontraption
|
refs/heads/master
|
django/contrib/contenttypes/management.py
|
315
|
from django.contrib.contenttypes.models import ContentType
from django.db.models import get_apps, get_models, signals
from django.utils.encoding import smart_unicode
def update_contenttypes(app, created_models, verbosity=2, **kwargs):
"""
Creates content types for models in the given app, removing any model
entries that no longer have a matching model class.
"""
ContentType.objects.clear_cache()
content_types = list(ContentType.objects.filter(app_label=app.__name__.split('.')[-2]))
app_models = get_models(app)
if not app_models:
return
for klass in app_models:
opts = klass._meta
try:
ct = ContentType.objects.get(app_label=opts.app_label,
model=opts.object_name.lower())
content_types.remove(ct)
except ContentType.DoesNotExist:
ct = ContentType(name=smart_unicode(opts.verbose_name_raw),
app_label=opts.app_label, model=opts.object_name.lower())
ct.save()
if verbosity >= 2:
print "Adding content type '%s | %s'" % (ct.app_label, ct.model)
# The presence of any remaining content types means the supplied app has an
# undefined model. Confirm that the content type is stale before deletion.
if content_types:
if kwargs.get('interactive', False):
content_type_display = '\n'.join([' %s | %s' % (ct.app_label, ct.model) for ct in content_types])
ok_to_delete = raw_input("""The following content types are stale and need to be deleted:
%s
Any objects related to these content types by a foreign key will also
be deleted. Are you sure you want to delete these content types?
If you're unsure, answer 'no'.
Type 'yes' to continue, or 'no' to cancel: """ % content_type_display)
else:
ok_to_delete = False
if ok_to_delete == 'yes':
for ct in content_types:
if verbosity >= 2:
print "Deleting stale content type '%s | %s'" % (ct.app_label, ct.model)
ct.delete()
else:
if verbosity >= 2:
print "Stale content types remain."
def update_all_contenttypes(verbosity=2, **kwargs):
for app in get_apps():
update_contenttypes(app, None, verbosity, **kwargs)
signals.post_syncdb.connect(update_contenttypes)
if __name__ == "__main__":
update_all_contenttypes()
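# --- Illustrative note (editor's addition, not part of Django) ---
# Because update_contenttypes() is connected to post_syncdb above, running
# "manage.py syncdb" keeps django_content_type in step with the installed
# models.  A manual refresh for a single app would look roughly like this
# (the app label is hypothetical):
#
#   from django.db.models import get_app
#   update_contenttypes(get_app('auth'), None, verbosity=2)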
|
jlspyaozhongkai/Uter
|
refs/heads/master
|
third_party_backup/Python-2.7.9/Lib/abc.py
|
488
|
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
import types
from _weakrefset import WeakSet
# Instance of old-style class
class _C: pass
_InstanceType = type(_C())
def abstractmethod(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms.
Usage:
class C:
__metaclass__ = ABCMeta
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
class abstractproperty(property):
"""A decorator indicating abstract properties.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract properties are overridden.
The abstract properties can be called using any of the normal
'super' call mechanisms.
Usage:
class C:
__metaclass__ = ABCMeta
@abstractproperty
def my_abstract_property(self):
...
This defines a read-only property; you can also define a read-write
abstract property using the 'long' form of property declaration:
class C:
__metaclass__ = ABCMeta
def getx(self): ...
def setx(self, value): ...
x = abstractproperty(getx, setx)
"""
__isabstractmethod__ = True
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
# Compute set of abstract method names
abstracts = set(name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False))
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = WeakSet()
cls._abc_cache = WeakSet()
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
"""Register a virtual subclass of an ABC."""
if not isinstance(subclass, (type, types.ClassType)):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__)
print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print >> file, "%s: %r" % (name, value)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
# Inline the cache checking when it's simple.
subclass = getattr(instance, '__class__', None)
if subclass is not None and subclass in cls._abc_cache:
return True
subtype = type(instance)
# Old-style instances
if subtype is _InstanceType:
subtype = subclass
if subtype is subclass or subclass is None:
if (cls._abc_negative_cache_version ==
ABCMeta._abc_invalidation_counter and
subtype in cls._abc_negative_cache):
return False
# Fall back to the subclass check.
return cls.__subclasscheck__(subtype)
return (cls.__subclasscheck__(subclass) or
cls.__subclasscheck__(subtype))
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
# Check cache
if subclass in cls._abc_cache:
return True
# Check negative cache; may have to invalidate
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
# Invalidate the negative cache
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
# Check the subclass hook
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
assert isinstance(ok, bool)
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
# Check if it's a direct subclass
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a registered class (recursive)
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a subclass (recursive)
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
# No dice; update negative cache
cls._abc_negative_cache.add(subclass)
return False
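# --- Illustrative sketch (editor's addition, not part of the stdlib) ---
# Minimal example of the virtual-subclass machinery described in the ABCMeta
# docstring; the class names are hypothetical.
#
#   class MyABC(object):
#       __metaclass__ = ABCMeta
#
#   class Plain(object):
#       pass
#
#   MyABC.register(Plain)
#   assert issubclass(Plain, MyABC)     # satisfied via the registry
#   assert isinstance(Plain(), MyABC)   # likewise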
|
leafclick/intellij-community
|
refs/heads/master
|
python/testData/completion/className/orderingUnderscoreInPath/main.py
|
72
|
fo<caret>o
|
KDB2/veusz
|
refs/heads/master
|
veusz/setting/__init__.py
|
9
|
# Copyright (C) 2005 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
from .settingdb import *
from .reference import Reference, ReferenceMultiple
from .setting import *
from .settings import *
from .collections import *
from .stylesheet import *
|
vabs22/zulip
|
refs/heads/master
|
analytics/lib/counts.py
|
4
|
from django.conf import settings
from django.db import connection, models
from django.db.models import F
from analytics.models import InstallationCount, RealmCount, \
UserCount, StreamCount, BaseCount, FillState, Anomaly, installation_epoch, \
last_successful_fill
from zerver.models import Realm, UserProfile, Message, Stream, \
UserActivityInterval, RealmAuditLog, models
from zerver.lib.timestamp import floor_to_day, floor_to_hour, ceiling_to_day, \
ceiling_to_hour
from typing import Any, Callable, Dict, List, Optional, Text, Tuple, Type, Union
from collections import defaultdict, OrderedDict
from datetime import timedelta, datetime
import logging
import time
## Logging setup ##
log_format = '%(asctime)s %(levelname)-8s %(message)s'
logging.basicConfig(format=log_format)
formatter = logging.Formatter(log_format)
file_handler = logging.FileHandler(settings.ANALYTICS_LOG_PATH)
file_handler.setFormatter(formatter)
logger = logging.getLogger("zulip.management")
logger.setLevel(logging.INFO)
logger.addHandler(file_handler)
# You can't subtract timedelta.max from a datetime, so use this instead
TIMEDELTA_MAX = timedelta(days=365*1000)
## Class definitions ##
class CountStat(object):
HOUR = 'hour'
DAY = 'day'
FREQUENCIES = frozenset([HOUR, DAY])
def __init__(self, property, data_collector, frequency, interval=None):
# type: (str, DataCollector, str, Optional[timedelta]) -> None
self.property = property
self.data_collector = data_collector
# might have to do something different for bitfields
if frequency not in self.FREQUENCIES:
raise AssertionError("Unknown frequency: %s" % (frequency,))
self.frequency = frequency
if interval is not None:
self.interval = interval
elif frequency == CountStat.HOUR:
self.interval = timedelta(hours=1)
else: # frequency == CountStat.DAY
self.interval = timedelta(days=1)
def __unicode__(self):
# type: () -> Text
return u"<CountStat: %s>" % (self.property,)
class LoggingCountStat(CountStat):
def __init__(self, property, output_table, frequency):
# type: (str, Type[BaseCount], str) -> None
CountStat.__init__(self, property, DataCollector(output_table, None), frequency)
class DependentCountStat(CountStat):
def __init__(self, property, data_collector, frequency, interval=None, dependencies=[]):
# type: (str, DataCollector, str, Optional[timedelta], List[str]) -> None
CountStat.__init__(self, property, data_collector, frequency, interval=interval)
self.dependencies = dependencies
class DataCollector(object):
def __init__(self, output_table, pull_function):
# type: (Type[BaseCount], Optional[Callable[[str, datetime, datetime], int]]) -> None
self.output_table = output_table
self.pull_function = pull_function
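# --- Illustrative sketch (editor's addition, not part of Zulip) ---
# Rough shape of how these classes fit together; the property name and the
# realm/event_time variables are hypothetical.  An ordinary CountStat is
# filled by process_count_stat() (below) running its DataCollector's
# pull_function over each hour/day window, while a LoggingCountStat is
# incremented directly from application code:
#
#   stat = LoggingCountStat('invites_sent::day', RealmCount, CountStat.DAY)
#   do_increment_logging_stat(realm, stat, None, event_time)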
## CountStat-level operations ##
def process_count_stat(stat, fill_to_time):
# type: (CountStat, datetime) -> None
if stat.frequency == CountStat.HOUR:
time_increment = timedelta(hours=1)
elif stat.frequency == CountStat.DAY:
time_increment = timedelta(days=1)
else:
raise AssertionError("Unknown frequency: %s" % (stat.frequency,))
if floor_to_hour(fill_to_time) != fill_to_time:
raise ValueError("fill_to_time must be on an hour boundary: %s" % (fill_to_time,))
if fill_to_time.tzinfo is None:
raise ValueError("fill_to_time must be timezone aware: %s" % (fill_to_time,))
fill_state = FillState.objects.filter(property=stat.property).first()
if fill_state is None:
currently_filled = installation_epoch()
fill_state = FillState.objects.create(property=stat.property,
end_time=currently_filled,
state=FillState.DONE)
logger.info("INITIALIZED %s %s" % (stat.property, currently_filled))
elif fill_state.state == FillState.STARTED:
logger.info("UNDO START %s %s" % (stat.property, fill_state.end_time))
do_delete_counts_at_hour(stat, fill_state.end_time)
currently_filled = fill_state.end_time - time_increment
do_update_fill_state(fill_state, currently_filled, FillState.DONE)
logger.info("UNDO DONE %s" % (stat.property,))
elif fill_state.state == FillState.DONE:
currently_filled = fill_state.end_time
else:
raise AssertionError("Unknown value for FillState.state: %s." % (fill_state.state,))
if isinstance(stat, DependentCountStat):
for dependency in stat.dependencies:
dependency_fill_time = last_successful_fill(dependency)
if dependency_fill_time is None:
logger.warning("DependentCountStat %s run before dependency %s." %
(stat.property, dependency))
return
fill_to_time = min(fill_to_time, dependency_fill_time)
currently_filled = currently_filled + time_increment
while currently_filled <= fill_to_time:
logger.info("START %s %s" % (stat.property, currently_filled))
start = time.time()
do_update_fill_state(fill_state, currently_filled, FillState.STARTED)
do_fill_count_stat_at_hour(stat, currently_filled)
do_update_fill_state(fill_state, currently_filled, FillState.DONE)
end = time.time()
currently_filled = currently_filled + time_increment
logger.info("DONE %s (%dms)" % (stat.property, (end-start)*1000))
def do_update_fill_state(fill_state, end_time, state):
# type: (FillState, datetime, int) -> None
fill_state.end_time = end_time
fill_state.state = state
fill_state.save()
# We assume end_time is valid (e.g. is on a day or hour boundary as appropriate)
# and is timezone aware. It is the caller's responsibility to enforce this!
def do_fill_count_stat_at_hour(stat, end_time):
# type: (CountStat, datetime) -> None
start_time = end_time - stat.interval
if not isinstance(stat, LoggingCountStat):
timer = time.time()
assert(stat.data_collector.pull_function is not None)
rows_added = stat.data_collector.pull_function(stat.property, start_time, end_time)
logger.info("%s run pull_function (%dms/%sr)" %
(stat.property, (time.time()-timer)*1000, rows_added))
do_aggregate_to_summary_table(stat, end_time)
def do_delete_counts_at_hour(stat, end_time):
# type: (CountStat, datetime) -> None
if isinstance(stat, LoggingCountStat):
InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete()
if stat.data_collector.output_table in [UserCount, StreamCount]:
RealmCount.objects.filter(property=stat.property, end_time=end_time).delete()
else:
UserCount.objects.filter(property=stat.property, end_time=end_time).delete()
StreamCount.objects.filter(property=stat.property, end_time=end_time).delete()
RealmCount.objects.filter(property=stat.property, end_time=end_time).delete()
InstallationCount.objects.filter(property=stat.property, end_time=end_time).delete()
def do_aggregate_to_summary_table(stat, end_time):
# type: (CountStat, datetime) -> None
cursor = connection.cursor()
# Aggregate into RealmCount
output_table = stat.data_collector.output_table
if output_table in (UserCount, StreamCount):
realmcount_query = """
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
zerver_realm.id, COALESCE(sum(%(output_table)s.value), 0), '%(property)s',
%(output_table)s.subgroup, %%(end_time)s
FROM zerver_realm
JOIN %(output_table)s
ON
zerver_realm.id = %(output_table)s.realm_id
WHERE
%(output_table)s.property = '%(property)s' AND
%(output_table)s.end_time = %%(end_time)s
GROUP BY zerver_realm.id, %(output_table)s.subgroup
""" % {'output_table': output_table._meta.db_table,
'property': stat.property}
start = time.time()
cursor.execute(realmcount_query, {'end_time': end_time})
end = time.time()
logger.info("%s RealmCount aggregation (%dms/%sr)" % (stat.property, (end-start)*1000, cursor.rowcount))
# Aggregate into InstallationCount
installationcount_query = """
INSERT INTO analytics_installationcount
(value, property, subgroup, end_time)
SELECT
sum(value), '%(property)s', analytics_realmcount.subgroup, %%(end_time)s
FROM analytics_realmcount
WHERE
property = '%(property)s' AND
end_time = %%(end_time)s
GROUP BY analytics_realmcount.subgroup
""" % {'property': stat.property}
start = time.time()
cursor.execute(installationcount_query, {'end_time': end_time})
end = time.time()
logger.info("%s InstallationCount aggregation (%dms/%sr)" % (stat.property, (end-start)*1000, cursor.rowcount))
cursor.close()
## Utility functions called from outside counts.py ##
# called from zerver/lib/actions.py; should not throw any errors
def do_increment_logging_stat(zerver_object, stat, subgroup, event_time, increment=1):
# type: (Union[Realm, UserProfile, Stream], CountStat, Optional[Union[str, int, bool]], datetime, int) -> None
table = stat.data_collector.output_table
if table == RealmCount:
id_args = {'realm': zerver_object}
elif table == UserCount:
id_args = {'realm': zerver_object.realm, 'user': zerver_object}
else: # StreamCount
id_args = {'realm': zerver_object.realm, 'stream': zerver_object}
if stat.frequency == CountStat.DAY:
end_time = ceiling_to_day(event_time)
else: # CountStat.HOUR:
end_time = ceiling_to_hour(event_time)
row, created = table.objects.get_or_create(
property=stat.property, subgroup=subgroup, end_time=end_time,
defaults={'value': increment}, **id_args)
if not created:
row.value = F('value') + increment
row.save(update_fields=['value'])
def do_drop_all_analytics_tables():
# type: () -> None
UserCount.objects.all().delete()
StreamCount.objects.all().delete()
RealmCount.objects.all().delete()
InstallationCount.objects.all().delete()
FillState.objects.all().delete()
Anomaly.objects.all().delete()
## DataCollector-level operations ##
def do_pull_by_sql_query(property, start_time, end_time, query, group_by):
# type: (str, datetime, datetime, str, Optional[Tuple[models.Model, str]]) -> int
if group_by is None:
subgroup = 'NULL'
group_by_clause = ''
else:
subgroup = '%s.%s' % (group_by[0]._meta.db_table, group_by[1])
group_by_clause = ', ' + subgroup
# We do string replacement here because cursor.execute will reject a
# group_by_clause given as a param.
# We pass in the datetimes as params to cursor.execute so that we don't have to
# think about how to convert python datetimes to SQL datetimes.
query_ = query % {'property': property, 'subgroup': subgroup,
'group_by_clause': group_by_clause}
cursor = connection.cursor()
cursor.execute(query_, {'time_start': start_time, 'time_end': end_time})
rowcount = cursor.rowcount
cursor.close()
return rowcount
def sql_data_collector(output_table, query, group_by):
# type: (Type[BaseCount], str, Optional[Tuple[models.Model, str]]) -> DataCollector
def pull_function(property, start_time, end_time):
# type: (str, datetime, datetime) -> int
return do_pull_by_sql_query(property, start_time, end_time, query, group_by)
return DataCollector(output_table, pull_function)
def do_pull_minutes_active(property, start_time, end_time):
# type: (str, datetime, datetime) -> int
user_activity_intervals = UserActivityInterval.objects.filter(
end__gt=start_time, start__lt=end_time
).select_related(
'user_profile'
).values_list(
'user_profile_id', 'user_profile__realm_id', 'start', 'end')
seconds_active = defaultdict(float) # type: Dict[Tuple[int, int], float]
for user_id, realm_id, interval_start, interval_end in user_activity_intervals:
start = max(start_time, interval_start)
end = min(end_time, interval_end)
seconds_active[(user_id, realm_id)] += (end - start).total_seconds()
rows = [UserCount(user_id=ids[0], realm_id=ids[1], property=property,
end_time=end_time, value=int(seconds // 60))
for ids, seconds in seconds_active.items() if seconds >= 60]
UserCount.objects.bulk_create(rows)
return len(rows)
count_message_by_user_query = """
INSERT INTO analytics_usercount
(user_id, realm_id, value, property, subgroup, end_time)
SELECT
zerver_userprofile.id, zerver_userprofile.realm_id, count(*), '%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_userprofile
JOIN zerver_message
ON
zerver_userprofile.id = zerver_message.sender_id
WHERE
zerver_userprofile.date_joined < %%(time_end)s AND
zerver_message.pub_date >= %%(time_start)s AND
zerver_message.pub_date < %%(time_end)s
GROUP BY zerver_userprofile.id %(group_by_clause)s
"""
# Note: ignores the group_by / group_by_clause.
count_message_type_by_user_query = """
INSERT INTO analytics_usercount
(realm_id, user_id, value, property, subgroup, end_time)
SELECT realm_id, id, SUM(count) AS value, '%(property)s', message_type, %%(time_end)s
FROM
(
SELECT zerver_userprofile.realm_id, zerver_userprofile.id, count(*),
CASE WHEN
zerver_recipient.type = 1 THEN 'private_message'
WHEN
zerver_recipient.type = 3 THEN 'huddle_message'
WHEN
zerver_stream.invite_only = TRUE THEN 'private_stream'
ELSE 'public_stream'
END
message_type
FROM zerver_userprofile
JOIN zerver_message
ON
zerver_userprofile.id = zerver_message.sender_id AND
zerver_message.pub_date >= %%(time_start)s AND
zerver_message.pub_date < %%(time_end)s
JOIN zerver_recipient
ON
zerver_message.recipient_id = zerver_recipient.id
LEFT JOIN zerver_stream
ON
zerver_recipient.type_id = zerver_stream.id
GROUP BY zerver_userprofile.realm_id, zerver_userprofile.id, zerver_recipient.type, zerver_stream.invite_only
) AS subquery
GROUP BY realm_id, id, message_type
"""
# This query joins to the UserProfile table since all current queries that
# use this also subgroup on UserProfile.is_bot. If in the future there is a
# stat that counts messages by stream and doesn't need the UserProfile
# table, consider writing a new query for efficiency.
count_message_by_stream_query = """
INSERT INTO analytics_streamcount
(stream_id, realm_id, value, property, subgroup, end_time)
SELECT
zerver_stream.id, zerver_stream.realm_id, count(*), '%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_stream
JOIN zerver_recipient
ON
zerver_stream.id = zerver_recipient.type_id
JOIN zerver_message
ON
zerver_recipient.id = zerver_message.recipient_id
JOIN zerver_userprofile
ON
zerver_message.sender_id = zerver_userprofile.id
WHERE
zerver_stream.date_created < %%(time_end)s AND
zerver_recipient.type = 2 AND
zerver_message.pub_date >= %%(time_start)s AND
zerver_message.pub_date < %%(time_end)s
GROUP BY zerver_stream.id %(group_by_clause)s
"""
# Hardcodes the query needed by active_users:is_bot:day, since that is
# currently the only stat that uses this.
count_user_by_realm_query = """
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
zerver_realm.id, count(*),'%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_realm
JOIN zerver_userprofile
ON
zerver_realm.id = zerver_userprofile.realm_id
WHERE
zerver_realm.date_created < %%(time_end)s AND
zerver_userprofile.date_joined >= %%(time_start)s AND
zerver_userprofile.date_joined < %%(time_end)s AND
zerver_userprofile.is_active = TRUE
GROUP BY zerver_realm.id %(group_by_clause)s
"""
# Currently hardcodes the query needed for active_users_audit:is_bot:day.
# Assumes that a user cannot have two RealmAuditLog entries with the same event_time and
# event_type in ['user_created', 'user_deactivated', etc].
# In particular, it's important to ensure that migrations don't cause that to happen.
check_realmauditlog_by_user_query = """
INSERT INTO analytics_usercount
(user_id, realm_id, value, property, subgroup, end_time)
SELECT
ral1.modified_user_id, ral1.realm_id, 1, '%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_realmauditlog ral1
JOIN (
SELECT modified_user_id, max(event_time) AS max_event_time
FROM zerver_realmauditlog
WHERE
event_type in ('user_created', 'user_deactivated', 'user_activated', 'user_reactivated') AND
event_time < %%(time_end)s
GROUP BY modified_user_id
) ral2
ON
ral1.event_time = max_event_time AND
ral1.modified_user_id = ral2.modified_user_id
JOIN zerver_userprofile
ON
ral1.modified_user_id = zerver_userprofile.id
WHERE
ral1.event_type in ('user_created', 'user_activated', 'user_reactivated')
"""
check_useractivityinterval_by_user_query = """
INSERT INTO analytics_usercount
(user_id, realm_id, value, property, subgroup, end_time)
SELECT
zerver_userprofile.id, zerver_userprofile.realm_id, 1, '%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_userprofile
JOIN zerver_useractivityinterval
ON
zerver_userprofile.id = zerver_useractivityinterval.user_profile_id
WHERE
zerver_useractivityinterval.end >= %%(time_start)s AND
zerver_useractivityinterval.start < %%(time_end)s
GROUP BY zerver_userprofile.id %(group_by_clause)s
"""
count_realm_active_humans_query = """
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
usercount1.realm_id, count(*), '%(property)s', NULL, %%(time_end)s
FROM (
SELECT realm_id, user_id
FROM analytics_usercount
WHERE
property = 'active_users_audit:is_bot:day' AND
subgroup = 'false' AND
end_time = %%(time_end)s
) usercount1
JOIN (
SELECT realm_id, user_id
FROM analytics_usercount
WHERE
property = '15day_actives::day' AND
end_time = %%(time_end)s
) usercount2
ON
usercount1.user_id = usercount2.user_id
GROUP BY usercount1.realm_id
"""
# Currently unused and untested
count_stream_by_realm_query = """
INSERT INTO analytics_realmcount
(realm_id, value, property, subgroup, end_time)
SELECT
zerver_realm.id, count(*), '%(property)s', %(subgroup)s, %%(time_end)s
FROM zerver_realm
JOIN zerver_stream
ON
        zerver_realm.id = zerver_stream.realm_id
WHERE
zerver_realm.date_created < %%(time_end)s AND
zerver_stream.date_created >= %%(time_start)s AND
zerver_stream.date_created < %%(time_end)s
GROUP BY zerver_realm.id %(group_by_clause)s
"""
## CountStat declarations ##
count_stats_ = [
# Messages Sent stats
# Stats that count the number of messages sent in various ways.
# These are also the set of stats that read from the Message table.
CountStat('messages_sent:is_bot:hour',
sql_data_collector(UserCount, count_message_by_user_query, (UserProfile, 'is_bot')),
CountStat.HOUR),
CountStat('messages_sent:message_type:day',
sql_data_collector(UserCount, count_message_type_by_user_query, None), CountStat.DAY),
CountStat('messages_sent:client:day',
sql_data_collector(UserCount, count_message_by_user_query, (Message, 'sending_client_id')),
CountStat.DAY),
CountStat('messages_in_stream:is_bot:day',
sql_data_collector(StreamCount, count_message_by_stream_query, (UserProfile, 'is_bot')),
CountStat.DAY),
# Number of Users stats
# Stats that count the number of active users in the UserProfile.is_active sense.
# 'active_users_audit:is_bot:day' is the canonical record of which users were
# active on which days (in the UserProfile.is_active sense).
# Important that this stay a daily stat, so that 'realm_active_humans::day' works as expected.
CountStat('active_users_audit:is_bot:day',
sql_data_collector(UserCount, check_realmauditlog_by_user_query, (UserProfile, 'is_bot')),
CountStat.DAY),
    # Sanity check on 'active_users_audit:is_bot:day', and an archetype for future LoggingCountStats.
# In RealmCount, 'active_users_audit:is_bot:day' should be the partial
# sum sequence of 'active_users_log:is_bot:day', for any realm that
# started after the latter stat was introduced.
LoggingCountStat('active_users_log:is_bot:day', RealmCount, CountStat.DAY),
    # Another sanity check on 'active_users_audit:is_bot:day'. It is only an
    # approximation; e.g. if a user is deactivated between the end of the
    # day and when this stat is run, they won't be counted. However, it is
    # the simplest of the three to inspect by hand.
CountStat('active_users:is_bot:day',
sql_data_collector(RealmCount, count_user_by_realm_query, (UserProfile, 'is_bot')),
CountStat.DAY, interval=TIMEDELTA_MAX),
# User Activity stats
# Stats that measure user activity in the UserActivityInterval sense.
CountStat('15day_actives::day',
sql_data_collector(UserCount, check_useractivityinterval_by_user_query, None),
CountStat.DAY, interval=timedelta(days=15)-UserActivityInterval.MIN_INTERVAL_LENGTH),
CountStat('minutes_active::day', DataCollector(UserCount, do_pull_minutes_active), CountStat.DAY),
# Dependent stats
# Must come after their dependencies.
# Canonical account of the number of active humans in a realm on each day.
DependentCountStat('realm_active_humans::day',
sql_data_collector(RealmCount, count_realm_active_humans_query, None),
CountStat.DAY,
dependencies=['active_users_audit:is_bot:day', '15day_actives::day'])
]
COUNT_STATS = OrderedDict([(stat.property, stat) for stat in count_stats_])
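# Editor's note: hedged usage sketch, not part of the original module.
# COUNT_STATS is keyed by each stat's property string, so a stat can be
# looked up by name and the key round-trips through its .property attribute.
_example_stat = COUNT_STATS['messages_sent:is_bot:hour']
assert _example_stat.property == 'messages_sent:is_bot:hour'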
|
XiaosongWei/chromium-crosswalk
|
refs/heads/master
|
third_party/protobuf/python/google/protobuf/internal/wire_format.py
|
561
|
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Constants and static functions to support protocol buffer wire format."""
__author__ = 'robinson@google.com (Will Robinson)'
import struct
from google.protobuf import descriptor
from google.protobuf import message
TAG_TYPE_BITS = 3 # Number of bits used to hold type info in a proto tag.
TAG_TYPE_MASK = (1 << TAG_TYPE_BITS) - 1 # 0x7
# These numbers identify the wire type of a protocol buffer value.
# We use the least-significant TAG_TYPE_BITS bits of the varint-encoded
# tag-and-type to store one of these WIRETYPE_* constants.
# These values must match WireType enum in google/protobuf/wire_format.h.
WIRETYPE_VARINT = 0
WIRETYPE_FIXED64 = 1
WIRETYPE_LENGTH_DELIMITED = 2
WIRETYPE_START_GROUP = 3
WIRETYPE_END_GROUP = 4
WIRETYPE_FIXED32 = 5
_WIRETYPE_MAX = 5
# Bounds for various integer types.
INT32_MAX = int((1 << 31) - 1)
INT32_MIN = int(-(1 << 31))
UINT32_MAX = (1 << 32) - 1
INT64_MAX = (1 << 63) - 1
INT64_MIN = -(1 << 63)
UINT64_MAX = (1 << 64) - 1
# "struct" format strings that will encode/decode the specified formats.
FORMAT_UINT32_LITTLE_ENDIAN = '<I'
FORMAT_UINT64_LITTLE_ENDIAN = '<Q'
FORMAT_FLOAT_LITTLE_ENDIAN = '<f'
FORMAT_DOUBLE_LITTLE_ENDIAN = '<d'
# We'll have to provide alternate implementations of AppendLittleEndian*() on
# any architectures where these checks fail.
if struct.calcsize(FORMAT_UINT32_LITTLE_ENDIAN) != 4:
raise AssertionError('Format "I" is not a 32-bit number.')
if struct.calcsize(FORMAT_UINT64_LITTLE_ENDIAN) != 8:
raise AssertionError('Format "Q" is not a 64-bit number.')
def PackTag(field_number, wire_type):
"""Returns an unsigned 32-bit integer that encodes the field number and
wire type information in standard protocol message wire format.
Args:
field_number: Expected to be an integer in the range [1, 1 << 29)
wire_type: One of the WIRETYPE_* constants.
"""
if not 0 <= wire_type <= _WIRETYPE_MAX:
raise message.EncodeError('Unknown wire type: %d' % wire_type)
return (field_number << TAG_TYPE_BITS) | wire_type
def UnpackTag(tag):
"""The inverse of PackTag(). Given an unsigned 32-bit number,
returns a (field_number, wire_type) tuple.
"""
return (tag >> TAG_TYPE_BITS), (tag & TAG_TYPE_MASK)
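# Editor's note: illustrative sketch, not part of the original module. A tag
# is just (field_number << TAG_TYPE_BITS) | wire_type, so field number 1 with
# WIRETYPE_VARINT packs to the single byte value 0x08 and unpacks back to the
# same (field_number, wire_type) pair.
assert PackTag(1, WIRETYPE_VARINT) == 0x08
assert UnpackTag(PackTag(1, WIRETYPE_VARINT)) == (1, WIRETYPE_VARINT)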
def ZigZagEncode(value):
"""ZigZag Transform: Encodes signed integers so that they can be
effectively used with varint encoding. See wire_format.h for
more details.
"""
if value >= 0:
return value << 1
return (value << 1) ^ (~0)
def ZigZagDecode(value):
"""Inverse of ZigZagEncode()."""
if not value & 0x1:
return value >> 1
return (value >> 1) ^ (~0)
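# Editor's note: illustrative sketch, not part of the original module. The
# ZigZag mapping interleaves signed values so that small magnitudes encode to
# small unsigned varints: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, and
# ZigZagDecode inverts the mapping exactly.
assert [ZigZagEncode(v) for v in (0, -1, 1, -2, 2)] == [0, 1, 2, 3, 4]
assert ZigZagDecode(ZigZagEncode(-64)) == -64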
# The *ByteSize() functions below return the number of bytes required to
# serialize "field number + type" information and then serialize the value.
def Int32ByteSize(field_number, int32):
return Int64ByteSize(field_number, int32)
def Int32ByteSizeNoTag(int32):
return _VarUInt64ByteSizeNoTag(0xffffffffffffffff & int32)
def Int64ByteSize(field_number, int64):
# Have to convert to uint before calling UInt64ByteSize().
return UInt64ByteSize(field_number, 0xffffffffffffffff & int64)
def UInt32ByteSize(field_number, uint32):
return UInt64ByteSize(field_number, uint32)
def UInt64ByteSize(field_number, uint64):
return TagByteSize(field_number) + _VarUInt64ByteSizeNoTag(uint64)
def SInt32ByteSize(field_number, int32):
return UInt32ByteSize(field_number, ZigZagEncode(int32))
def SInt64ByteSize(field_number, int64):
return UInt64ByteSize(field_number, ZigZagEncode(int64))
def Fixed32ByteSize(field_number, fixed32):
return TagByteSize(field_number) + 4
def Fixed64ByteSize(field_number, fixed64):
return TagByteSize(field_number) + 8
def SFixed32ByteSize(field_number, sfixed32):
return TagByteSize(field_number) + 4
def SFixed64ByteSize(field_number, sfixed64):
return TagByteSize(field_number) + 8
def FloatByteSize(field_number, flt):
return TagByteSize(field_number) + 4
def DoubleByteSize(field_number, double):
return TagByteSize(field_number) + 8
def BoolByteSize(field_number, b):
return TagByteSize(field_number) + 1
def EnumByteSize(field_number, enum):
return UInt32ByteSize(field_number, enum)
def StringByteSize(field_number, string):
return BytesByteSize(field_number, string.encode('utf-8'))
def BytesByteSize(field_number, b):
return (TagByteSize(field_number)
+ _VarUInt64ByteSizeNoTag(len(b))
+ len(b))
def GroupByteSize(field_number, message):
return (2 * TagByteSize(field_number) # START and END group.
+ message.ByteSize())
def MessageByteSize(field_number, message):
return (TagByteSize(field_number)
+ _VarUInt64ByteSizeNoTag(message.ByteSize())
+ message.ByteSize())
def MessageSetItemByteSize(field_number, msg):
# First compute the sizes of the tags.
# There are 2 tags for the beginning and ending of the repeated group, that
# is field number 1, one with field number 2 (type_id) and one with field
# number 3 (message).
total_size = (2 * TagByteSize(1) + TagByteSize(2) + TagByteSize(3))
# Add the number of bytes for type_id.
total_size += _VarUInt64ByteSizeNoTag(field_number)
message_size = msg.ByteSize()
# The number of bytes for encoding the length of the message.
total_size += _VarUInt64ByteSizeNoTag(message_size)
# The size of the message.
total_size += message_size
return total_size
def TagByteSize(field_number):
"""Returns the bytes required to serialize a tag with this field number."""
# Just pass in type 0, since the type won't affect the tag+type size.
return _VarUInt64ByteSizeNoTag(PackTag(field_number, 0))
# Private helper function for the *ByteSize() functions above.
def _VarUInt64ByteSizeNoTag(uint64):
"""Returns the number of bytes required to serialize a single varint
using boundary value comparisons. (unrolled loop optimization -WPierce)
uint64 must be unsigned.
"""
if uint64 <= 0x7f: return 1
if uint64 <= 0x3fff: return 2
if uint64 <= 0x1fffff: return 3
if uint64 <= 0xfffffff: return 4
if uint64 <= 0x7ffffffff: return 5
if uint64 <= 0x3ffffffffff: return 6
if uint64 <= 0x1ffffffffffff: return 7
if uint64 <= 0xffffffffffffff: return 8
if uint64 <= 0x7fffffffffffffff: return 9
if uint64 > UINT64_MAX:
raise message.EncodeError('Value out of range: %d' % uint64)
return 10
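# Editor's note: illustrative sketch, not part of the original module. Each
# varint byte carries 7 payload bits, so values up to 0x7f fit in one byte,
# values up to 0x3fff fit in two, and the full unsigned 64-bit range needs at
# most ten bytes.
assert _VarUInt64ByteSizeNoTag(0x7f) == 1
assert _VarUInt64ByteSizeNoTag(0x80) == 2
assert _VarUInt64ByteSizeNoTag(UINT64_MAX) == 10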
NON_PACKABLE_TYPES = (
descriptor.FieldDescriptor.TYPE_STRING,
descriptor.FieldDescriptor.TYPE_GROUP,
descriptor.FieldDescriptor.TYPE_MESSAGE,
descriptor.FieldDescriptor.TYPE_BYTES
)
def IsTypePackable(field_type):
"""Return true iff packable = true is valid for fields of this type.
Args:
field_type: a FieldDescriptor::Type value.
Returns:
True iff fields of this type are packable.
"""
return field_type not in NON_PACKABLE_TYPES
|
ajoaoff/django
|
refs/heads/master
|
tests/template_tests/filter_tests/test_addslashes.py
|
473
|
from django.template.defaultfilters import addslashes
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class AddslashesTests(SimpleTestCase):
@setup({'addslashes01': '{% autoescape off %}{{ a|addslashes }} {{ b|addslashes }}{% endautoescape %}'})
def test_addslashes01(self):
output = self.engine.render_to_string('addslashes01', {"a": "<a>'", "b": mark_safe("<a>'")})
self.assertEqual(output, r"<a>\' <a>\'")
@setup({'addslashes02': '{{ a|addslashes }} {{ b|addslashes }}'})
def test_addslashes02(self):
output = self.engine.render_to_string('addslashes02', {"a": "<a>'", "b": mark_safe("<a>'")})
self.assertEqual(output, r"<a>\' <a>\'")
class FunctionTests(SimpleTestCase):
def test_quotes(self):
self.assertEqual(
addslashes('"double quotes" and \'single quotes\''),
'\\"double quotes\\" and \\\'single quotes\\\'',
)
def test_backslashes(self):
self.assertEqual(addslashes(r'\ : backslashes, too'), '\\\\ : backslashes, too')
def test_non_string_input(self):
self.assertEqual(addslashes(123), '123')
|
amy12xx/keras
|
refs/heads/master
|
tests/manual/check_yaml.py
|
74
|
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from keras.utils.test_utils import get_test_data
from keras.preprocessing import sequence
from keras.optimizers import SGD, RMSprop, Adagrad
from keras.utils import np_utils
from keras.models import Sequential, Graph
from keras.layers.core import Dense, Dropout, Activation, Merge
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM, GRU
from keras.datasets import imdb
from keras.models import model_from_yaml
'''
This is essentially the IMDB test. Deserialized models should yield
the same config as the original one.
'''
max_features = 10000
maxlen = 100
batch_size = 32
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features, test_split=0.2)
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
model = Sequential()
model.add(Embedding(max_features, 128))
model.add(LSTM(128, 128))
model.add(Dropout(0.5))
model.add(Dense(128, 1, W_regularizer='identity', b_constraint='maxnorm'))
model.add(Activation('sigmoid'))
model.get_config(verbose=1)
#####################################
# save model w/o parameters to yaml #
#####################################
yaml_no_params = model.to_yaml()
no_param_model = model_from_yaml(yaml_no_params)
no_param_model.get_config(verbose=1)
######################################
# save multi-branch sequential model #
######################################
seq = Sequential()
seq.add(Merge([model, model], mode='sum'))
seq.get_config(verbose=1)
merge_yaml = seq.to_yaml()
merge_model = model_from_yaml(merge_yaml)
large_model = Sequential()
large_model.add(Merge([seq,model], mode='concat'))
large_model.get_config(verbose=1)
large_model.to_yaml()
####################
# save graph model #
####################
X = np.random.random((100, 32))
X2 = np.random.random((100, 32))
y = np.random.random((100, 4))
y2 = np.random.random((100,))
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(32,),
classification=False, output_shape=(4,))
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input1')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
graph.compile('rmsprop', {'output1': 'mse'})
graph.get_config(verbose=1)
history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10)
original_pred = graph.predict({'input1': X_test})
graph_yaml = graph.to_yaml()
graph.save_weights('temp.h5', overwrite=True)
reloaded_graph = model_from_yaml(graph_yaml)
reloaded_graph.load_weights('temp.h5')
reloaded_graph.get_config(verbose=1)
reloaded_graph.compile('rmsprop', {'output1': 'mse'})
new_pred = reloaded_graph.predict({'input1': X_test})
assert(np.sum(new_pred['output1'] - original_pred['output1']) == 0)
|
openshift/openshift-tools
|
refs/heads/prod
|
openshift/installer/vendored/openshift-ansible-3.9.14-1/roles/openshift_health_checker/test/fluentd_test.py
|
55
|
import pytest
import json
from openshift_checks.logging.fluentd import Fluentd, OpenShiftCheckExceptionList, OpenShiftCheckException
def assert_error_in_list(expect_err, errorlist):
assert any(err.name == expect_err for err in errorlist), "{} in {}".format(str(expect_err), str(errorlist))
fluentd_pod_node1 = {
"metadata": {
"labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
"name": "logging-fluentd-1",
},
"spec": {"host": "node1", "nodeName": "node1"},
"status": {
"containerStatuses": [{"ready": True}],
"conditions": [{"status": "True", "type": "Ready"}],
}
}
fluentd_pod_node2_down = {
"metadata": {
"labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
"name": "logging-fluentd-2",
},
"spec": {"host": "node2", "nodeName": "node2"},
"status": {
"containerStatuses": [{"ready": False}],
"conditions": [{"status": "False", "type": "Ready"}],
}
}
fluentd_node1 = {
"metadata": {
"labels": {"logging-infra-fluentd": "true", "kubernetes.io/hostname": "node1"},
"name": "node1",
},
"status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.1"}]},
}
fluentd_node2 = {
"metadata": {
"labels": {"logging-infra-fluentd": "true", "kubernetes.io/hostname": "hostname"},
"name": "node2",
},
"status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.2"}]},
}
fluentd_node3_unlabeled = {
"metadata": {
"labels": {"kubernetes.io/hostname": "hostname"},
"name": "node3",
},
"status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.3"}]},
}
def test_get_fluentd_pods():
check = Fluentd()
check.exec_oc = lambda *_: json.dumps(dict(items=[fluentd_node1]))
check.get_pods_for_component = lambda *_: [fluentd_pod_node1]
assert not check.run()
@pytest.mark.parametrize('pods, nodes, expect_error', [
(
[],
[],
'NoNodesDefined',
),
(
[],
[fluentd_node3_unlabeled],
'NoNodesLabeled',
),
(
[],
[fluentd_node1, fluentd_node3_unlabeled],
'NodesUnlabeled',
),
(
[],
[fluentd_node2],
'MissingFluentdPod',
),
(
[fluentd_pod_node1, fluentd_pod_node1],
[fluentd_node1],
'TooManyFluentdPods',
),
(
[fluentd_pod_node2_down],
[fluentd_node2],
'FluentdNotRunning',
),
])
def test_get_fluentd_pods_errors(pods, nodes, expect_error):
check = Fluentd()
check.exec_oc = lambda *_: json.dumps(dict(items=nodes))
with pytest.raises(OpenShiftCheckException) as excinfo:
check.check_fluentd(pods)
if isinstance(excinfo.value, OpenShiftCheckExceptionList):
assert_error_in_list(expect_error, excinfo.value)
else:
assert expect_error == excinfo.value.name
def test_bad_oc_node_list():
check = Fluentd()
check.exec_oc = lambda *_: "this isn't even json"
with pytest.raises(OpenShiftCheckException) as excinfo:
check.get_nodes_by_name()
assert 'BadOcNodeList' == excinfo.value.name
|
hujiajie/pa-chromium
|
refs/heads/master
|
third_party/mesa/MesaLib/src/mapi/glapi/gen/glX_proto_send.py
|
32
|
#!/usr/bin/env python
# (C) Copyright IBM Corporation 2004, 2005
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
# Jeremy Kolb <jkolb@brandeis.edu>
import gl_XML, glX_XML, glX_proto_common, license
import sys, getopt, copy, string
def convertStringForXCB(str):
tmp = ""
special = [ "ARB" ]
i = 0
while i < len(str):
if str[i:i+3] in special:
tmp = '%s_%s' % (tmp, string.lower(str[i:i+3]))
i = i + 2;
elif str[i].isupper():
tmp = '%s_%s' % (tmp, string.lower(str[i]))
else:
tmp = '%s%s' % (tmp, str[i])
i += 1
return tmp
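# Editor's note: illustrative sketch, not part of the original generator.
# convertStringForXCB lowercases each capital behind an underscore and treats
# "ARB" as a single token; printSingleFunction later prefixes the result with
# "xcb_glx" to form the XCB request name (e.g. "xcb_glx_read_pixels").
assert convertStringForXCB("ReadPixels") == "_read_pixels"
assert convertStringForXCB("AreTexturesResidentARB") == "_are_textures_resident_arb"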
def hash_pixel_function(func):
"""Generate a 'unique' key for a pixel function. The key is based on
the parameters written in the command packet. This includes any
padding that might be added for the original function and the 'NULL
image' flag."""
h = ""
hash_pre = ""
hash_suf = ""
for param in func.parameterIterateGlxSend():
if param.is_image():
[dim, junk, junk, junk, junk] = param.get_dimensions()
d = (dim + 1) & ~1
hash_pre = "%uD%uD_" % (d - 1, d)
if param.img_null_flag:
hash_suf = "_NF"
h += "%u" % (param.size())
if func.pad_after(param):
h += "4"
n = func.name.replace("%uD" % (dim), "")
n = "__glx_%s_%uD%uD" % (n, d - 1, d)
h = hash_pre + h + hash_suf
return [h, n]
class glx_pixel_function_stub(glX_XML.glx_function):
"""Dummy class used to generate pixel "utility" functions that are
shared by multiple dimension image functions. For example, these
objects are used to generate shared functions used to send GLX
protocol for TexImage1D and TexImage2D, TexSubImage1D and
TexSubImage2D, etc."""
def __init__(self, func, name):
# The parameters to the utility function are the same as the
# parameters to the real function except for the added "pad"
# parameters.
self.name = name
self.images = []
self.parameters = []
self.parameters_by_name = {}
for _p in func.parameterIterator():
p = copy.copy(_p)
self.parameters.append(p)
self.parameters_by_name[ p.name ] = p
if p.is_image():
self.images.append(p)
p.height = "height"
if p.img_yoff == None:
p.img_yoff = "yoffset"
if p.depth:
if p.extent == None:
p.extent = "extent"
if p.img_woff == None:
p.img_woff = "woffset"
pad_name = func.pad_after(p)
if pad_name:
pad = copy.copy(p)
pad.name = pad_name
self.parameters.append(pad)
self.parameters_by_name[ pad.name ] = pad
self.return_type = func.return_type
self.glx_rop = ~0
self.glx_sop = 0
self.glx_vendorpriv = 0
self.glx_doubles_in_order = func.glx_doubles_in_order
self.vectorequiv = None
self.output = None
self.can_be_large = func.can_be_large
self.reply_always_array = func.reply_always_array
self.dimensions_in_reply = func.dimensions_in_reply
self.img_reset = None
self.server_handcode = 0
self.client_handcode = 0
self.ignore = 0
self.count_parameter_list = func.count_parameter_list
self.counter_list = func.counter_list
self.offsets_calculated = 0
return
class PrintGlxProtoStubs(glX_proto_common.glx_print_proto):
def __init__(self):
glX_proto_common.glx_print_proto.__init__(self)
self.name = "glX_proto_send.py (from Mesa)"
self.license = license.bsd_license_template % ( "(C) Copyright IBM Corporation 2004, 2005", "IBM")
self.last_category = ""
self.generic_sizes = [3, 4, 6, 8, 12, 16, 24, 32]
self.pixel_stubs = {}
self.debug = 0
return
def printRealHeader(self):
print ''
print '#include <GL/gl.h>'
print '#include "indirect.h"'
print '#include "glxclient.h"'
print '#include "indirect_size.h"'
print '#include "glapidispatch.h"'
print '#include "glapi.h"'
print '#include "glthread.h"'
print '#include <GL/glxproto.h>'
print '#ifdef USE_XCB'
print '#include <X11/Xlib-xcb.h>'
print '#include <xcb/xcb.h>'
print '#include <xcb/glx.h>'
print '#endif /* USE_XCB */'
print ''
print '#define __GLX_PAD(n) (((n) + 3) & ~3)'
print ''
self.printFastcall()
self.printNoinline()
print ''
print '#ifndef __GNUC__'
print '# define __builtin_expect(x, y) x'
print '#endif'
print ''
print '/* If the size and opcode values are known at compile-time, this will, on'
print ' * x86 at least, emit them with a single instruction.'
print ' */'
print '#define emit_header(dest, op, size) \\'
print ' do { union { short s[2]; int i; } temp; \\'
print ' temp.s[0] = (size); temp.s[1] = (op); \\'
print ' *((int *)(dest)) = temp.i; } while(0)'
print ''
print """NOINLINE CARD32
__glXReadReply( Display *dpy, size_t size, void * dest, GLboolean reply_is_always_array )
{
xGLXSingleReply reply;
(void) _XReply(dpy, (xReply *) & reply, 0, False);
if (size != 0) {
if ((reply.length > 0) || reply_is_always_array) {
const GLint bytes = (reply_is_always_array)
? (4 * reply.length) : (reply.size * size);
const GLint extra = 4 - (bytes & 3);
_XRead(dpy, dest, bytes);
if ( extra < 4 ) {
_XEatData(dpy, extra);
}
}
else {
(void) memcpy( dest, &(reply.pad3), size);
}
}
return reply.retval;
}
NOINLINE void
__glXReadPixelReply( Display *dpy, struct glx_context * gc, unsigned max_dim,
GLint width, GLint height, GLint depth, GLenum format, GLenum type,
void * dest, GLboolean dimensions_in_reply )
{
xGLXSingleReply reply;
GLint size;
(void) _XReply(dpy, (xReply *) & reply, 0, False);
if ( dimensions_in_reply ) {
width = reply.pad3;
height = reply.pad4;
depth = reply.pad5;
if ((height == 0) || (max_dim < 2)) { height = 1; }
if ((depth == 0) || (max_dim < 3)) { depth = 1; }
}
size = reply.length * 4;
if (size != 0) {
void * buf = Xmalloc( size );
if ( buf == NULL ) {
_XEatData(dpy, size);
__glXSetError(gc, GL_OUT_OF_MEMORY);
}
else {
const GLint extra = 4 - (size & 3);
_XRead(dpy, buf, size);
if ( extra < 4 ) {
_XEatData(dpy, extra);
}
__glEmptyImage(gc, 3, width, height, depth, format, type,
buf, dest);
Xfree(buf);
}
}
}
#define X_GLXSingle 0
NOINLINE FASTCALL GLubyte *
__glXSetupSingleRequest( struct glx_context * gc, GLint sop, GLint cmdlen )
{
xGLXSingleReq * req;
Display * const dpy = gc->currentDpy;
(void) __glXFlushRenderBuffer(gc, gc->pc);
LockDisplay(dpy);
GetReqExtra(GLXSingle, cmdlen, req);
req->reqType = gc->majorOpcode;
req->contextTag = gc->currentContextTag;
req->glxCode = sop;
return (GLubyte *)(req) + sz_xGLXSingleReq;
}
NOINLINE FASTCALL GLubyte *
__glXSetupVendorRequest( struct glx_context * gc, GLint code, GLint vop, GLint cmdlen )
{
xGLXVendorPrivateReq * req;
Display * const dpy = gc->currentDpy;
(void) __glXFlushRenderBuffer(gc, gc->pc);
LockDisplay(dpy);
GetReqExtra(GLXVendorPrivate, cmdlen, req);
req->reqType = gc->majorOpcode;
req->glxCode = code;
req->vendorCode = vop;
req->contextTag = gc->currentContextTag;
return (GLubyte *)(req) + sz_xGLXVendorPrivateReq;
}
const GLuint __glXDefaultPixelStore[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 1 };
#define zero (__glXDefaultPixelStore+0)
#define one (__glXDefaultPixelStore+8)
#define default_pixel_store_1D (__glXDefaultPixelStore+4)
#define default_pixel_store_1D_size 20
#define default_pixel_store_2D (__glXDefaultPixelStore+4)
#define default_pixel_store_2D_size 20
#define default_pixel_store_3D (__glXDefaultPixelStore+0)
#define default_pixel_store_3D_size 36
#define default_pixel_store_4D (__glXDefaultPixelStore+0)
#define default_pixel_store_4D_size 36
"""
for size in self.generic_sizes:
self.print_generic_function(size)
return
def printBody(self, api):
self.pixel_stubs = {}
generated_stubs = []
for func in api.functionIterateGlx():
if func.client_handcode: continue
# If the function is a pixel function with a certain
# GLX protocol signature, create a fake stub function
# for it. For example, create a single stub function
# that is used to implement both glTexImage1D and
# glTexImage2D.
if func.glx_rop != 0:
do_it = 0
for image in func.get_images():
if image.img_pad_dimensions:
do_it = 1
break
if do_it:
[h, n] = hash_pixel_function(func)
self.pixel_stubs[ func.name ] = n
if h not in generated_stubs:
generated_stubs.append(h)
fake_func = glx_pixel_function_stub( func, n )
self.printFunction(fake_func, fake_func.name)
self.printFunction(func, func.name)
if func.glx_sop and func.glx_vendorpriv:
self.printFunction(func, func.glx_vendorpriv_names[0])
return
def printFunction(self, func, name):
footer = '}\n'
if func.glx_rop == ~0:
print 'static %s' % (func.return_type)
print '%s( unsigned opcode, unsigned dim, %s )' % (func.name, func.get_parameter_string())
print '{'
else:
if func.has_different_protocol(name):
if func.return_type == "void":
ret_string = ''
else:
ret_string = "return "
func_name = func.static_glx_name(name)
print '#define %s %d' % (func.opcode_vendor_name(name), func.glx_vendorpriv)
print '%s gl%s(%s)' % (func.return_type, func_name, func.get_parameter_string())
print '{'
print ' struct glx_context * const gc = __glXGetCurrentContext();'
print ''
print '#if defined(GLX_DIRECT_RENDERING) && !defined(GLX_USE_APPLEGL)'
print ' if (gc->isDirect) {'
print ' %sCALL_%s(GET_DISPATCH(), (%s));' % (ret_string, func.name, func.get_called_parameter_string())
print ' } else'
print '#endif'
print ' {'
footer = '}\n}\n'
else:
print '#define %s %d' % (func.opcode_name(), func.opcode_value())
print '%s __indirect_gl%s(%s)' % (func.return_type, name, func.get_parameter_string())
print '{'
if func.glx_rop != 0 or func.vectorequiv != None:
if len(func.images):
self.printPixelFunction(func)
else:
self.printRenderFunction(func)
elif func.glx_sop != 0 or func.glx_vendorpriv != 0:
self.printSingleFunction(func, name)
pass
else:
print "/* Missing GLX protocol for %s. */" % (name)
print footer
return
def print_generic_function(self, n):
size = (n + 3) & ~3
print """static FASTCALL NOINLINE void
generic_%u_byte( GLint rop, const void * ptr )
{
struct glx_context * const gc = __glXGetCurrentContext();
const GLuint cmdlen = %u;
emit_header(gc->pc, rop, cmdlen);
(void) memcpy((void *)(gc->pc + 4), ptr, %u);
gc->pc += cmdlen;
if (__builtin_expect(gc->pc > gc->limit, 0)) { (void) __glXFlushRenderBuffer(gc, gc->pc); }
}
""" % (n, size + 4, size)
return
def common_emit_one_arg(self, p, pc, adjust, extra_offset):
if p.is_array():
src_ptr = p.name
else:
src_ptr = "&" + p.name
if p.is_padding:
print '(void) memset((void *)(%s + %u), 0, %s);' \
% (pc, p.offset + adjust, p.size_string() )
elif not extra_offset:
print '(void) memcpy((void *)(%s + %u), (void *)(%s), %s);' \
% (pc, p.offset + adjust, src_ptr, p.size_string() )
else:
print '(void) memcpy((void *)(%s + %u + %s), (void *)(%s), %s);' \
% (pc, p.offset + adjust, extra_offset, src_ptr, p.size_string() )
def common_emit_args(self, f, pc, adjust, skip_vla):
extra_offset = None
for p in f.parameterIterateGlxSend( not skip_vla ):
if p.name != f.img_reset:
self.common_emit_one_arg(p, pc, adjust, extra_offset)
if p.is_variable_length():
temp = p.size_string()
if extra_offset:
extra_offset += " + %s" % (temp)
else:
extra_offset = temp
return
def pixel_emit_args(self, f, pc, large):
"""Emit the arguments for a pixel function. This differs from
common_emit_args in that pixel functions may require padding
be inserted (i.e., for the missing width field for
TexImage1D), and they may also require a 'NULL image' flag
be inserted before the image data."""
if large:
adjust = 8
else:
adjust = 4
for param in f.parameterIterateGlxSend():
if not param.is_image():
self.common_emit_one_arg(param, pc, adjust, None)
if f.pad_after(param):
print '(void) memcpy((void *)(%s + %u), zero, 4);' % (pc, (param.offset + param.size()) + adjust)
else:
[dim, width, height, depth, extent] = param.get_dimensions()
if f.glx_rop == ~0:
dim_str = "dim"
else:
dim_str = str(dim)
if param.is_padding:
print '(void) memset((void *)(%s + %u), 0, %s);' \
% (pc, (param.offset - 4) + adjust, param.size_string() )
if param.img_null_flag:
if large:
print '(void) memcpy((void *)(%s + %u), zero, 4);' % (pc, (param.offset - 4) + adjust)
else:
print '(void) memcpy((void *)(%s + %u), (void *)((%s == NULL) ? one : zero), 4);' % (pc, (param.offset - 4) + adjust, param.name)
pixHeaderPtr = "%s + %u" % (pc, adjust)
pcPtr = "%s + %u" % (pc, param.offset + adjust)
if not large:
if param.img_send_null:
condition = '(compsize > 0) && (%s != NULL)' % (param.name)
else:
condition = 'compsize > 0'
print 'if (%s) {' % (condition)
print ' (*gc->fillImage)(gc, %s, %s, %s, %s, %s, %s, %s, %s, %s);' % (dim_str, width, height, depth, param.img_format, param.img_type, param.name, pcPtr, pixHeaderPtr)
print '} else {'
print ' (void) memcpy( %s, default_pixel_store_%uD, default_pixel_store_%uD_size );' % (pixHeaderPtr, dim, dim)
print '}'
else:
print '__glXSendLargeImage(gc, compsize, %s, %s, %s, %s, %s, %s, %s, %s, %s);' % (dim_str, width, height, depth, param.img_format, param.img_type, param.name, pcPtr, pixHeaderPtr)
return
def large_emit_begin(self, f, op_name = None):
if not op_name:
op_name = f.opcode_real_name()
print 'const GLint op = %s;' % (op_name)
print 'const GLuint cmdlenLarge = cmdlen + 4;'
print 'GLubyte * const pc = __glXFlushRenderBuffer(gc, gc->pc);'
print '(void) memcpy((void *)(pc + 0), (void *)(&cmdlenLarge), 4);'
print '(void) memcpy((void *)(pc + 4), (void *)(&op), 4);'
return
def common_func_print_just_start(self, f, name):
print ' struct glx_context * const gc = __glXGetCurrentContext();'
# The only reason that single and vendor private commands need
# a variable called 'dpy' is becuase they use the SyncHandle
# macro. For whatever brain-dead reason, that macro is hard-
# coded to use a variable called 'dpy' instead of taking a
# parameter.
# FIXME Simplify the logic related to skip_condition and
# FIXME condition_list in this function. Basically, remove
# FIXME skip_condition, and just append the "dpy != NULL" type
# FIXME condition to condition_list from the start. The only
# FIXME reason it's done in this confusing way now is to
# FIXME minimize the diffs in the generated code.
if not f.glx_rop:
for p in f.parameterIterateOutputs():
if p.is_image() and (p.img_format != "GL_COLOR_INDEX" or p.img_type != "GL_BITMAP"):
print ' const __GLXattribute * const state = gc->client_state_private;'
break
print ' Display * const dpy = gc->currentDpy;'
skip_condition = "dpy != NULL"
elif f.can_be_large:
skip_condition = "gc->currentDpy != NULL"
else:
skip_condition = None
if f.return_type != 'void':
print ' %s retval = (%s) 0;' % (f.return_type, f.return_type)
if name != None and name not in f.glx_vendorpriv_names:
print '#ifndef USE_XCB'
self.emit_packet_size_calculation(f, 0)
if name != None and name not in f.glx_vendorpriv_names:
print '#endif'
condition_list = []
for p in f.parameterIterateCounters():
condition_list.append( "%s >= 0" % (p.name) )
# 'counter' parameters cannot be negative
print " if (%s < 0) {" % p.name
print " __glXSetError(gc, GL_INVALID_VALUE);"
if f.return_type != 'void':
print " return 0;"
else:
print " return;"
print " }"
if skip_condition:
condition_list.append( skip_condition )
if len( condition_list ) > 0:
if len( condition_list ) > 1:
skip_condition = "(%s)" % (string.join( condition_list, ") && (" ))
else:
skip_condition = "%s" % (condition_list.pop(0))
print ' if (__builtin_expect(%s, 1)) {' % (skip_condition)
return 1
else:
return 0
def printSingleFunction(self, f, name):
self.common_func_print_just_start(f, name)
if self.debug:
print ' printf( "Enter %%s...\\n", "gl%s" );' % (f.name)
if name not in f.glx_vendorpriv_names:
# XCB specific:
print '#ifdef USE_XCB'
if self.debug:
print ' printf("\\tUsing XCB.\\n");'
print ' xcb_connection_t *c = XGetXCBConnection(dpy);'
print ' (void) __glXFlushRenderBuffer(gc, gc->pc);'
xcb_name = 'xcb_glx%s' % convertStringForXCB(name)
iparams=[]
extra_iparams = []
output = None
for p in f.parameterIterator():
if p.is_output:
output = p
if p.is_image():
if p.img_format != "GL_COLOR_INDEX" or p.img_type != "GL_BITMAP":
extra_iparams.append("state->storePack.swapEndian")
else:
extra_iparams.append("0")
# Hardcode this in. lsb_first param (apparently always GL_FALSE)
# also present in GetPolygonStipple, but taken care of above.
if xcb_name == "xcb_glx_read_pixels":
extra_iparams.append("0")
else:
iparams.append(p.name)
xcb_request = '%s(%s)' % (xcb_name, ", ".join(["c", "gc->currentContextTag"] + iparams + extra_iparams))
if f.needs_reply():
print ' %s_reply_t *reply = %s_reply(c, %s, NULL);' % (xcb_name, xcb_name, xcb_request)
if output and f.reply_always_array:
print ' (void)memcpy(%s, %s_data(reply), %s_data_length(reply) * sizeof(%s));' % (output.name, xcb_name, xcb_name, output.get_base_type_string())
elif output and not f.reply_always_array:
if not output.is_image():
print ' if (%s_data_length(reply) == 0)' % (xcb_name)
print ' (void)memcpy(%s, &reply->datum, sizeof(reply->datum));' % (output.name)
print ' else'
print ' (void)memcpy(%s, %s_data(reply), %s_data_length(reply) * sizeof(%s));' % (output.name, xcb_name, xcb_name, output.get_base_type_string())
if f.return_type != 'void':
print ' retval = reply->ret_val;'
print ' free(reply);'
else:
print ' ' + xcb_request + ';'
print '#else'
# End of XCB specific.
if f.parameters != []:
pc_decl = "GLubyte const * pc ="
else:
pc_decl = "(void)"
if name in f.glx_vendorpriv_names:
print ' %s __glXSetupVendorRequest(gc, %s, %s, cmdlen);' % (pc_decl, f.opcode_real_name(), f.opcode_vendor_name(name))
else:
print ' %s __glXSetupSingleRequest(gc, %s, cmdlen);' % (pc_decl, f.opcode_name())
self.common_emit_args(f, "pc", 0, 0)
images = f.get_images()
for img in images:
if img.is_output:
o = f.command_fixed_length() - 4
print ' *(int32_t *)(pc + %u) = 0;' % (o)
if img.img_format != "GL_COLOR_INDEX" or img.img_type != "GL_BITMAP":
print ' * (int8_t *)(pc + %u) = state->storePack.swapEndian;' % (o)
if f.img_reset:
print ' * (int8_t *)(pc + %u) = %s;' % (o + 1, f.img_reset)
return_name = ''
if f.needs_reply():
if f.return_type != 'void':
return_name = " retval"
return_str = " retval = (%s)" % (f.return_type)
else:
return_str = " (void)"
got_reply = 0
for p in f.parameterIterateOutputs():
if p.is_image():
[dim, w, h, d, junk] = p.get_dimensions()
if f.dimensions_in_reply:
print " __glXReadPixelReply(dpy, gc, %u, 0, 0, 0, %s, %s, %s, GL_TRUE);" % (dim, p.img_format, p.img_type, p.name)
else:
print " __glXReadPixelReply(dpy, gc, %u, %s, %s, %s, %s, %s, %s, GL_FALSE);" % (dim, w, h, d, p.img_format, p.img_type, p.name)
got_reply = 1
else:
if f.reply_always_array:
aa = "GL_TRUE"
else:
aa = "GL_FALSE"
# gl_parameter.size() returns the size
# of the entire data item. If the
# item is a fixed-size array, this is
# the size of the whole array. This
# is not what __glXReadReply wants. It
# wants the size of a single data
# element in the reply packet.
# Dividing by the array size (1 for
# non-arrays) gives us this.
s = p.size() / p.get_element_count()
print " %s __glXReadReply(dpy, %s, %s, %s);" % (return_str, s, p.name, aa)
got_reply = 1
# If a reply wasn't read to fill an output parameter,
# read a NULL reply to get the return value.
if not got_reply:
print " %s __glXReadReply(dpy, 0, NULL, GL_FALSE);" % (return_str)
elif self.debug:
# Only emit the extra glFinish call for functions
# that don't already require a reply from the server.
print ' __indirect_glFinish();'
if self.debug:
print ' printf( "Exit %%s.\\n", "gl%s" );' % (name)
print ' UnlockDisplay(dpy); SyncHandle();'
if name not in f.glx_vendorpriv_names:
print '#endif /* USE_XCB */'
print ' }'
print ' return%s;' % (return_name)
return
def printPixelFunction(self, f):
if self.pixel_stubs.has_key( f.name ):
# Normally gl_function::get_parameter_string could be
# used. However, this call needs to have the missing
# dimensions (e.g., a fake height value for
# glTexImage1D) added in.
p_string = ""
for param in f.parameterIterateGlxSend():
if param.is_padding:
continue
p_string += ", " + param.name
if param.is_image():
[dim, junk, junk, junk, junk] = param.get_dimensions()
if f.pad_after(param):
p_string += ", 1"
print ' %s(%s, %u%s );' % (self.pixel_stubs[f.name] , f.opcode_name(), dim, p_string)
return
if self.common_func_print_just_start(f, None):
trailer = " }"
else:
trailer = None
if f.can_be_large:
print 'if (cmdlen <= gc->maxSmallRenderCommandSize) {'
print ' if ( (gc->pc + cmdlen) > gc->bufEnd ) {'
print ' (void) __glXFlushRenderBuffer(gc, gc->pc);'
print ' }'
if f.glx_rop == ~0:
opcode = "opcode"
else:
opcode = f.opcode_real_name()
print 'emit_header(gc->pc, %s, cmdlen);' % (opcode)
self.pixel_emit_args( f, "gc->pc", 0 )
print 'gc->pc += cmdlen;'
print 'if (gc->pc > gc->limit) { (void) __glXFlushRenderBuffer(gc, gc->pc); }'
if f.can_be_large:
print '}'
print 'else {'
self.large_emit_begin(f, opcode)
self.pixel_emit_args(f, "pc", 1)
print '}'
if trailer: print trailer
return
def printRenderFunction(self, f):
# There is a class of GL functions that take a single pointer
# as a parameter. This pointer points to a fixed-size chunk
        # of data, and the protocol for these functions is very
# regular. Since they are so regular and there are so many
# of them, special case them with generic functions. On
# x86, this saves about 26KB in the libGL.so binary.
if f.variable_length_parameter() == None and len(f.parameters) == 1:
p = f.parameters[0]
if p.is_pointer():
cmdlen = f.command_fixed_length()
if cmdlen in self.generic_sizes:
print ' generic_%u_byte( %s, %s );' % (cmdlen, f.opcode_real_name(), p.name)
return
if self.common_func_print_just_start(f, None):
trailer = " }"
else:
trailer = None
if self.debug:
print 'printf( "Enter %%s...\\n", "gl%s" );' % (f.name)
if f.can_be_large:
print 'if (cmdlen <= gc->maxSmallRenderCommandSize) {'
print ' if ( (gc->pc + cmdlen) > gc->bufEnd ) {'
print ' (void) __glXFlushRenderBuffer(gc, gc->pc);'
print ' }'
print 'emit_header(gc->pc, %s, cmdlen);' % (f.opcode_real_name())
self.common_emit_args(f, "gc->pc", 4, 0)
print 'gc->pc += cmdlen;'
print 'if (__builtin_expect(gc->pc > gc->limit, 0)) { (void) __glXFlushRenderBuffer(gc, gc->pc); }'
if f.can_be_large:
print '}'
print 'else {'
self.large_emit_begin(f)
self.common_emit_args(f, "pc", 8, 1)
p = f.variable_length_parameter()
print ' __glXSendLargeCommand(gc, pc, %u, %s, %s);' % (p.offset + 8, p.name, p.size_string())
print '}'
if self.debug:
print '__indirect_glFinish();'
print 'printf( "Exit %%s.\\n", "gl%s" );' % (f.name)
if trailer: print trailer
return
class PrintGlxProtoInit_c(gl_XML.gl_print_base):
def __init__(self):
gl_XML.gl_print_base.__init__(self)
self.name = "glX_proto_send.py (from Mesa)"
self.license = license.bsd_license_template % ( \
"""Copyright 1998-1999 Precision Insight, Inc., Cedar Park, Texas.
(C) Copyright IBM Corporation 2004""", "PRECISION INSIGHT, IBM")
return
def printRealHeader(self):
print """/**
* \\file indirect_init.c
* Initialize indirect rendering dispatch table.
*
* \\author Kevin E. Martin <kevin@precisioninsight.com>
* \\author Brian Paul <brian@precisioninsight.com>
* \\author Ian Romanick <idr@us.ibm.com>
*/
#include "indirect_init.h"
#include "indirect.h"
#include "glapi.h"
/**
* No-op function used to initialize functions that have no GLX protocol
* support.
*/
static int NoOp(void)
{
return 0;
}
/**
* Create and initialize a new GL dispatch table. The table is initialized
* with GLX indirect rendering protocol functions.
*/
struct _glapi_table * __glXNewIndirectAPI( void )
{
struct _glapi_table *glAPI;
GLuint entries;
entries = _glapi_get_dispatch_table_size();
glAPI = (struct _glapi_table *) Xmalloc(entries * sizeof(void *));
/* first, set all entries to point to no-op functions */
{
int i;
void **dispatch = (void **) glAPI;
for (i = 0; i < entries; i++) {
dispatch[i] = (void *) NoOp;
}
}
/* now, initialize the entries we understand */"""
def printRealFooter(self):
print """
return glAPI;
}
"""
return
def printBody(self, api):
for [name, number] in api.categoryIterate():
if number != None:
preamble = '\n /* %3u. %s */\n\n' % (int(number), name)
else:
preamble = '\n /* %s */\n\n' % (name)
for func in api.functionIterateByCategory(name):
if func.client_supported_for_indirect():
print '%s glAPI->%s = __indirect_gl%s;' % (preamble, func.name, func.name)
preamble = ''
return
class PrintGlxProtoInit_h(gl_XML.gl_print_base):
def __init__(self):
gl_XML.gl_print_base.__init__(self)
self.name = "glX_proto_send.py (from Mesa)"
self.license = license.bsd_license_template % ( \
"""Copyright 1998-1999 Precision Insight, Inc., Cedar Park, Texas.
(C) Copyright IBM Corporation 2004""", "PRECISION INSIGHT, IBM")
self.header_tag = "_INDIRECT_H_"
self.last_category = ""
return
def printRealHeader(self):
print """/**
* \\file
* Prototypes for indirect rendering functions.
*
* \\author Kevin E. Martin <kevin@precisioninsight.com>
* \\author Ian Romanick <idr@us.ibm.com>
*/
"""
self.printVisibility( "HIDDEN", "hidden" )
self.printFastcall()
self.printNoinline()
print """
#include "glxclient.h"
extern HIDDEN NOINLINE CARD32 __glXReadReply( Display *dpy, size_t size,
void * dest, GLboolean reply_is_always_array );
extern HIDDEN NOINLINE void __glXReadPixelReply( Display *dpy,
struct glx_context * gc, unsigned max_dim, GLint width, GLint height,
GLint depth, GLenum format, GLenum type, void * dest,
GLboolean dimensions_in_reply );
extern HIDDEN NOINLINE FASTCALL GLubyte * __glXSetupSingleRequest(
struct glx_context * gc, GLint sop, GLint cmdlen );
extern HIDDEN NOINLINE FASTCALL GLubyte * __glXSetupVendorRequest(
struct glx_context * gc, GLint code, GLint vop, GLint cmdlen );
"""
def printBody(self, api):
for func in api.functionIterateGlx():
params = func.get_parameter_string()
print 'extern HIDDEN %s __indirect_gl%s(%s);' % (func.return_type, func.name, params)
for n in func.entry_points:
if func.has_different_protocol(n):
asdf = func.static_glx_name(n)
if asdf not in func.static_entry_points:
print 'extern HIDDEN %s gl%s(%s);' % (func.return_type, asdf, params)
else:
print 'GLAPI %s GLAPIENTRY gl%s(%s);' % (func.return_type, asdf, params)
break
def show_usage():
print "Usage: %s [-f input_file_name] [-m output_mode] [-d]" % sys.argv[0]
print " -m output_mode Output mode can be one of 'proto', 'init_c' or 'init_h'."
print " -d Enable extra debug information in the generated code."
sys.exit(1)
if __name__ == '__main__':
file_name = "gl_API.xml"
try:
(args, trail) = getopt.getopt(sys.argv[1:], "f:m:d")
except Exception,e:
show_usage()
debug = 0
mode = "proto"
for (arg,val) in args:
if arg == "-f":
file_name = val
elif arg == "-m":
mode = val
elif arg == "-d":
debug = 1
if mode == "proto":
printer = PrintGlxProtoStubs()
elif mode == "init_c":
printer = PrintGlxProtoInit_c()
elif mode == "init_h":
printer = PrintGlxProtoInit_h()
else:
show_usage()
printer.debug = debug
api = gl_XML.parse_GL_API( file_name, glX_XML.glx_item_factory() )
printer.Print( api )
|
alexlee-gk/visual_dynamics
|
refs/heads/master
|
visual_dynamics/policies/__init__.py
|
1
|
from .base import Policy
from .target_policy import TargetPolicy
from .random_policy import RandomPolicy
from .additive_normal_policy import AdditiveNormalPolicy
from .momentum_policy import MomentumPolicy
from .mixed_policy import MixedPolicy
from .choice_policy import ChoicePolicy
from .constant_policy import ConstantPolicy
from .camera_target_policy import CameraTargetPolicy
from .random_offset_camera_target_policy import RandomOffsetCameraTargetPolicy
from .quad_target_policy import QuadTargetPolicy
try:
from .pr2_target_policy import Pr2TargetPolicy
from .pr2_moving_arm_target_policy import Pr2MovingArmTargetPolicy
except ImportError:
pass
from .servoing_policy import ServoingPolicy, TheanoServoingPolicy
from .interactive_translation_angle_axis_policy import InteractiveTranslationAngleAxisPolicy
from .position_based_servoing_policy import PositionBasedServoingPolicy
|
anilgulecha/trelby
|
refs/heads/master
|
src/headersdlg.py
|
6
|
import gutil
import headers
import misc
import pdf
import pml
import util
import wx
class HeadersDlg(wx.Dialog):
def __init__(self, parent, headers, cfg, cfgGl, applyFunc):
wx.Dialog.__init__(self, parent, -1, "Headers",
style = wx.DEFAULT_DIALOG_STYLE)
self.headers = headers
self.cfg = cfg
self.cfgGl = cfgGl
self.applyFunc = applyFunc
# whether some events are blocked
self.block = False
self.hdrIndex = -1
if len(self.headers.hdrs) > 0:
self.hdrIndex = 0
vsizer = wx.BoxSizer(wx.VERTICAL)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.Add(wx.StaticText(self, -1, "Empty lines after headers:"), 0,
wx.ALIGN_CENTER_VERTICAL)
self.elinesEntry = wx.SpinCtrl(self, -1)
self.elinesEntry.SetRange(0, 5)
wx.EVT_SPINCTRL(self, self.elinesEntry.GetId(), self.OnMisc)
wx.EVT_KILL_FOCUS(self.elinesEntry, self.OnKillFocus)
hsizer.Add(self.elinesEntry, 0, wx.LEFT, 10)
vsizer.Add(hsizer)
vsizer.Add(wx.StaticLine(self, -1), 0, wx.EXPAND | wx.TOP | wx.BOTTOM,
10)
tmp = wx.StaticText(self, -1, "Strings:")
vsizer.Add(tmp)
self.stringsLb = wx.ListBox(self, -1, size = (200, 100))
vsizer.Add(self.stringsLb, 0, wx.EXPAND)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
self.addBtn = gutil.createStockButton(self, "Add")
hsizer.Add(self.addBtn)
wx.EVT_BUTTON(self, self.addBtn.GetId(), self.OnAddString)
gutil.btnDblClick(self.addBtn, self.OnAddString)
self.delBtn = gutil.createStockButton(self, "Delete")
hsizer.Add(self.delBtn, 0, wx.LEFT, 10)
wx.EVT_BUTTON(self, self.delBtn.GetId(), self.OnDeleteString)
gutil.btnDblClick(self.delBtn, self.OnDeleteString)
vsizer.Add(hsizer, 0, wx.TOP, 5)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.Add(wx.StaticText(self, -1, "Text:"), 0,
wx.ALIGN_CENTER_VERTICAL)
self.textEntry = wx.TextCtrl(self, -1)
hsizer.Add(self.textEntry, 1, wx.LEFT, 10)
wx.EVT_TEXT(self, self.textEntry.GetId(), self.OnMisc)
vsizer.Add(hsizer, 0, wx.EXPAND | wx.TOP, 20)
vsizer.Add(wx.StaticText(self, -1,
"'${PAGE}' will be replaced by the page number."), 0,
wx.ALIGN_CENTER | wx.TOP, 5)
hsizerTop = wx.BoxSizer(wx.HORIZONTAL)
gsizer = wx.FlexGridSizer(3, 2, 5, 0)
gsizer.Add(wx.StaticText(self, -1, "Header line:"), 0,
wx.ALIGN_CENTER_VERTICAL)
self.lineEntry = wx.SpinCtrl(self, -1)
self.lineEntry.SetRange(1, 5)
wx.EVT_SPINCTRL(self, self.lineEntry.GetId(), self.OnMisc)
wx.EVT_KILL_FOCUS(self.lineEntry, self.OnKillFocus)
gsizer.Add(self.lineEntry)
gsizer.Add(wx.StaticText(self, -1, "X offset (characters):"), 0,
wx.ALIGN_CENTER_VERTICAL | wx.RIGHT, 10)
self.xoffEntry = wx.SpinCtrl(self, -1)
self.xoffEntry.SetRange(-100, 100)
wx.EVT_SPINCTRL(self, self.xoffEntry.GetId(), self.OnMisc)
wx.EVT_KILL_FOCUS(self.xoffEntry, self.OnKillFocus)
gsizer.Add(self.xoffEntry)
gsizer.Add(wx.StaticText(self, -1, "Alignment:"), 0,
wx.ALIGN_CENTER_VERTICAL)
self.alignCombo = wx.ComboBox(self, -1, style = wx.CB_READONLY)
for it in [ ("Left", util.ALIGN_LEFT), ("Center", util.ALIGN_CENTER),
("Right", util.ALIGN_RIGHT) ]:
self.alignCombo.Append(it[0], it[1])
gsizer.Add(self.alignCombo)
wx.EVT_COMBOBOX(self, self.alignCombo.GetId(), self.OnMisc)
hsizerTop.Add(gsizer)
bsizer = wx.StaticBoxSizer(
wx.StaticBox(self, -1, "Style"), wx.HORIZONTAL)
vsizer2 = wx.BoxSizer(wx.VERTICAL)
# wxGTK adds way more space by default than wxMSW between the
# items, have to adjust for that
pad = 0
if misc.isWindows:
pad = 5
self.addCheckBox("Bold", self, vsizer2, pad)
self.addCheckBox("Italic", self, vsizer2, pad)
self.addCheckBox("Underlined", self, vsizer2, pad)
bsizer.Add(vsizer2)
hsizerTop.Add(bsizer, 0, wx.LEFT, 40)
vsizer.Add(hsizerTop, 0, wx.TOP, 20)
hsizer = wx.BoxSizer(wx.HORIZONTAL)
hsizer.Add((1, 1), 1)
previewBtn = gutil.createStockButton(self, "Preview")
hsizer.Add(previewBtn)
applyBtn = gutil.createStockButton(self, "Apply")
hsizer.Add(applyBtn, 0, wx.LEFT, 10)
cancelBtn = gutil.createStockButton(self, "Cancel")
hsizer.Add(cancelBtn, 0, wx.LEFT, 10)
okBtn = gutil.createStockButton(self, "OK")
hsizer.Add(okBtn, 0, wx.LEFT, 10)
vsizer.Add(hsizer, 0, wx.EXPAND | wx.TOP, 20)
util.finishWindow(self, vsizer)
wx.EVT_BUTTON(self, previewBtn.GetId(), self.OnPreview)
wx.EVT_BUTTON(self, applyBtn.GetId(), self.OnApply)
wx.EVT_BUTTON(self, cancelBtn.GetId(), self.OnCancel)
wx.EVT_BUTTON(self, okBtn.GetId(), self.OnOK)
wx.EVT_LISTBOX(self, self.stringsLb.GetId(), self.OnStringsLb)
# list of widgets that are specific to editing the selected string
self.widList = [ self.textEntry, self.xoffEntry, self.alignCombo,
self.lineEntry, self.boldCb, self.italicCb,
self.underlinedCb ]
self.updateGui()
self.textEntry.SetFocus()
def addCheckBox(self, name, parent, sizer, pad):
cb = wx.CheckBox(parent, -1, name)
wx.EVT_CHECKBOX(self, cb.GetId(), self.OnMisc)
sizer.Add(cb, 0, wx.TOP, pad)
setattr(self, name.lower() + "Cb", cb)
def OnOK(self, event):
self.EndModal(wx.ID_OK)
def OnCancel(self, event):
self.EndModal(wx.ID_CANCEL)
def OnApply(self, event):
self.applyFunc(self.headers)
def OnPreview(self, event):
doc = pml.Document(self.cfg.paperWidth, self.cfg.paperHeight)
pg = pml.Page(doc)
self.headers.generatePML(pg, "42", self.cfg)
fs = self.cfg.fontSize
chY = util.getTextHeight(fs)
y = self.cfg.marginTop + self.headers.getNrOfLines() * chY
pg.add(pml.TextOp("Mindy runs away from the dinosaur, but trips on"
" the power", self.cfg.marginLeft, y, fs))
pg.add(pml.TextOp("cord. The raptor approaches her slowly.",
self.cfg.marginLeft, y + chY, fs))
doc.add(pg)
tmp = pdf.generate(doc)
gutil.showTempPDF(tmp, self.cfgGl, self)
def OnKillFocus(self, event):
self.OnMisc()
# if we don't call this, the spin entry on wxGTK gets stuck in
# some weird state
event.Skip()
def OnStringsLb(self, event = None):
self.hdrIndex = self.stringsLb.GetSelection()
self.updateHeaderGui()
def OnAddString(self, event):
h = headers.HeaderString()
h.text = "new string"
self.headers.hdrs.append(h)
self.hdrIndex = len(self.headers.hdrs) - 1
self.updateGui()
def OnDeleteString(self, event):
if self.hdrIndex == -1:
return
del self.headers.hdrs[self.hdrIndex]
self.hdrIndex = min(self.hdrIndex, len(self.headers.hdrs) - 1)
self.updateGui()
# update listbox
def updateGui(self):
self.stringsLb.Clear()
self.elinesEntry.SetValue(self.headers.emptyLinesAfter)
self.delBtn.Enable(self.hdrIndex != -1)
for h in self.headers.hdrs:
self.stringsLb.Append(h.text)
if self.hdrIndex != -1:
self.stringsLb.SetSelection(self.hdrIndex)
self.updateHeaderGui()
# update selected header stuff
def updateHeaderGui(self):
if self.hdrIndex == -1:
for w in self.widList:
w.Disable()
self.textEntry.SetValue("")
self.lineEntry.SetValue(1)
self.xoffEntry.SetValue(0)
self.boldCb.SetValue(False)
self.italicCb.SetValue(False)
self.underlinedCb.SetValue(False)
return
self.block = True
h = self.headers.hdrs[self.hdrIndex]
for w in self.widList:
w.Enable(True)
self.textEntry.SetValue(h.text)
self.xoffEntry.SetValue(h.xoff)
util.reverseComboSelect(self.alignCombo, h.align)
self.lineEntry.SetValue(h.line)
self.boldCb.SetValue(h.isBold)
self.italicCb.SetValue(h.isItalic)
self.underlinedCb.SetValue(h.isUnderlined)
self.block = False
def OnMisc(self, event = None):
self.headers.emptyLinesAfter = util.getSpinValue(self.elinesEntry)
if (self.hdrIndex == -1) or self.block:
return
h = self.headers.hdrs[self.hdrIndex]
h.text = util.toInputStr(misc.fromGUI(self.textEntry.GetValue()))
self.stringsLb.SetString(self.hdrIndex, h.text)
h.xoff = util.getSpinValue(self.xoffEntry)
h.line = util.getSpinValue(self.lineEntry)
h.align = self.alignCombo.GetClientData(self.alignCombo.GetSelection())
h.isBold = self.boldCb.GetValue()
h.isItalic = self.italicCb.GetValue()
h.isUnderlined = self.underlinedCb.GetValue()
|
JCROM-Android/jcrom_external_chromium_org
|
refs/heads/kitkat
|
chrome/common/extensions/docs/server2/chained_compiled_file_system_test.py
|
24
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from chained_compiled_file_system import ChainedCompiledFileSystem
from compiled_file_system import CompiledFileSystem
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
_TEST_DATA_BASE = {
'a.txt': 'base a.txt',
'dir': {
'b.txt': 'base b.txt'
},
}
_TEST_DATA_NEW = {
'a.txt': 'new a.txt',
'new.txt': 'a new file',
'dir': {
'b.txt': 'new b.txt',
'new.txt': 'new file in dir',
},
}
identity = lambda _, x: x
class ChainedCompiledFileSystemTest(unittest.TestCase):
def setUp(self):
object_store_creator = ObjectStoreCreator(start_empty=False)
base_file_system = TestFileSystem(_TEST_DATA_BASE)
self._base_factory = CompiledFileSystem.Factory(base_file_system,
object_store_creator)
self._file_system = TestFileSystem(_TEST_DATA_NEW)
self._patched_factory = CompiledFileSystem.Factory(self._file_system,
object_store_creator)
self._chained_factory = ChainedCompiledFileSystem.Factory(
[(self._patched_factory, self._file_system),
(self._base_factory, base_file_system)])
self._base_compiled_fs = self._base_factory.Create(identity, TestFileSystem)
self._chained_compiled_fs = self._chained_factory.Create(
identity, TestFileSystem)
def testGetFromFile(self):
self.assertEqual(self._chained_compiled_fs.GetFromFile('a.txt'),
self._base_compiled_fs.GetFromFile('a.txt'))
self.assertEqual(self._chained_compiled_fs.GetFromFile('new.txt'),
'a new file')
self.assertEqual(self._chained_compiled_fs.GetFromFile('dir/new.txt'),
'new file in dir')
self._file_system.IncrementStat('a.txt')
self.assertNotEqual(self._chained_compiled_fs.GetFromFile('a.txt'),
self._base_compiled_fs.GetFromFile('a.txt'))
self.assertEqual(self._chained_compiled_fs.GetFromFile('a.txt'),
self._file_system.ReadSingle('a.txt'))
def testGetFromFileListing(self):
self.assertEqual(self._chained_compiled_fs.GetFromFile('dir/'),
self._base_compiled_fs.GetFromFile('dir/'))
self._file_system.IncrementStat('dir/')
self.assertNotEqual(self._chained_compiled_fs.GetFromFileListing('dir/'),
self._base_compiled_fs.GetFromFileListing('dir/'))
self.assertEqual(self._chained_compiled_fs.GetFromFileListing('dir/'),
self._file_system.ReadSingle('dir/'))
if __name__ == '__main__':
unittest.main()
|
ericmjl/bokeh
|
refs/heads/master
|
sphinx/source/docs/user_guide/examples/plotting_patch_single.py
|
63
|
from bokeh.plotting import figure, output_file, show
output_file("patch.html")
p = figure(plot_width=400, plot_height=400)
# add a patch renderer with an alpha and line width
p.patch([1, 2, 3, 4, 5], [6, 7, 8, 7, 3], alpha=0.5, line_width=2)
show(p)
|
calvinfarias/IC2015-2
|
refs/heads/master
|
BOOST/boost_1_61_0/libs/python/test/enum.py
|
45
|
# Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> from enum_ext import *
>>> identity(color.red) # in case of duplicated enums it always takes the last enum
enum_ext.color.blood
>>> identity(color.green)
enum_ext.color.green
>>> identity(color.blue)
enum_ext.color.blue
>>> identity(color(1)) # in case of duplicated enums it always takes the last enum
enum_ext.color.blood
>>> identity(color(2))
enum_ext.color.green
>>> identity(color(3))
enum_ext.color(3)
>>> identity(color(4))
enum_ext.color.blue
--- check export to scope ---
>>> identity(red)
enum_ext.color.blood
>>> identity(green)
enum_ext.color.green
>>> identity(blue)
enum_ext.color.blue
>>> try: identity(1)
... except TypeError: pass
... else: print 'expected a TypeError'
>>> c = colorized()
>>> c.x
enum_ext.color.blood
>>> c.x = green
>>> c.x
enum_ext.color.green
>>> red == blood
True
>>> red == green
False
>>> hash(red) == hash(blood)
True
>>> hash(red) == hash(green)
False
'''
# pickling of enums only works with Python 2.3 or higher
exercise_pickling = '''
>>> import pickle
>>> p = pickle.dumps(color.green, pickle.HIGHEST_PROTOCOL)
>>> l = pickle.loads(p)
>>> identity(l)
enum_ext.color.green
'''
def run(args = None):
import sys
import doctest
import pickle
if args is not None:
sys.argv = args
self = sys.modules.get(__name__)
if (hasattr(pickle, "HIGHEST_PROTOCOL")):
self.__doc__ += exercise_pickling
return doctest.testmod(self)
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
|
Djacket/djacket
|
refs/heads/master
|
core/backend/git/statistics.py
|
1
|
import json
from dateutil import parser
from datetime import datetime
from utils.date import get_year, get_month, get_weeknumber, get_weekday
def extract_month(utc_timestamp):
"""
Extracts month from utc timestamp string.
"""
datetime = parser.parse(utc_timestamp)
return '{0}-{1}'.format(datetime.year, datetime.month)
def extract_day(utc_timestamp):
"""
Extracts day from utc timestamp string.
"""
datetime = parser.parse(utc_timestamp)
return '{0}-{1}-{2}'.format(datetime.year, datetime.month, datetime.day)
class DataPresentation:
"""
Represents a presentation format for a given dataset (Datasets are python 'dict's basically).
If data format is 'py' then it's left alone.
But in case of 'js', a json compatible presentation is returned.
"""
JS_FORMAT, PY_FORMAT = 'js', 'py'
VALID_DATA_FORMATS = [JS_FORMAT, PY_FORMAT] # Available data presentation output formats.
def __init__(self, data_format):
if not data_format in self.VALID_DATA_FORMATS: raise ValueError('Input data format is not a valid one.')
self.data_format = data_format
def present(self, data):
"""
Returns presentation of the given dataset.
"""
if self.data_format == 'js':
return json.dumps(data)
elif self.data_format == 'py':
return data
class GitStatistics:
"""
Generates data analysis for a git repository. This data will be available
in python 'dict' or javascript 'json' formats. One can use this statistics
to plot graphs or analyze repository activities.
"""
# Available time intervals for generating datasets.
DAILY_INTERVALS = 'daily'
WEEKLY_INTERVALS = 'weekly'
MONTHLY_INTERVALS = 'monthly'
VALID_DATA_GENERATION_INTERVALS = [DAILY_INTERVALS, WEEKLY_INTERVALS, MONTHLY_INTERVALS]
def __init__(self, repo, rev):
self.repo = repo
self.rev = rev
self.current_year = datetime.utcnow().isocalendar()[0]
self.current_week = datetime.utcnow().isocalendar()[1]
def _for_commits_daily(self, commits):
"""
Returns number of commits per day for the given commits.
"""
# get dates only in the current year.
dates = [extract_day(commit.get_committer_date()) for commit in commits \
if get_year(commit.get_committer_date()) == self.current_year]
return {date: dates.count(date) for date in dates}
def _for_commits_weekly(self, commits):
"""
        Returns number of commits per weekday of the current week for the given commits.
"""
# get dates only in the current year and current week.
dates = [get_weekday(extract_day(commit.get_committer_date())) for commit in commits \
if get_year(commit.get_committer_date()) == self.current_year and \
get_weeknumber(commit.get_committer_date()) == self.current_week]
return {wd: dates.count(wd) if wd in dates else 0 for wd in range(1, 8)}
def _for_commits_monthly(self, commits):
"""
Returns number of commits per month for the given commits.
"""
dates = [get_month(extract_month(commit.get_committer_date())) for commit in commits
if get_year(commit.get_committer_date()) == self.current_year]
return {mn: dates.count(mn) if mn in dates else 0 for mn in range(1, 13)}
def for_commits(self, by, data_format):
"""
Returns dataset for number of commits per given time interval.
"""
if not by in self.VALID_DATA_GENERATION_INTERVALS: raise ValueError('Input interval is not a valid one.')
commits = self.repo.get_commits(self.rev)
if by == self.DAILY_INTERVALS:
return DataPresentation(data_format).present(self._for_commits_daily(commits))
elif by == self.WEEKLY_INTERVALS:
return DataPresentation(data_format).present(self._for_commits_weekly(commits))
elif by == self.MONTHLY_INTERVALS:
return DataPresentation(data_format).present(self._for_commits_monthly(commits))
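# Illustrative sketch (not part of the original module): presenting the same
# dataset in both supported output formats.
#
#   daily = {'2016-1-1': 3, '2016-1-2': 5}
#   DataPresentation(DataPresentation.JS_FORMAT).present(daily)  # -> JSON string for charts
#   DataPresentation(DataPresentation.PY_FORMAT).present(daily)  # -> the dict unchanged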
|
meeb/txhttprelay
|
refs/heads/master
|
config.py
|
1
|
LISTEN_PORT = 9876
LISTEN_HOST = '127.0.0.1'
LISTEN_SOCKET = 'relay.sock'
USE = 'tcp' # 'tcp' or 'unix' to toggle which of the above to use
REQUESTS = {}
try:
from local_config import *
except ImportError:
import sys
sys.stderr.write('unable to import local_config')
sys.exit(1)
|
huzq/scikit-learn
|
refs/heads/master
|
sklearn/metrics/cluster/_bicluster.py
|
11
|
import numpy as np
from scipy.optimize import linear_sum_assignment
from ...utils.validation import check_consistent_length, check_array
from ...utils.validation import _deprecate_positional_args
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
"""Computes pairwise similarity matrix.
result[i, j] is the Jaccard coefficient of a's bicluster i and b's
bicluster j.
"""
a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
n_a = a_rows.shape[0]
n_b = b_rows.shape[0]
result = np.array(list(list(similarity(a_rows[i], a_cols[i],
b_rows[j], b_cols[j])
for j in range(n_b))
for i in range(n_a)))
return result
@_deprecate_positional_args
def consensus_score(a, b, *, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : 'jaccard' or callable, default='jaccard'
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
row_indices, col_indices = linear_sum_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[row_indices, col_indices].sum() / max(n_a, n_b)
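# Illustrative usage sketch (not part of the original scikit-learn module):
# two identical one-bicluster sets give a consensus score of 1.0.
#
#   import numpy as np
#   from sklearn.metrics import consensus_score
#   rows = np.array([[1, 1, 0, 0]])
#   cols = np.array([[1, 0, 1, 0]])
#   consensus_score((rows, cols), (rows, cols))   # -> 1.0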
|
ItsAGeekThing/namebench
|
refs/heads/master
|
nb_third_party/__init__.py
|
176
|
import os.path
import sys
# This bit of evil should inject third_party into the path for relative imports.
sys.path.insert(1, os.path.dirname(__file__))
|
sebastic/python-mapnik
|
refs/heads/master
|
test/python_tests/markers_complex_rendering_test.py
|
2
|
#coding=utf8
import os
import mapnik
from utilities import execution_path, run_all
from nose.tools import eq_
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
if 'csv' in mapnik.DatasourceCache.plugin_names():
def test_marker_ellipse_render1():
m = mapnik.Map(256,256)
mapnik.load_map(m,'../data/good_maps/marker_ellipse_transform.xml')
m.zoom_all()
im = mapnik.Image(m.width,m.height)
mapnik.render(m,im)
actual = '/tmp/mapnik-marker-ellipse-render1.png'
expected = 'images/support/mapnik-marker-ellipse-render1.png'
im.save(actual,'png32')
if os.environ.get('UPDATE'):
im.save(expected,'png32')
expected_im = mapnik.Image.open(expected)
eq_(im.tostring('png32'),expected_im.tostring('png32'), 'failed comparing actual (%s) and expected (%s)' % (actual,'tests/python_tests/'+ expected))
def test_marker_ellipse_render2():
m = mapnik.Map(256,256)
mapnik.load_map(m,'../data/good_maps/marker_ellipse_transform2.xml')
m.zoom_all()
im = mapnik.Image(m.width,m.height)
mapnik.render(m,im)
actual = '/tmp/mapnik-marker-ellipse-render2.png'
expected = 'images/support/mapnik-marker-ellipse-render2.png'
im.save(actual,'png32')
if os.environ.get('UPDATE'):
im.save(expected,'png32')
expected_im = mapnik.Image.open(expected)
eq_(im.tostring('png32'),expected_im.tostring('png32'), 'failed comparing actual (%s) and expected (%s)' % (actual,'tests/python_tests/'+ expected))
if __name__ == "__main__":
setup()
exit(run_all(eval(x) for x in dir() if x.startswith("test_")))
|
smalls257/VRvisu
|
refs/heads/master
|
Library/External.LCA_RESTRICTED/Languages/CPython/27/Lib/json/tool.py
|
113
|
r"""Command-line tool to validate and pretty-print JSON
Usage::
$ echo '{"json":"obj"}' | python -m json.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m json.tool
Expecting property name: line 1 column 2 (char 2)
"""
import sys
import json
def main():
if len(sys.argv) == 1:
infile = sys.stdin
outfile = sys.stdout
elif len(sys.argv) == 2:
infile = open(sys.argv[1], 'rb')
outfile = sys.stdout
elif len(sys.argv) == 3:
infile = open(sys.argv[1], 'rb')
outfile = open(sys.argv[2], 'wb')
else:
raise SystemExit(sys.argv[0] + " [infile [outfile]]")
try:
obj = json.load(infile)
except ValueError, e:
raise SystemExit(e)
json.dump(obj, outfile, sort_keys=True, indent=4)
outfile.write('\n')
if __name__ == '__main__':
main()
|
Brocade-OpenSource/OpenStack-DNRM-Neutron
|
refs/heads/master
|
neutron/openstack/common/notifier/test_notifier.py
|
129
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
NOTIFICATIONS = []
def notify(_context, message):
"""Test notifier, stores notifications in memory for unittests."""
NOTIFICATIONS.append(message)
|
TomTranter/OpenPNM
|
refs/heads/master
|
tests/unit/models/phase/DensityTest.py
|
1
|
import openpnm as op
from numpy.testing import assert_approx_equal
class DensityTest:
def setup_class(self):
self.net = op.network.Cubic(shape=[3, 3, 3])
self.phase = op.phases.GenericPhase(network=self.net)
self.phase['pore.temperature'] = 298.0 # K
self.phase['pore.pressure'] = 101325.0 # Pa
self.phase['pore.molecular_weight'] = 0.018 # kg/mol
self.phase['pore.molar_density'] = 55539.0 # mol/m3
self.phase['pore.salinity'] = 0.0 # ppt
def test_standard(self):
# Liquid water
self.phase.add_model(propname='pore.density',
model=op.models.phases.density.standard)
assert_approx_equal(self.phase['pore.density'].mean(), 999.702)
def test_ideal_gas(self):
# Water vapor
self.phase.add_model(propname='pore.density',
model=op.models.phases.density.ideal_gas)
self.phase.regenerate_models()
assert_approx_equal(self.phase['pore.density'].mean(), 0.73610248)
def test_water(self):
# Liquid water
self.phase.add_model(propname='pore.density',
model=op.models.phases.density.water)
self.phase.regenerate_models()
assert_approx_equal(self.phase['pore.density'].mean(), 996.9522)
def teardown_class(self):
del(self.phase)
del(self.net)
if __name__ == '__main__':
t = DensityTest()
self = t
t.setup_class()
for item in t.__dir__():
if item.startswith('test'):
print('running test: '+item)
t.__getattribute__(item)()
|
OmarIthawi/edx-platform
|
refs/heads/master
|
common/djangoapps/embargo/fixtures/country_codes.py
|
259
|
"""
List of valid ISO 3166-1 Alpha-2 country codes, used for
validating entries on entered country codes on django-admin page.
"""
COUNTRY_CODES = set([
"AC", "AD", "AE", "AF", "AG", "AI", "AL", "AM", "AN", "AO", "AQ", "AR", "AS", "AT",
"AU", "AW", "AX", "AZ", "BA", "BB", "BD", "BE", "BF", "BG", "BH", "BI", "BJ", "BM",
"BN", "BO", "BR", "BS", "BT", "BV", "BW", "BY", "BZ", "CA", "CC", "CD", "CF", "CG",
"CH", "CI", "CK", "CL", "CM", "CN", "CO", "CR", "CU", "CV", "CX", "CY", "CZ", "DE",
"DJ", "DK", "DM", "DO", "DZ", "EC", "EE", "EG", "ER", "ES", "ET", "FI", "FJ", "FK",
"FM", "FO", "FR", "GA", "GB", "GD", "GE", "GF", "GG", "GH", "GI", "GL", "GM", "GN",
"GP", "GQ", "GR", "GS", "GT", "GU", "GW", "GY", "HK", "HM", "HN", "HR", "HT", "HU",
"ID", "IE", "IL", "IM", "IN", "IO", "IQ", "IR", "IS", "IT", "JE", "JM", "JO", "JP",
"KE", "KG", "KH", "KI", "KM", "KN", "KP", "KR", "KW", "KY", "KZ", "LA", "LB", "LC",
"LI", "LK", "LR", "LS", "LT", "LU", "LV", "LY", "MA", "MC", "MD", "ME", "MG", "MH",
"MK", "ML", "MM", "MN", "MO", "MP", "MQ", "MR", "MS", "MT", "MU", "MV", "MW", "MX",
"MY", "MZ", "NA", "NC", "NE", "NF", "NG", "NI", "NL", "NO", "NP", "NR", "NU", "NZ",
"OM", "PA", "PE", "PF", "PG", "PH", "PK", "PL", "PM", "PN", "PR", "PT", "PW", "PY",
"QA", "RE", "RO", "RS", "RU", "RW", "SA", "SB", "SC", "SD", "SE", "SG", "SH", "SI",
"SJ", "SK", "SL", "SM", "SN", "SO", "SR", "ST", "SV", "SY", "SZ", "TA", "TC", "TD",
"TF", "TG", "TH", "TJ", "TK", "TL", "TM", "TN", "TO", "TR", "TT", "TV", "TW", "TZ",
"UA", "UG", "UM", "US", "UY", "UZ", "VA", "VC", "VE", "VG", "VI", "VN", "VU", "WF",
"WS", "YE", "YT", "ZA", "ZM", "ZW"
])
|
menandmice/mnm-opendnssec
|
refs/heads/master
|
suds/umx/basic.py
|
211
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides basic unmarshaller classes.
"""
from logging import getLogger
from suds import *
from suds.umx import *
from suds.umx.core import Core
class Basic(Core):
"""
A object builder (unmarshaller).
"""
def process(self, node):
"""
Process an object graph representation of the xml I{node}.
@param node: An XML tree.
@type node: L{sax.element.Element}
@return: A suds object.
@rtype: L{Object}
"""
content = Content(node)
return Core.process(self, content)
|
hachreak/zenodo
|
refs/heads/master
|
tests/unit/default/test_zenodo.py
|
9
|
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2015 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Zenodo module test cases."""
from __future__ import absolute_import, print_function
def test_version():
"""Test version import."""
from zenodo import __version__
assert __version__
|
georgestarcher/TA-SyncKVStore
|
refs/heads/master
|
bin/ta_synckvstore/markupsafe/_compat.py
|
864
|
# -*- coding: utf-8 -*-
"""
markupsafe._compat
~~~~~~~~~~~~~~~~~~
Compatibility module for different Python versions.
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import sys
PY2 = sys.version_info[0] == 2
if not PY2:
text_type = str
string_types = (str,)
unichr = chr
int_types = (int,)
iteritems = lambda x: iter(x.items())
else:
text_type = unicode
string_types = (str, unicode)
unichr = unichr
int_types = (int, long)
iteritems = lambda x: x.iteritems()
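# Tiny demo (illustrative only, not part of the original module): the shims
# above give a uniform iteration API on both Python 2 and Python 3.
if __name__ == '__main__':
    for key, value in iteritems({'a': 1, 'b': 2}):
        print('%s=%s' % (key, value))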
|
StratusLab/client
|
refs/heads/master
|
api/code/src/main/python/requests/cookies.py
|
58
|
# -*- coding: utf-8 -*-
"""
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import collections
from .compat import cookielib, urlparse, Morsel
try:
import threading
# grr, pyflakes: this fixes "redefinition of unused 'threading'"
threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
"""Wraps a `requests.Request` to mimic a `urllib2.Request`.
The code in `cookielib.CookieJar` expects this interface in order to correctly
manage cookie policies, i.e., determine whether a cookie can be set, given the
domains of the request and the cookie.
The original request object is read-only. The client is responsible for collecting
the new headers via `get_new_headers()` and interpreting them appropriately. You
probably want `get_cookie_header`, defined below.
"""
def __init__(self, request):
self._r = request
self._new_headers = {}
self.type = urlparse(self._r.url).scheme
def get_type(self):
return self.type
def get_host(self):
return urlparse(self._r.url).netloc
def get_origin_req_host(self):
return self.get_host()
def get_full_url(self):
return self._r.url
def is_unverifiable(self):
return True
def has_header(self, name):
return name in self._r.headers or name in self._new_headers
def get_header(self, name, default=None):
return self._r.headers.get(name, self._new_headers.get(name, default))
def add_header(self, key, val):
"""cookielib has no legitimate use for this method; add it back if you find one."""
raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
def add_unredirected_header(self, name, value):
self._new_headers[name] = value
def get_new_headers(self):
return self._new_headers
@property
def unverifiable(self):
return self.is_unverifiable()
@property
def origin_req_host(self):
return self.get_origin_req_host()
class MockResponse(object):
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
...what? Basically, expose the parsed HTTP headers from the server response
the way `cookielib` expects to see them.
"""
def __init__(self, headers):
"""Make a MockResponse for `cookielib` to read.
:param headers: a httplib.HTTPMessage or analogous carrying the headers
"""
self._headers = headers
def info(self):
return self._headers
def getheaders(self, name):
        return self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
"""Extract the cookies from the response into a CookieJar.
:param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
res = MockResponse(response._original_response.msg)
jar.extract_cookies(res, req)
def get_cookie_header(jar, request):
"""Produce an appropriate Cookie header string to be sent with `request`, or None."""
r = MockRequest(request)
jar.add_cookie_header(r)
return r.get_new_headers().get('Cookie')
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
"""Unsets a cookie by name, by default over all domains and paths.
Wraps CookieJar.clear(), is O(n).
"""
clearables = []
for cookie in cookiejar:
if cookie.name == name:
if domain is None or domain == cookie.domain:
if path is None or path == cookie.path:
clearables.append((cookie.domain, cookie.path, cookie.name))
for domain, path, name in clearables:
cookiejar.clear(domain, path, name)
class CookieConflictError(RuntimeError):
"""There are two cookies that meet the criteria specified in the cookie jar.
Use .get and .set and include domain and path args in order to be more specific."""
class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
"""Compatibility class; is a cookielib.CookieJar, but exposes a dict interface.
This is the CookieJar we create by default for requests and sessions that
don't specify one, since some clients may expect response.cookies and
session.cookies to support dict operations.
Don't use the dict interface internally; it's just for compatibility with
    external client code. All `requests` code should work out of the box
with externally provided instances of CookieJar, e.g., LWPCookieJar and
FileCookieJar.
Caution: dictionary operations that are normally O(1) may be O(n).
Unlike a regular CookieJar, this class is pickleable.
"""
def get(self, name, default=None, domain=None, path=None):
"""Dict-like get() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains. Caution: operation is O(n), not O(1)."""
try:
return self._find_no_duplicates(name, domain, path)
except KeyError:
return default
def set(self, name, value, **kwargs):
"""Dict-like set() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains."""
# support client code that unsets cookies by assignment of a None value:
if value is None:
remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
return
if isinstance(value, Morsel):
c = morsel_to_cookie(value)
else:
c = create_cookie(name, value, **kwargs)
self.set_cookie(c)
return c
def keys(self):
"""Dict-like keys() that returns a list of names of cookies from the jar.
See values() and items()."""
keys = []
for cookie in iter(self):
keys.append(cookie.name)
return keys
def values(self):
"""Dict-like values() that returns a list of values of cookies from the jar.
See keys() and items()."""
values = []
for cookie in iter(self):
values.append(cookie.value)
return values
def items(self):
"""Dict-like items() that returns a list of name-value tuples from the jar.
        See keys() and values(). Allows client-code to call "dict(RequestsCookieJar)"
        and get a vanilla python dict of key value pairs."""
items = []
for cookie in iter(self):
items.append((cookie.name, cookie.value))
return items
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
def list_paths(self):
"""Utility method to list all the paths in the jar."""
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths
def multiple_domains(self):
"""Returns True if there are multiple domains in the jar.
Returns False otherwise."""
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False # there is only one domain in jar
def get_dict(self, domain=None, path=None):
"""Takes as an argument an optional domain and path and returns a plain old
Python dict of name-value pairs of cookies that meet the requirements."""
dictionary = {}
for cookie in iter(self):
if (domain is None or cookie.domain == domain) and (path is None
or cookie.path == path):
dictionary[cookie.name] = cookie.value
return dictionary
def __getitem__(self, name):
"""Dict-like __getitem__() for compatibility with client code. Throws exception
if there are more than one cookie with name. In that case, use the more
explicit get() method instead. Caution: operation is O(n), not O(1)."""
return self._find_no_duplicates(name)
def __setitem__(self, name, value):
"""Dict-like __setitem__ for compatibility with client code. Throws exception
if there is already a cookie of that name in the jar. In that case, use the more
explicit set() method instead."""
self.set(name, value)
def __delitem__(self, name):
"""Deletes a cookie given a name. Wraps cookielib.CookieJar's remove_cookie_by_name()."""
remove_cookie_by_name(self, name)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(cookie)
else:
super(RequestsCookieJar, self).update(other)
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values. Takes as args name
and optional domain and path. Returns a cookie.value. If there are conflicting cookies,
_find arbitrarily chooses one. See _find_no_duplicates if you want an exception thrown
if there are conflicting cookies."""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def _find_no_duplicates(self, name, domain=None, path=None):
"""__get_item__ and get call _find_no_duplicates -- never used in Requests internally.
Takes as args name and optional domain and path. Returns a cookie.value.
Throws KeyError if cookie is not found and CookieConflictError if there are
multiple cookies that match name and optionally domain and path."""
toReturn = None
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
if toReturn is not None: # if there are multiple cookies that meet passed in criteria
raise CookieConflictError('There are multiple cookies with name, %r' % (name))
toReturn = cookie.value # we will eventually return this as long as no cookie conflict
if toReturn:
return toReturn
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def __getstate__(self):
"""Unlike a normal CookieJar, this class is pickleable."""
state = self.__dict__.copy()
# remove the unpickleable RLock object
state.pop('_cookies_lock')
return state
def __setstate__(self, state):
"""Unlike a normal CookieJar, this class is pickleable."""
self.__dict__.update(state)
if '_cookies_lock' not in self.__dict__:
self._cookies_lock = threading.RLock()
def copy(self):
"""Return a copy of this RequestsCookieJar."""
new_cj = RequestsCookieJar()
new_cj.update(self)
return new_cj
def create_cookie(name, value, **kwargs):
"""Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
"""
result = dict(
version=0,
name=name,
value=value,
port=None,
domain='',
path='/',
secure=False,
expires=None,
discard=True,
comment=None,
comment_url=None,
rest={'HttpOnly': None},
rfc2109=False,)
badargs = set(kwargs) - set(result)
if badargs:
err = 'create_cookie() got unexpected keyword arguments: %s'
raise TypeError(err % list(badargs))
result.update(kwargs)
result['port_specified'] = bool(result['port'])
result['domain_specified'] = bool(result['domain'])
result['domain_initial_dot'] = result['domain'].startswith('.')
result['path_specified'] = bool(result['path'])
return cookielib.Cookie(**result)
def morsel_to_cookie(morsel):
"""Convert a Morsel object into a Cookie containing the one k/v pair."""
c = create_cookie(
name=morsel.key,
value=morsel.value,
version=morsel['version'] or 0,
port=None,
port_specified=False,
domain=morsel['domain'],
domain_specified=bool(morsel['domain']),
domain_initial_dot=morsel['domain'].startswith('.'),
path=morsel['path'],
path_specified=bool(morsel['path']),
secure=bool(morsel['secure']),
expires=morsel['max-age'] or morsel['expires'],
discard=False,
comment=morsel['comment'],
comment_url=bool(morsel['comment']),
rest={'HttpOnly': morsel['httponly']},
rfc2109=False,)
return c
def cookiejar_from_dict(cookie_dict, cookiejar=None):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
"""
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookie_dict is not None:
for name in cookie_dict:
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
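# Round-trip sketch (illustrative, not part of the original module): build a
# jar from a plain dict and read the cookies back out.
#
#   jar = cookiejar_from_dict({'session': 'abc123'})
#   jar.get_dict()      # -> {'session': 'abc123'}
#   jar['session']      # -> 'abc123'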
|
smartbgp/libbgp
|
refs/heads/master
|
libbgp/tests/unit/bgp/test_update.py
|
1
|
# Copyright 2015-2017 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from libbgp.bgp.update import Update
from libbgp.bgp.message import Message
class TestUpdate(unittest.TestCase):
def test_pack_nlri(self):
nlri = ['184.157.224.1/32', '32.65.243.12/30', '89.232.254.0/23', '69.179.221.0/24',
'61.172.0.0/16', '202.223.128.0/17', '156.152.0.0/15', '15.0.0.0/8',
'209.102.178.0/24', '66.112.100.0/22', '208.54.194.0/24']
hex_nlri = b' \xb8\x9d\xe0\x01\x1e A\xf3\x0c\x17Y\xe8\xfe\x18E\xb3\xdd\x10='\
b'\xac\x11\xca\xdf\x80\x0f\x9c\x98\x08\x0f\x18\xd1f\xb2\x16Bpd\x18\xd06\xc2'
self.assertEqual(hex_nlri, Update.pack_nlri(nlri))
def test_unpack_nlri(self):
nlri = ['184.157.224.1/32', '32.65.243.12/30', '89.232.254.0/23', '69.179.221.0/24',
'61.172.0.0/16', '202.223.128.0/17', '156.152.0.0/15', '15.0.0.0/8',
'209.102.178.0/24', '66.112.100.0/22', '208.54.194.0/24']
hex_nlri = b' \xb8\x9d\xe0\x01\x1e A\xf3\x0c\x17Y\xe8\xfe\x18E\xb3\xdd\x10='\
b'\xac\x11\xca\xdf\x80\x0f\x9c\x98\x08\x0f\x18\xd1f\xb2\x16Bpd\x18\xd06\xc2'
self.assertEqual(nlri, Update.unpack_nlri(hex_nlri))
def test_unpack_nlri_with_addpath(self):
hex_nlri = b'\x00\x00\x00\x01\x20\x05\x05\x05\x05\x00\x00\x00\x01\x20\xc0\xa8\x01\x05'
nlri = [
{'prefix': '5.5.5.5/32', 'path_id': 1},
{'prefix': '192.168.1.5/32', 'path_id': 1}
]
self.assertEqual(nlri, Update.unpack_nlri(hex_nlri, True))
def test_pack_nlri_with_addpath(self):
hex_nlri = b'\x00\x00\x00\x01\x20\x05\x05\x05\x05\x00\x00\x00\x01\x20\xc0\xa8\x01\x05'
nlri = [
{'prefix': '5.5.5.5/32', 'path_id': 1},
{'prefix': '192.168.1.5/32', 'path_id': 1}
]
self.assertEqual(hex_nlri, Update.pack_nlri(nlri, True))
def test_ipv4_unicast_with_rr(self):
# data_hex = b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff' \
# b'\xff\xff\xff\x00\x4a\x02\x00\x00\x00\x2e\x40\x01\x01' \
# b'\x00\x40\x02\x00\x40\x03\x04\xac\x10\x01\x0e\x80\x04' \
# b'\x04\x00\x00\x00\x00\x40\x05\x04\x00\x00\x00\x64\x80' \
# b'\x0a\x08\x02\x02\x02\x02\x64\x64\x64\x64\x80\x09\x04' \
# b'\xac\x10\x01\x0e\x20\xac\x10\x01\x0e'
data_dict = {
'msg': {
'attr': {
1: 0,
2: [],
3: '172.16.1.14',
4: 0,
5: 100,
8: ['1234:5678', '1122:3344', 'NO_EXPORT'],
9: '172.16.1.14',
10: ['2.2.2.2', '100.100.100.100']},
'nlri': ['172.16.1.14/32'],
'withdraw': []},
'type': 2}
data_hex = Message.pack(data_dict, {}).hex_value
self.assertEqual(data_dict, Message.unpack(data_hex).dict())
def test_ipv4_unicast_withdraw(self):
data_dict = {
'msg': {
'withdraw': ['172.16.1.14/32', '192.168.1.1/32']
},
'type': 2
}
data_hex = Message.pack(data_dict, {}).hex_value
data_dict_2 = Message.unpack(data_hex).dict()
self.assertEqual(data_dict['type'], data_dict_2['type'])
self.assertEqual(data_dict['msg']['withdraw'], data_dict_2['msg']['withdraw'])
if __name__ == '__main__':
unittest.main()
|
mattilyra/gensim
|
refs/heads/develop
|
gensim/sklearn_api/ldamodel.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Chinmaya Pancholi <chinmayapancholi13@gmail.com>
# Copyright (C) 2017 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""Scikit learn interface for :class:`~gensim.models.ldamodel.LdaModel`.
Follows scikit-learn API conventions to facilitate using gensim along with scikit-learn.
Examples
--------
>>> from gensim.test.utils import common_corpus, common_dictionary
>>> from gensim.sklearn_api import LdaTransformer
>>>
>>> # Reduce each document to 2 dimensions (topics) using the sklearn interface.
>>> model = LdaTransformer(num_topics=2, id2word=common_dictionary, iterations=20, random_state=1)
>>> docvecs = model.fit_transform(common_corpus)
"""
import numpy as np
from scipy import sparse
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.exceptions import NotFittedError
from gensim import models
from gensim import matutils
class LdaTransformer(TransformerMixin, BaseEstimator):
"""Base LDA module, wraps :class:`~gensim.models.ldamodel.LdaModel`.
    The inner workings of this class depend heavily on `Matthew D. Hoffman, David M. Blei, Francis Bach:
"Online Learning for Latent Dirichlet Allocation NIPS'10" <https://www.di.ens.fr/~fbach/mdhnips2010.pdf>`_ and
`David M. Blei, Andrew Y. Ng, Michael I. Jordan: "Latent Dirichlet Allocation"
<http://www.jmlr.org/papers/volume3/blei03a/blei03a.pdf>`_.
"""
def __init__(self, num_topics=100, id2word=None, chunksize=2000, passes=1, update_every=1, alpha='symmetric',
eta=None, decay=0.5, offset=1.0, eval_every=10, iterations=50, gamma_threshold=0.001,
minimum_probability=0.01, random_state=None, scorer='perplexity', dtype=np.float32):
"""
Parameters
----------
num_topics : int, optional
The number of requested latent topics to be extracted from the training corpus.
id2word : :class:`~gensim.corpora.dictionary.Dictionary`, optional
Mapping from integer ID to words in the corpus. Used to determine vocabulary size and logging.
chunksize : int, optional
Number of documents in batch.
passes : int, optional
Number of passes through the corpus during training.
update_every : int, optional
Number of documents to be iterated through for each update.
Set to 0 for batch learning, > 1 for online iterative learning.
alpha : {np.ndarray, str}, optional
            Can be set to a 1D array of length equal to the number of expected topics that expresses
            our a-priori belief for each topic's probability.
Alternatively default prior selecting strategies can be employed by supplying a string:
                * 'asymmetric': Uses a fixed normalized asymmetric prior of `1.0 / topicno`.
                * 'default': Learns an asymmetric prior from the corpus.
eta : {float, np.array, str}, optional
A-priori belief on word probability, this can be:
* scalar for a symmetric prior over topic/word probability,
* vector of length num_words to denote an asymmetric user defined probability for each word,
* matrix of shape (num_topics, num_words) to assign a probability for each word-topic combination,
* the string 'auto' to learn the asymmetric prior from the data.
decay : float, optional
A number between (0.5, 1] to weight what percentage of the previous lambda value is forgotten
when each new document is examined. Corresponds to Kappa from
`Matthew D. Hoffman, David M. Blei, Francis Bach:
"Online Learning for Latent Dirichlet Allocation NIPS'10" <https://www.di.ens.fr/~fbach/mdhnips2010.pdf>`_.
offset : float, optional
            Hyper-parameter that controls how much we slow down the first few iterations.
Corresponds to Tau_0 from `Matthew D. Hoffman, David M. Blei, Francis Bach:
"Online Learning for Latent Dirichlet Allocation NIPS'10" <https://www.di.ens.fr/~fbach/mdhnips2010.pdf>`_.
eval_every : int, optional
            Log perplexity is estimated once every `eval_every` model updates. Setting this to one slows down training by ~2x.
iterations : int, optional
Maximum number of iterations through the corpus when inferring the topic distribution of a corpus.
gamma_threshold : float, optional
Minimum change in the value of the gamma parameters to continue iterating.
minimum_probability : float, optional
Topics with a probability lower than this threshold will be filtered out.
random_state : {np.random.RandomState, int}, optional
Either a randomState object or a seed to generate one. Useful for reproducibility.
scorer : str, optional
Method to compute a score reflecting how well the model has fit the input corpus, allowed values are:
* 'perplexity': Perplexity of language model
                * 'u_mass': Use :class:`~gensim.models.coherencemodel.CoherenceModel` to compute a topic's coherence.
dtype : {numpy.float16, numpy.float32, numpy.float64}, optional
Data-type to use during calculations inside model. All inputs are also converted.
Notes
-----
Configure `passes` and `update_every` params to choose the mode among:
* online (single-pass): update_every != None and passes == 1
* online (multi-pass): update_every != None and passes > 1
* batch: update_every == None
By default, 'online (single-pass)' mode is used for training the LDA model.
"""
self.gensim_model = None
self.num_topics = num_topics
self.id2word = id2word
self.chunksize = chunksize
self.passes = passes
self.update_every = update_every
self.alpha = alpha
self.eta = eta
self.decay = decay
self.offset = offset
self.eval_every = eval_every
self.iterations = iterations
self.gamma_threshold = gamma_threshold
self.minimum_probability = minimum_probability
self.random_state = random_state
self.scorer = scorer
self.dtype = dtype
def fit(self, X, y=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {iterable of iterable of (int, int), scipy.sparse matrix}
A collection of documents in BOW format used for training the model.
Returns
-------
:class:`~gensim.sklearn_api.ldamodel.LdaTransformer`
The trained model.
"""
if sparse.issparse(X):
corpus = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
else:
corpus = X
self.gensim_model = models.LdaModel(
corpus=corpus, num_topics=self.num_topics, id2word=self.id2word,
chunksize=self.chunksize, passes=self.passes, update_every=self.update_every,
alpha=self.alpha, eta=self.eta, decay=self.decay, offset=self.offset,
eval_every=self.eval_every, iterations=self.iterations,
gamma_threshold=self.gamma_threshold, minimum_probability=self.minimum_probability,
random_state=self.random_state, dtype=self.dtype
)
return self
def transform(self, docs):
"""Infer the topic distribution for `docs`.
Parameters
----------
docs : {iterable of list of (int, number), list of (int, number)}
Document or sequence of documents in BoW format.
Returns
-------
numpy.ndarray of shape [`len(docs)`, `num_topics`]
The topic distribution for each input document.
"""
if self.gensim_model is None:
raise NotFittedError(
"This model has not been fitted yet. Call 'fit' with appropriate arguments before using this method."
)
        # If a single BoW document was passed, wrap it so we always operate on a corpus
if isinstance(docs[0], tuple):
docs = [docs]
# returning dense representation for compatibility with sklearn
# but we should go back to sparse representation in the future
distribution = [matutils.sparse2full(self.gensim_model[doc], self.num_topics) for doc in docs]
return np.reshape(np.array(distribution), (len(docs), self.num_topics))
def partial_fit(self, X):
"""Train model over a potentially incomplete set of documents.
Uses the parameters set in the constructor.
This method can be used in two ways:
* On an unfitted model in which case the model is initialized and trained on `X`.
* On an already fitted model in which case the model is **updated** by `X`.
Parameters
----------
X : {iterable of iterable of (int, int), scipy.sparse matrix}
A collection of documents in BOW format used for training the model.
Returns
-------
:class:`~gensim.sklearn_api.ldamodel.LdaTransformer`
The trained model.
"""
if sparse.issparse(X):
X = matutils.Sparse2Corpus(sparse=X, documents_columns=False)
if self.gensim_model is None:
self.gensim_model = models.LdaModel(
num_topics=self.num_topics, id2word=self.id2word,
chunksize=self.chunksize, passes=self.passes, update_every=self.update_every,
alpha=self.alpha, eta=self.eta, decay=self.decay, offset=self.offset,
eval_every=self.eval_every, iterations=self.iterations, gamma_threshold=self.gamma_threshold,
minimum_probability=self.minimum_probability, random_state=self.random_state,
dtype=self.dtype
)
self.gensim_model.update(corpus=X)
return self
def score(self, X, y=None):
"""Compute score reflecting how well the model has fitted for the input data.
The scoring method is set using the `scorer` argument in :meth:`~gensim.sklearn_api.ldamodel.LdaTransformer`.
Higher score is better.
Parameters
----------
X : iterable of list of (int, number)
Sequence of documents in BOW format.
Returns
-------
float
The score computed based on the selected method.
"""
if self.scorer == 'perplexity':
corpus_words = sum(cnt for document in X for _, cnt in document)
subsample_ratio = 1.0
perwordbound = \
self.gensim_model.bound(X, subsample_ratio=subsample_ratio) / (subsample_ratio * corpus_words)
return -1 * np.exp2(-perwordbound) # returning (-1*perplexity) to select model with minimum value
elif self.scorer == 'u_mass':
goodcm = models.CoherenceModel(model=self.gensim_model, corpus=X, coherence=self.scorer, topn=3)
return goodcm.get_coherence()
else:
raise ValueError("Invalid value {} supplied for `scorer` param".format(self.scorer))
|
SUSE/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-datalake-store/azure/mgmt/datalake/store/models/encryption_config.py
|
2
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class EncryptionConfig(Model):
"""The encryption configuration for the account.
:param type: The type of encryption configuration being used. Currently
the only supported types are 'UserManaged' and 'ServiceManaged'. Possible
values include: 'UserManaged', 'ServiceManaged'
:type type: str or :class:`EncryptionConfigType
<azure.mgmt.datalake.store.models.EncryptionConfigType>`
:param key_vault_meta_info: The Key Vault information for connecting to
user managed encryption keys.
:type key_vault_meta_info: :class:`KeyVaultMetaInfo
<azure.mgmt.datalake.store.models.KeyVaultMetaInfo>`
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'EncryptionConfigType'},
'key_vault_meta_info': {'key': 'keyVaultMetaInfo', 'type': 'KeyVaultMetaInfo'},
}
def __init__(self, type, key_vault_meta_info=None):
self.type = type
self.key_vault_meta_info = key_vault_meta_info
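# Construction sketch (the Key Vault value is a placeholder, not from the
# original file):
#
#   service_managed = EncryptionConfig(type='ServiceManaged')
#   user_managed = EncryptionConfig(type='UserManaged',
#                                   key_vault_meta_info=my_key_vault_meta_info)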
|
futurepr0n/Books-solutions
|
refs/heads/master
|
Python-For-Everyone-Horstmann/Chapter9-Objects-and-Classes/test_P9_15.py
|
1
|
# Unit tests for P9_15.py
# IMPORTS
from math import sqrt
from math import pi
import P9_15
import unittest
# main
class SphereTests(unittest.TestCase):
def setUp(self):
self.sphere = P9_15.Sphere(5)
def test_get_radius(self):
self.assertEqual(5, self.sphere.get_radius())
def test_volume(self):
self.assertEqual(4/3 * pi * 5 ** 3, self.sphere.volume())
def test_surface(self):
self.assertEqual(4 * pi * 5 ** 2, self.sphere.surface())
class CylinderTests(unittest.TestCase):
def setUp(self):
self.cylinder = P9_15.Cylinder(8, 10)
def test_get_radius(self):
self.assertEqual(8, self.cylinder.get_radius())
def test_get_height(self):
self.assertEqual(10, self.cylinder.get_height())
def test_volume(self):
        self.assertEqual(pi * 8 ** 2 * 10, self.cylinder.volume())
def test_surface(self):
        self.assertEqual(2 * pi * 8 ** 2 + 2 * pi * 8 * 10, self.cylinder.surface())
class ConeTests(unittest.TestCase):
def setUp(self):
self.cone = P9_15.Cone(10, 15)
def test_get_radious(self):
self.assertEqual(10, self.cone.get_radius())
def test_get_height(self):
self.assertEqual(15, self.cone.get_height())
def test_volume(self):
self.assertEqual(1/3 * pi * 10 ** 2 * 15, self.cone.volume())
def test_surface(self):
self.assertEqual(pi * 10 ** 2 + pi * 10 * sqrt(15 ** 2 + 10 ** 2), self.cone.surface())
# PROGRAM RUN
if __name__ == '__main__':
unittest.main()
|
maurerpe/FreeCAD
|
refs/heads/master
|
src/Mod/Plot/plotPositions/__init__.py
|
280
|
#***************************************************************************
#* *
#* Copyright (c) 2011, 2012 *
#* Jose Luis Cercos Pita <jlcercos@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import TaskPanel
def load():
"""Load the tool"""
TaskPanel.createTask()
|
inspectlabs/mongo-python-driver
|
refs/heads/master
|
pymongo/thread_util.py
|
24
|
# Copyright 2012-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for multi-threading support."""
import threading
try:
from time import monotonic as _time
except ImportError:
from time import time as _time
from pymongo.monotonic import time as _time
from pymongo.errors import ExceededMaxWaiters
### Begin backport from CPython 3.2 for timeout support for Semaphore.acquire
class Semaphore:
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
self._cond = threading.Condition(threading.Lock())
self._value = value
def acquire(self, blocking=True, timeout=None):
if not blocking and timeout is not None:
raise ValueError("can't specify timeout for non-blocking acquire")
rc = False
endtime = None
self._cond.acquire()
while self._value == 0:
if not blocking:
break
if timeout is not None:
if endtime is None:
endtime = _time() + timeout
else:
timeout = endtime - _time()
if timeout <= 0:
break
self._cond.wait(timeout)
else:
self._value = self._value - 1
rc = True
self._cond.release()
return rc
__enter__ = acquire
def release(self):
self._cond.acquire()
self._value = self._value + 1
self._cond.notify()
self._cond.release()
def __exit__(self, t, v, tb):
self.release()
@property
def counter(self):
return self._value
class BoundedSemaphore(Semaphore):
"""Semaphore that checks that # releases is <= # acquires"""
def __init__(self, value=1):
Semaphore.__init__(self, value)
self._initial_value = value
def release(self):
if self._value >= self._initial_value:
raise ValueError("Semaphore released too many times")
return Semaphore.release(self)
### End backport from CPython 3.2
class DummySemaphore(object):
def __init__(self, value=None):
pass
def acquire(self, blocking=True, timeout=None):
return True
def release(self):
pass
class MaxWaitersBoundedSemaphore(object):
def __init__(self, semaphore_class, value=1, max_waiters=1):
self.waiter_semaphore = semaphore_class(max_waiters)
self.semaphore = semaphore_class(value)
def acquire(self, blocking=True, timeout=None):
if not self.waiter_semaphore.acquire(False):
raise ExceededMaxWaiters()
try:
return self.semaphore.acquire(blocking, timeout)
finally:
self.waiter_semaphore.release()
def __getattr__(self, name):
return getattr(self.semaphore, name)
class MaxWaitersBoundedSemaphoreThread(MaxWaitersBoundedSemaphore):
def __init__(self, value=1, max_waiters=1):
MaxWaitersBoundedSemaphore.__init__(
self, BoundedSemaphore, value, max_waiters)
def create_semaphore(max_size, max_waiters):
if max_size is None:
return DummySemaphore()
else:
if max_waiters is None:
return BoundedSemaphore(max_size)
else:
return MaxWaitersBoundedSemaphoreThread(max_size, max_waiters)
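# Usage sketch (illustrative, not part of the original module): bound a pool
# at ten concurrent check-outs with at most five threads allowed to wait.
#
#   sem = create_semaphore(max_size=10, max_waiters=5)
#   if sem.acquire(blocking=True, timeout=1.0):
#       try:
#           pass  # use the pooled resource
#       finally:
#           sem.release()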
class Event(object):
"""Copy of standard threading.Event, but uses a custom condition class.
Allows async frameworks to override monitors' synchronization behavior
with TopologySettings.condition_class.
Copied from CPython's threading.py at hash c7960cc9.
"""
def __init__(self, condition_class):
self._cond = condition_class(threading.Lock())
self._flag = False
def is_set(self):
"""Return true if and only if the internal flag is true."""
return self._flag
isSet = is_set
def set(self):
"""Set the internal flag to true.
All threads waiting for it to become true are awakened. Threads
that call wait() once the flag is true will not block at all.
"""
self._cond.acquire()
try:
self._flag = True
self._cond.notify_all()
finally:
self._cond.release()
def clear(self):
"""Reset the internal flag to false.
Subsequently, threads calling wait() will block until set() is called to
set the internal flag to true again.
"""
self._cond.acquire()
try:
self._flag = False
finally:
self._cond.release()
def wait(self, timeout=None):
"""Block until the internal flag is true.
If the internal flag is true on entry, return immediately. Otherwise,
block until another thread calls set() to set the flag to true, or until
the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
This method returns the internal flag on exit, so it will always return
True except if a timeout is given and the operation times out.
"""
self._cond.acquire()
try:
signaled = self._flag
if not signaled:
signaled = self._cond.wait(timeout)
return signaled
finally:
self._cond.release()
|
boxbillinggit/RPI_bmw-infotainment
|
refs/heads/master
|
plugin/plugin.service.bmw-infotainment/resources/lib/bmbt.py
|
1
|
import main_thread
import system
import kodi.builtin
import signaldb as sdb
import log as log_module
from kodi import gui
from buttons import Button
log = log_module.init_logger(__name__)
__author__ = 'lars'
def get_active_screens(bitmask):
if not bitmask:
return "None"
states = []
for idx, state in enumerate(Monitor.Screen):
if bitmask & pow(2, idx):
states.append(state)
return ", ".join(states)
class Monitor(object):
# higher number is suppressing states with lower numbers
SCREEN_OFF = 0
SCREEN_MEDIA, SCREEN_INFO, SCREEN_TONE, SCREEN_SELECT = map(lambda exp: pow(2, exp), range(4))
Screen = ("SCREEN_MEDIA", "SCREEN_INFO", "SCREEN_TONE", "SCREEN_SELECT")
Media = ("MEDIA_OFF", "MEDIA_CDC", "MEDIA_RADIO", "MEDIA_TAPE")
MEDIA_OFF, MEDIA_CDC, MEDIA_RADIO, MEDIA_TAPE = range(4)
def __init__(self, send):
self.send = send
self.current_screen = Monitor.SCREEN_OFF
self.current_media = Monitor.MEDIA_OFF
self.cdc_active = False
self.flush_bmbt = False
def init_events(self, bind_event):
self.init_controls(bind_event)
src, dst = "IBUS_DEV_BMBT", "IBUS_DEV_RAD"
# F0 05 FF 47 00 38 75 -> info push
# F0 05 FF 47 00 0F 42 -> select push (to GLOBAL)
# TODO: INFO is not detected for now (haven't found any good strategy for detecting "INFO" off)
# bind_event(sdb.create((src, None, "info.push")), self.set_screen, Monitor.SCREEN_INFO, True)
bind_event(sdb.create((src, dst, "tone.push")), self.tone_or_select, Monitor.SCREEN_TONE)
bind_event(sdb.create((src, None, "select.push")), self.tone_or_select, Monitor.SCREEN_SELECT)
src, dst = "IBUS_DEV_RAD", "IBUS_DEV_GT"
bind_event(sdb.create((src, dst, "screen.mainmenu")), self.set_screen, Monitor.SCREEN_MEDIA, False)
bind_event(sdb.create((src, dst, "screen.current")), self.set_screen, Monitor.SCREEN_MEDIA, False)
bind_event(sdb.create((src, dst, "screen.tone-off")), self.tone_or_select, False)
bind_event(sdb.create((src, dst, "screen.select-off")), self.tone_or_select, False)
bind_event(sdb.create((src, dst, "screen.tone-select-off")), self.tone_or_select, False)
# available values for state RADIO: "FM", "FMD", "AM"
regexp = ".* (?:{FM}|{AM}) .*".format(FM=sdb.hex_string("FM"), AM=sdb.hex_string("AM"))
# rendering finished.
# bind_event(sdb.create((src, dst, "index-area.refresh")), self.refresh_graphics)
bind_event(sdb.create((src, dst, "index-area.write"), FIELD="1", TEXT=regexp), self.set_media_source, Monitor.MEDIA_RADIO)
bind_event(sdb.create((src, dst, "text-area.upper"), LEFT=".*", MID=".*", RIGHT=sdb.hex_string("CD") + ".*"), self.set_media_source, Monitor.MEDIA_CDC)
# TODO: "TAPE" and "OFF"
def init_controls(self, bind_event):
""" Initialize controls, etates, etc. """
src, dst = "IBUS_DEV_BMBT", "IBUS_DEV_GT"
right_knob = Button(hold=kodi.builtin.action("back"), release=kodi.builtin.action("Select"))
bind_event(sdb.create((src, dst, "right-knob.push")), self.ctrl_handler, right_knob.set_state_push)
bind_event(sdb.create((src, dst, "right-knob.hold")), self.ctrl_handler, right_knob.set_state_hold)
bind_event(sdb.create((src, dst, "right-knob.release")), self.ctrl_handler, right_knob.set_state_release)
bind_event(sdb.create((src, dst, "right-knob.turn-left"), SCROLL_SPEED="([1-9])"), self.ctrl_handler, kodi.builtin.scroll("up"))
bind_event(sdb.create((src, dst, "right-knob.turn-right"), SCROLL_SPEED="([1-9])"), self.ctrl_handler, kodi.builtin.scroll("down"))
src, dst = "IBUS_DEV_BMBT", "IBUS_DEV_RAD"
left = Button(push=kodi.builtin.action("Left"), hold=kodi.builtin.action("Left"))
bind_event(sdb.create((src, dst, "left.push")), self.ctrl_handler, left.set_state_push)
bind_event(sdb.create((src, dst, "left.hold")), self.ctrl_handler, left.set_state_hold)
bind_event(sdb.create((src, dst, "left.release")), self.ctrl_handler, left.set_state_release)
right = Button(push=kodi.builtin.action("Right"), hold=kodi.builtin.action("Right"))
bind_event(sdb.create((src, dst, "right.push")), self.ctrl_handler, right.set_state_push)
bind_event(sdb.create((src, dst, "right.hold")), self.ctrl_handler, right.set_state_hold)
bind_event(sdb.create((src, dst, "right.release")), self.ctrl_handler, right.set_state_release)
src, dst = "IBUS_DEV_BMBT", "IBUS_DEV_LOC"
bind_event(sdb.create((src, None, "clock.push")), self.ctrl_handler, toggle_gui, gui.AddonOverview)
src, dst = "IBUS_DEV_RAD", "IBUS_DEV_GT"
bind_event(sdb.create((src, dst, "index-area.refresh")), self.flush_bmbt_menu)
def ctrl_handler(self, fcn, *args, **kwargs):
""" Controls is active only if current state is correct """
if self.cdc_active:
fcn(*args, **kwargs)
def set_media_source(self, source):
""" Only one media source could be active """
self.current_media = source
self.set_screen(Monitor.SCREEN_MEDIA, True)
log.debug("Current Media: {STATE}".format(STATE=Monitor.Media[self.current_media]))
def tone_or_select(self, state):
""" tone and select can only exist one at time (one closes the other, etc) """
# clear bits first
self.current_screen &= ~(Monitor.SCREEN_TONE | Monitor.SCREEN_SELECT)
if state:
self.current_screen |= state
self.evaluate_screen()
def set_screen(self, screen, active):
""" current active screen changed """
if active:
self.current_screen |= screen
else:
self.current_screen &= ~screen
self.evaluate_screen()
def evaluate_screen(self):
log.debug("Current Active Screen: {BITMASK} ({STATES})".format(BITMASK=bin(self.current_screen), STATES=get_active_screens(self.current_screen)))
# if media is active, and no higher-priority screen is occupying, activate screen
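# (added note) SCREEN_MEDIA is the lowest bit, so "current_screen <= SCREEN_MEDIA" holds
# only when no higher-priority bit (INFO/TONE/SELECT) is set.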
if self.current_screen & Monitor.SCREEN_MEDIA and self.current_screen <= Monitor.SCREEN_MEDIA and self.current_media == Monitor.MEDIA_CDC:
self.state_cdc()
else:
self.state_not_cdc()
def flush_bmbt_menu(self):
""" This removes the CD-changer buttons (releases the turn knob from changing disc during use) """
if not self.flush_bmbt:
return
self.flush_bmbt = False
# TODO: does this work? (test using CDC as device also)
src, dst = "IBUS_DEV_RAD", "IBUS_DEV_GT"
self.send(sdb.create((src, dst, "index-area.flush")))
self.send(sdb.create((src, dst, "index-area.refresh")))
def state_not_cdc(self):
""" transition from CDC -> OTHER """
if not self.cdc_active:
return
log.debug("Request screen off")
self.cdc_active = False
system.screen_off()
open_gui(gui.DefaultScreen)
def state_cdc(self):
""" transition from OTHER -> CDC """
if self.cdc_active:
return
log.debug("Request screen on")
self.cdc_active = True
self.flush_bmbt = True
gui.close_all_windows()
system.screen_on()
def toggle_gui(WindowClass):
""" Open or close a window """
if close_gui(WindowClass):
return
open_gui(WindowClass)
def close_gui(WindowClass):
""" Close a specific Window """
win = gui.window_stack.pop(WindowClass.__name__, None)
if not win:
return False
win.close()
del win
return True
def open_gui(WindowClass):
""" close already open windows and open window, """
# prevents flickering if we try opening a visible window.
# if WindowClass.__name__ in gui.window_stack:
# return
gui.close_all_windows()
main_thread.add(gui.open_window, WindowClass)
|
pierce403/EmpirePanel
|
refs/heads/master
|
lib/modules/situational_awareness/network/smbautobrute.py
|
1
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Invoke-SMBAutoBrute',
# list of one or more authors for the module
'Author': ['@curi0usJack'],
# more verbose multi-line description of the module
'Description': ('Runs an SMB brute against a list of usernames/passwords. '
'Will check the DCs to interrogate the bad password count of the '
'users and will keep bruting until either a valid credential is '
'discovered or the bad password count reaches one below the threshold. '
'Run "shell net accounts" on a valid agent to determine the lockout '
'threshold. VERY noisy! Generates a ton of traffic on the DCs.' ),
# True if the module needs to run in the background
'Background' : True,
# File extension to save the file as
'OutputExtension' : None,
# True if the module needs admin rights to run
'NeedsAdmin' : False,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
# The minimum PowerShell version needed for the module to run
'MinPSVersion' : '2',
# list of any references/other comments
'Comments': [
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to run the brute force from.',
'Required' : True,
'Value' : ''
},
'UserList' : {
'Description' : 'File of users to brute (on the target), one per line. If not specified, autobrute will query a list of users with badpwdcount < LockoutThreshold - 1 for each password brute. Wrap path in double quotes.',
'Required' : False,
'Value' : ''
},
'PasswordList' : {
'Description' : 'Comma separated list of passwords to test. Wrap in double quotes.',
'Required' : True,
'Value' : ''
},
'ShowVerbose' : {
'Description' : 'Show failed attempts & skipped accounts in addition to success.',
'Required' : False,
'Value' : ''
},
'LockoutThreshold' : {
'Description' : 'The max number of bad password attempts before the account locks. Autobrute will try up to one less than this setting.',
'Required' : True,
'Value' : ''
},
'Delay' : {
'Description' : 'Amount of time to wait (in milliseconds) between attempts. Default 100.',
'Required' : False,
'Value' : ''
},
'StopOnSuccess' : {
'Description' : 'Quit running after the first successful authentication.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
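# Illustrative example (hypothetical values): params = [('LockoutThreshold', '5'),
# ('PasswordList', 'Password1,Summer17')] would fill in the matching option values above.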
def generate(self):
# use the pattern below
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/Invoke-SMBAutoBrute.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
scriptcmd = "Invoke-SMBAutoBrute"
# add any arguments to the end execution of the script
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
scriptcmd += " -" + str(option)
else:
scriptcmd += " -" + str(option) + " " + str(values['Value'])
script += scriptcmd
#print helpers.color(scriptcmd)
return script
|
pwmarcz/django
|
refs/heads/master
|
django/utils/html_parser.py
|
79
|
from django.utils.six.moves import html_parser as _html_parser
import re
import sys
current_version = sys.version_info
use_workaround = (
(current_version < (2, 7, 3)) or
(current_version >= (3, 0) and current_version < (3, 2, 3))
)
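# For example, Python 2.7.2 or 3.2.2 falls back to the patched parser below, while
# 2.7.3+ and 3.2.3+ use the stdlib HTMLParser (with the convert_charrefs wrapper on 3.4+).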
HTMLParseError = _html_parser.HTMLParseError
if not use_workaround:
if current_version >= (3, 4):
class HTMLParser(_html_parser.HTMLParser):
"""Explicitly set convert_charrefs to be False.
This silences a deprecation warning on Python 3.4, but we can't do
it at call time because Python 2.7 does not have the keyword
argument.
"""
def __init__(self, convert_charrefs=False, **kwargs):
_html_parser.HTMLParser.__init__(self, convert_charrefs=convert_charrefs, **kwargs)
else:
HTMLParser = _html_parser.HTMLParser
else:
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
class HTMLParser(_html_parser.HTMLParser):
"""
Patched version of stdlib's HTMLParser with patch from:
http://bugs.python.org/issue670664
"""
def __init__(self):
_html_parser.HTMLParser.__init__(self)
self.cdata_tag = None
def set_cdata_mode(self, tag):
try:
self.interesting = _html_parser.interesting_cdata
except AttributeError:
self.interesting = re.compile(r'</\s*%s\s*>' % tag.lower(), re.I)
self.cdata_tag = tag.lower()
def clear_cdata_mode(self):
self.interesting = _html_parser.interesting_normal
self.cdata_tag = None
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i + 1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
m = _html_parser.attrfind.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif (attrvalue[:1] == '\'' == attrvalue[-1:] or
attrvalue[:1] == '"' == attrvalue[-1:]):
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = (len(self.__starttag_text)
- self.__starttag_text.rfind("\n"))
else:
offset = offset + len(self.__starttag_text)
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag) # <--------------------------- Changed
return endpos
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i + 2] == "</", "unexpected call to parse_endtag"
match = _html_parser.endendtag.search(rawdata, i + 1) # >
if not match:
return -1
j = match.end()
match = _html_parser.endtagfind.match(rawdata, i) # </ + tag + >
if not match:
if self.cdata_tag is not None: # *** add ***
self.handle_data(rawdata[i:j]) # *** add ***
return j # *** add ***
self.error("bad end tag: %r" % (rawdata[i:j],))
# --- changed start ---------------------------------------------------
tag = match.group(1).strip()
if self.cdata_tag is not None:
if tag.lower() != self.cdata_tag:
self.handle_data(rawdata[i:j])
return j
# --- changed end -----------------------------------------------------
self.handle_endtag(tag.lower())
self.clear_cdata_mode()
return j
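# Illustrative example (added comment): with the patch above, an end tag inside a
# <script>/<style> block that does not match the open CDATA tag (e.g. a stray '</p>')
# is treated as character data rather than ending the block.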
|
xzYue/odoo
|
refs/heads/8.0
|
addons/document/content_index.py
|
430
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import os
import tempfile
from subprocess import Popen, PIPE
_logger = logging.getLogger(__name__)
class NhException(Exception):
pass
class indexer(object):
""" An indexer knows how to parse the content of some file.
Typically, one indexer should be instantiated per file
type.
Override this class to add more functionality. Note that
you only need to override whichever of the Content or File
methods gives the best result. """
def _getMimeTypes(self):
""" Return supported mimetypes """
return []
def _getExtensions(self):
return []
def _getDefMime(self, ext):
""" Return a mimetype for this document type, ideally the
closest to the extension ext. """
mts = self._getMimeTypes()
if len(mts):
return mts[0]
return None
def indexContent(self, content, filename=None, realfile=None):
""" Use either content or the real file, to index.
Some parsers will work better with the actual
content, others parse a file easier. Try the
optimal.
"""
res = ''
try:
if content != None:
return self._doIndexContent(content)
except NhException:
pass
if realfile != None:
try:
return self._doIndexFile(realfile)
except NhException:
pass
fp = open(realfile,'rb')
try:
content2 = fp.read()
finally:
fp.close()
# The not-handled exception may be raised here
return self._doIndexContent(content2)
# last try, with a tmp file
if content:
try:
fname,ext = filename and os.path.splitext(filename) or ('','')
fd, rfname = tempfile.mkstemp(suffix=ext)
os.write(fd, content)
os.close(fd)
res = self._doIndexFile(rfname)
os.unlink(rfname)
return res
except NhException:
pass
raise NhException('No appropriate method to index file.')
def _doIndexContent(self, content):
raise NhException("Content cannot be handled here.")
def _doIndexFile(self, fpath):
raise NhException("Content cannot be handled here.")
def __repr__(self):
return "<indexer %s.%s>" %(self.__module__, self.__class__.__name__)
def mime_match(mime, mdict):
if mdict.has_key(mime):
return (mime, mdict[mime])
if '/' in mime:
mpat = mime.split('/')[0]+'/*'
if mdict.has_key(mpat):
return (mime, mdict[mpat])
return (None, None)
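# Illustrative example (added comment): with mdict = {'text/*': some_indexer},
# mime_match('text/html', mdict) matches the 'text/*' wildcard and returns
# ('text/html', some_indexer); a completely unknown mimetype returns (None, None).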
class contentIndex(object):
def __init__(self):
self.mimes = {}
self.exts = {}
def register(self, obj):
f = False
for mime in obj._getMimeTypes():
self.mimes[mime] = obj
f = True
for ext in obj._getExtensions():
self.exts[ext] = obj
f = True
if f:
_logger.debug('Register content indexer: %r.', obj)
if not f:
raise Exception("Your indexer should at least support a mimetype or extension.")
def doIndex(self, content, filename=None, content_type=None, realfname=None, debug=False):
fobj = None
fname = None
mime = None
if content_type and self.mimes.has_key(content_type):
mime = content_type
fobj = self.mimes[content_type]
elif filename:
bname,ext = os.path.splitext(filename)
if self.exts.has_key(ext):
fobj = self.exts[ext]
mime = fobj._getDefMime(ext)
if content_type and not fobj:
mime,fobj = mime_match(content_type, self.mimes)
if not fobj:
try:
if realfname :
fname = realfname
else:
try:
bname,ext = os.path.splitext(filename or 'test.tmp')
except Exception:
bname, ext = filename, 'tmp'
fd, fname = tempfile.mkstemp(suffix=ext)
os.write(fd, content)
os.close(fd)
pop = Popen(['file','-b','--mime',fname], shell=False, stdout=PIPE)
(result, _) = pop.communicate()
mime2 = result.split(';')[0]
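# (added note) the `file -b --mime` call above typically prints something like
# 'text/plain; charset=us-ascii', so only the part before ';' is kept as the mimetype.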
_logger.debug('File gives us: %s', mime2)
# Note that the temporary file still exists now.
mime,fobj = mime_match(mime2, self.mimes)
if not mime:
mime = mime2
except Exception:
_logger.exception('Cannot determine mime type.')
try:
if fobj:
res = (mime, fobj.indexContent(content,filename,fname or realfname) )
else:
_logger.debug("Have no object, return (%s, None).", mime)
res = (mime, '')
except Exception:
_logger.exception("Cannot index file %s (%s).",
filename, fname or realfname)
res = (mime, '')
# If we created a tmp file, unlink it now
if not realfname and fname:
try:
os.unlink(fname)
except Exception:
_logger.exception("Cannot unlink %s.", fname)
return res
cntIndex = contentIndex()
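# Illustrative sketch (not part of the original module): a minimal plain-text indexer
# would subclass indexer, declare its mimetypes/extensions, implement _doIndexContent,
# and register itself with the shared contentIndex instance above:
#
#   class txt_indexer(indexer):
#       def _getMimeTypes(self):
#           return ['text/plain']
#       def _getExtensions(self):
#           return ['.txt']
#       def _doIndexContent(self, content):
#           return content
#
#   cntIndex.register(txt_indexer())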
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|