| repo_name | path | language | license | size |
|---|---|---|---|---|
# -*- coding: utf-8 -*-
# $File: edge.py
# $Date: Apr 1, 10:40, 2015.
__author__ = 'tobin'
import cv2
def get_canny_edge(image):
""" canny edge detection
:param image:
:return:
edge: edge graph
"""
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
edge = cv2.Canny(gray, 500, 1000, apertureSize=5)
return edge
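# A minimal usage sketch (the file names are illustrative; the 500/1000
# thresholds above assume apertureSize=5):
#
#   image = cv2.imread('input.jpg')
#   edges = get_canny_edge(image)
#   cv2.imwrite('edges.png', edges)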
| skyczhao/silver | AOTee/api/edge.py | Python | gpl-2.0 | 348 |
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import boto3
import unittest
from urlparse import urlparse
from moto import mock_s3
from app import create_app
from app.bitstore import BitStore
class BitStoreTestCase(unittest.TestCase):
def setUp(self):
self.app = create_app()
def test_metadata_s3_key(self):
metadata = BitStore(publisher="pub_test", package="test_package")
expected = "{t}/pub_test/test_package/_v/latest/datapckage.json". \
format(t=metadata.prefix)
self.assertEqual(expected, metadata.build_s3_key('datapckage.json'))
def test_metadata_s3_prefix(self):
metadata = BitStore(publisher="pub_test", package="test_package")
expected = "{t}/pub_test/test_package".format(t=metadata.prefix)
self.assertEqual(expected, metadata.build_s3_base_prefix())
def test_extract_information_from_s3_url(self):
metadata = BitStore(publisher="pub_test", package="test_package")
s3_key = metadata.build_s3_key('datapackage.json')
pub, package, version = BitStore.extract_information_from_s3_url(s3_key)
self.assertEqual(pub, 'pub_test')
self.assertEqual(package, 'test_package')
self.assertEqual(version, 'latest')
@mock_s3
def test_save(self):
with self.app.app_context():
s3 = boto3.client('s3')
bucket_name = self.app.config['S3_BUCKET_NAME']
s3.create_bucket(Bucket=bucket_name)
metadata = BitStore(publisher="pub_test",
package="test_package",
body='hi')
key = metadata.build_s3_key('datapackage.json')
metadata.save_metadata()
obs_list = list(s3.list_objects(Bucket=bucket_name, Prefix=key). \
get('Contents'))
self.assertEqual(1, len(obs_list))
self.assertEqual(key, obs_list[0]['Key'])
res = s3.get_object_acl(Bucket=bucket_name, Key=key)
owner_id = res['Owner']['ID']
aws_all_user_group_url = 'http://acs.amazonaws.com/groups/global/AllUsers'
full_control = filter(lambda grant: grant['Permission'] == 'FULL_CONTROL', res['Grants'])
self.assertEqual(len(full_control), 1)
self.assertEqual(full_control[0].get('Grantee')['ID'], owner_id)
read_control = filter(lambda grant: grant['Permission'] == 'READ', res['Grants'])
self.assertEqual(len(read_control), 1)
self.assertEqual(read_control[0].get('Grantee')['URI'], aws_all_user_group_url)
@mock_s3
def test_get_metadata_body(self):
with self.app.app_context():
s3 = boto3.client('s3')
bucket_name = self.app.config['S3_BUCKET_NAME']
s3.create_bucket(Bucket=bucket_name)
metadata = BitStore(publisher="pub_test",
package="test_package",
body='hi')
s3.put_object(
Bucket=bucket_name,
Key=metadata.build_s3_key('datapackage.json'),
Body=metadata.body)
self.assertEqual(metadata.body, metadata.get_metadata_body())
@mock_s3
def test_get_all_metadata_name_for_publisher(self):
with self.app.app_context():
s3 = boto3.client('s3')
bucket_name = self.app.config['S3_BUCKET_NAME']
s3.create_bucket(Bucket=bucket_name)
metadata = BitStore(publisher="pub_test",
package="test_package",
body='hi')
s3.put_object(
Bucket=bucket_name,
Key=metadata.build_s3_key('datapackage.json'),
Body=metadata.body)
self.assertEqual(1, len(metadata.get_all_metadata_name_for_publisher()))
@mock_s3
def test_get_empty_metadata_name_for_publisher(self):
with self.app.app_context():
s3 = boto3.client('s3')
bucket_name = self.app.config['S3_BUCKET_NAME']
s3.create_bucket(Bucket=bucket_name)
metadata = BitStore(publisher="pub_test",
package="test_package",
body='hi')
s3.put_object(Bucket=bucket_name,
Key='test/key.json',
Body=metadata.body)
self.assertEqual(0, len(metadata.get_all_metadata_name_for_publisher()))
@mock_s3
def test_generate_pre_signed_put_obj_url(self):
with self.app.app_context():
s3 = boto3.client('s3')
bucket_name = self.app.config['S3_BUCKET_NAME']
s3.create_bucket(Bucket=bucket_name)
metadata = BitStore(publisher="pub_test",
package="test_package",
body='hi')
post = metadata.generate_pre_signed_post_object(123, 'datapackage.json')
parsed = urlparse(post['url'])
self.assertEqual(parsed.netloc,
's3-{region}.amazonaws.com'.
format(region=self.app.config['AWS_REGION']))
self.assertEqual('public-read', post['fields']['acl'])
self.assertEqual('text/plain', post['fields']['Content-Type'])
@mock_s3
def test_get_readme_object_key(self):
with self.app.app_context():
bit_store = BitStore('test_pub', 'test_package')
s3 = boto3.client('s3')
bucket_name = self.app.config['S3_BUCKET_NAME']
s3.create_bucket(Bucket=bucket_name)
read_me_key = bit_store.build_s3_key('readme.md')
s3.put_object(Bucket=bucket_name, Key=read_me_key, Body='')
self.assertEqual(bit_store.get_readme_object_key(), read_me_key)
@mock_s3
def test_return_none_if_no_readme_found(self):
with self.app.app_context():
bit_store = BitStore('test_pub', 'test_package')
s3 = boto3.client('s3')
bucket_name = self.app.config['S3_BUCKET_NAME']
s3.create_bucket(Bucket=bucket_name)
read_me_key = bit_store.build_s3_key('test.md')
s3.put_object(Bucket=bucket_name, Key=read_me_key, Body='')
self.assertEqual(bit_store.get_readme_object_key(), 'None')
@mock_s3
def test_return_none_if_object_not_found(self):
with self.app.app_context():
bit_store = BitStore('test_pub', 'test_package')
s3 = boto3.client('s3')
bucket_name = self.app.config['S3_BUCKET_NAME']
s3.create_bucket(Bucket=bucket_name)
read_me_key = bit_store.build_s3_key('test.md')
s3.put_object(Bucket=bucket_name, Key=read_me_key, Body='')
self.assertEqual(bit_store.get_s3_object(read_me_key + "testing"), None)
self.assertEqual(bit_store.get_s3_object('None'), None)
@mock_s3
def test_change_acl(self):
with self.app.app_context():
public_grants = {
'CanonicalUser': 'FULL_CONTROL',
'Group': 'READ'
}
private_grants = {'CanonicalUser': 'FULL_CONTROL'}
bit_store = BitStore('test_pub', 'test_package', body='test')
s3 = boto3.client('s3')
bucket_name = self.app.config['S3_BUCKET_NAME']
s3.create_bucket(Bucket=bucket_name)
metadata_key = bit_store.build_s3_key('datapackage.json')
bit_store.save_metadata()
res = s3.get_object_acl(Bucket=bucket_name, Key=metadata_key)
owner_id = res['Owner']['ID']
aws_all_user_group_url = 'http://acs.amazonaws.com/groups/global/AllUsers'
full_control = filter(lambda grant: grant['Permission'] == 'FULL_CONTROL', res['Grants'])
self.assertEqual(len(full_control), 1)
self.assertEqual(full_control[0].get('Grantee')['ID'], owner_id)
read_control = filter(lambda grant: grant['Permission'] == 'READ', res['Grants'])
self.assertEqual(len(read_control), 1)
self.assertEqual(read_control[0].get('Grantee')['URI'], aws_all_user_group_url)
bit_store.change_acl("private")
res = s3.get_object_acl(Bucket=bucket_name, Key=metadata_key)
full_control = filter(lambda grant: grant['Permission'] == 'FULL_CONTROL', res['Grants'])
self.assertEqual(len(full_control), 1)
self.assertEqual(full_control[0].get('Grantee')['ID'], owner_id)
read_control = filter(lambda grant: grant['Permission'] == 'READ', res['Grants'])
self.assertEqual(len(read_control), 0)
@mock_s3
def test_delete_data_package(self):
with self.app.app_context():
bit_store = BitStore('test_pub', 'test_package')
s3 = boto3.client('s3')
bucket_name = self.app.config['S3_BUCKET_NAME']
s3.create_bucket(Bucket=bucket_name)
read_me_key = bit_store.build_s3_key('test.md')
data_key = bit_store.build_s3_key('data.csv')
metadata_key = bit_store.build_s3_key('datapackage.json')
s3.put_object(Bucket=bucket_name, Key=read_me_key, Body='readme')
s3.put_object(Bucket=bucket_name, Key=data_key, Body='data')
s3.put_object(Bucket=bucket_name, Key=metadata_key, Body='metadata')
status = bit_store.delete_data_package()
read_me_res = s3.list_objects(Bucket=bucket_name, Prefix=read_me_key)
self.assertTrue('Contents' not in read_me_res)
data_res = s3.list_objects(Bucket=bucket_name, Prefix=data_key)
self.assertTrue('Contents' not in data_res)
self.assertTrue(status)
@mock_s3
def test_should_return_true_delete_data_package_if_data_not_exists(self):
with self.app.app_context():
bit_store = BitStore('test_pub', 'test_package')
s3 = boto3.client('s3')
bucket_name = self.app.config['S3_BUCKET_NAME']
s3.create_bucket(Bucket=bucket_name)
read_me_key = bit_store.build_s3_key('test.md')
data_key = bit_store.build_s3_key('data.csv')
metadata_key = bit_store.build_s3_key('datapackage.json')
status = bit_store.delete_data_package()
read_me_res = s3.list_objects(Bucket=bucket_name, Prefix=read_me_key)
self.assertTrue('Contents' not in read_me_res)
data_res = s3.list_objects(Bucket=bucket_name, Prefix=data_key)
self.assertTrue('Contents' not in data_res)
metadata_res = s3.list_objects(Bucket=bucket_name, Prefix=metadata_key)
self.assertTrue('Contents' not in metadata_res)
self.assertTrue(status)
@mock_s3
def test_should_copy_all_object_from_latest_to_tag(self):
numeric_version = 0.8
with self.app.app_context():
bit_store = BitStore('test_pub', 'test_package')
s3 = boto3.client('s3')
bucket_name = self.app.config['S3_BUCKET_NAME']
s3.create_bucket(Bucket=bucket_name)
read_me_key = bit_store.build_s3_key('test.md')
data_key = bit_store.build_s3_key('data.csv')
metadata_key = bit_store.build_s3_key('datapackage.json')
s3.put_object(Bucket=bucket_name, Key=read_me_key, Body='readme')
s3.put_object(Bucket=bucket_name, Key=data_key, Body='data')
s3.put_object(Bucket=bucket_name, Key=metadata_key, Body='metadata')
bit_store.copy_to_new_version(numeric_version)
bit_store_numeric = BitStore('test_pub', 'test_package',
numeric_version)
objects_nu = s3.list_objects(Bucket=bucket_name,
Prefix=bit_store_numeric
.build_s3_versioned_prefix())
objects_old = s3.list_objects(Bucket=bucket_name,
Prefix=bit_store
.build_s3_versioned_prefix())
self.assertEqual(len(objects_nu['Contents']),
len(objects_old['Contents']))
| frictionlessdata/dpr-api | tests/bitstore/test_bitstore.py | Python | mit | 12,449 |
"""colcat_crowdsourcing tasks URL Configuration
"""
from django.conf.urls import patterns, url
from tasks import views
from tasks.imports import *
urlpatterns = [
url(r'^mturk-template/', views.mturk_template, name='mturk template'),
url(r'^disclaimer/', views.disclaimer, name='disclaimer'),
url(r'^prescreen/', views.prescreen, name='prescreen'),
url(r'^practice-foci', views.practice_foci, {}),
url(r'^(?P<language_id>\d+)/(?P<task_type_id>\d+)/(?P<img_id>\d+)$', views.get_task), # URL match for all tasks
]
| csdevsc/colcat_crowdsourcing_application | tasks/urls.py | Python | mit | 534 |
# Copyright (c) "Neo4j"
# Neo4j Sweden AB [http://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains the core driver exceptions.
Driver API Errors
=================
+ Neo4jError
  + ClientError
    + CypherSyntaxError
    + CypherTypeError
    + ConstraintError
    + AuthError
      + TokenExpired
    + Forbidden
  + DatabaseError
  + TransientError
    + DatabaseUnavailable
    + NotALeader
    + ForbiddenOnReadOnlyDatabase
+ DriverError
  + TransactionError
    + TransactionNestingError
  + ResultError
    + ResultConsumedError
    + ResultNotSingleError
  + SessionExpired
  + ServiceUnavailable
    + RoutingServiceUnavailable
    + WriteServiceUnavailable
    + ReadServiceUnavailable
    + IncompleteCommit
  + ConfigurationError
    + AuthConfigurationError
    + CertificateConfigurationError
Connector API Errors
====================
+ BoltError
  + BoltHandshakeError
  + BoltRoutingError
  + BoltConnectionError
    + BoltSecurityError
    + BoltConnectionBroken
    + BoltConnectionClosed
  + BoltFailure
  + BoltProtocolError
  + Bolt*
"""
CLASSIFICATION_CLIENT = "ClientError"
CLASSIFICATION_TRANSIENT = "TransientError"
CLASSIFICATION_DATABASE = "DatabaseError"
class Neo4jError(Exception):
""" Raised when the Cypher engine returns an error to the client.
"""
message = None
code = None
classification = None
category = None
title = None
metadata = None
@classmethod
def hydrate(cls, message=None, code=None, **metadata):
message = message or "An unknown error occurred"
code = code or "Neo.DatabaseError.General.UnknownError"
try:
_, classification, category, title = code.split(".")
if code == "Neo.ClientError.Security.AuthorizationExpired":
classification = CLASSIFICATION_TRANSIENT
except ValueError:
classification = CLASSIFICATION_DATABASE
category = "General"
title = "UnknownError"
error_class = cls._extract_error_class(classification, code)
inst = error_class(message)
inst.message = message
inst.code = code
inst.classification = classification
inst.category = category
inst.title = title
inst.metadata = metadata
return inst
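# For illustration, hydrate() maps a server error code onto the matching
# subclass via the tables defined near the bottom of this module:
#
#   err = Neo4jError.hydrate(message="bad syntax",
#                            code="Neo.ClientError.Statement.SyntaxError")
#   assert isinstance(err, CypherSyntaxError)
#   assert err.classification == CLASSIFICATION_CLIENT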
@classmethod
def _extract_error_class(cls, classification, code):
if classification == CLASSIFICATION_CLIENT:
try:
return client_errors[code]
except KeyError:
return ClientError
elif classification == CLASSIFICATION_TRANSIENT:
try:
return transient_errors[code]
except KeyError:
return TransientError
elif classification == CLASSIFICATION_DATABASE:
return DatabaseError
else:
return cls
def invalidates_all_connections(self):
return self.code == "Neo.ClientError.Security.AuthorizationExpired"
def is_fatal_during_discovery(self):
# checks if the code is an error that is caused by the client. In this
# case the driver should fail fast during discovery.
if not isinstance(self.code, str):
return False
if self.code in ("Neo.ClientError.Database.DatabaseNotFound",
"Neo.ClientError.Transaction.InvalidBookmark",
"Neo.ClientError.Transaction.InvalidBookmarkMixture"):
return True
if (self.code.startswith("Neo.ClientError.Security.")
and self.code != "Neo.ClientError.Security."
"AuthorizationExpired"):
return True
return False
def __str__(self):
return "{{code: {code}}} {{message: {message}}}".format(code=self.code, message=self.message)
class ClientError(Neo4jError):
""" The Client sent a bad request - changing the request might yield a successful outcome.
"""
class DatabaseError(Neo4jError):
""" The database failed to service the request.
"""
class TransientError(Neo4jError):
""" The database cannot service the request right now, retrying later might yield a successful outcome.
"""
def is_retriable(self):
"""These are really client errors but classification on the server is not entirely correct and they are classified as transient.
:return: True if it is a retriable TransientError, otherwise False.
:rtype: bool
"""
return not (self.code in (
"Neo.TransientError.Transaction.Terminated",
"Neo.TransientError.Transaction.LockClientStopped",
))
class DatabaseUnavailable(TransientError):
"""
"""
class ConstraintError(ClientError):
"""
"""
class CypherSyntaxError(ClientError):
"""
"""
class CypherTypeError(ClientError):
"""
"""
class NotALeader(TransientError):
"""
"""
class Forbidden(ClientError):
"""
"""
class ForbiddenOnReadOnlyDatabase(TransientError):
"""
"""
class AuthError(ClientError):
""" Raised when authentication failure occurs.
"""
class TokenExpired(AuthError):
""" Raised when the authentication token has expired.
A new driver instance with a fresh authentication token needs to be created.
"""
client_errors = {
# ConstraintError
"Neo.ClientError.Schema.ConstraintValidationFailed": ConstraintError,
"Neo.ClientError.Schema.ConstraintViolation": ConstraintError,
"Neo.ClientError.Statement.ConstraintVerificationFailed": ConstraintError,
"Neo.ClientError.Statement.ConstraintViolation": ConstraintError,
# CypherSyntaxError
"Neo.ClientError.Statement.InvalidSyntax": CypherSyntaxError,
"Neo.ClientError.Statement.SyntaxError": CypherSyntaxError,
# CypherTypeError
"Neo.ClientError.Procedure.TypeError": CypherTypeError,
"Neo.ClientError.Statement.InvalidType": CypherTypeError,
"Neo.ClientError.Statement.TypeError": CypherTypeError,
# Forbidden
"Neo.ClientError.General.ForbiddenOnReadOnlyDatabase": ForbiddenOnReadOnlyDatabase,
"Neo.ClientError.General.ReadOnly": Forbidden,
"Neo.ClientError.Schema.ForbiddenOnConstraintIndex": Forbidden,
"Neo.ClientError.Schema.IndexBelongsToConstraint": Forbidden,
"Neo.ClientError.Security.Forbidden": Forbidden,
"Neo.ClientError.Transaction.ForbiddenDueToTransactionType": Forbidden,
# AuthError
"Neo.ClientError.Security.AuthorizationFailed": AuthError,
"Neo.ClientError.Security.Unauthorized": AuthError,
# TokenExpired
"Neo.ClientError.Security.TokenExpired": TokenExpired,
# NotALeader
"Neo.ClientError.Cluster.NotALeader": NotALeader,
}
transient_errors = {
# DatabaseUnavailableError
"Neo.TransientError.General.DatabaseUnavailable": DatabaseUnavailable
}
class DriverError(Exception):
""" Raised when the Driver raises an error.
"""
class SessionExpired(DriverError):
""" Raised when a session is no longer able to fulfil
the purpose described by its original parameters.
"""
def __init__(self, session, *args, **kwargs):
super(SessionExpired, self).__init__(session, *args, **kwargs)
class TransactionError(DriverError):
""" Raised when an error occurs while using a transaction.
"""
def __init__(self, transaction, *args, **kwargs):
super(TransactionError, self).__init__(*args, **kwargs)
self.transaction = transaction
class TransactionNestingError(DriverError):
""" Raised when transactions are nested incorrectly.
"""
def __init__(self, transaction, *args, **kwargs):
super(TransactionNestingError, self).__init__(*args, **kwargs)
self.transaction = transaction
class ResultError(DriverError):
"""Raised when an error occurs while using a result object."""
def __init__(self, result, *args, **kwargs):
super(ResultError, self).__init__(*args, **kwargs)
self.result = result
class ResultConsumedError(ResultError):
"""Raised when trying to access records of a consumed result."""
class ResultNotSingleError(ResultError):
"""Raised when a result should have exactly one record but does not."""
class ServiceUnavailable(DriverError):
""" Raised when no database service is available.
"""
class RoutingServiceUnavailable(ServiceUnavailable):
""" Raised when no routing service is available.
"""
class WriteServiceUnavailable(ServiceUnavailable):
""" Raised when no write service is available.
"""
class ReadServiceUnavailable(ServiceUnavailable):
""" Raised when no read service is available.
"""
class IncompleteCommit(ServiceUnavailable):
""" Raised when the client looses connection while committing a transaction
Raised when a disconnection occurs while still waiting for a commit
response. For non-idempotent write transactions, this leaves the data
in an unknown state with regard to whether the transaction completed
successfully or not.
"""
class ConfigurationError(DriverError):
""" Raised when there is an error concerning a configuration.
"""
class AuthConfigurationError(ConfigurationError):
""" Raised when there is an error with the authentication configuration.
"""
class CertificateConfigurationError(ConfigurationError):
""" Raised when there is an error with the authentication configuration.
"""
class UnsupportedServerProduct(Exception):
""" Raised when an unsupported server product is detected.
"""
| neo4j/neo4j-python-driver | neo4j/exceptions.py | Python | apache-2.0 | 10,124 |
#!/usr/bin/env ./batchprofiler.sh
"""sql_jobs.py - launch and monitor remote jobs that submit SQL
This file is a command-line script for running jobs and a set of functions
for running the script on the cluster. To get help on running from the
command-line:
sql_jobs.py --help
CellProfiler is distributed under the GNU General Public License.
See the accompanying file LICENSE for details.
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2015 Broad Institute
All rights reserved.
Please see the AUTHORS file for credits.
Website: http://www.cellprofiler.org
"""
import re
import os
import subprocess
import sys
import RunBatch
def run_sql_file(batch_id, sql_filename):
"""Use the mysql command line to run the given SQL script
batch_id - the SQL file is associated with this batch
sql_filename - file name of the SQL script to run
returns the RunBatch.BPJob
"""
cwd = os.path.dirname(__file__)
batch = RunBatch.BPBatch()
batch.select(batch_id)
run = RunBatch.BPSQLRun.select_by_sql_filename(batch, sql_filename)
if run is None:
sql_path = os.path.join(RunBatch.batch_script_directory(batch),
sql_filename)
cmd = "%s -b %d -i %s"%(os.path.join(cwd, "sql_jobs.py"),
batch_id, sql_path)
run = RunBatch.BPSQLRun.create(batch, sql_filename, cmd)
return RunBatch.run_one(batch, run, cwd=cwd)
def sql_file_job_and_status(batch_id, sql_file):
"""Return the latest job ID associated with the batch and sql path
batch_id - batch id associated with the submission
sql_path - path to the sql script submitted
returns latest job or None if not submitted
"""
batch = RunBatch.BPBatch()
batch.select(batch_id)
run = RunBatch.BPSQLRun.select_by_sql_filename(batch, sql_file)
if run is None:
return None, None, None
result = run.select_jobs()
if len(result) == 0:
return None, None, None
return run, result[0][0], result[0][1]
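# Hypothetical call sequence (batch 123 and the script "load.sql" are
# illustrative; the script must live in the batch's script directory):
#
#   job = run_sql_file(123, "load.sql")
#   run, job_id, status = sql_file_job_and_status(123, "load.sql")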
if __name__ == "__main__":
import optparse
sys.path.append(os.path.dirname(__file__))
from bpformdata import \
BATCHPROFILER_MYSQL_DATABASE, BATCHPROFILER_MYSQL_HOST, \
BATCHPROFILER_MYSQL_PASSWORD, BATCHPROFILER_MYSQL_PORT, \
BATCHPROFILER_MYSQL_USER
parser = optparse.OptionParser()
parser.add_option("-i","--input-sql-script",
dest="sql_script",
help="The SQL script to run on the server")
parser.add_option("-b","--batch-id",
dest="batch_id",
help="The batch ID of the batch being run")
options,args = parser.parse_args()
path, filename = os.path.split(options.sql_script)
if len(path):
os.chdir(path)
script_fd = open(filename,"r")
cmd = ["mysql",
"-A",
"-B",
"--column-names=0",
"--local-infile=1"]
if BATCHPROFILER_MYSQL_USER is not None:
cmd += ["-u", BATCHPROFILER_MYSQL_USER]
if BATCHPROFILER_MYSQL_PASSWORD is not None:
cmd += ["--password=" + BATCHPROFILER_MYSQL_PASSWORD]
if BATCHPROFILER_MYSQL_HOST is not None:
cmd += ["-h", BATCHPROFILER_MYSQL_HOST]
if BATCHPROFILER_MYSQL_DATABASE is not None:
cmd += ["-D", BATCHPROFILER_MYSQL_DATABASE]
if BATCHPROFILER_MYSQL_PORT is not None:
cmd += ["-P", BATCHPROFILER_MYSQL_PORT]
print "Executing command %s." % (" ".join(cmd))
p = subprocess.Popen(cmd,
stdin=script_fd,
stdout=sys.stdout,
stderr=sys.stderr,
cwd = path if len(path) == 0 else None)
p.communicate()
script_fd.close()
| LeeKamentsky/CellProfiler | BatchProfiler/sql_jobs.py | Python | gpl-2.0 | 3,871 |
# Copyright (C) 2009, 2010, 2011 Rickard Lindberg, Roger Lindberg
#
# This file is part of Timeline.
#
# Timeline is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Timeline is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Timeline. If not, see <http://www.gnu.org/licenses/>.
import codecs
import os
import os.path
from timelinelib.db.exceptions import TimelineIOError
def safe_write(path, encoding, write_fn):
"""
Write to path in such a way that the contents of path are either
modified correctly or not modified at all.
In some extremely rare cases the contents of path might be incorrect, but
in those cases the correct content is always present in another file.
"""
def raise_error(specific_msg, cause_exception):
err_general = _("Unable to save timeline data to '%s'. File left unmodified.") % path
err_template = "%s\n\n%%s\n\n%%s" % err_general
raise TimelineIOError(err_template % (specific_msg, cause_exception))
tmp_path = create_non_exising_path(path, "tmp")
backup_path = create_non_exising_path(path, "bak")
# Write data to tmp file
try:
if encoding is None:
file = open(tmp_path, "wb")
else:
file = codecs.open(tmp_path, "w", encoding)
try:
try:
write_fn(file)
except Exception, e:
raise_error(_("Unable to write timeline data."), e)
finally:
file.close()
except IOError, e:
raise_error(_("Unable to write to temporary file '%s'.") % tmp_path, e)
# Copy original to backup (if original exists)
if os.path.exists(path):
try:
os.rename(path, backup_path)
except Exception, e: # Can this only be an OSError?
raise_error(_("Unable to take backup to '%s'.") % backup_path, e)
# Copy tmp to original
try:
os.rename(tmp_path, path)
except Exception, e: # Can this only be an OSError?
raise_error(_("Unable to rename temporary file '%s' to original.") % tmp_path, e)
# Delete backup (if backup was created)
if os.path.exists(backup_path):
try:
os.remove(backup_path)
except Exception, e: # Can this only be an OSError?
raise_error(_("Unable to delete backup file '%s'.") % backup_path, e)
def create_non_exising_path(base, suffix):
i = 1
while True:
new_path = "%s.%s%i" % (base, suffix, i)
if os.path.exists(new_path):
i += 1
else:
return new_path
def safe_locking(controller, edit_function, exception_handler=None):
if controller.ok_to_edit():
try:
edit_function()
except Exception, e:
if exception_handler is not None:
controller.edit_ends()
exception_handler(e)
else:
raise
finally:
controller.edit_ends()
| linostar/timeline-clone | source/timelinelib/db/utils.py | Python | gpl-3.0 | 3,411 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
"""Simple XML manipulation"""
from __future__ import unicode_literals
import sys
if sys.version > '3':
basestring = str
unicode = str
import logging
import re
import time
import xml.dom.minidom
from . import __author__, __copyright__, __license__, __version__
# Utility functions used for marshalling, moved aside for readability
from .helpers import TYPE_MAP, TYPE_MARSHAL_FN, TYPE_UNMARSHAL_FN, \
REVERSE_TYPE_MAP, Struct, Date, Decimal
log = logging.getLogger(__name__)
class SimpleXMLElement(object):
"""Simple XML manipulation (simil PHP)"""
def __init__(self, text=None, elements=None, document=None,
namespace=None, prefix=None, namespaces_map={}, jetty=False):
"""
:param namespaces_map: How to map our namespace prefix to that given by the client;
{prefix: received_prefix}
"""
self.__namespaces_map = namespaces_map
_rx = "|".join(namespaces_map.keys()) # {'external': 'ext', 'model': 'mod'} -> 'external|model'
self.__ns_rx = re.compile(r"^(%s):.*$" % _rx) # And now we build an expression ^(external|model):.*$
# to find prefixes in all xml nodes i.e.: <model:code>1</model:code>
# and later change that to <mod:code>1</mod:code>
self.__ns = namespace
self.__prefix = prefix
self.__jetty = jetty # special list support
if text is not None:
try:
self.__document = xml.dom.minidom.parseString(text)
except:
log.error(text)
raise
self.__elements = [self.__document.documentElement]
else:
self.__elements = elements
self.__document = document
def add_child(self, name, text=None, ns=True):
"""Adding a child tag to a node"""
if not ns or self.__ns is False:
##log.debug('adding %s without namespace', name)
element = self.__document.createElement(name)
else:
##log.debug('adding %s ns "%s" %s', name, self.__ns, ns)
if isinstance(ns, basestring):
element = self.__document.createElement(name)
if ns:
element.setAttribute("xmlns", ns)
elif self.__prefix:
element = self.__document.createElementNS(self.__ns, "%s:%s" % (self.__prefix, name))
else:
element = self.__document.createElementNS(self.__ns, name)
# don't append null tags!
if text is not None:
if isinstance(text, xml.dom.minidom.CDATASection):
element.appendChild(self.__document.createCDATASection(text.data))
else:
element.appendChild(self.__document.createTextNode(text))
self._element.appendChild(element)
return SimpleXMLElement(
elements=[element],
document=self.__document,
namespace=self.__ns,
prefix=self.__prefix,
jetty=self.__jetty,
namespaces_map=self.__namespaces_map
)
def __setattr__(self, tag, text):
"""Add text child tag node (short form)"""
if tag.startswith("_"):
object.__setattr__(self, tag, text)
else:
##log.debug('__setattr__(%s, %s)', tag, text)
self.add_child(tag, text)
def __delattr__(self, tag):
"""Remove a child tag (non recursive!)"""
elements = [__element for __element in self._element.childNodes
if __element.nodeType == __element.ELEMENT_NODE]
for element in elements:
self._element.removeChild(element)
def add_comment(self, data):
"""Add an xml comment to this child"""
comment = self.__document.createComment(data)
self._element.appendChild(comment)
def as_xml(self, filename=None, pretty=False):
"""Return the XML representation of the document"""
if not pretty:
return self.__document.toxml('UTF-8')
else:
return self.__document.toprettyxml(encoding='UTF-8')
if sys.version > '3':
def __repr__(self):
"""Return the XML representation of this tag"""
return self._element.toxml()
else:
def __repr__(self):
"""Return the XML representation of this tag"""
# NOTE: do not use self.as_xml('UTF-8') as it returns the whole xml doc
return self._element.toxml('UTF-8')
def get_name(self):
"""Return the tag name of this node"""
return self._element.tagName
def get_local_name(self):
"""Return the tag local name (prefix:name) of this node"""
return self._element.localName
def get_prefix(self):
"""Return the namespace prefix of this node"""
return self._element.prefix
def get_namespace_uri(self, ns):
"""Return the namespace uri for a prefix"""
element = self._element
while element is not None and element.attributes is not None:
try:
return element.attributes['xmlns:%s' % ns].value
except KeyError:
element = element.parentNode
def attributes(self):
"""Return a dict of attributes for this tag"""
#TODO: use slice syntax [:]?
return self._element.attributes
def __getitem__(self, item):
"""Return xml tag attribute value or a slice of attributes (iter)"""
##log.debug('__getitem__(%s)', item)
if isinstance(item, basestring):
if self._element.hasAttribute(item):
return self._element.attributes[item].value
elif isinstance(item, slice):
# return a list with name:values
return list(self._element.attributes.items())[item]
else:
# return element by index (position)
element = self.__elements[item]
return SimpleXMLElement(
elements=[element],
document=self.__document,
namespace=self.__ns,
prefix=self.__prefix,
jetty=self.__jetty,
namespaces_map=self.__namespaces_map
)
def add_attribute(self, name, value):
"""Set an attribute value from a string"""
self._element.setAttribute(name, value)
def __setitem__(self, item, value):
"""Set an attribute value"""
if isinstance(item, basestring):
self.add_attribute(item, value)
elif isinstance(item, slice):
# set multiple attributes at once
for k, v in value.items():
self.add_attribute(k, v)
def __delitem__(self, item):
"Remove an attribute"
self._element.removeAttribute(item)
def __call__(self, tag=None, ns=None, children=False, root=False,
error=True, ):
"""Search (even in child nodes) and return a child tag by name"""
try:
if root:
# return entire document
return SimpleXMLElement(
elements=[self.__document.documentElement],
document=self.__document,
namespace=self.__ns,
prefix=self.__prefix,
jetty=self.__jetty,
namespaces_map=self.__namespaces_map
)
if tag is None:
# if no name given, iterate over siblings (same level)
return self.__iter__()
if children:
# future: filter children? by ns?
return self.children()
elements = None
if isinstance(tag, int):
# return tag by index
elements = [self.__elements[tag]]
if ns and not elements:
for ns_uri in isinstance(ns, (tuple, list)) and ns or (ns, ):
##log.debug('searching %s by ns=%s', tag, ns_uri)
elements = self._element.getElementsByTagNameNS(ns_uri, tag)
if elements:
break
if self.__ns and not elements:
##log.debug('searching %s by ns=%s', tag, self.__ns)
elements = self._element.getElementsByTagNameNS(self.__ns, tag)
if not elements:
##log.debug('searching %s', tag)
elements = self._element.getElementsByTagName(tag)
if not elements:
##log.debug(self._element.toxml())
if error:
raise AttributeError("No elements found")
else:
return
return SimpleXMLElement(
elements=elements,
document=self.__document,
namespace=self.__ns,
prefix=self.__prefix,
jetty=self.__jetty,
namespaces_map=self.__namespaces_map)
except AttributeError as e:
raise AttributeError("Tag not found: %s (%s)" % (tag, e))
def __getattr__(self, tag):
"""Shortcut for __call__"""
return self.__call__(tag)
def __iter__(self):
"""Iterate over xml tags at this level"""
try:
for __element in self.__elements:
yield SimpleXMLElement(
elements=[__element],
document=self.__document,
namespace=self.__ns,
prefix=self.__prefix,
jetty=self.__jetty,
namespaces_map=self.__namespaces_map)
except:
raise
def __dir__(self):
"""List xml children tags names"""
return [node.tagName for node
in self._element.childNodes
if node.nodeType != node.TEXT_NODE]
def children(self):
"""Return xml children tags element"""
elements = [__element for __element in self._element.childNodes
if __element.nodeType == __element.ELEMENT_NODE]
if not elements:
return None
#raise IndexError("Tag %s has no children" % self._element.tagName)
return SimpleXMLElement(
elements=elements,
document=self.__document,
namespace=self.__ns,
prefix=self.__prefix,
jetty=self.__jetty,
namespaces_map=self.__namespaces_map
)
def __len__(self):
"""Return element count"""
return len(self.__elements)
def __contains__(self, item):
"""Search for a tag name in this element or child nodes"""
return self._element.getElementsByTagName(item)
def __unicode__(self):
"""Returns the unicode text nodes of the current element"""
rc = ''
for node in self._element.childNodes:
if node.nodeType == node.TEXT_NODE or node.nodeType == node.CDATA_SECTION_NODE:
rc = rc + node.data
return rc
if sys.version > '3':
__str__ = __unicode__
else:
def __str__(self):
return self.__unicode__().encode('utf-8')
def __int__(self):
"""Returns the integer value of the current element"""
return int(self.__str__())
def __float__(self):
"""Returns the float value of the current element"""
try:
return float(self.__str__())
except:
raise IndexError(self._element.toxml())
_element = property(lambda self: self.__elements[0])
def unmarshall(self, types, strict=True):
"""Convert the current serialized xml element to python values"""
# types is a dict of {tag name: conversion function}
# strict=False to use default type conversion if not specified
# example: types={'p': {'a': int, 'b': int}, 'c': [{'d': str}]}
# expected xml: <p><a>1</a><b>2</b></p><c><d>hola</d><d>chau</d></c>
# returned value: {'p': {'a': 1, 'b': 2}, 'c': [{'d': 'hola'}, {'d': 'chau'}]}
d = {}
for node in self():
name = str(node.get_local_name())
ref_name_type = None
# handle multirefs: href="#id0"
if 'href' in node.attributes().keys():
href = node['href'][1:]
for ref_node in self(root=True)("multiRef"):
if ref_node['id'] == href:
node = ref_node
ref_name_type = ref_node['xsi:type'].split(":")[1]
break
try:
if isinstance(types, dict):
fn = types[name]
# custom array only in the response (not defined in the WSDL):
# <results soapenc:arrayType="xsd:string[199]>
if any([k for k,v in node[:] if 'arrayType' in k]) and not isinstance(fn, list):
fn = [fn]
else:
fn = types
except (KeyError, ) as e:
xmlns = node['xmlns'] or node.get_namespace_uri(node.get_prefix())
if 'xsi:type' in node.attributes().keys():
xsd_type = node['xsi:type'].split(":")[1]
try:
# get fn type from SOAP-ENC:arrayType="xsd:string[28]"
if xsd_type == 'Array':
array_type = [k for k,v in node[:] if 'arrayType' in k][0]
xsd_type = node[array_type].split(":")[1]
if "[" in xsd_type:
xsd_type = xsd_type[:xsd_type.index("[")]
fn = [REVERSE_TYPE_MAP[xsd_type]]
else:
fn = REVERSE_TYPE_MAP[xsd_type]
except:
fn = None # ignore multirefs!
elif xmlns == "http://www.w3.org/2001/XMLSchema":
# self-defined schema, return the SimpleXMLElement
# TODO: parse to python types if <s:element ref="s:schema"/>
fn = None
elif None in types:
# <s:any/>, return the SimpleXMLElement
# TODO: check position of None if inside <s:sequence>
fn = None
elif strict:
raise TypeError("Tag: %s invalid (type not found)" % (name,))
else:
# if not strict, use default type conversion
fn = str
if isinstance(fn, list):
# append to existing list (if any) - unnested dict arrays -
value = d.setdefault(name, [])
children = node.children()
# TODO: check if this was really needed (get first child only)
##if len(fn[0]) == 1 and children:
## children = children()
if fn and not isinstance(fn[0], dict):
# simple arrays []
for child in (children or []):
tmp_dict = child.unmarshall(fn[0], strict)
value.extend(tmp_dict.values())
elif (self.__jetty and len(fn[0]) > 1):
# Jetty array style support [{k, v}]
for parent in node:
tmp_dict = {} # unmarshall each value & mix
for child in (node.children() or []):
tmp_dict.update(child.unmarshall(fn[0], strict))
value.append(tmp_dict)
else: # .Net / Java
for child in (children or []):
value.append(child.unmarshall(fn[0], strict))
elif isinstance(fn, tuple):
value = []
_d = {}
children = node.children()
as_dict = len(fn) == 1 and isinstance(fn[0], dict)
for child in (children and children() or []): # Readability counts
if as_dict:
_d.update(child.unmarshall(fn[0], strict)) # Merging pairs
else:
value.append(child.unmarshall(fn[0], strict))
if as_dict:
value.append(_d)
if name in d:
_tmp = list(d[name])
_tmp.extend(value)
value = tuple(_tmp)
else:
value = tuple(value)
elif isinstance(fn, dict):
##if ref_name_type is not None:
## fn = fn[ref_name_type]
children = node.children()
value = children and children.unmarshall(fn, strict)
else:
if fn is None: # xsd:anyType not unmarshalled
value = node
elif unicode(node) or (fn == str and unicode(node) != ''):
try:
# get special deserialization function (if any)
fn = TYPE_UNMARSHAL_FN.get(fn, fn)
if fn == str:
# always return an unicode object:
# (avoid encoding errors in py<3!)
value = unicode(node)
else:
value = fn(unicode(node))
except (ValueError, TypeError) as e:
raise ValueError("Tag: %s: %s" % (name, e))
else:
value = None
d[name] = value
return d
def _update_ns(self, name):
"""Replace the defined namespace alias with tohse used by the client."""
pref = self.__ns_rx.search(name)
if pref:
pref = pref.groups()[0]
try:
name = name.replace(pref, self.__namespaces_map[pref])
except KeyError:
log.warning('Unknown namespace alias %s' % name)
return name
def marshall(self, name, value, add_child=True, add_comments=False,
ns=False, add_children_ns=True):
"""Analyze python value and add the serialized XML element using tag name"""
# Change node name to that used by a client
name = self._update_ns(name)
if isinstance(value, dict): # serialize dict (<key>value</key>)
# for the first parent node, use the document target namespace
# (ns==True) or use the namespace string uri if passed (elements)
child = add_child and self.add_child(name, ns=ns) or self
for k, v in value.items():
if not add_children_ns:
ns = False
elif hasattr(value, 'namespaces'):
# for children, use the wsdl element target namespace:
ns = value.namespaces.get(k)
else:
# simple type
ns = None
child.marshall(k, v, add_comments=add_comments, ns=ns)
elif isinstance(value, tuple): # serialize tuple (<key>value</key>)
child = add_child and self.add_child(name, ns=ns) or self
if not add_children_ns:
ns = False
for k, v in value:
getattr(self, name).marshall(k, v, add_comments=add_comments, ns=ns)
elif isinstance(value, list): # serialize lists
child = self.add_child(name, ns=ns)
if not add_children_ns:
ns = False
if add_comments:
child.add_comment("Repetitive array of:")
for t in value:
child.marshall(name, t, False, add_comments=add_comments, ns=ns)
elif isinstance(value, (xml.dom.minidom.CDATASection, basestring)): # do not convert strings or unicodes
self.add_child(name, value, ns=ns)
elif value is None: # sent a empty tag?
self.add_child(name, ns=ns)
elif value in TYPE_MAP.keys():
# add commented placeholders for simple types (for examples/help only)
child = self.add_child(name, ns=ns)
child.add_comment(TYPE_MAP[value])
else: # the rest of object types are converted to string
# get special serialization function (if any)
fn = TYPE_MARSHAL_FN.get(type(value), str)
self.add_child(name, fn(value), ns=ns)
def import_node(self, other):
x = self.__document.importNode(other._element, True) # deep copy
self._element.appendChild(x)
def write_c14n(self, output=None, exclusive=True):
"Generate the canonical version of the XML node"
from . import c14n
xml = c14n.Canonicalize(self._element, output,
unsuppressedPrefixes=[] if exclusive else None)
return xml
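# A short round-trip sketch (illustrative values only):
#
#   span = SimpleXMLElement('<span><a href="python.org.ar">pyar</a></span>')
#   assert str(span.a) == 'pyar'
#   assert span.a['href'] == 'python.org.ar'
#   d = span.unmarshall({'span': {'a': str}})
#   assert d == {'span': {'a': 'pyar'}}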
| GabrielReusRodriguez/DummySoapServer | pysimplesoap/simplexml.py | Python | gpl-2.0 | 21,628 |
"""
WSGI config for ctfapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "conf.localsettings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
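# Example (hypothetical): serving this module with gunicorn from the
# project root:
#
#   gunicorn wsgi:application --bind 0.0.0.0:8000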
| blstream/CaptureTheFlag | ctf-web-app/wsgi.py | Python | apache-2.0 | 390 |
DEBUG = 0
# cardinal directions
directions = ("left","up","right","down")
# logic
maxExamined = 75000 # maximum number of tries when solving
maxMoves = 19 # maximum number of moves
cullFrequency = 75000 # number of tries per cull update
cullCutoff = 1.2 # fraction of average to cull
# grid size
gridRows = 5
gridColumns = 6
# text strings
textCalculateCurrentCombos = "Calculate Damage"
textClose = "Close"
textDamageDisplayAmount = "Total: "
textChoosePaint = "Choose a color to paint:"
textSolve = "Solve"
textTitle = "Puzzle and Dragons Helper"
# orbs
orbDefault = "light"
orbDefaultConfig = ("heal","light","wood","wood","fire","light","dark","heal","wood","water","heal","dark","fire","light","light","fire","fire","wood","heal","wood","dark","wood","water","light","light","dark","heal","heal","fire","dark")
orbDefaultStrength = 100
orbList = ("heal","fire","water","wood","light","dark")
# orb image URLs
orbImageURL = dict(light="img/light.png",
dark="img/dark.png",
fire="img/fire.png",
water="img/water.png",
wood="img/wood.png",
heal="img/heal.png",
bg="img/bgOrb.png"
);
# TKinter styles
tkButtonInactive = "flat"
tkButtonActive = "groove"
tkButtonBorder = 3
tkOrbStrengthEntryWidth = 7
| discomethod/pad-helper | constants.py | Python | gpl-2.0 | 1,240 |
import urllib.parse
from unittest.mock import PropertyMock
import responses
from .c14 import C14
class TestC14:
def setup_method(self, method):
url = urllib.parse.urljoin(C14.C14_API_URL, "safe/abcdefg/archive")
responses.add(responses.GET, url, json=[{
"uuid_ref": "833a45fd-e87f-415b-b89b-a444f45a2de3",
"name": "elegant_tesla",
"status": "active"
}])
C14.safe = PropertyMock(return_value="abcdefg")
self.c14 = C14()
@responses.activate
def test_get_archives(self):
assert self.c14.archives()[0]["name"] == "elegant_tesla"
| jsurloppe/N14 | N14/test_c14.py | Python | gpl-3.0 | 627 |
from datetime import datetime, timedelta
import unittest
import gnip_client
class BasicTests(unittest.TestCase):
def setUp(self):
self.client = gnip_client.AllAPIs(config='DEFAULT')
now = datetime.now()
self.to_date = now.strftime("%Y%m%d0000")
self.from_date = (now - timedelta(days=30)).strftime("%Y%m%d0000")
def tearDown(self):
self.client = None
def test_usage_returns_200(self):
response = (self.client.api(
'UsageAPI').get_usage(
decode_json=False,
bucket='month',
fromDate=self.from_date,
toDate=self.to_date))
self.assertEqual(200, response.status_code)
def test_search_returns_200(self):
response = (self.client.api(
'SearchAPI').max_results_only(
decode_json=False,
query='gnip',
publisher='twitter',
fromDate=self.from_date,
toDate=self.to_date,
maxResults='10'))
self.assertEqual(200, response.status_code)
def test_search_count_returns_200(self):
response = (self.client.api(
'SearchAPICount').search_count(
decode_json=False,
query='gnip',
publisher='twitter',
fromDate=self.from_date,
toDate=self.to_date,
bucket='day'))
self.assertEqual(200, response.status_code)
def test_powertrack_can_set_rule(self):
response = (self.client.api(
'PowertrackAPIRules').post_rule(value='test_rule', tag='test_rules'))
self.assertEqual(201, response.status_code)
def test_previously_set_rule_exists(self):
response = (self.client.api(
'PowertrackAPIRules').get_rules(decode_json=False))
# TODO: assert 'test_rule' appears in response.json()
self.assertEqual(200, response.status_code)
def test_powertrack_can_delete_previously_set_rule(self):
response = (self.client.api(
'PowertrackAPIRules').delete_rule(value='test_rule', tag='test_rules'))
self.assertEqual(200, response.status_code)
def test_rule_no_longer_exists(self):
response = (self.client.api(
'PowertrackAPIRules').get_rules(decode_json=False))
# TODO: assert 'test_rule' no longer appears in response.json()
self.assertEqual(200, response.status_code)
if __name__ == '__main__':
unittest.main()
| bee-keeper/pygnip-allapis | pygnip_allapis/tests.py | Python | gpl-3.0 | 2,625 |
#!/usr/bin/env python
"""
Common util classes / functions for the NGS project
"""
import collections
import gzip
import os
import re
import sys
def format_number(n):
'''
>>> format_number(1000)
'1,000'
>>> format_number(1234567)
'1,234,567'
'''
ar = list(str(n))
for i in range(len(ar))[::-3][1:]:
ar.insert(i + 1, ',')
return ''.join(ar)
def natural_sort(ar):
'''
>>> natural_sort('1 3 4 2 5'.split())
['1', '2', '3', '4', '5']
>>> natural_sort('1 10 20 2 3 4'.split())
['1', '2', '3', '4', '10', '20']
'''
to_sort = []
for item in ar:
spl = re.split(r'(\d+)', item)
l2 = []
for el in spl:
try:
n = int(el)
except:
n = el
l2.append(n)
to_sort.append((l2, item))
to_sort.sort()
return [x[1] for x in to_sort]
def dictify(values, colnames):
"""
Convert a list of values into a dictionary based upon given column names.
If the column name starts with an '@', the value is assumed to be a comma
separated list.
If the name starts with a '#', the value is assumed to be an int.
If the name starts with '@#', the value is assumed to a comma separated
list of ints.
"""
d = {}
for i in xrange(len(colnames)):
key = colnames[i]
split = False
num = False
if key[0] == '@':
key = key[1:]
split = True
if key[0] == '#':
key = key[1:]
num = True
if i < len(values):
if num and split:
val = [int(x) for x in values[i].rstrip(',').split(',')]
elif num:
val = int(values[i])
elif split:
val = values[i].rstrip(',').split(',')
else:
val = values[i]
d[key] = val
else:
d[key] = None
return d
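# For example (hypothetical row data):
#
#   dictify(['chr1', '100', 'a,b', '1,2'], ['chrom', '#start', '@tags', '@#ids'])
#   # -> {'chrom': 'chr1', 'start': 100, 'tags': ['a', 'b'], 'ids': [1, 2]}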
def gzip_aware_open(fname):
if fname == '-':
f = sys.stdin
elif fname[-3:] == '.gz' or fname[-4:] == '.bgz':
f = gzip.open(os.path.expanduser(fname))
else:
f = open(os.path.expanduser(fname))
return f
class gzip_opener:
'''
A Python 2.6 class to handle 'with' opening of text files that may
or may not be gzip compressed.
'''
def __init__(self, fname):
self.fname = fname
def __enter__(self):
self.f = gzip_aware_open(self.fname)
return self.f
def __exit__(self, type, value, traceback):
if self.f != sys.stdin:
self.f.close()
return False
def filenames_to_uniq(names, new_delim='.'):
'''
Given a set of file names, produce a list of names consisting of the
uniq parts of the names. This works from the end of the name. Chunks of
the name are split on '.' and '-'.
For example:
A.foo.bar.txt
B.foo.bar.txt
returns: ['A','B']
AA.BB.foo.txt
CC.foo.txt
returns: ['AA.BB','CC']
>>> filenames_to_uniq('a.foo.bar.txt b.foo.bar.txt'.split())
['a', 'b']
>>> filenames_to_uniq('a.b.foo.txt c.foo.txt'.split())
['a.b', 'c']
'''
name_words = []
maxlen = 0
for name in names:
name_words.append(name.replace('.', ' ').replace('-', ' ').strip().split())
name_words[-1].reverse()
if len(name_words[-1]) > maxlen:
maxlen = len(name_words[-1])
common = [False, ] * maxlen
for i in xrange(maxlen):
last = None
same = True
for nameword in name_words:
if i >= len(nameword):
same = False
break
if not last:
last = nameword[i]
elif nameword[i] != last:
same = False
break
common[i] = same
newnames = []
for nameword in name_words:
nn = []
for (i, val) in enumerate(common):
if not val and i < len(nameword):
nn.append(nameword[i])
nn.reverse()
newnames.append(new_delim.join(nn))
return newnames
def parse_args(argv, defaults=None, expected_argc=0):
opts = {}
if defaults:
opts.update(defaults)
args = []
i = 0
while i < len(argv):
if argv[i][0] == '-':
arg = argv[i].lstrip('-')
if '=' in arg:
k, v = arg.split('=', 1)
if defaults and k in defaults:
if type(defaults[k]) == float:
opts[k] = float(v)
elif type(defaults[k]) == int:
opts[k] = int(v)
else:
opts[k] = v
else:
opts[arg] = True
else:
args.append(argv[i])
i += 1
while len(args) < expected_argc:
args.append(None)
return opts, args
class memoize(object):
'Simple memoizing decorator to cache results'
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
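# Typical decorator usage (illustrative):
#
#   @memoize
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(30)  # each distinct argument is computed only once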
| nitesh1989/tools-iuc | tools/ngsutils/ngsutils/support/ngs_utils.py | Python | mit | 5,476 |
#!/usr/bin/env python
#
# Copyright 2010 Andrei <vish@gravitysoft.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
from __future__ import with_statement
import sys, threading, datetime
#
# Data base module to centralize working with data base connections,
# support connection pools and have a possibility to track connection
# leakage.
#
def create_db(id, parameters):
return Database(id, parameters)
def remove_db(id):
pass
def get_db(id):
return Database.getDatabase(id)
def get_dbconnection(id):
return Database(id).getConnection()
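# A hedged usage sketch; the connection parameters are placeholders for
# whatever the loaded DB-API module (MySQLdb by default) expects:
#
#   create_db('main', {'host': 'localhost', 'user': 'u', 'passwd': 'secret',
#                      'db': 'test', 'db.poolsize': 5, 'db.poollatency': 1})
#   con = get_dbconnection('main')
#   try:
#       cur = con.cursor()
#       cur.execute("SELECT 1")
#       row = cur.fetchone()
#   finally:
#       con.close()  # with pooling enabled this releases, not closes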
class Database(object):
DS_LOCK = threading.RLock()
SOURCES = {}
class CursorWrapper(object):
def __init__(self, cursor, owner):
assert cursor and owner
self._cursor = cursor
self._owner = owner
@property
def description(self):
return self._cursor.description
@property
def rowcount(self):
return self._cursor.rowcount
def close(self): self._owner._closeMe(self)
def callproc(self, procname, parameters=None):
return self._cursor.callproc(procname, parameters)
def execute(self, operation, parameters=None):
return self._cursor.execute(operation, parameters)
def executemany(self, operation, seq_of_parameters):
return self._cursor.executemany(operation, seq_of_parameters)
def fetchone(self):
return self._cursor.fetchone()
def fetchmany(self, size=None):
s = self.arraysize
if size : s = size
return self._cursor.fetchmany(s)
def fetchall(self):
return self._cursor.fetchall()
def nextset(self):
return self._cursor.nextset()
@property
def arraysize(self):
return self._cursor.arraysize
def setinputsizes(self, sizes):
return self._cursor.setinputsizes(sizes)
def setoutputsize(self, size, column=None):
return self._cursor.setoutputsize(size, column)
def _orig_cursor(self):
return self._cursor
class ConnectionWrapper(object):
def __init__(self, connection, owner):
assert connection and owner
self._connection = connection
self._creation_time = datetime.datetime.now()
self._owner = owner
self._cursors = []
def creationTime(self): return self._creation_time
def close(self):
with Database.DS_LOCK:
self._owner._closeMe(self)
def commit(self):
return self._connection.commit()
def rollback(self):
return self._connection.rollback()
def cursor(self):
with Database.DS_LOCK:
c = Database.CursorWrapper(self._connection.cursor(), self)
self._cursors.append(c)
return c
def _closeMe(self, cur):
with Database.DS_LOCK:
try: i = self._cursors.index(cur)
except ValueError: i = -1
if i >= 0:
self._cursors.pop(i)
cur._orig_cursor().close()
def cleanup(self):
with Database.DS_LOCK:
for cur in self._cursors:
try: cur._orig_cursor().close()
except: pass
self._cursors = []
def __str__(self):
return "'%s' connection wrapper, created: " % self._owner._id + str(self._creation_time)
def __enter__(self): pass
def __exit__(self, type, value, traceback):
self.close()
def _orig_conn(self):
return self._connection
@classmethod
def createDatabase(cls, id, parameters):
assert id and parameters
with cls.DS_LOCK:
if id in cls.SOURCES:
raise BaseException("Data base '%s' already exists." % id)
return Database(id, parameters)
@classmethod
def getDatabase(cls, id):
assert id
with cls.DS_LOCK: return cls.SOURCES[id]
@classmethod
def hasDatabase(cls, id):
assert id
with cls.DS_LOCK: return id in cls.SOURCES
def init(self, id, parameters):
global DBMODULE
self._poolSize = 1
key = 'db.poolsize'
if key in parameters:
self._poolSize = int(parameters[key])
del parameters[key]
self._poolLatency = self._poolSize / 5
key = 'db.poollatency'
if key in parameters:
self._poolLatency = int(parameters[key])
del parameters[key]
assert self._poolLatency >= 0 and self._poolSize >= 0
if self._poolLatency > self._poolSize:
raise BaseException("DB '%s' pool latency cannot be less than max pool size." % id)
self._parameters = parameters
self._id = id
self._pool = []
self._module = DBMODULE
self._firstfree = 0
def __new__(cls, id, parameters = None):
assert id and len(id.strip()) > 0
id = id.strip()
with cls.DS_LOCK:
if id in cls.SOURCES:
ds = cls.SOURCES[id]
if parameters and parameters != ds._parameters:
raise BaseException("Data base '%s' have been defined with another db parameters.")
return ds
else:
if parameters == None:
raise BaseException("DB parameters have not been specified for '%s' data base." % id)
ds = object.__new__(cls)
ds.init(id, parameters)
ds.ping()
cls.SOURCES[id] = ds
return ds
def ping(self):
con = None
try: con = self._module.connect(**self._parameters)
finally:
if con : con.close()
def getConnection(self):
with self.DS_LOCK:
# connection pool should not be used
if self._poolSize == 0:
return Database.ConnectionWrapper(self._module.connect(**self._parameters), owner = self)
else:
# found free connection in pool
if self._firstfree < len(self._pool):
self._firstfree += 1
return self._pool[self._firstfree - 1]
else:
# pool is not full
if self._poolSize > len(self._pool):
c = Database.ConnectionWrapper(self._module.connect(**self._parameters), owner = self)
self._pool.append(c)
self._firstfree = len(self._pool)
return c
else:
# pool is full
raise BaseException("'%s' connection pool is full (%d connections opened)." % (self._id, len(self._pool)))
def cleanup(self):
with self.DS_LOCK:
for c in self._pool:
try: c._orig_conn().close()
except: pass
self._pool = []
self._firstfree = 0
def _closeMe(self, con):
with self.DS_LOCK:
# pool is not supported
if self._poolSize == 0:
con._orig_conn().close()
else:
try: i = self._pool.index(con)
except ValueError: i = -1
if i == -1 or i >= self._firstfree:
raise BaseException("DB '%s' connection has been already closed." % self._id)
# check if have already enough opened free connection
# and really close connection if it is true
if self._poolLatency == (len(self._pool) - self._firstfree):
c = self._pool.pop(i)
c.cleanup()
c._orig_conn().close()
else:
c = self._pool.pop(i)
c.cleanup()
self._pool.append(c)
self._firstfree -= 1
def __str__(self):
s = "Data base '%s' {" % self._id + "\n Parameters:" + str(self._parameters)
s += "\n pool size = %d" % self._poolSize
s += "\n pool latency = %d" % self._poolLatency
s += "\n free connections = %d" % (len(self._pool) - self._firstfree)
s += "\n first free pos = %d" % self._firstfree
s += "\n " + str(self._pool) + "\n}"
return s
DBMODULE = None
def __LOAD_DB_MODULE__(module='MySQLdb'):
global DBMODULE
if DBMODULE and DBMODULE.__name__ != module:
raise BaseException("Only one db specific module can be loaded at the same time.")
elif DBMODULE == None:
DBMODULE = __import__(module)
for k in DBMODULE.__dict__:
o = DBMODULE.__dict__[k]
if k.find("Error") >=0 or k.find("Warning") >= 0:
setattr(sys.modules[__name__], k, o)
if __name__ != '__main__':
__LOAD_DB_MODULE__()
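# A minimal usage sketch (hypothetical connection parameters; assumes the
# MySQLdb module is installed and that ConnectionWrapper proxies cursor()
# to the underlying connection, as the _cursors bookkeeping above suggests):
#
# db = Database('main', {'host': 'localhost', 'user': 'app',
# 'passwd': 'secret', 'db': 'appdb',
# 'db.poolsize': 4, 'db.poollatency': 1})
# con = db.getConnection()
# try:
# cur = con.cursor()
# cur.execute("SELECT 1")
# finally:
# con.close() # returns the connection to the pool rather than closing it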
| barmalei/scalpel | lib/gravity/common/db.py | Python | lgpl-3.0 | 10,270 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
from essh.ec2 import EC2Controller
from essh.ssh import SSHController
from essh.exceptions import ESSHException
@click.command(context_settings=dict(ignore_unknown_options=True,))
@click.option('--profile', '-p', help='AWS credentials profile (i.e. ~/.aws/credentials)')
@click.option('--zone', '-z', help='Filter hosts by availability zone specified (i.e. us-east-1a)')
@click.option('--keypair-dir', default="~/.ssh", help='Directory where keypair files are located')
@click.option('--keypair-extension', default="pem", help='File extension for keypair files')
@click.argument('target')
@click.argument('command', nargs=-1, type=click.UNPROCESSED)
def cli(profile, zone, keypair_dir, keypair_extension, target, command):
cmd = " ". join(command)
if len(cmd) > 0:
cmd = " " + cmd
ec2_controller = EC2Controller(profile, zone)
try:
instance = ec2_controller.find(target)
SSHController(keypair_dir, instance.keypair, keypair_extension, instance.private_ip).ssh(cmd)
except ESSHException as ex:
print(ex.message)
if __name__ == '__main__':
cli()
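# Example invocation (hypothetical profile, zone and instance name):
#
# essh --profile prod --zone us-east-1a web-server uptime
#
# This resolves 'web-server' through EC2, then opens an SSH session to the
# instance's private IP using the keypair file expected at
# ~/.ssh/<keypair>.pem, appending 'uptime' as the remote command.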
| patrickbcullen/essh | essh/cli.py | Python | mit | 1,163 |
# encoding: utf-8
import curses
from . import wgmultilinetree
class TreeLineSelectable(wgmultilinetree.TreeLine):
# NB - as print is currently defined, it is assumed that these will
# NOT contain multi-width characters, and that len() will correctly
# give an indication of the correct offset
CAN_SELECT = '[ ]'
CAN_SELECT_SELECTED = '[*]'
CANNOT_SELECT = ' '
CANNOT_SELECT_SELECTED = ' * '
def _print_select_controls(self):
SELECT_DISPLAY = None
if self._tree_real_value.selectable:
if self.value.selected:
SELECT_DISPLAY = self.CAN_SELECT_SELECTED
else:
SELECT_DISPLAY = self.CAN_SELECT
else:
if self.value.selected:
SELECT_DISPLAY = self.CANNOT_SELECT_SELECTED
else:
SELECT_DISPLAY = self.CANNOT_SELECT
if self.do_colors():
attribute_list = self.parent.theme_manager.findPair(self, 'CONTROL')
else:
attribute_list = curses.A_NORMAL
#python2 compatibility
if isinstance(SELECT_DISPLAY, bytes):
SELECT_DISPLAY = SELECT_DISPLAY.decode()
self.add_line(self.rely,
self.left_margin + self.relx,
SELECT_DISPLAY,
self.make_attributes_list(SELECT_DISPLAY, attribute_list),
self.width - self.left_margin)
return len(SELECT_DISPLAY)
def _print(self, left_margin=0):
if not hasattr(self._tree_real_value, 'selected'):
return None
self.left_margin = left_margin
self.parent.curses_pad.bkgdset(' ',curses.A_NORMAL)
self.left_margin += self._print_tree(self.relx)
self.left_margin += self._print_select_controls() + 1
if self.highlight:
self.parent.curses_pad.bkgdset(' ',curses.A_STANDOUT)
super(wgmultilinetree.TreeLine, self)._print()
class TreeLineSelectableAnnotated(TreeLineSelectable,
wgmultilinetree.TreeLineAnnotated):
def _print(self, left_margin=0):
if not hasattr(self._tree_real_value, 'selected'):
return None
self.left_margin = left_margin
self.parent.curses_pad.bkgdset(' ',curses.A_NORMAL)
self.left_margin += self._print_tree(self.relx)
self.left_margin += self._print_select_controls() + 1
if self.do_colors():
self.left_margin += self.annotationColor(self.left_margin+self.relx)
else:
self.left_margin += self.annotationNoColor(self.left_margin+self.relx)
if self.highlight:
self.parent.curses_pad.bkgdset(' ',curses.A_STANDOUT)
super(wgmultilinetree.TreeLine, self)._print()
class MLTreeMultiSelect(wgmultilinetree.MLTree):
_contained_widgets = TreeLineSelectable
def __init__(self, screen, select_cascades=True, *args, **keywords):
super(MLTreeMultiSelect, self).__init__(screen, *args, **keywords)
self.select_cascades = select_cascades
def h_select(self, ch):
vl = self.values[self.cursor_line]
vl_to_set = not vl.selected
if self.select_cascades:
for v in vl.walkTree(onlyExpanded=False, ignoreRoot=False):
if v.selectable:
v.selected = vl_to_set
else:
vl.selected = vl_to_set
if self.select_exit:
self.editing = False
self.how_exited = True
self.display()
def get_selected_objects(self, return_node=True):
for v in self._myFullValues.walkTree(onlyExpanded=False,
ignoreRoot=False):
if v.selected:
if return_node:
yield v
else:
yield v.getContent()
class MLTreeMultiSelectAnnotated(MLTreeMultiSelect):
_contained_widgets = TreeLineSelectableAnnotated
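# A minimal usage sketch (assumes the standard npyscreen Form/TreeData API,
# where TreeData accepts 'selectable' and 'selected' keyword arguments; the
# key bound to h_select depends on the widget's handler set):
#
# import npyscreen
#
# class App(npyscreen.NPSApp):
# def main(self):
# form = npyscreen.Form(name='Pick items')
# tree = form.add(MLTreeMultiSelect)
# root = npyscreen.TreeData(content='root', selectable=False,
# ignore_root=False)
# root.new_child(content='child', selectable=True)
# tree.values = root
# form.edit()
# picked = list(tree.get_selected_objects(return_node=False))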
| tescalada/npyscreen-restructure | npyscreen/widget/multilinetreeselectable.py | Python | bsd-2-clause | 4,001 |
"""ZFS based backup workflows."""
import datetime
import shlex
import gflags
import lvm
import workflow
FLAGS = gflags.FLAGS
gflags.DEFINE_string('rsync_options',
'--archive --acls --numeric-ids --delete --inplace',
'rsync command options')
gflags.DEFINE_string('rsync_path', '/usr/bin/rsync', 'path to rsync binary')
gflags.DEFINE_string('zfs_snapshot_prefix', 'ari-backup-',
'prefix for historical ZFS snapshots')
gflags.DEFINE_string('zfs_snapshot_timestamp_format', '%Y-%m-%d--%H%M',
'strftime() formatted timestamp used when naming new ZFS snapshots')
class ZFSLVMBackup(lvm.LVMSourceMixIn, workflow.BaseWorkflow):
"""Workflow for backing up a logical volume to a ZFS dataset.
Data is copied from an LVM snapshot to a ZFS dataset using rsync, and then
ZFS commands are issued to create historical snapshots. The ZFS snapshot
lifecycle is also managed by this class. When a backup completes, snapshots
older than snapshot_expiration_days are destroyed.
This approach has some benefits over rdiff-backup in that all backup
datapoints are easily browseable and replication of the backup data using
ZFS streams is generally less resource intensive than using something like
rsync to mirror the files created by rdiff-backup.
One downside is that it's easier to store all file metadata using
rdiff-backup. Rsync can only store metadata for files that the destination
file system can also store. For example, if extended file system
attributes are used on the source file system, but aren't available on the
destination, rdiff-backup will still record those attributes in its own
files. If faced with that same scenario, rsync would lose those attributes.
Furthermore, rsync must have root privilege to write arbitrary file
metadata.
New post-job hooks are added for creating ZFS snapshots and trimming old
ones.
"""
def __init__(self, label, source_hostname, rsync_dst, zfs_hostname,
dataset_name, snapshot_expiration_days, **kwargs):
"""Configure a ZFSLVMBackup object.
Args:
label: str, label for the backup job (e.g. database-server1).
source_hostname: str, the name of the host with the source data to
backup.
rsync_dst: str, the destination argument for the rsync command line
(e.g. backupbox:/backup-store/database-server1).
zfs_hostname: str, the name of the backup destination host where we will
be managing the ZFS snapshots.
dataset_name: str, the full ZFS path (not file system path) to the
dataset holding the backups for this job
(e.g. tank/backup-store/database-server1).
snapshot_expiration_days: int, the maximum age of a ZFS snapshot in days.
Pro tip: It's a good practice to reuse the label argument as the last
path component in the rsync_dst and dataset_name arguments.
"""
# Call our super class's constructor to enable LVM snapshot management
super(ZFSLVMBackup, self).__init__(label, **kwargs)
# Assign instance vars specific to this class.
self.source_hostname = source_hostname
self.rsync_dst = rsync_dst
self.zfs_hostname = zfs_hostname
self.dataset_name = dataset_name
# Assign flags to instance vars so they might be easily overridden in
# workflow configs.
self.rsync_options = FLAGS.rsync_options
self.rsync_path = FLAGS.rsync_path
self.zfs_snapshot_prefix = FLAGS.zfs_snapshot_prefix
self.zfs_snapshot_timestamp_format = FLAGS.zfs_snapshot_timestamp_format
self.add_post_hook(self._create_zfs_snapshot)
self.add_post_hook(self._destroy_expired_zfs_snapshots,
{'days': snapshot_expiration_days})
def _get_current_datetime(self):
"""Returns datetime object with the current date and time.
This method is mostly useful for testing purposes.
"""
return datetime.datetime.now()
def _run_custom_workflow(self):
"""Run rsync backup of LVM snapshot to ZFS dataset."""
# TODO(jpwoodbu) Consider throwing an exception if we see things in the
# include or exclude lists since we don't use them in this class.
self.logger.debug('ZFSLVMBackup._run_custom_workflow started.')
# Since we're dealing with ZFS datasets, let's always exclude the .zfs
# directory in our rsync options.
rsync_options = shlex.split(self.rsync_options) + ['--exclude', '/.zfs']
# We add a trailing slash to the src path otherwise rsync will make a
# subdirectory at the destination, even if the destination is already a
# directory.
rsync_src = self._snapshot_mount_point_base_path + '/'
command = [self.rsync_path] + rsync_options + [rsync_src, self.rsync_dst]
self.run_command(command, self.source_hostname)
self.logger.debug('ZFSLVMBackup._run_custom_workflow completed.')
def _create_zfs_snapshot(self, error_case):
"""Creates a new ZFS snapshot of our destination dataset.
The name of the snapshot will include the zfs_snapshot_prefix provided by
FLAGS and a timestamp. The zfs_snapshot_prefix is used by
_find_snapshots_older_than() when deciding which snapshots to
destroy. The timestamp encoded in a snapshot name is only for end-user
convenience. The creation metadata on the ZFS snapshot is what is used to
determine a snapshot's age.
This method does nothing if error_case is True.
Args:
error_case: bool, whether an error has occurred during the backup.
"""
if not error_case:
self.logger.info('Creating ZFS snapshot...')
timestamp = self._get_current_datetime().strftime(
self.zfs_snapshot_timestamp_format)
snapshot_name = self.zfs_snapshot_prefix + timestamp
snapshot_path = '{dataset_name}@{snapshot_name}'.format(
dataset_name=self.dataset_name, snapshot_name=snapshot_name)
command = ['zfs', 'snapshot', snapshot_path]
self.run_command(command, self.zfs_hostname)
def _find_snapshots_older_than(self, days):
"""Returns snapshots older than the given number of days.
Only snapshots that meet the following criteria are returned:
1. They were created at least "days" ago.
2. Their name is prefixed with FLAGS.zfs_snapshot_prefix.
Args:
days: int, the minimum age of the snapshots in days.
Returns:
A list of filtered snapshots.
"""
expiration = self._get_current_datetime() - datetime.timedelta(days=days)
# Let's find all the snapshots for this dataset.
command = ['zfs', 'get', '-rH', '-o', 'name,value', 'type',
self.dataset_name]
stdout, unused_stderr = self.run_command(command, self.zfs_hostname)
snapshots = list()
# Sometimes we get extra lines which are empty, so we'll strip the lines.
for line in stdout.strip().splitlines():
name, dataset_type = line.split('\t')
if dataset_type == 'snapshot':
# Let's try to only consider destroying snapshots made by us ;)
if name.split('@')[1].startswith(self.zfs_snapshot_prefix):
snapshots.append(name)
expired_snapshots = list()
for snapshot in snapshots:
creation_time = self._get_snapshot_creation_time(snapshot)
if creation_time <= expiration:
expired_snapshots.append(snapshot)
return expired_snapshots
def _get_snapshot_creation_time(self, snapshot):
"""Gets the creation time of a snapshot as a Python datetime object
Args:
snapshot: str, the full ZFS path to the snapshot.
Returns:
A datetime object representing the creation time of the snapshot.
"""
command = ['zfs', 'get', '-H', '-o', 'value', 'creation', snapshot]
stdout, unused_stderr = self.run_command(command, self.zfs_hostname)
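# 'zfs get -H -o value creation' prints a ctime-style timestamp such as
# 'Thu Feb 27 14:30 2020', which is what the strptime format below expects.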
return datetime.datetime.strptime(stdout.strip(), '%a %b %d %H:%M %Y')
def _destroy_expired_zfs_snapshots(self, days, error_case):
"""Destroy snapshots older than the given numnber of days.
Any snapshots in the target dataset with a name that starts with
FLAGS.zfs_snapshot_prefix and a creation date older than days will be
destroyed. Depending on the size of the snapshots and the performance of
the disk subsystem, this operation could take a while.
This method does nothing if error_case is True.
Args:
days: int, the max age of a snapshot in days.
error_case: bool, whether an error has occurred during the backup.
"""
if not error_case:
self.logger.info('Looking for expired ZFS snapshots...')
snapshots = self._find_snapshots_older_than(days)
# Sentinel value used to log if we destroyed no snapshots.
snapshots_destroyed = False
# Destroy expired snapshots.
for snapshot in snapshots:
command = ['zfs', 'destroy', snapshot]
self.run_command(command, self.zfs_hostname)
snapshots_destroyed = True
self.logger.info('{snapshot} destroyed.'.format(snapshot=snapshot))
if not snapshots_destroyed:
self.logger.info('Found no expired ZFS snapshots.')
| rbarlow/ari-backup | ari_backup/zfs.py | Python | bsd-3-clause | 9,110 |
from datetime import timedelta
from urllib.parse import urlencode
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from django.http import Http404
from django.contrib import auth
from django.db import models
from django.views.generic import DetailView, FormView
from django.contrib.auth.views import PasswordResetConfirmView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.utils.html import format_html
from django.utils import formats, timezone
from django.utils import translation
from django.utils.translation import gettext_lazy as _
from django.views.decorators.http import require_POST
from django.views.generic import TemplateView, RedirectView
from crossdomainmedia import CrossDomainMediaMixin
from froide.foirequest.models import FoiRequest
from froide.foirequest.services import ActivatePendingRequestService
from froide.helper.utils import render_403, get_redirect, get_redirect_url
from .forms import (
UserLoginForm,
PasswordResetForm,
SignUpForm,
SetPasswordForm,
UserEmailConfirmationForm,
UserChangeDetailsForm,
UserDeleteForm,
TermsForm,
ProfileForm,
AccountSettingsForm,
)
from .services import AccountService
from .utils import start_cancel_account_process, make_account_private
from .export import (
request_export,
ExportCrossDomainMediaAuth,
get_export_access_token,
get_export_access_token_by_token,
)
User = auth.get_user_model()
class AccountView(RedirectView):
# Temporary redirect
pattern_name = "account-requests"
class NewAccountView(TemplateView):
template_name = "account/new.html"
def get_context_data(self, **kwargs):
context = super(NewAccountView, self).get_context_data(**kwargs)
context["title"] = self.request.GET.get("title", "")
context["email"] = self.request.GET.get("email", "")
return context
class AccountConfirmedView(LoginRequiredMixin, TemplateView):
template_name = "account/confirmed.html"
def get_context_data(self, **kwargs):
context = super(AccountConfirmedView, self).get_context_data(**kwargs)
context["foirequest"] = self.get_foirequest()
context["ref"] = self.request.GET.get("ref")
return context
def get_foirequest(self):
request_pk = self.request.GET.get("request")
if request_pk:
try:
return FoiRequest.objects.get(user=self.request.user, pk=request_pk)
except FoiRequest.DoesNotExist:
pass
return None
def confirm(request, user_id, secret, request_id=None):
if request.user.is_authenticated:
if request.user.id != user_id:
messages.add_message(
request,
messages.ERROR,
_("You are logged in and cannot use a confirmation link."),
)
return redirect("account-show")
user = get_object_or_404(User, pk=int(user_id))
if user.is_active or (not user.is_active and user.email is None):
return redirect("account-login")
account_service = AccountService(user)
result = account_service.confirm_account(secret, request_id)
if not result:
messages.add_message(
request,
messages.ERROR,
_(
"You can only use the confirmation link once, "
"please login with your password."
),
)
return redirect("account-login")
auth.login(request, user)
params = {}
if request.GET.get("ref"):
params["ref"] = request.GET["ref"]
if request_id is not None:
req_service = ActivatePendingRequestService({"request_id": request_id})
foirequest = req_service.process(request=request)
if foirequest:
params["request"] = str(foirequest.pk)
default_url = "%s?%s" % (reverse("account-confirmed"), urlencode(params))
return get_redirect(request, default=default_url, params=params)
def go(request, user_id, token, url):
if request.user.is_authenticated:
if request.user.id != int(user_id):
messages.add_message(
request,
messages.INFO,
_(
"You are logged in with a different user account. Please logout first before using this link."
),
)
# Delete token without using
AccountService.delete_autologin_token(user_id, token)
return redirect(url)
if request.method == "POST":
user = User.objects.filter(pk=int(user_id)).first()
if user:
account_manager = AccountService(user)
if account_manager.check_autologin_token(token):
if not user.is_active:
# Confirm user account (link came from email)
account_manager.reactivate_account()
# Perform login
auth.login(request, user)
return redirect(url)
# If login-link fails, prompt login with redirect
return get_redirect(request, default="account-login", params={"next": url})
return render(request, "account/go.html", {"form_action": request.path})
class ProfileView(DetailView):
queryset = User.objects.filter(private=False)
slug_field = "username"
template_name = "account/profile.html"
def get_context_data(self, **kwargs):
from froide.campaign.models import Campaign
from froide.publicbody.models import PublicBody
ctx = super().get_context_data(**kwargs)
ctx.pop("user", None) # Remove 'user' key set by super
foirequests = FoiRequest.published.filter(user=self.object)
aggregates = foirequests.aggregate(
count=models.Count("id"),
first_date=models.Min("first_message"),
successful=models.Count(
"id",
filter=models.Q(
status=FoiRequest.STATUS.RESOLVED,
resolution=FoiRequest.RESOLUTION.SUCCESSFUL,
)
| models.Q(
status=FoiRequest.STATUS.RESOLVED,
resolution=FoiRequest.RESOLUTION.PARTIALLY_SUCCESSFUL,
),
),
refused=models.Count(
"id",
filter=models.Q(
status=FoiRequest.STATUS.RESOLVED,
resolution=FoiRequest.RESOLUTION.REFUSED,
),
),
total_costs=models.Sum("costs"),
)
campaigns = (
Campaign.objects.filter(
foirequest__in=foirequests,
)
.exclude(url="")
.distinct()
.order_by("-start_date")
)
TOP_PUBLIC_BODIES = 3
top_publicbodies = (
PublicBody.objects.filter(foirequest__in=foirequests)
.annotate(user_request_count=models.Count("id"))
.order_by("-user_request_count")[:TOP_PUBLIC_BODIES]
)
TOP_FOLLOWERS = 3
top_followers = (
foirequests.annotate(
follower_count=models.Count(
"followers", filter=models.Q(followers__confirmed=True)
)
)
.filter(follower_count__gt=0)
.order_by("-follower_count")[:TOP_FOLLOWERS]
)
user_days = (timezone.now() - self.object.date_joined).days
no_index = aggregates["count"] < 5 and user_days < 30
ctx.update(
{
"foirequests": foirequests.order_by("-first_message")[:10],
"aggregates": aggregates,
"campaigns": campaigns,
"top_followers": top_followers,
"top_publicbodies": top_publicbodies,
"no_index": no_index,
}
)
return ctx
@require_POST
def logout(request):
auth.logout(request)
messages.add_message(request, messages.INFO, _("You have been logged out."))
return redirect("/")
def login(request, context=None, template="account/login.html", status=200):
if request.user.is_authenticated:
return get_redirect(request, default="account-show")
if not context:
context = {}
if "reset_form" not in context:
context["reset_form"] = PasswordResetForm(prefix="pwreset")
if request.method == "POST" and status == 200:
status = 400 # if ok, we are going to redirect anyways
form = UserLoginForm(request.POST)
if form.is_valid():
user = auth.authenticate(
request,
username=form.cleaned_data["email"],
password=form.cleaned_data["password"],
)
if user is not None:
if user.is_active:
auth.login(request, user)
messages.add_message(
request, messages.INFO, _("You are now logged in.")
)
translation.activate(user.language)
return get_redirect(request, default="account-show")
else:
messages.add_message(
request,
messages.ERROR,
_("Please activate your mail address before logging in."),
)
else:
messages.add_message(
request, messages.ERROR, _("E-mail and password do not match.")
)
else:
form = UserLoginForm(initial=None)
context.update({"form": form, "next": request.GET.get("next")})
return render(request, template, context, status=status)
class SignupView(FormView):
template_name = "account/signup.html"
form_class = SignUpForm
def dispatch(self, request, *args, **kwargs):
if request.user.is_authenticated:
return redirect("account-show")
return super().dispatch(request, *args, **kwargs)
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs.update({"request": self.request})
return kwargs
def get_success_url(self, email=""):
next_url = self.request.POST.get("next")
if next_url:
# Store next in session to redirect on confirm
self.request.session["next"] = next_url
url = reverse("account-new")
query = urlencode({"email": self.user.email.encode("utf-8")})
return "%s?%s" % (url, query)
def form_invalid(self, form):
messages.add_message(
self.request, messages.ERROR, _("Please correct the errors below.")
)
return super().form_invalid(form)
def form_valid(self, form):
user, user_created = AccountService.create_user(**form.cleaned_data)
if user_created:
form.save(user)
self.user = user
next_url = self.request.POST.get("next")
account_service = AccountService(user)
time_since_joined = timezone.now() - user.date_joined
joined_recently = time_since_joined < timedelta(hours=1)
mail_sent = True
if user_created:
account_service.send_confirmation_mail(redirect_url=next_url)
elif user.is_active:
# Send login-link email
account_service.send_reminder_mail()
elif not user.is_blocked and not joined_recently:
# User exists, but not activated
account_service.send_confirmation_mail()
else:
mail_sent = False
if mail_sent:
messages.add_message(
self.request,
messages.SUCCESS,
_(
"Please check your emails for a mail from us with a "
"confirmation link."
),
)
return super().form_valid(form)
@require_POST
@login_required
def change_password(request):
form = request.user.get_password_change_form(request.POST)
if form.is_valid():
form.save()
auth.update_session_auth_hash(request, form.user)
messages.add_message(
request, messages.SUCCESS, _("Your password has been changed.")
)
return get_redirect(request, default=reverse("account-show"))
else:
messages.add_message(
request,
messages.ERROR,
_("Your password was NOT changed. Please fix the errors."),
)
return account_settings(request, context={"password_change_form": form}, status=400)
@require_POST
def send_reset_password_link(request):
if request.user.is_authenticated:
messages.add_message(
request,
messages.ERROR,
_("You are currently logged in, you cannot get a password reset link."),
)
return get_redirect(request)
form = auth.forms.PasswordResetForm(request.POST, prefix="pwreset")
if form.is_valid():
if request.POST.get("next"):
request.session["next"] = request.POST.get("next")
form.save(
use_https=True,
email_template_name="account/emails/password_reset_email.txt",
)
messages.add_message(
request,
messages.SUCCESS,
_(
"Check your mail, we sent you a password reset link."
" If you don't receive an email, check if you entered your"
" email correctly or if you really have an account."
),
)
return get_redirect(request, keep_session=True)
return login(request, context={"reset_form": form}, status=400)
class CustomPasswordResetConfirmView(PasswordResetConfirmView):
template_name = "account/password_reset_confirm.html"
post_reset_login = True
form_class = SetPasswordForm
def form_valid(self, form):
messages.add_message(
self.request,
messages.SUCCESS,
_("Your password has been set and you are now logged in."),
)
return super().form_valid(form)
def get_success_url(self):
"""
Returns the supplied success URL.
"""
return get_redirect_url(self.request, default=reverse("account-show"))
@login_required
def account_settings(request, context=None, status=200):
if not context:
context = {}
if "new" in request.GET:
request.user.is_new = True
if "user_delete_form" not in context:
context["user_delete_form"] = UserDeleteForm(request)
if "change_form" not in context:
context["change_form"] = UserChangeDetailsForm(request.user)
return render(request, "account/settings.html", context, status=status)
@require_POST
@login_required
def change_user(request):
form = UserChangeDetailsForm(request.user, request.POST)
if form.is_valid():
new_email = form.cleaned_data["email"]
if new_email and request.user.email != new_email:
AccountService(request.user).send_email_change_mail(
form.cleaned_data["email"]
)
messages.add_message(
request,
messages.SUCCESS,
_(
"We sent a confirmation email to your new address. Please click the link in there."
),
)
form.save()
messages.add_message(
request, messages.SUCCESS, _("Your profile information has been changed.")
)
return redirect("account-settings")
messages.add_message(
request,
messages.ERROR,
_("Please correct the errors below. You profile information was not changed."),
)
return account_settings(request, context={"change_form": form}, status=400)
@require_POST
@login_required
def change_profile(request):
form = ProfileForm(data=request.POST, files=request.FILES, instance=request.user)
if form.is_valid():
form.save()
messages.add_message(
request, messages.SUCCESS, _("Your profile information has been changed.")
)
return redirect("account-settings")
messages.add_message(
request,
messages.ERROR,
_("Please correct the errors below. You profile information was not changed."),
)
return account_settings(request, context={"profile_form": form}, status=400)
@require_POST
@login_required
def change_account_settings(request):
form = AccountSettingsForm(
data=request.POST, files=request.FILES, instance=request.user
)
if form.is_valid():
form.save()
messages.add_message(
request, messages.SUCCESS, _("Your account settings have been changed.")
)
translation.activate(form.cleaned_data["language"])
return redirect("account-settings")
messages.add_message(
request,
messages.ERROR,
_("Please correct the errors below. You account settings were not changed."),
)
return account_settings(
request, context={"account_settings_form": form}, status=400
)
@require_POST
@login_required
def make_user_private(request):
if request.user.private:
messages.add_message(
request, messages.ERROR, _("Your account is already private.")
)
return redirect("account-settings")
make_account_private(request.user)
messages.add_message(
request,
messages.SUCCESS,
_("Your account has been made private. The changes are being applied now."),
)
return redirect("account-settings")
@login_required
def change_email(request):
form = UserEmailConfirmationForm(request.user, request.GET)
if form.is_valid():
form.save()
messages.add_message(
request, messages.SUCCESS, _("Your email address has been changed.")
)
else:
messages.add_message(
request,
messages.ERROR,
_("The email confirmation link was invalid or expired."),
)
return redirect("account-settings")
@login_required
def profile_redirect(request):
if request.user.private or not request.user.username:
messages.add_message(
request,
messages.INFO,
_("Your account is private, so you don't have a public profile."),
)
return redirect("account-requests")
return redirect("account-profile", slug=request.user.username)
@require_POST
@login_required
def delete_account(request):
form = UserDeleteForm(request, data=request.POST)
if not form.is_valid():
messages.add_message(
request,
messages.ERROR,
_("Password or confirmation phrase were wrong. Account was not deleted."),
)
return account_settings(request, context={"user_delete_form": form}, status=400)
# Removing all personal data from account
start_cancel_account_process(request.user)
auth.logout(request)
messages.add_message(
request,
messages.INFO,
_("Your account has been deleted and you have been logged out."),
)
return redirect("/")
def new_terms(request):
next = request.GET.get("next")
if not request.user.is_authenticated:
return get_redirect(request, default=next)
if request.user.terms:
return get_redirect(request, default=next)
form = TermsForm()
if request.method == "POST":
form = TermsForm(request.POST)
if form.is_valid():
form.save(request.user)
messages.add_message(
request, messages.SUCCESS, _("Thank you for accepting our new terms!")
)
return get_redirect(request, default=next)
else:
messages.add_message(
request,
messages.ERROR,
_("You need to accept our new terms to continue."),
)
return render(request, "account/new_terms.html", {"terms_form": form, "next": next})
def csrf_failure(request, reason=""):
return render_403(
request,
message=_(
"You probably do not have cookies enabled, but you need cookies to "
"use this site! Cookies are only ever sent securely. The technical "
"reason is: %(reason)s"
)
% {"reason": reason},
)
@login_required
def create_export(request):
if request.method == "POST":
result = request_export(request.user)
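# request_export() return convention, as handled below: None means a new
# export job was queued, True means one is already being created, and any
# other value is treated as the datetime when the next export is allowed.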
if result is None:
messages.add_message(
request,
messages.SUCCESS,
_(
"Your export has been started. "
"You will receive an email when it is finished."
),
)
else:
if result is True:
messages.add_message(
request,
messages.INFO,
_(
"Your export is currently being created. "
"You will receive an email once it is available."
),
)
else:
messages.add_message(
request,
messages.INFO,
format_html(
_(
"Your next export will be possible at {date}. "
'<a href="{url}">You can download your current '
"export here</a>."
),
date=formats.date_format(result, "SHORT_DATETIME_FORMAT"),
url=reverse("account-download_export"),
),
)
return redirect(reverse("account-settings"))
@login_required
def download_export(request):
access_token = get_export_access_token(request.user)
if not access_token:
return redirect(reverse("account-settings") + "#export")
mauth = ExportCrossDomainMediaAuth({"object": access_token})
return redirect(mauth.get_full_media_url(authorized=True))
class ExportFileDetailView(CrossDomainMediaMixin, DetailView):
"""
Add the CrossDomainMediaMixin
and set your custom media_auth_class
"""
media_auth_class = ExportCrossDomainMediaAuth
def get_object(self):
access_token = get_export_access_token_by_token(self.kwargs["token"])
if not access_token:
raise Http404
return access_token
def render_to_response(self, context):
return super().render_to_response(context)
| fin/froide | froide/account/views.py | Python | mit | 22,708 |
import Fast5File
def run(parser, args):
if args.read:
for i, fast5 in enumerate(Fast5File.Fast5FileSet(args.files)):
for metadata_dict in fast5.read_metadata:
if i == 0:
header = metadata_dict.keys()
print "\t".join(["filename"] + header)
print "\t".join([fast5.filename] + [str( metadata_dict[k] ) for k in header])
else:
print "asic_id\tasic_temp\theatsink_temp"
for fast5 in Fast5File.Fast5FileSet(args.files):
asic_temp = fast5.get_asic_temp()
asic_id = fast5.get_asic_id()
heatsink_temp = fast5.get_heatsink_temp()
print "%s\t%s\t%s" % (asic_id, asic_temp, heatsink_temp)
fast5.close()
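# Example default output (tab-separated, one row per fast5 file; the
# values shown here are made up):
#
# asic_id asic_temp heatsink_temp
# 48659 35.2 37.3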
| arq5x/poretools | poretools/metadata.py | Python | mit | 641 |
#!/usr/bin/env python3.8
'Calculations.'
import numpy as np
import cv2 as cv
from process_image import shape, odd, rotate
from images import Images
from angle import Angle
class Calculate():
'Calculate results.'
def __init__(self, core, input_images):
self.settings = core.settings.settings
self.imgs = core.settings.images
self.log = core.log
self.results = core.results
self.images = Images(core, input_images, self.calculate_soil_z)
self.z_info = self.images._get_z_info()
self.calculated_angle = 0
def check_images(self):
'Check capture images.'
self.log.debug('Checking images...', verbosity=2)
for images in self.images.input.values():
for image in images:
if image.image is None:
self.log.error('Image missing.')
pre_rotation_angle = self.settings['pre_rotation_angle']
if pre_rotation_angle:
image.image = rotate(image.image, pre_rotation_angle)
image.reduce_data()
content = image.data.report
self.log.debug(content['report'])
if content['coverage'] < self.settings['input_coverage_threshold']:
self.log.error('Not enough detail. Check recent images.')
def _validate_calibration_data(self):
calibrated = {
'width': self.settings['calibration_image_width'],
'height': self.settings['calibration_image_height']}
current = shape(self.images.input['left'][0].image)
mismatch = {k: (v and v != current[k]) for k, v in calibrated.items()}
if any(mismatch.values()):
self.log.error('Image size must match calibration.')
def _z_at_dist(self, distance, z_reference=None):
if z_reference is None:
z_reference = self.z_info['current']
z_value = z_reference + self.z_info['direction'] * distance
return 0 if np.isnan(z_value) else int(z_value)
def calculate_soil_z(self, disparity_value):
'Calculate soil z from disparity value.'
calculated_soil_z = None
measured_distance = self.settings['measured_distance']
measured_at_z = self.settings['calibration_measured_at_z']
measured_soil_z = self._z_at_dist(measured_distance, measured_at_z)
disparity_offset = self.settings['calibration_disparity_offset']
calibration_factor = self.settings['calibration_factor']
current_z = self.z_info['current']
direction = self.z_info['direction']
values = {
'measured_distance': measured_distance,
'z_offset_from_measured': self.z_info['offset'],
'new_meas_dist': measured_distance - self.z_info['offset'],
'measured_at_z': measured_at_z,
'measured_soil_z': measured_soil_z,
'disparity_offset': disparity_offset,
'calibration_factor': calibration_factor,
'current_z': current_z,
'direction': direction,
'disparity': disparity_value,
'calculated_soil_z': calculated_soil_z,
}
calcs = [''] * 4
calcs[0] += f'({measured_soil_z = :<7}) = '
calcs[0] += f'({measured_at_z = :<7})'
calcs[0] += f' + {direction} * ({measured_distance = })'
if calibration_factor == 0:
return calculated_soil_z, {'lines': calcs, 'values': values}
self._validate_calibration_data()
disparity_delta = disparity_value - disparity_offset
distance = measured_distance - disparity_delta * calibration_factor
calculated_soil_z = self._z_at_dist(distance)
values['disparity_delta'] = round(disparity_delta, 4)
values['calc_distance'] = round(distance, 4)
values['calculated_soil_z'] = calculated_soil_z
calcs[1] += f'({disparity_delta = :<7.1f}) = '
calcs[1] += f'({disparity_value = :<7}) - ({disparity_offset = })'
calcs[2] += f'({distance = :<7.1f}) = '
calcs[2] += f'({measured_distance = :<7})'
calcs[2] += f' - ({disparity_delta = :.1f}) * ({calibration_factor = })'
calcs[3] += f'({calculated_soil_z = :<7}) = '
calcs[3] += f'({current_z = :<7}) + {direction} * ({distance = :.1f})'
return calculated_soil_z, {'lines': calcs, 'values': values}
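# Worked example of calculate_soil_z (all numbers hypothetical):
# with measured_distance=250, measured_at_z=0, direction=-1:
# measured_soil_z = 0 + -1 * 250 = -250
# with disparity_value=120, disparity_offset=100, calibration_factor=2.5:
# disparity_delta = 120 - 100 = 20
# distance = 250 - 20 * 2.5 = 200
# with current_z=0:
# calculated_soil_z = 0 + -1 * 200 = -200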
def _from_stereo(self):
self.log.debug('Calculating disparity...', verbosity=2)
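# OpenCV's StereoBM requires numDisparities to be a positive multiple of 16
# and blockSize to be an odd value in the range [5, 255]; out-of-range
# settings are clamped below and written back to the saved config.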
num_disparities = int(16 * self.settings['disparity_search_depth'])
block_size_setting = int(self.settings['disparity_block_size'])
block_size = min(max(5, odd(block_size_setting)), 255)
if block_size != block_size_setting:
self.settings['disparity_block_size'] = block_size
self.results.save_config('disparity_block_size')
stereo = cv.StereoBM().create(num_disparities, block_size)
disparities = []
for j, left_image in enumerate(self.images.input['left']):
for k, right_image in enumerate(self.images.input['right']):
left = left_image.preprocess()
right = right_image.preprocess()
result = stereo.compute(left, right)
multiple = len(self.images.input['left']) > 1
if multiple and self.imgs['multi_depth']:
tag = f'disparity_{j}_{k}'
self.images.output_init(result, tag, reduce=False)
self.images.output[tag].normalize()
self.images.output[tag].save(f'depth_map_bw_{j}_{k}')
disparities.append(result)
disparity_data = disparities[0]
for computed in disparities[1:]:
mask = disparity_data < self.settings['pixel_value_threshold']
disparity_data[mask] = computed[mask]
self.images.output_init(disparity_data, 'disparity_from_stereo')
def _from_flow(self):
self.log.debug('Calculating flow...')
flow = Angle(self.settings, self.log, self.images)
flow.calculate()
self.images.set_angle(flow.angle)
self.calculated_angle = flow.angle
disparity_from_flow = self.images.output['disparity_from_flow']
_soil_z_ff, details_ff = self.calculate_soil_z(
disparity_from_flow.data.reduced['stats']['mid'])
disparity_from_flow.data.report['calculations'] = details_ff
def calculate_disparity(self):
'Calculate and reduce disparity data.'
self._from_flow()
self._from_stereo()
output = self.images.output
output['raw_disparity'] = output.get('disparity_from_stereo')
if self.settings['use_flow']:
self.images.rotated = False
output['raw_disparity'] = output.get('disparity_from_flow')
if output['raw_disparity'] is None:
self.log.error('No algorithm chosen.')
disparity = self.images.filter_plants(output['raw_disparity'].image)
disparity[-1][-1] = self.settings['calibration_maximum']
self.images.output_init(disparity, 'disparity')
self._check_disparity()
def _check_disparity(self):
data = self.images.output['disparity'].data
if data.data.max() < 1:
msg = 'Zero disparity.'
self.save_debug_output()
self.log.error(msg)
percent_threshold = self.settings['disparity_percent_threshold']
if data.reduced['stats']['mid_size_p'] < percent_threshold:
msg = "Couldn't find surface."
self.save_debug_output()
self.log.error(msg)
def calculate(self):
'Calculate disparity, calibration factor, and soil height.'
self.check_images()
missing_measured_distance = self.settings['measured_distance'] == 0
missing_calibration_factor = self.settings['calibration_factor'] == 0
if missing_measured_distance and missing_calibration_factor:
self.log.error('Calibration measured distance input required.')
self.calculate_disparity()
self.disparity_debug_logs()
missing_disparity_offset = self.settings['calibration_disparity_offset'] == 0
if missing_disparity_offset:
self.set_disparity_offset()
elif missing_calibration_factor:
self.set_calibration_factor()
self.results.save_calibration()
details = {}
if not missing_disparity_offset:
disparity = self.images.output['disparity'].data.report
soil_z, details = self.calculate_soil_z(disparity['mid'])
if len(details['lines']) > 0:
self.log.debug('\n'.join(details['lines']))
disparity['calculations'] = details
low_soil_z, _ = self.calculate_soil_z(disparity['low'])
high_soil_z, _ = self.calculate_soil_z(disparity['high'])
soil_z_range_text = f'Soil z range: {low_soil_z} to {high_soil_z}'
self.log.debug(soil_z_range_text, verbosity=2)
disparity['calculations']['lines'].append(soil_z_range_text)
use_flow = self.settings['use_flow']
alt = 'disparity_from_stereo' if use_flow else 'disparity_from_flow'
disparity_alt = self.images.output.get(alt)
if disparity_alt is not None:
details_alt = disparity_alt.data.report.get('calculations')
if details_alt is not None:
soil_z_alt = details_alt['values']['calculated_soil_z']
msg = f'(alternate method would have calculated {soil_z_alt})'
self.log.debug(msg)
if missing_calibration_factor:
self.check_soil_z(details['values'])
self.results.save_soil_height(soil_z)
details['title'] = self.images.core.settings.title
details['method'] = 'flow' if self.settings['use_flow'] else 'stereo'
details['angle'] = self.calculated_angle
self.save_debug_output()
return details
def save_debug_output(self):
'Save debug output.'
self.images.save()
self.images.save_data()
self.results.save_report(self.images)
def check_soil_z(self, values):
'Verify soil z height is within expected range.'
calculated_soil_z = values['calculated_soil_z']
expected_soil_z = values['measured_soil_z']
if abs(calculated_soil_z - expected_soil_z) > 2:
error_message = 'Soil height calculation error: '
error_message += f'expected {expected_soil_z} got {calculated_soil_z}'
self.log.error(error_message)
def disparity_debug_logs(self):
'Send disparity debug logs.'
disparity = self.images.output['disparity'].data.report
value = disparity['mid']
coverage = disparity['coverage']
self.log.debug(disparity['report'])
self.log.debug(f'Average disparity: {value} {coverage}% coverage')
if coverage < self.settings['disparity_coverage_threshold']:
self.log.error('Not enough disparity information. Check images.')
def set_disparity_offset(self):
'Set disparity offset.'
self.log.debug('Saving disparity offset...')
disparity = self.images.output['disparity'].data
self.settings['calibration_disparity_offset'] = disparity.report['mid']
self.log.debug(f'z: {self.z_info}')
self.settings['calibration_measured_at_z'] = self.z_info['current']
img_size = shape(self.images.input['left'][0].image)
self.settings['calibration_image_width'] = img_size['width']
self.settings['calibration_image_height'] = img_size['height']
self.settings['calibration_maximum'] = int(disparity.data.max())
def set_calibration_factor(self):
'Set calibration_factor.'
self.log.debug('Calculating calibration factor...', verbosity=2)
disparity = self.images.output['disparity'].data.report['mid']
disparity_offset = self.settings['calibration_disparity_offset']
disparity_difference = disparity - disparity_offset
if disparity_difference == 0:
self.log.error('Zero disparity difference.')
if self.z_info['offset'] == 0:
self.log.debug(f'z: {self.z_info}')
self.log.error('Zero offset.')
factor = round(self.z_info['offset'] / disparity_difference, 4)
self.settings['calibration_factor'] = factor
| FarmBot/farmbot_os | priv/farmware/measure-soil-height/calculate.py | Python | mit | 12,513 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2007 Carabos Coop. V. All rights reserved
# Copyright (C) 2008-2017 Vicent Mas. All rights reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Vicent Mas - vmas@vitables.org
"""
This module manages the ``ViTables`` configuration.
The module provides methods for reading and writing settings. Whether the
settings are stored in a plain text file or in a Windows registry is
transparent for this module because it deals with settings via
`QtCore.QSettings`.
Every access to the config settings is done via a `QSettings` instance that,
in turn, will access the config file and return the read setting to the
application. Saving settings works in a similar way, the application passes
the setting to the `QSetting` instance and it (the instance) will write the
setting into the config file.
.. Note:: *About the config file location*.
If format is NativeFormat then the default search path will be:
- Unix
- UserScope
- ``$HOME/.config/MyCompany/ViTables-version.conf``
- ``$HOME/.config/MyCompany.conf``
- SystemScope
- ``/etc/xdg/MyCompany/ViTables-version.conf``
- ``/etc/xdg/MyCompany.conf``
- MacOSX
- UserScope
- ``$HOME/Library/Preferences/org.vitables.ViTables-version.plist``
- ``$HOME/Library/Preferences/org.vitables.plist``
- SystemScope
- ``/Library/Preferences/org.vitables.ViTables-version.plist``
- ``/Library/Preferences/org.vitables.plist``
- Windows
- UserScope
- ``HKEY_CURRENT_USER/Software/MyCompany/ViTables/version``
- ``HKEY_CURRENT_USER/Software/MyCompany/``
- SystemScope
- ``HKEY_LOCAL_MACHINE/Software/MyCompany/ViTables/version``
- ``HKEY_LOCAL_MACHINE/Software/MyCompany/``
If format is NativeFormat and platform is Unix the path can be set via
QSettings.setPath static method.
.. Note:: *About the config file name*.
If format is NativeFormat:
- under Unix, Product Name -> Product Name.conf so the product name
``ViTables`` will match a configuration file named ``ViTables.conf``
- under MacOSX, Internet Domain and Product Name ->
reversed Internet Domain.Product Name.plist so the domain
``vitables.org`` and the product ``ViTables`` become
``org.vitables.ViTables.plist``
"""
__docformat__ = 'restructuredtext'
__version__ = '3.0.0'
import logging
import sys
from vitables.preferences import cfgexception
import vitables.utils
from qtpy import QtCore
from qtpy import QtGui
from qtpy import QtWidgets
import vitables.vttables.datasheet as datasheet
translate = QtWidgets.QApplication.translate
def getVersion():
"""The application version."""
return __version__
log = logging.getLogger(__name__)
class Config(QtCore.QSettings):
"""
Manages the application configuration dynamically.
This class defines accessor methods that allow the application (a
:meth:`vitables.vtapp.VTApp` instance) to read the configuration in
file/registry/plist. The class also provides a method to save the current
configuration in the configuration file/registry/plist.
"""
def __init__(self):
"""
Setup the application configurator.
On Windows systems settings will be stored in the registry
under the HKCU\Software\ViTables\__version__ key
Mac OS X saves settings in a properties list stored in a
standard location, either on a global or user basis (see
docstring for more information).
In all platforms QSettings format is NativeFormat and scope
is UserScope.
"""
organization = QtWidgets.qApp.organizationName()
product = QtWidgets.qApp.applicationName()
version = QtWidgets.qApp.applicationVersion()
if sys.platform.startswith('win'):
path = 'HKEY_CURRENT_USER\\Software\\{0}\\{1}'
rpath = path.format(product, version)
super(Config, self).__init__(rpath, QtCore.QSettings.NativeFormat)
elif sys.platform.startswith('darwin'):
super(Config, self).__init__(product, version)
else:
arg1 = organization
arg2 = '-'.join((product, version))
super(Config, self).__init__(arg1, arg2)
# System-wide settings will not be searched as a fallback
# Setting the NativeFormat paths on MacOSX has no effect
self.setFallbacksEnabled(False)
# The application default style depends on the platform
styles = QtWidgets.QStyleFactory.keys()
self.default_style = styles[0]
self.vtapp = vitables.utils.getVTApp()
if not (self.vtapp is None):
style_name = self.vtapp.gui.style().objectName()
for item in styles:
if item.lower() == style_name:
self.default_style = item
break
def loggerPaper(self):
"""
Returns the logger background color.
"""
key = 'Logger/Paper'
default_value = QtGui.QColor("#ffffff")
setting_value = self.value(key)
if isinstance(setting_value, QtGui.QColor):
return setting_value
else:
return default_value
def loggerText(self):
"""
Returns the logger text color.
"""
key = 'Logger/Text'
default_value = QtGui.QColor("#000000")
setting_value = self.value(key)
if isinstance(setting_value, QtGui.QColor):
return setting_value
else:
return default_value
def loggerFont(self):
"""
Returns the logger font.
"""
key = 'Logger/Font'
default_value = QtWidgets.qApp.font()
setting_value = self.value(key)
if isinstance(setting_value, QtGui.QFont):
return setting_value
else:
return default_value
def workspaceBackground(self):
"""
Returns the workspace background color.
"""
key = 'Workspace/Background'
default_value = QtGui.QBrush(QtGui.QColor("#ffffff"))
setting_value = self.value(key)
if isinstance(setting_value, QtGui.QBrush):
return setting_value
else:
return default_value
def readStyle(self):
"""Returns the current application style."""
# The property key and its default value
key = 'Look/currentStyle'
default_value = self.default_style
# Read the entry from the configuration file/registry
setting_value = self.value(key)
# Check the entry format and value
styles = QtWidgets.QStyleFactory.keys()
if not isinstance(setting_value, str):
return default_value
elif setting_value not in styles:
return default_value
else:
return setting_value
def windowPosition(self):
"""
Returns the main window geometry setting.
"""
key = 'Geometry/Position'
default_value = None
setting_value = self.value(key)
if isinstance(setting_value, QtCore.QByteArray):
return setting_value
else:
return default_value
def windowLayout(self):
"""
Returns the main window layout setting.
This setting stores the position and size of toolbars and
dockwidgets.
"""
key = 'Geometry/Layout'
default_value = None
setting_value = self.value(key)
if isinstance(setting_value, QtCore.QByteArray):
return setting_value
else:
return default_value
def hsplitterPosition(self):
"""
Returns the horizontal splitter geometry setting.
"""
key = 'Geometry/HSplitter'
default_value = None
setting_value = self.value(key)
if isinstance(setting_value, QtCore.QByteArray):
return setting_value
else:
return default_value
def startupLastSession(self):
"""
Returns the `Restore last session` setting.
"""
key = 'Startup/restoreLastSession'
default_value = False
# Warning!
# If the application settings have not yet been saved
# in the registry then self.value(key) returns a Null
# QVariant (its type is None) and self.value(key, type=bool)
# raises an exception because None cannot be converted
# to a boolean value
try:
setting_value = self.value(key, type=bool)
except TypeError:
setting_value = default_value
if setting_value in (False, True):
return setting_value
else:
return default_value
# TODO: remove this setting
def startupWorkingDir(self):
"""
Returns the `Startup working directory` setting.
"""
key = 'Startup/startupWorkingDir'
default_value = 'home'
setting_value = self.value(key)
if isinstance(setting_value, str):
return setting_value
else:
return default_value
def lastWorkingDir(self):
"""
Returns the `Last working directory` setting.
"""
key = 'Startup/lastWorkingDir'
default_value = vitables.utils.getHomeDir()
setting_value = self.value(key)
if isinstance(setting_value, str):
return setting_value
else:
return default_value
def recentFiles(self):
"""
Returns the list of most recently opened files setting.
"""
key = 'Recent/Files'
default_value = []
setting_value = self.value(key)
if isinstance(setting_value, list):
return setting_value
else:
return default_value
def sessionFiles(self):
"""
Returns the list of files and nodes opened when the last session quit.
"""
key = 'Session/Files'
default_value = []
setting_value = self.value(key)
if isinstance(setting_value, list):
return setting_value
else:
return default_value
def helpHistory(self):
"""
Returns the navigation history of the docs browser.
"""
key = 'HelpBrowser/History'
default_value = []
setting_value = self.value(key)
if isinstance(setting_value, list):
return setting_value
else:
return default_value
def helpBookmarks(self):
"""
Returns the bookmarks of the docs browser.
"""
key = 'HelpBrowser/Bookmarks'
default_value = []
setting_value = self.value(key)
if isinstance(setting_value, list):
return setting_value
else:
return default_value
def enabledPlugins(self):
"""Returns the list of enabled plugins.
"""
key = 'Plugins/Enabled'
default_value = []
setting_value = self.value(key)
if isinstance(setting_value, list):
return setting_value
else:
return default_value
def writeValue(self, key, value):
"""
Write an entry to the configuration file.
:Parameters:
- `key`: the name of the property we want to set.
- `value`: the value we want to assign to the property
"""
try:
self.setValue(key, value)
if self.status():
raise cfgexception.ConfigFileIOException(
'{0}={1}'.format(key, value))
except cfgexception.ConfigFileIOException as inst:
log.error(inst.error_message)
def readConfiguration(self):
"""
Get the application configuration currently stored on disk.
Read the configuration from the stored settings. If a setting
cannot be read (as it happens when the package is just
installed) then its default value is returned.
Geometry and Recent settings are returned as lists, color
settings as QColor instances. The rest of settings are returned
as strings or integers.
:Returns: a dictionary with the configuration stored on disk
"""
config = {}
config['Logger/Paper'] = self.loggerPaper()
config['Logger/Text'] = self.loggerText()
config['Logger/Font'] = self.loggerFont()
config['Workspace/Background'] = self.workspaceBackground()
config['Startup/restoreLastSession'] = self.startupLastSession()
config['Startup/startupWorkingDir'] = self.startupWorkingDir()
config['Startup/lastWorkingDir'] = self.lastWorkingDir()
config['Geometry/Position'] = self.windowPosition()
config['Geometry/Layout'] = self.windowLayout()
config['Geometry/HSplitter'] = self.hsplitterPosition()
config['Recent/Files'] = self.recentFiles()
config['Session/Files'] = self.sessionFiles()
config['HelpBrowser/History'] = self.helpHistory()
config['HelpBrowser/Bookmarks'] = self.helpBookmarks()
config['Look/currentStyle'] = self.readStyle()
config['Plugins/Enabled'] = self.enabledPlugins()
return config
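# A typical lifecycle sketch (assumes a QApplication whose organization
# name, application name and version are already set, as ViTables does
# at startup):
#
# config = Config()
# config.loadConfiguration(config.readConfiguration())
# ... run the application ...
# config.saveConfiguration()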
def saveConfiguration(self):
"""
Store current application settings on disk.
Note that we are using ``QSettings`` for writing to the config file,
so we **must** rely on its searching algorithms in order to find
that file.
"""
vtgui = self.vtapp.gui
# Logger paper
style_sheet = vtgui.logger.styleSheet()
paper = style_sheet[-7:]
self.writeValue('Logger/Paper', QtGui.QColor(paper))
# Logger text color
self.writeValue('Logger/Text', vtgui.logger.textColor())
# Logger text font
self.writeValue('Logger/Font', vtgui.logger.font())
# Workspace
self.writeValue('Workspace/Background', vtgui.workspace.background())
# Style
self.writeValue('Look/currentStyle', self.current_style)
# Startup working directory
self.writeValue('Startup/startupWorkingDir',
self.startup_working_directory)
# Startup restore last session
self.writeValue('Startup/restoreLastSession',
self.restore_last_session)
# Startup last working directory
self.writeValue('Startup/lastWorkingDir', self.last_working_directory)
# Window geometry
self.writeValue('Geometry/Position', vtgui.saveGeometry())
# Window layout
self.writeValue('Geometry/Layout', vtgui.saveState())
# Horizontal splitter geometry
self.writeValue('Geometry/HSplitter', vtgui.hsplitter.saveState())
# The list of recent files
self.writeValue('Recent/Files', self.recent_files)
# The list of session files and nodes
self.session_files_nodes = self.getSessionFilesNodes()
self.writeValue('Session/Files', self.session_files_nodes)
# The Help Browser history
self.writeValue('HelpBrowser/History', self.hb_history)
# The Help Browser bookmarks
self.writeValue('HelpBrowser/Bookmarks', self.hb_bookmarks)
# The list of enabled plugins
self.writeValue('Plugins/Enabled',
self.vtapp.plugins_mgr.enabled_plugins)
self.sync()
def getSessionFilesNodes(self):
"""
The list of files and nodes currently open.
The list looks like::
['mode#@#filepath1#@#nodepath1#@#nodepath2, ...',
'mode#@#filepath2#@#nodepath1#@#nodepath2, ...', ...]
"""
# Get the list of views
workspace = self.vtapp.gui.workspace
node_views = [window for window in workspace.subWindowList()
if isinstance(window, datasheet.DataSheet)]
# Get the list of open files (temporary database is not included)
dbt_model = self.vtapp.gui.dbs_tree_model
session_files_nodes = []
filepaths = dbt_model.getDBList()
for path in filepaths:
mode = dbt_model.getDBDoc(path).mode
# If a new file has been created during the current session
# then write mode must be replaced by append mode or the file
# will be created from scratch in the next ViTables session
if mode == 'w':
mode = 'a'
item_path = mode + '#@#' + path
for view in node_views:
if view.dbt_leaf.filepath == path:
item_path = item_path + '#@#' + view.dbt_leaf.nodepath
session_files_nodes.append(item_path)
# Format the list in a handy way to store it on disk
return session_files_nodes
def loadConfiguration(self, config):
"""
Configure the application with the given settings.
We call `user settings` to those settings that can be setup via
Settings dialog and `internal settings` to the rest of settings.
At startup all settings will be loaded. At any time later the
`users settings` can be explicitly changed via Settings dialog.
:Parameter config: a dictionary with the settings to be (re)loaded
"""
# Load the user settings
self.userSettings(config)
# Load the internal settings (if any)
gui = self.vtapp.gui
try:
key = 'Geometry/Position'
value = config[key]
if isinstance(value, QtCore.QByteArray):
# Default position is provided by the underlying window manager
gui.restoreGeometry(value)
key = 'Geometry/Layout'
value = config[key]
if isinstance(value, QtCore.QByteArray):
# Default layout is provided by the underlying Qt installation
gui.restoreState(value)
key = 'Geometry/HSplitter'
value = config[key]
if isinstance(value, QtCore.QByteArray):
# Default geometry provided by the underlying Qt installation
gui.hsplitter.restoreState(value)
key = 'Startup/lastWorkingDir'
self.last_working_directory = config[key]
key = 'Recent/Files'
self.recent_files = config[key]
key = 'Session/Files'
self.session_files_nodes = config[key]
key = 'HelpBrowser/History'
self.hb_history = config[key]
key = 'HelpBrowser/Bookmarks'
self.hb_bookmarks = config[key]
except KeyError:
pass
def userSettings(self, config):
"""Load settings that can be setup via Settings dialog.
:Parameter config: a dictionary with the settings to be (re)loaded
"""
# Usually after calling the Settings dialog only some user
# settings will need to be reloaded. So for every user setting
# we have to check if it needs to be reloaded or not
key = 'Startup/restoreLastSession'
if key in config:
self.restore_last_session = config[key]
key = 'Startup/startupWorkingDir'
if key in config:
self.startup_working_directory = config[key]
key = 'Logger/Paper'
logger = self.vtapp.gui.logger
if key in config:
value = config[key]
paper = value.name()
stylesheet = logger.styleSheet()
old_paper = stylesheet[-7:]
new_stylesheet = stylesheet.replace(old_paper, paper)
logger.setStyleSheet(new_stylesheet)
key = 'Logger/Text'
if key in config:
logger.moveCursor(QtGui.QTextCursor.End)
logger.setTextColor(config[key])
key = 'Logger/Font'
if key in config:
logger.setFont(config[key])
key = 'Workspace/Background'
workspace = self.vtapp.gui.workspace
if key in config:
workspace.setBackground(config[key])
workspace.viewport().update()
key = 'Look/currentStyle'
if key in config:
self.current_style = config[key]
# Default style is provided by the underlying window manager
QtWidgets.qApp.setStyle(self.current_style)
key = 'Plugins/Enabled'
if key in config:
self.enabled_plugins = config[key]
| ankostis/ViTables | vitables/preferences/vtconfig.py | Python | gpl-3.0 | 21,206 |
import logging
from docker.utils import kwargs_from_env
from cattle import default_value, Config
log = logging.getLogger('docker')
_ENABLED = True
class DockerConfig:
def __init__(self):
pass
@staticmethod
def docker_enabled():
return default_value('DOCKER_ENABLED', 'true') == 'true'
@staticmethod
def docker_host_ip():
return default_value('DOCKER_HOST_IP', Config.agent_ip())
@staticmethod
def docker_home():
return default_value('DOCKER_HOME', '/var/lib/docker')
@staticmethod
def docker_uuid_file():
def_value = '{0}/.docker_uuid'.format(Config.state_dir())
return default_value('DOCKER_UUID_FILE', def_value)
@staticmethod
def docker_uuid():
return Config.get_uuid_from_file('DOCKER_UUID',
DockerConfig.docker_uuid_file())
@staticmethod
def url_base():
return default_value('DOCKER_URL_BASE', None)
@staticmethod
def api_version():
return default_value('DOCKER_API_VERSION', '1.18')
@staticmethod
def docker_required():
return default_value('DOCKER_REQUIRED', 'true') == 'true'
@staticmethod
def delegate_timeout():
return int(default_value('DOCKER_DELEGATE_TIMEOUT', '120'))
@staticmethod
def use_boot2docker_connection_env_vars():
use_b2d = default_value('DOCKER_USE_BOOT2DOCKER', 'false')
return use_b2d.lower() == 'true'
@staticmethod
def is_host_pidns():
return default_value('AGENT_PIDNS', 'container') == 'host'
def docker_client(version=None, base_url_override=None, tls_config=None):
if DockerConfig.use_boot2docker_connection_env_vars():
kwargs = kwargs_from_env(assert_hostname=False)
else:
kwargs = {'base_url': DockerConfig.url_base()}
if base_url_override:
kwargs['base_url'] = base_url_override
if tls_config:
kwargs['tls'] = tls_config
if version is None:
version = DockerConfig.api_version()
kwargs['version'] = version
log.debug('docker_client=%s', kwargs)
return Client(**kwargs)
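# Example usage (hypothetical endpoint; without an override the base URL
# comes from DOCKER_URL_BASE):
#   client = docker_client(base_url_override='tcp://127.0.0.1:2375')
#   client.info()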
def pull_image(image, progress):
_DOCKER_POOL.pull_image(image, progress)
def get_compute():
return _DOCKER_COMPUTE
try:
from docker import Client
except ImportError:
log.info('Disabling docker, docker-py not found')
_ENABLED = False
try:
if _ENABLED:
docker_client().info()
except Exception:
log.exception('Disabling docker, could not contact docker')
_ENABLED = False
if _ENABLED and DockerConfig.docker_enabled():
from .storage import DockerPool
from .compute import DockerCompute
from .delegate import DockerDelegate
from cattle import type_manager
_DOCKER_POOL = DockerPool()
_DOCKER_COMPUTE = DockerCompute()
_DOCKER_DELEGATE = DockerDelegate()
type_manager.register_type(type_manager.STORAGE_DRIVER, _DOCKER_POOL)
type_manager.register_type(type_manager.COMPUTE_DRIVER, _DOCKER_COMPUTE)
type_manager.register_type(type_manager.PRE_REQUEST_HANDLER,
_DOCKER_DELEGATE)
if not _ENABLED and DockerConfig.docker_required():
raise Exception('Failed to initialize Docker')
| sonchang/python-agent | cattle/plugins/docker/__init__.py | Python | apache-2.0 | 3,232 |
import os
import logging
from yaml import safe_load
from collections import namedtuple
log = logging.getLogger('kkross.conf_parser')
# -------------------------------------------------------------------------------------------------
def conf_parser(conf_file):
log.debug('Reading data from configuration file "%s"' % conf_file)
    with open(os.path.expanduser(conf_file)) as conf_fh:
        conf_data = safe_load(conf_fh)
    return namedtuple('Conf', conf_data.keys())(**conf_data)
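# Example usage (assuming a YAML file whose top-level keys include 'host'
# and 'port'):
#   conf = conf_parser('~/.kkross.yml')
#   log.info('Connecting to %s:%s' % (conf.host, conf.port))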
| Doctor-love/kkross | playground/kkross/conf_parser.py | Python | gpl-2.0 | 460 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from trml2pdf import parseString, parseNode
| addition-it-solutions/project-all | openerp/report/render/rml2pdf/__init__.py | Python | agpl-3.0 | 1,032 |
#!/usr/bin/env python
# coding: utf-8
import re
def rm(target, pattern):
pattern = re.compile(pattern, re.UNICODE)
return re.sub(pattern, '', target)
def number_translator(target):
    def num_han(base):
        # Convert a two-character Chinese numeral that was split on its
        # unit word: s[0] is the digit before the unit, s[1] the digit
        # after it, and base is one tenth of the unit value, so for u'万'
        # (base=1000) u'三万五' -> 3*10*1000 + 5*1000 = 35000.
        def get_num(s):
num = 0
if len(s) == 2:
num += __word2num__(s[0])*10*base + __word2num__(s[1])*base
return num
return get_num
target = __num__(target, u'[一二两三四五六七八九123456789]万[一二两三四五六七八九123456789](?!(千|百|十))', u'万', num_han(1000))
target = __num__(target, u'[一二两三四五六七八九123456789]千[一二两三四五六七八九123456789](?!(百|十))', u'千', num_han(100))
target = __num__(target, u'[一二两三四五六七八九123456789]百[一二两三四五六七八九123456789](?!十)', u'百', num_han(10))
target = __num0__(target, u'[零一二两三四五六七八九]')
target = __num0__(target, u'(?<=周)[天日]|(?<=星期)[天日]')
def num_digit(base):
def get_num(s):
num = 0
if len(s) == 0:
num += base
elif len(s) == 1:
coef = int(s[0])
if coef == 0:
num += base
else:
num += coef * base
elif len(s) == 2:
if s[0] == '':
num += base
else:
coef = int(s[0])
if coef == 0:
num += base
else:
num += coef * base
if s[1] != '':
num += int(s[1])
return num
return get_num
target = __num__(target, u'(?<!周)0?[0-9]?十[0-9]?|(?<!星期)0?[0-9]?十[0-9]?', u'十', num_digit(10))
target = __num__(target, u'0?[1-9]百[0-9]?[0-9]?', u'百', num_digit(100))
target = __num__(target, u'0?[1-9]千[0-9]?[0-9]?[0-9]?', u'千', num_digit(1000))
target = __num__(target, u'[0-9]+万[0-9]?[0-9]?[0-9]?[0-9]?', u'万', num_digit(10000))
return target
def __num__(target, regex, unit, get_num):
pattern = re.compile(regex, re.UNICODE)
res = u''
m = pattern.search(target)
while m:
group = m.group()
s = group.split(unit)
res += target[:m.start()] + str(get_num(s))
target = target[m.end():]
m = pattern.search(target)
res += target
return res
def __num0__(target, regex):
pattern = re.compile(regex, re.UNICODE)
res = u''
m = pattern.search(target)
while m:
res += target[:m.start()] + str(__word2num__(m.group()))
target = target[m.end():]
m = pattern.search(target)
res += target
return res
def __word2num__(s):
word_num_dict = {
u'零': 0, '0': 0, '': 0,
u'一': 1, '1': 1,
u'二': 2, u'两': 2, '2': 2,
u'三': 3, '3': 3,
u'四': 4, '4': 4,
u'五': 5, '5': 5,
u'六': 6, '6': 6,
u'七': 7, u'天': 7, u'日': 7, '7': 7,
u'八': 8, '8': 8,
u'九': 9, '9': 9,
}
return word_num_dict.get(s, -1)
if __name__ == '__main__':
t1 = u'一千三百五十二年以前'
t2 = u'两千零二年'
print number_translator(t1)
print number_translator(t2)
| hsinhuang/pytimeextract | time_expr/prehandler.py | Python | gpl-2.0 | 3,333 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import numpy as np
from pyspark import SparkContext
# $example on$
from pyspark.mllib.stat import Statistics
# $example off$
if __name__ == "__main__":
sc = SparkContext(appName="CorrelationsExample") # SparkContext
# $example on$
seriesX = sc.parallelize([1.0, 2.0, 3.0, 3.0, 5.0]) # a series
# seriesY must have the same number of partitions and cardinality as seriesX
seriesY = sc.parallelize([11.0, 22.0, 33.0, 33.0, 555.0])
# Compute the correlation using Pearson's method. Enter "spearman" for Spearman's method.
# If a method is not specified, Pearson's method will be used by default.
print("Correlation is: " + str(Statistics.corr(seriesX, seriesY, method="pearson")))
data = sc.parallelize(
[np.array([1.0, 10.0, 100.0]), np.array([2.0, 20.0, 200.0]), np.array([5.0, 33.0, 366.0])]
) # an RDD of Vectors
# calculate the correlation matrix using Pearson's method. Use "spearman" for Spearman's method.
# If a method is not specified, Pearson's method will be used by default.
print(Statistics.corr(data, method="pearson"))
# $example off$
sc.stop()
| lhfei/spark-in-action | spark-2.x/src/main/python/mllib/correlations_example.py | Python | apache-2.0 | 2,008 |
from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.config import config, ConfigSubsection, ConfigInteger, ConfigSlider, getConfigListEntry
config.plugins.OSDPositionSetup = ConfigSubsection()
config.plugins.OSDPositionSetup.dst_left = ConfigInteger(default = 0)
config.plugins.OSDPositionSetup.dst_width = ConfigInteger(default = 720)
config.plugins.OSDPositionSetup.dst_top = ConfigInteger(default = 0)
config.plugins.OSDPositionSetup.dst_height = ConfigInteger(default = 576)
def setPosition(dst_left, dst_width, dst_top, dst_height):
if dst_left + dst_width > 720:
dst_width = 720 - dst_left
if dst_top + dst_height > 576:
dst_height = 576 - dst_top
try:
open("/proc/stb/fb/dst_left", "w").write('%08x' % dst_left)
open("/proc/stb/fb/dst_width", "w").write('%08x' % dst_width)
open("/proc/stb/fb/dst_top", "w").write('%08x' % dst_top)
open("/proc/stb/fb/dst_height", "w").write('%08x' % dst_height)
	except IOError:
return
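# For instance, setPosition(10, 700, 10, 556) writes the zero-padded hex
# string '0000000a' to /proc/stb/fb/dst_left; the numbers are only an
# illustration of the proc interface used above.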
def setConfiguredPosition():
setPosition(int(config.plugins.OSDPositionSetup.dst_left.value), int(config.plugins.OSDPositionSetup.dst_width.value), int(config.plugins.OSDPositionSetup.dst_top.value), int(config.plugins.OSDPositionSetup.dst_height.value))
def main(session, **kwargs):
from overscanwizard import OverscanWizard
session.open(OverscanWizard, timeOut=False)
def startSetup(menuid):
return menuid == "system" and [(_("Overscan wizard"), main, "sd_position_setup", 0)] or []
def startup(reason, **kwargs):
setConfiguredPosition()
def Plugins(**kwargs):
from Plugins.Plugin import PluginDescriptor
return [PluginDescriptor(name = "Overscan wizard", description = "", where = PluginDescriptor.WHERE_SESSIONSTART, fnc = startup),
PluginDescriptor(name = "Overscan wizard", description = _("Wizard to arrange the overscan"), where = PluginDescriptor.WHERE_MENU, fnc = startSetup)]
| factorybuild/stbgui | lib/python/Plugins/SystemPlugins/OSDPositionSetup/plugin.py | Python | gpl-2.0 | 1,888 |
#!/usr/bin/env python3
###############################################################################
# Copyright 2019 The Apollo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
"""
This is a tool to extract useful information from given record files. It
self-checks the validity of the uploaded data, informs developers when the
data is not qualified, and significantly reduces the size of the uploaded data.
"""
from datetime import datetime
import os
import shutil
import six
import sys
import time
import yaml
from absl import app
from absl import flags
from google.protobuf import text_format
import numpy as np
from cyber.python.cyber_py3 import cyber
from cyber.python.cyber_py3.record import RecordReader
from cyber.proto import record_pb2
from modules.dreamview.proto import preprocess_table_pb2
from modules.tools.common.proto_utils import get_pb_from_text_file
from modules.tools.sensor_calibration.configuration_yaml_generator import ConfigYaml
from modules.tools.sensor_calibration.extract_static_data import get_subfolder_list, select_static_image_pcd
from modules.tools.sensor_calibration.proto import extractor_config_pb2
from modules.tools.sensor_calibration.sanity_check import sanity_check
from modules.tools.sensor_calibration.sensor_msg_extractor import GpsParser, ImageParser, PointCloudParser, PoseParser, ContiRadarParser
SMALL_TOPICS = [
'/apollo/canbus/chassis',
'/apollo/canbus/chassis_detail',
'/apollo/control',
'/apollo/control/pad',
'/apollo/drive_event',
'/apollo/guardian',
'/apollo/hmi/status',
'/apollo/localization/pose',
'/apollo/localization/msf_gnss',
'/apollo/localization/msf_lidar',
'/apollo/localization/msf_status',
'/apollo/monitor',
'/apollo/monitor/system_status',
'/apollo/navigation',
'/apollo/perception/obstacles',
'/apollo/perception/traffic_light',
'/apollo/planning',
'/apollo/prediction',
'/apollo/relative_map',
'/apollo/routing_request',
'/apollo/routing_response',
'/apollo/routing_response_history',
'/apollo/sensor/conti_radar',
'/apollo/sensor/delphi_esr',
'/apollo/sensor/gnss/best_pose',
'/apollo/sensor/gnss/corrected_imu',
'/apollo/sensor/gnss/gnss_status',
'/apollo/sensor/gnss/imu',
'/apollo/sensor/gnss/ins_stat',
'/apollo/sensor/gnss/odometry',
'/apollo/sensor/gnss/raw_data',
'/apollo/sensor/gnss/rtk_eph',
'/apollo/sensor/gnss/rtk_obs',
'/apollo/sensor/gnss/heading',
'/apollo/sensor/mobileye',
'/tf',
'/tf_static',
]
flags.DEFINE_string('config', '',
                    'protobuf text format configuration file absolute path')
flags.DEFINE_string('root_dir', '/apollo/modules/tools/sensor_calibration',
'program root dir')
FLAGS = flags.FLAGS
class Extractor(object):
def __init__(self):
        self.node = cyber.Node("sensor_calibration_preprocessor")
self.writer = self.node.create_writer("/apollo/dreamview/progress",
preprocess_table_pb2.Progress, 6)
self.config = extractor_config_pb2.DataExtractionConfig()
self.progress = preprocess_table_pb2.Progress()
self.progress.percentage = 0.0
self.progress.log_string = "Preprocessing in progress..."
self.progress.status = preprocess_table_pb2.Status.UNKNOWN
try:
get_pb_from_text_file(FLAGS.config, self.config)
except text_format.ParseError:
print(f'Error: Cannot parse {FLAGS.config} as text proto')
self.records = []
for r in self.config.records.record_path:
self.records.append(str(r))
self.start_timestamp = -1
self.end_timestamp = -1
if self.config.io_config.start_timestamp == "FLOAT_MIN":
self.start_timestamp = np.finfo(np.float32).min
else:
self.start_timestamp = np.float32(
self.config.io_config.start_timestamp)
if self.config.io_config.end_timestamp == "FLOAT_MAX":
self.end_timestamp = np.finfo(np.float32).max
else:
self.end_timestamp = np.float32(
self.config.io_config.end_timestamp)
@staticmethod
def process_dir(path, operation):
"""Create or remove directory."""
try:
if operation == 'create':
if os.path.exists(path):
print(f'folder: {path} exists')
else:
print(f'create folder: {path}')
os.makedirs(path)
elif operation == 'remove':
os.remove(path)
else:
print(
f'Error! Unsupported operation {operation} for directory.')
return False
except OSError as e:
print(f'Failed to {operation} directory: {path}. '
f'Error: {six.text_type(e)}')
return False
return True
@staticmethod
def get_sensor_channel_list(record_file):
"""Get the channel list of sensors for calibration."""
record_reader = RecordReader(record_file)
return set(channel_name
for channel_name in record_reader.get_channellist()
if 'sensor' in channel_name
or '/localization/pose' in channel_name)
@staticmethod
def validate_channel_list(channels, dictionary):
ret = True
for channel in channels:
if channel not in dictionary:
print(f'ERROR: channel {channel} does not exist in '
'record sensor channels')
ret = False
return ret
@staticmethod
def in_range(v, s, e):
        return s <= v <= e
@staticmethod
def build_parser(channel, output_path):
parser = None
if channel.endswith("/image"):
parser = ImageParser(output_path=output_path, instance_saving=True)
elif channel.endswith("/PointCloud2"):
parser = PointCloudParser(output_path=output_path,
instance_saving=True)
elif channel.endswith("/gnss/odometry"):
parser = GpsParser(output_path=output_path, instance_saving=False)
elif channel.endswith("/localization/pose"):
parser = PoseParser(output_path=output_path, instance_saving=False)
elif channel.startswith("/apollo/sensor/radar"):
parser = ContiRadarParser(output_path=output_path,
instance_saving=True)
else:
raise ValueError(f"Not Support this channel type: {channel}")
return parser
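    # e.g. build_parser('/apollo/sensor/lidar16/PointCloud2', '/tmp/out')
    # would return a PointCloudParser saving each message individually
    # (instance_saving=True); channel name and path here are illustrative.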
    def print_and_publish(self,
                          log_str,
                          status=preprocess_table_pb2.Status.UNKNOWN):
        """status: 0 for success, 1 for fail, 2 for unknown"""
        print(log_str)
        self.progress.log_string = log_str
self.progress.status = status
self.writer.write(self.progress)
time.sleep(0.5)
def extract_data(self, record_files, output_path, channels,
extraction_rates):
"""
Extract the desired channel messages if channel_list is specified.
Otherwise extract all sensor calibration messages according to
extraction rate, 10% by default.
"""
# all records have identical sensor channels.
sensor_channels = self.get_sensor_channel_list(record_files[0])
if (len(channels) > 0
and not self.validate_channel_list(channels, sensor_channels)):
print('The input channel list is invalid.')
return False
        # Extract all the sensor channels if channel_list is empty (no input arguments).
print(sensor_channels)
if len(channels) == 0:
channels = sensor_channels
# Declare logging variables
process_channel_success_num = len(channels)
process_channel_failure_num = 0
process_msg_failure_num = 0
channel_success = {}
channel_occur_time = {}
channel_output_path = {}
# channel_messages = {}
channel_parsers = {}
channel_message_number = {}
channel_processed_msg_num = {}
for channel in channels:
channel_success[channel] = True
channel_occur_time[channel] = -1
topic_name = channel.replace('/', '_')
channel_output_path[channel] = os.path.join(
output_path, topic_name)
self.process_dir(channel_output_path[channel], operation='create')
channel_parsers[channel] =\
self.build_parser(channel, channel_output_path[channel])
channel_message_number[channel] = 0
for record_file in record_files:
record_reader = RecordReader(record_file)
channel_message_number[
channel] += record_reader.get_messagenumber(channel)
channel_message_number[channel] = channel_message_number[
channel] // extraction_rates[channel]
channel_message_number_total = 0
for num in channel_message_number.values():
channel_message_number_total += num
channel_processed_msg_num = 0
# if channel in SMALL_TOPICS:
# channel_messages[channel] = list()
for record_file in record_files:
record_reader = RecordReader(record_file)
for msg in record_reader.read_messages():
if msg.topic in channels:
# Only care about messages in certain time intervals
msg_timestamp_sec = msg.timestamp / 1e9
if not self.in_range(msg_timestamp_sec,
self.start_timestamp,
self.end_timestamp):
continue
channel_occur_time[msg.topic] += 1
# Extract the topic according to extraction_rate
if channel_occur_time[msg.topic] % extraction_rates[
msg.topic] != 0:
continue
ret = channel_parsers[msg.topic].parse_sensor_message(msg)
channel_processed_msg_num += 1
self.progress.percentage = channel_processed_msg_num / \
channel_message_number_total * 90.0
# Calculate parsing statistics
if not ret:
process_msg_failure_num += 1
if channel_success[msg.topic]:
channel_success[msg.topic] = False
process_channel_failure_num += 1
process_channel_success_num -= 1
log_string = (
'Failed to extract data from channel: '
f'{msg.topic} in record {record_file}')
print(log_string)
self.progress.log_string = log_string
self.writer.write(self.progress)
        # traverse the dict; if any channel topic is stored as a list,
        # save that list as a single summary file, mostly a binary file
for channel, parser in channel_parsers.items():
self.save_combined_messages_info(parser, channel)
# Logging statics about channel extraction
self.print_and_publish(
(f"Extracted sensor channel number {len(channels)} "
f"from record files: {' '.join(record_files)}"))
self.print_and_publish(
(f'Successfully processed {process_channel_success_num} channels, '
f'and {process_channel_failure_num} was failed.'))
if process_msg_failure_num > 0:
self.print_and_publish(
f'Channel extraction failure number is {process_msg_failure_num}.',
preprocess_table_pb2.Status.FAIL)
return True
@staticmethod
def save_combined_messages_info(parser, channel):
if not parser.save_messages_to_file():
raise ValueError(
f"cannot save combined messages into single file for : {channel}"
)
if not parser.save_timestamps_to_file():
raise ValueError(f"cannot save tiemstamp info for {channel}")
@staticmethod
def generate_compressed_file(input_path,
input_name,
output_path,
compressed_file='sensor_data'):
"""
Compress data extraction directory as a single tar.gz archive
"""
cwd_path = os.getcwd()
os.chdir(input_path)
shutil.make_archive(base_name=os.path.join(output_path,
compressed_file),
format='gztar',
root_dir=input_path,
base_dir=input_name)
os.chdir(cwd_path)
@staticmethod
def generate_extraction_rate_dict(channels,
large_topic_extraction_rate,
small_topic_extraction_rate=1):
"""
Default extraction rate for small topics is 1, which means no sampling
"""
        # Validate extraction_rate, and set it as an integer.
if large_topic_extraction_rate < 1.0 or small_topic_extraction_rate < 1.0:
raise ValueError(
"Extraction rate must be a number no less than 1.")
large_topic_extraction_rate = np.floor(large_topic_extraction_rate)
small_topic_extraction_rate = np.floor(small_topic_extraction_rate)
rates = {}
for channel in channels:
if channel in SMALL_TOPICS:
rates[channel] = small_topic_extraction_rate
else:
rates[channel] = large_topic_extraction_rate
return rates
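    # Illustrative result (the lidar channel below is hypothetical):
    #   generate_extraction_rate_dict(
    #       ['/apollo/sensor/lidar16/PointCloud2', '/apollo/canbus/chassis'], 10)
    #   -> {'/apollo/sensor/lidar16/PointCloud2': 10.0,
    #       '/apollo/canbus/chassis': 1.0}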
@staticmethod
def validate_record(record_file):
"""Validate the record file."""
# Check the validity of a cyber record file according to header info.
record_reader = RecordReader(record_file)
header_msg = record_reader.get_headerstring()
header = record_pb2.Header()
header.ParseFromString(header_msg)
print(f"header is {header}")
if not header.is_complete:
print(f'Record file: {record_file} is not completed.')
return False
if header.size == 0:
print(f'Record file: {record_file}. size is 0.')
return False
if header.major_version != 1 and header.minor_version != 0:
print(
f'Record file: {record_file}. version [{header.major_version}: '
f'{header.minor_version}] is wrong.')
return False
if header.begin_time >= header.end_time:
print(
f'Record file: {record_file}. begin time [{header.begin_time}] '
f'is equal or larger than end time [{header.end_time}].')
return False
if header.message_number < 1 or header.channel_number < 1:
print(
f'Record file: {record_file}. [message:channel] number '
f'[{header.message_number}:{header.channel_number}] is invalid.'
)
return False
# There should be at least one sensor channel
sensor_channels = Extractor.get_sensor_channel_list(record_file)
if len(sensor_channels) < 1:
print(f'Record file: {record_file}. cannot find sensor channels.')
return False
return True
def validate_record_files(self, kword='.record.'):
        # load the file list from a directory if needed
file_abs_paths = []
if not isinstance(self.records, list):
raise ValueError("Record files must be in a list")
records = self.records
if len(records) == 1 and os.path.isdir(records[0]):
print(f'Load cyber records from: {records[0]}')
for f in sorted(os.listdir(records[0])):
if kword in f:
file_abs_path = os.path.join(records[0], f)
if Extractor.validate_record(file_abs_path):
file_abs_paths.append(file_abs_path)
else:
print(f'Invalid record file: {file_abs_path}')
else:
for f in records:
if not os.path.isfile(f):
raise ValueError("Input cyber record does not exist "
f"or not a regular file: {f}")
if Extractor.validate_record(f):
file_abs_paths.append(f)
else:
print(f'Invalid record file: {f}')
if len(file_abs_paths) < 1:
raise ValueError("All the input record files are invalid")
# Validate all record files have the same sensor topics
first_record_file = file_abs_paths[0]
default_sensor_channels = Extractor.get_sensor_channel_list(
first_record_file)
        for f in file_abs_paths[1:]:
            sensor_channels = Extractor.get_sensor_channel_list(f)
            if sensor_channels != default_sensor_channels:
                print(
                    f'Default sensor channel list in {first_record_file} is: ')
                print(default_sensor_channels)
                print(f'but sensor channel list in {f} is: ')
print(sensor_channels)
raise ValueError(
"The record files should contain the same channel list")
return file_abs_paths
def parse_channel_config(self):
channel_list = set()
extraction_rate_dict = dict()
for channel in self.config.channels.channel:
if channel.name in channel_list:
raise ValueError(
f"Duplicated channel config for : {channel.name}")
else:
channel_list.add(channel.name)
extraction_rate_dict[channel.name] = channel.extraction_rate
return channel_list, extraction_rate_dict
@staticmethod
def get_substring(str, prefix, suffix):
"""return substring, eclusive prefix or suffix"""
str_p = str.rfind(prefix) + len(prefix)
end_p = str.rfind(suffix)
return str[str_p:end_p]
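    # e.g. get_substring('_apollo_sensor_lidar16_PointCloud2',
    #                    '_apollo_sensor_', '_PointCloud2') returns 'lidar16',
    # matching how sensor names are recovered from folder names below.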
def reorganize_extracted_data(self,
tmp_data_path,
remove_input_data_cache=False):
root_path = os.path.dirname(os.path.normpath(tmp_data_path))
output_path = None
config_yaml = ConfigYaml()
task_name = self.config.io_config.task_name
if task_name == 'lidar_to_gnss':
subfolders = [
x for x in get_subfolder_list(tmp_data_path)
if '_apollo_sensor_' in x or '_localization_pose' in x
]
odometry_subfolders = [
x for x in subfolders if '_odometry' in x or '_pose' in x
]
lidar_subfolders = [x for x in subfolders if '_PointCloud2' in x]
print(lidar_subfolders)
print(odometry_subfolders)
if len(lidar_subfolders) == 0 or len(odometry_subfolders) != 1:
                raise ValueError('exactly one odometry source and at least '
                                 'one lidar are needed for sensor calibration')
odometry_subfolder = odometry_subfolders[0]
yaml_list = []
gnss_name = 'novatel'
multi_lidar_out_path = os.path.join(
root_path, 'multi_lidar_to_gnss_calibration')
output_path = multi_lidar_out_path
for lidar in lidar_subfolders:
# get the lidar name from folder name string
lidar_name = Extractor.get_substring(str=lidar,
prefix='_apollo_sensor_',
suffix='_PointCloud2')
# reorganize folder structure: each lidar has its raw data,
# corresponding odometry and configuration yaml file
if not Extractor.process_dir(multi_lidar_out_path, 'create'):
raise ValueError(
f'Failed to create directory: {multi_lidar_out_path}')
lidar_in_path = os.path.join(tmp_data_path, lidar)
lidar_out_path = os.path.join(multi_lidar_out_path, lidar)
if not os.path.exists(lidar_out_path):
shutil.copytree(lidar_in_path, lidar_out_path)
odometry_in_path = os.path.join(tmp_data_path,
odometry_subfolder)
odometry_out_path = os.path.join(multi_lidar_out_path,
odometry_subfolder)
if not os.path.exists(odometry_out_path):
shutil.copytree(odometry_in_path, odometry_out_path)
generated_config_yaml = os.path.join(
tmp_data_path, lidar_name + '_' + 'sample_config.yaml')
config_yaml.generate_task_config_yaml(
task_name=task_name,
source_sensor=lidar_name,
dest_sensor=gnss_name,
source_folder=lidar,
dest_folder=odometry_subfolder,
out_config_file=generated_config_yaml)
print(f'lidar {lidar_name} calibration data and configuration'
' are generated.')
yaml_list.append(generated_config_yaml)
out_data = {
'calibration_task': task_name,
'destination_sensor': gnss_name,
'odometry_file': odometry_subfolder + '/odometry'
}
sensor_files_directory_list = []
source_sensor_list = []
transform_list = []
for i in range(len(yaml_list)):
with open(yaml_list[i], 'r') as f:
data = yaml.safe_load(f)
sensor_files_directory_list.append(
data['sensor_files_directory'])
source_sensor_list.append(data['source_sensor'])
transform_list.append(data['transform'])
out_data['sensor_files_directory'] = sensor_files_directory_list
out_data['source_sensor'] = source_sensor_list
out_data['transform'] = transform_list
out_data['main_sensor'] = source_sensor_list[0]
table = preprocess_table_pb2.PreprocessTable()
user_config = os.path.join(FLAGS.root_dir, 'config',
'lidar_to_gnss_user.config')
if os.path.exists(user_config):
try:
get_pb_from_text_file(user_config, table)
except text_format.ParseError:
print(f'Error: Cannot parse {user_config} as text proto')
if table.HasField("main_sensor"):
out_data['main_sensor'] = table.main_sensor
multi_lidar_yaml = os.path.join(multi_lidar_out_path,
'sample_config.yaml')
with open(multi_lidar_yaml, 'w') as f:
yaml.safe_dump(out_data, f)
elif task_name == 'camera_to_lidar':
# data selection.
pair_data_folder_name = 'camera-lidar-pairs'
cameras, lidar = select_static_image_pcd(
path=tmp_data_path,
min_distance=5,
stop_times=4,
wait_time=3,
check_range=50,
image_static_diff_threshold=0.005,
output_folder_name=pair_data_folder_name,
image_suffix='.jpg',
pcd_suffix='.pcd')
lidar_name = Extractor.get_substring(str=lidar,
prefix='_apollo_sensor_',
suffix='_PointCloud2')
for camera in cameras:
camera_name = Extractor.get_substring(str=camera,
prefix='_apollo_sensor_',
suffix='_image')
out_path = os.path.join(
root_path,
camera_name + '_to_' + lidar_name + '_calibration')
output_path = out_path
if not Extractor.process_dir(out_path, 'create'):
raise ValueError(f'Failed to create directory: {out_path}')
# reorganize folder structure: each camera has its images,
# corresponding lidar pointclouds, camera initial extrinsics,
# intrinsics, and configuration yaml file
in_pair_data_path = os.path.join(tmp_data_path, camera,
pair_data_folder_name)
out_pair_data_path = os.path.join(out_path,
pair_data_folder_name)
shutil.copytree(in_pair_data_path, out_pair_data_path)
generated_config_yaml = os.path.join(out_path,
'sample_config.yaml')
config_yaml.generate_task_config_yaml(
task_name=task_name,
source_sensor=camera_name,
dest_sensor=lidar_name,
source_folder=None,
dest_folder=None,
out_config_file=generated_config_yaml)
elif task_name == 'radar_to_gnss':
print('not ready. stay tuned')
else:
raise ValueError(
f'Unsupported data extraction task for {task_name}')
if remove_input_data_cache:
print(f'removing the cache at {tmp_data_path}')
os.system(f'rm -rf {tmp_data_path}')
return output_path
def sanity_check_path(self, path):
"""Sanity check wrapper"""
result, log_str = sanity_check(path)
if result is True:
self.progress.percentage = 100.0
self.progress.status = preprocess_table_pb2.Status.SUCCESS
else:
self.progress.status = preprocess_table_pb2.Status.FAIL
self.progress.log_string = log_str
self.writer.write(self.progress)
time.sleep(0.5)
def create_tmp_directory(self):
"""Create directory to save the extracted data use time now() as folder name"""
output_relative_path = self.config.io_config.task_name + datetime.now(
).strftime("-%Y-%m-%d-%H-%M") + '/tmp/'
output_abs_path = os.path.join(self.config.io_config.output_path,
output_relative_path)
ret = self.process_dir(output_abs_path, 'create')
if not ret:
raise ValueError(
                f'Failed to create extracted data directory: {output_abs_path}')
return output_abs_path
def main(argv):
"""Main function"""
cyber.init("data_extractor")
extractor = Extractor()
valid_record_list = extractor.validate_record_files(kword='.record.')
channels, extraction_rates = extractor.parse_channel_config()
print(f'parsing the following channels: {channels}')
output_tmp_path = extractor.create_tmp_directory()
extractor.extract_data(valid_record_list, output_tmp_path, channels,
extraction_rates)
output_abs_path = extractor.reorganize_extracted_data(
tmp_data_path=output_tmp_path, remove_input_data_cache=True)
print('Data extraction is completed successfully!')
extractor.sanity_check_path(output_abs_path)
cyber.shutdown()
sys.exit(0)
if __name__ == '__main__':
# root_path = '/apollo/data/extracted_data/MKZ5-2019-05-15/lidar_to_gnss-2019-11-25-11-02/tmp'
# task_name = 'lidar_to_gnss'
# root_path = '/apollo/data/extracted_data/udevl002-2019-06-14/camera_to_lidar-2019-11-26-19-49/tmp'
# task_name = 'camera_to_lidar'
# reorganize_extracted_data(tmp_data_path=root_path, task_name=task_name)
app.run(main)
| ApolloAuto/apollo | modules/tools/sensor_calibration/extract_data.py | Python | apache-2.0 | 29,171 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Puttichai Lertkultanon <L.Puttichai@gmail.com>
#
# This file is part of pymanip.
#
# pymanip is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pymanip is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# pymanip. If not, see <http://www.gnu.org/licenses/>.
import openravepy as orpy
from Grasp import *
import numpy as np
import copy
from scipy.spatial import ConvexHull
import Utils
X = np.array([1., 0., 0.])
Y = np.array([0., 1., 0.])
Z = np.array([0., 0., 1.])
import pkg_resources
gripper_aabb = pkg_resources.resource_string\
('pymanip', 'utils/gripper_aabb.xml')
## assume that all parts are boxes
def PlacementPreprocess(manip_object, COM = None):
nparts = len(manip_object.GetLinks())
with manip_object:
manip_object.SetTransform(np.eye(4))
if COM is None:
## COM and total mass
COM = 0
M = 0
for l in manip_object.GetLinks():
m = l.GetMass()
M += m
COM += m*l.GetGlobalCOM()
COM /= M
    ## Relative transformation between the object's frame and the first
    ## link's frame. The origin of the object's frame is set to be at the
    ## COM; its rotation is the same as the first link's frame.
TCOM = np.eye(4)
TCOM[0][3] = COM[0]
TCOM[1][3] = COM[1]
TCOM[2][3] = COM[2]
TCOMinv = np.linalg.inv(TCOM)
pointsets = []
extents = []
manip_object.SetTransform(TCOMinv)
for i in range(nparts):
l = manip_object.GetLinks()[i]
extent = l.GetGeometries()[0].GetBoxExtents()
extent = np.around(extent, decimals = 6)
extents.append(extent)
com = l.GetGlobalCOM()
[dx, dy, dz] = extent
X = l.GetTransform()[0:3, 0]
Y = l.GetTransform()[0:3, 1]
Z = l.GetTransform()[0:3, 2]
## extreme points of the part
pointset = [com + dx*X + dy*Y + dz*Z,
com + dx*X + dy*Y - dz*Z,
com + dx*X - dy*Y + dz*Z,
com + dx*X - dy*Y - dz*Z,
com - dx*X + dy*Y + dz*Z,
com - dx*X + dy*Y - dz*Z,
com - dx*X - dy*Y + dz*Z,
com - dx*X - dy*Y - dz*Z]
pointsets += pointset
hull = ConvexHull(pointsets, qhull_options = 'E0.001')
## temp contains equations describing surfaces.
## each equation is in the form av + b = 0
## however, there is redundancy in temp.
## we need to remove duplicate equations
temp = hull.equations.tolist()
E = []
for i in xrange(len(temp)):
similar = False
for e in E:
if np.allclose(temp[i], e):
similar = True
break
if not similar:
E.append(temp[i])
E = np.asarray(E)
E = np.around(E, decimals = 6)
nsurfaces = len(E)
## Av + b <= 0 for v in the polyhedron
A = np.array(E[0:nsurfaces, 0:3])
b = np.array(E[0:nsurfaces, 3])
S = []
S.append(TCOM)
## S[i] is the relative transformation between the surface's
    ## coordinate frame and the object's coordinate frame,
## except S[0] which is TCOM
## NOTE: S contains only the surfaces that can be contact surfaces,
    ## i.e., those whose frame origin p lies inside the polyhedron.
for i in range(nsurfaces):
z = np.array(A[i]) ## surface normal
d = -np.array(b[i]) ## surface offset
p = d*z ## surface's frame origin
if (not np.all(np.around(np.dot(A, p) + b, decimals = 6) <= 0)):
## p lies outside the polygon
# print "surface {0} is an invalid contact surface".format(i)
continue
## x-axis
x = PerpendicularTo(z)
## y-axis
y = np.cross(z, x)
## each column of R is the axis described in the object's frame
R = np.vstack((x, y, z)).T
# p = np.dot(TCOM, np.append(p, 1))[0:3]
p = np.reshape(p, (3, 1))
T = np.vstack((np.hstack((R, p)), np.array([0., 0., 0., 1.])))
S.append(T)
return S
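# Sketch usage (assuming `body` is a box-composed KinBody already added to an
# OpenRAVE environment):
#   S = PlacementPreprocess(body)
#   TCOM, contact_surfaces = S[0], S[1:]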
def PerpendicularTo(v):
""" Finds an arbitrary perpendicular vector to *v*."""
# for two vectors (x, y, z) and (a, b, c) to be perpendicular,
# the following equation has to be fulfilled
# 0 = ax + by + cz
if (not (len(v) == 3)):
raise ValueError('dimension not compatible')
# x = y = z = 0 is not an acceptable solution
if v[0] == v[1] == v[2] == 0:
raise ValueError('zero-vector')
# If one dimension is zero, this can be solved by setting that to
# non-zero and the others to zero. Example: (4, 2, 0) lies in the
# x-y-Plane, so (0, 0, 1) is orthogonal to the plane.
if v[0] == 0:
return np.array([1., 0., 0.])
if v[1] == 0:
return np.array([0., 1., 0.])
if v[2] == 0:
return np.array([0., 0., 1.])
# arbitrarily set a = b = 1
# then the equation simplifies to
# c = -(x + y)/z
c = -(v[0] + v[1])/float(v[2])
d = 1./np.sqrt(2 + abs(c)**2)
return np.array([d, d, d*c])
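# e.g. for v = [1., 2., 3.] this yields c = -1 and the unit vector
# [1, 1, -1]/sqrt(3), whose dot product with v is zero as required.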
class BoxInfo(object):
DMAX = 0.0425 ## max width that the gripper can grip
GRIPPEROFFSET = 0.08
L = 0.35 #0.320592 (gripper length)
def __init__(self, manip_object, linkindex):
self.objectname = manip_object.GetName()
self.linkindex = linkindex
self.env = orpy.Environment()
Clone_Bodies = 1
self.env.Clone(manip_object.GetEnv(), Clone_Bodies)
self.collisionchecker = orpy.RaveCreateCollisionChecker(self.env, 'ode')
self.env.SetCollisionChecker(self.collisionchecker)
## remove all other body
for kinbody in self.env.GetBodies():
if not (kinbody.GetName() == self.objectname):
self.env.Remove(kinbody)
assert(len(self.env.GetBodies()) == 1)
self.object = self.env.GetKinBody(self.objectname)
self.link = self.object.GetLinks()[self.linkindex]
## load the gripper
self.gripper = self.env.ReadKinBodyXMLData(gripper_aabb)
self.env.Add(self.gripper)
## create a floor for testing
floor = orpy.RaveCreateKinBody(self.env, '')
floor.InitFromBoxes(np.array([[0.0, 0.0, 0.0, 10.0, 10.0, 0.05]]))
for geom in floor.GetLinks()[0].GetGeometries():
geom.SetDiffuseColor(np.array([0.6, 0.6, 0.6]))
floor.SetName('floor')
self.env.Add(floor)
Tfloor = np.eye(4)
Tfloor[2][3] -= 0.050001
floor.SetTransform(Tfloor)
self.extents = self.link.GetGeometries()[0].GetBoxExtents()
self.possibleapproachingdir = dict() # each entry depends on a contact surface
self.possibleslidingdir = dict() # each entry depends on an approaching dir
self.intervals = dict()
def GetPossibleSlidingDirections(self):
"""
        GetPossibleSlidingDirections computes the possible sliding
        directions of the gripper for each case of approaching direction
        and stores them in self.possibleslidingdir, where
        self.possibleslidingdir[approachingdir] is the list of possible
        sliding directions given 'approachingdir'.
"""
objx = self.extents[0]
objy = self.extents[1]
objz = self.extents[2]
## approaching direction is +X (and -X)
temp = []
if (objz < self.DMAX):
temp.append(pY)
if (objy < self.DMAX):
temp.append(pZ)
elif (objy < self.DMAX):
temp.append(pZ)
self.possibleslidingdir[pX] = temp
self.possibleslidingdir[mX] = temp
## approaching direction is +Y (and -Y)
temp = []
if (objz < self.DMAX):
temp.append(pX)
if (objx < self.DMAX):
temp.append(pZ)
elif (objx < self.DMAX):
temp.append(pZ)
self.possibleslidingdir[pY] = temp
self.possibleslidingdir[mY] = temp
## approaching direction is +Z (and -Z)
temp = []
if (objy < self.DMAX):
temp.append(pX)
if (objx < self.DMAX):
temp.append(pY)
elif (objx < self.DMAX):
temp.append(pY)
self.possibleslidingdir[pZ] = temp
self.possibleslidingdir[mZ] = temp
def Preprocess(self, transformationset):
"""
Preprocess examines valid approaching directions for each
object's transformation. Also for each pair of approaching
direction and sliding direction Preprocess examines the
sliding range.
transformationset contains every object's transformation T
that results in stable configurations.
"""
nsurfaces = len(transformationset)
for isurface in xrange(nsurfaces):
self.object.SetTransform(transformationset[isurface])
plink = self.link.GetGlobalCOM()
Tlink = self.link.GetTransform()
xvect = np.reshape(copy.copy(Tlink[0:3, pX]), (3, ))
yvect = np.reshape(copy.copy(Tlink[0:3, pY]), (3, ))
zvect = np.reshape(copy.copy(Tlink[0:3, pZ]), (3, ))
## for each object's contact surface check all six
## surfaces of the box
self.possibleapproachingdir[isurface] = []
for appdir in [pX, pY, pZ, mX, mY, mZ]:
## for each approachingdirection
appvector = np.reshape(copy.copy(Tlink[0:3, np.mod(appdir, 3)]), (3, ))
# approaching vector
if (appdir > 2):
appvector *= -1
aZ = np.dot(appvector, Z)
########## CHECK I
## check if the approached surface is in contact
if np.allclose(aZ, 1):
## the approaching direction is aligned with Z
if (plink[2] - self.extents[np.mod(appdir, 3)] < self.L):
                    ## this approaching direction is invalid.
## continue to the next direction.
continue
########## CHECK II
## check if the approached surface is perpendicular to the floor
elif np.allclose(aZ, 0):
ax = np.dot(appvector, xvect)
if (np.allclose(ax, 1) or np.allclose(-ax, 1)):
## approaching direction is x or -x
yZ = np.dot(yvect, Z)
zZ = np.dot(zvect, Z)
if (np.allclose(yZ, 1) or np.allclose(-yZ, 1)):
## the local y is parallel to Z
if (plink[2] + self.extents[1] < self.GRIPPEROFFSET):
                            ## this approaching direction is invalid.
## continue to the next direction.
continue
elif (np.allclose(zZ, 1) or np.allclose(-zZ, 1)):
## the local z is parallel to Z
if (plink[2] + self.extents[2] < self.GRIPPEROFFSET):
                            ## this approaching direction is invalid.
## continue to the next direction.
continue
ay = np.dot(appvector, yvect)
if (np.allclose(ay, 1) or np.allclose(-ay, 1)):
## approaching direction is y or -y
xZ = np.dot(xvect, Z)
zZ = np.dot(zvect, Z)
if (np.allclose(xZ, 1) or np.allclose(-xZ, 1)):
## the local y is parallel to Z
if (plink[2] + self.extents[0] < self.GRIPPEROFFSET):
                            ## this approaching direction is invalid.
## continue to the next direction.
continue
elif (np.allclose(zZ, 1) or np.allclose(-zZ, 1)):
## the local z is parallel to Z
if (plink[2] + self.extents[2] < self.GRIPPEROFFSET):
                            ## this approaching direction is invalid.
## continue to the next direction.
continue
az = np.dot(appvector, zvect)
if (np.allclose(az, 1) or np.allclose(-az, 1)):
## approaching direction is x or -x
xZ = np.dot(xvect, Z)
yZ = np.dot(yvect, Z)
if (np.allclose(xZ, 1) or np.allclose(-xZ, 1)):
## the local x is parallel to Z
if (plink[2] + self.extents[0] < self.GRIPPEROFFSET):
                            ## this approaching direction is invalid.
## continue to the next direction.
continue
elif (np.allclose(yZ, 1) or np.allclose(-yZ, 1)):
## the local y is parallel to Z
if (plink[2] + self.extents[1] < self.GRIPPEROFFSET):
                            ## this approaching direction is invalid.
## continue to the next direction.
continue
########## CHECK III
elif (aZ > 0):
normalvector = -1.0*appvector # normal vector to the surface
k1 = PerpendicularTo(normalvector)
k2 = np.cross(normalvector, k1)
## normal vector is pointing to the floor
theta = np.pi/2 - np.arccos(aZ)
tantheta = np.tan(theta)
if (np.allclose(np.dot(k1, Z), 0)):
## k1 is parallel to the floor
k2x = np.dot(k2, xvect)
if (np.allclose(k2x, 1) or np.allclose(-k2x, 1)):
## k2 is parallel to the local x
D = 2*self.extents[0]/tantheta
if D < self.L:
continue
k2y = np.dot(k2, yvect)
if (np.allclose(k2y, 1) or np.allclose(-k2y, 1)):
## k2 is parallel to the local y
D = 2*self.extents[1]/tantheta
if D < self.L:
continue
k2z = np.dot(k2, zvect)
if (np.allclose(k2z, 1) or np.allclose(-k2z, 1)):
## k2 is parallel to the local z
D = 2*self.extents[2]/tantheta
if D < self.L:
continue
elif (np.allclose(np.dot(k2, Z), 0)):
## k1 is parallel to the floor
k1x = np.dot(k1, xvect)
if (np.allclose(k1x, 1) or np.allclose(-k1x, 1)):
## k1 is parallel to the local x
D = 2*self.extents[0]/tantheta
if D < self.L:
continue
k1y = np.dot(k1, yvect)
if (np.allclose(k1y, 1) or np.allclose(-k1y, 1)):
## k1 is parallel to the local y
D = 2*self.extents[1]/tantheta
if D < self.L:
continue
k1z = np.dot(k1, zvect)
if (np.allclose(k1z, 1) or np.allclose(-k1z, 1)):
## k1 is parallel to the local z
D = 2*self.extents[2]/tantheta
if D < self.L:
continue
## this approaching direction passes the three tests
## it is now a candidate for obtaining sliding ranges
"""
interval = [interval_1, interval_2, ..., interval_n]
interval_i = (start, length)
"""
possibleslidingdir = self.possibleslidingdir[appdir]
validapproachingdir = False
if len(possibleslidingdir) == 2:
slidingdir1 = possibleslidingdir[0]
interval1 = self.ObtainSlidingRanges(Tlink, appdir,
slidingdir1, case = 2)
if len(interval1) > 0:
self.intervals[isurface, appdir, slidingdir1] = interval1
validapproachingdir = True
slidingdir2 = possibleslidingdir[1]
interval2 = self.ObtainSlidingRanges(Tlink, appdir,
slidingdir2, case = 2)
if len(interval2) > 0:
self.intervals[isurface, appdir, slidingdir2] = interval2
validapproachingdir = True
elif len(possibleslidingdir) == 1:
slidingdir = possibleslidingdir[0]
interval = self.ObtainSlidingRanges(Tlink, appdir, slidingdir)
if len(interval) > 0:
self.intervals[isurface, appdir, slidingdir] = interval
validapproachingdir = True
if not validapproachingdir:
continue
self.possibleapproachingdir[isurface].append(appdir)
def ObtainSlidingRanges(self, Tobj, approachingdir, slidingdir, case = 1):
## assume that the object is already in place
d = self.extents[np.mod(slidingdir, 3)]
step = 0.04 ## each step along the sliding direction is 4 cm.
Tstep = np.eye(4)
qgrasp = [approachingdir, slidingdir, 0.0]
## Tgripper places the gripper at the middle of the object
Tgripper = Utils.ComputeTGripper(Tobj, qgrasp, self.extents)
interval = []
if case == 2:
self.gripper.SetTransform(Tgripper)
if not (self.env.CheckCollision(self.gripper)):
interval = [(0, 0)]
elif case == 1:
domain = np.linspace(-d, d, int(2*d/step) + 1)
Tstep[0][3] = domain[0]
self.gripper.SetTransform(np.dot(Tgripper, Tstep))
if (self.env.CheckCollision(self.gripper)):
prev_in_collision = True
else:
prev_in_collision = False
prevstart = domain[0]
for i in xrange(1, len(domain)):
Tstep[0][3] = domain[i]
self.gripper.SetTransform(np.dot(Tgripper, Tstep))
if (self.env.CheckCollision(self.gripper)):
if prev_in_collision:
prevstart = domain[i]
else:
if np.allclose(domain[i - 1], prevstart):
                        ## this interval contains only one number, but we
                        ## relax it by assigning length = 0.02 (half a step)
                        ## so that when we sample this interval later, the
                        ## probability of obtaining a number here is not zero
interval.append((prevstart, 0.02))
else:
interval.append((prevstart, domain[i - 1] - prevstart))
prevstart = domain[i]
prev_in_collision = True
else:
if (prev_in_collision):
prev_in_collision = False
prevstart = domain[i]
if not np.allclose(domain[-1], prevstart):
interval.append((prevstart, domain[-1] - prevstart))
return np.around(interval, decimals = 6).tolist()
| Puttichai/pymanip | pymanip/utils/ObjectPreprocessing.py | Python | gpl-3.0 | 21,276 |
"""
Module for the `assistant` service.
This is a standard Django-Celery integration setup.
See `make run-assistant` for running during development with the
necessary environment variables.
See `assistant.Dockerfile` for running in production.
See
- https://docs.celeryproject.org/en/latest/django/first-steps-with-django.html
- https://django-configurations.readthedocs.io/en/stable/cookbook/#celery
"""
import os
from celery import Celery
# Set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "manager.settings")
os.environ.setdefault("DJANGO_CONFIGURATION", "Prod")
import configurations # noqa
configurations.setup()
from django.conf import settings # noqa
app = Celery("manager", broker=settings.BROKER_URL, backend=settings.CACHE_URL)
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# Namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load `tasks.py` modules from all registered Django app configs.
app.autodiscover_tasks()
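# A worker for this app could then be started with an invocation along the
# lines of (assumed, mirroring what `make run-assistant` would set up):
#   celery --app=manager.assistant worker --loglevel=info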
| stencila/hub | manager/manager/assistant.py | Python | apache-2.0 | 1,186 |
from flask import render_template
from flask import url_for
from flask import redirect
from flask import request
from datetime import datetime
from flask_login import current_user, login_required
from classes.operations.person_operations import person_operations
from classes.operations.project_operations import project_operations
from classes.operations.followed_person_operations import followed_person_operations
from classes.operations.personComment_operations import personComment_operations
from classes.look_up_tables import *
from classes.person import Person
from classes.operations.followed_project_operations import followed_project_operations
from classes.followed_project import FollowedProject
from classes.operations.team_operations import team_operations
from classes.operations.education_operations import education_operations
from classes.operations.skill_operations import skill_operations
from classes.operations.Experience_operations import experience_operations
from classes.operations.information_operations import information_operations
from classes.operations.language_operations import language_operations
from classes.operations.CV_operations import cv_operations
import os
from werkzeug.utils import secure_filename
from passlib.apps import custom_app_context as pwd_context
from templates_operations.user import *
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
def personal_default_page_config(request):
PersonProvider = person_operations()
Current_Person = PersonProvider.GetPerson(current_user.email)
comments = personComment_operations()
store_followed_projects = followed_project_operations()
EducationProvider = education_operations()
SkillProvider = skill_operations()
InformationProvider = information_operations()
LanguageProvider = language_operations()
TeamProvider = team_operations()
if request and 'delete' in request.form and request.method == 'POST':
p = PersonProvider.GetPersonByObjectId(request.form['delete'])
PersonProvider.DeletePerson(request.form['delete'])
if request and 'deleteComment' in request.form and request.method == 'POST':
comments.DeleteTeam(request.form['deleteComment'])
elif request and 'updateComment' in request.form and request.method == 'POST':
selectedComment = request.form['updateId']
updatedComment = request.form['updateComment']
comments.UpdatePersonComment(selectedComment, updatedComment)
elif request and 'addComment' in request.form and request.method == 'POST':
personId = Current_Person[0]
commentedPersonId = Current_Person[0]
newComment = request.form['addComment']
comments.AddPersonComment(personId, commentedPersonId, newComment)
elif 'unfollowProject' in request.form:
project_id = request.form['unfollowProject']
store_followed_projects.DeleteFollowedProject(project_id)
elif request and 'searchPeoplePage' in request.form and request.method == 'POST':
return redirect(url_for('site.people_search_person_page'))
elif request and 'searchProjectPage' in request.form and request.method == 'POST':
return redirect(url_for('site.projects_search_page'))
elif request and 'saveProfileSettings' in request.form and request.method == 'POST':
FollowedPersonProvider = followed_person_operations()
listFollowing = FollowedPersonProvider.GetFollowedPersonListByPersonId(Current_Person[0])
listFollowers = FollowedPersonProvider.GetFollowedPersonListByFollowedPersonId(Current_Person[0])
personComments = comments.GetPersonCommentsByCommentedPersonId(Current_Person[0])
listTitle = GetTitleList()
listAccount = GetAccountTypeList()
first_name = request.form['firstName']
last_name = request.form['lastName']
pswd = request.form['pswd']
accountType = request.form['account']
title = request.form['title']
file = request.files['file']
gender = request.form['r1']
if gender == 'male':
gender = False
elif gender == 'female':
gender = True
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
if filename != Current_Person[7]:
file.save(os.path.join('static/user_images', filename))
else:
filename = Current_Person[7]
elif Current_Person[7] is None:
if gender:
filename = 'noimage_female.jpg'
else:
filename = 'noimage_male.jpg'
else:
filename = Current_Person[7]
if pswd != "":
pswd = pwd_context.encrypt(request.form['pswd'])
UpdateUser(pswd, current_user.email)
PersonProvider.UpdatePerson(Current_Person[0], first_name, last_name, accountType, ' ', gender, title, filename, False)
return redirect(url_for('site.personal_default_page', Current_Person=Current_Person,
listFollowing=listFollowing, listFollowers=listFollowers,
personComments=personComments, listAccount=listAccount, listTitle=listTitle))
FollowedPersonProvider = followed_person_operations()
listFollowing = FollowedPersonProvider.GetFollowedPersonListByPersonId(Current_Person[0])
listFollowers = FollowedPersonProvider.GetFollowedPersonListByFollowedPersonId(Current_Person[0])
personComments = comments.GetPersonCommentsByCommentedPersonId(Current_Person[0])
followed_projects = store_followed_projects.GetFollowedProjectListByPersonId(Current_Person[0])
    for count in range(len(followed_projects)):
        temp = list(followed_projects[count])
        temp.append(list(TeamProvider.GetAllMembersByProjectId(followed_projects[count][8])))
        temp.append(len(store_followed_projects.GetFollowerPersonListByFollowedProjectId(followed_projects[count][8])))
        followed_projects[count] = tuple(temp)
now = datetime.datetime.now()
listTitle = GetTitleList()
listAccount = GetAccountTypeList()
store_projects = project_operations()
active_projects = store_projects.get_the_projects_of_a_person(Current_Person[0])
    for count in range(len(active_projects)):
        temp = list(active_projects[count])
        temp.append(list(TeamProvider.GetAllMembersByProjectId(active_projects[count][3])))
        temp.append(len(store_followed_projects.GetFollowerPersonListByFollowedProjectId(active_projects[count][3])))
        active_projects[count] = tuple(temp)
active_project_number = len(active_projects)
listEducation = EducationProvider.GetEducationListByActiveCVAndByPersonId(Current_Person[0])
listSkill = SkillProvider.GetSkillByActiveCVAndByPersonId(Current_Person[0])
listLanguage = LanguageProvider.GetAllLanguagesByActiveCVAndByPersonId(Current_Person[0])
listInformation = InformationProvider.get_all_information_by_ActiveCV_And_PersonId(Current_Person[0])
CvProvider = cv_operations()
activeCv = CvProvider.get_active_cv(Current_Person[0])
    ExperienceProvider = experience_operations()
if activeCv:
listExperience = ExperienceProvider.get_experiences_with_key(activeCv[0])
else:
listExperience = 'none'
return render_template('personal/default.html', current_time=now.ctime(), Current_Person=Current_Person,
listFollowing=listFollowing, listFollowers=listFollowers, followed_projects=followed_projects,
personComments=personComments, listAccount=listAccount, listTitle=listTitle,
active_projects=active_projects, active_project_number=active_project_number,listEducation=listEducation, listSkill=listSkill,
listExperience=listExperience, listLanguage=listLanguage, listInformation=listInformation)
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
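# Quick sanity check of the extension filter above (a sketch; assumes
# ALLOWED_EXTENSIONS is the module-level set of permitted extensions,
# e.g. {'jpg', 'png', 'gif'}):
#
#     allowed_file('avatar.jpg')  # True when 'jpg' is in ALLOWED_EXTENSIONS
#     allowed_file('avatar')      # False, since the name contains no '.'
#     allowed_file('avatar.JPG')  # False unless upper-case variants are listed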
# if submit_type == 'GET':
# store = followed_person_operations()
# result = store.GetFollowedPersonByObjectId(2)
# result = store.GetFollowedPersonList()
# p = FollowedPerson(None, 1, 2, datetime.now())
# store.AddFollowedPerson(p)
# now = datetime.now()
# return render_template('personal/default.html', current_time=now.ctime(), FollowedPersonList=result)
# from classes.operations.followed_person_operations import followed_person_operations
# from classes.followed_person import FollowedPerson
| itucsdb1611/itucsdb1611 | templates_operations/personal/default.py | Python | gpl-3.0 | 8,587 |
"""Handles simulator watch device pairs."""
from typing import Any, Dict, List
from isim.base_types import SimulatorControlBase, SimulatorControlType
class DevicePair(SimulatorControlBase):
"""Represents a device pair for the iOS simulator."""
raw_info: Dict[str, Any]
identifier: str
watch_udid: str
phone_udid: str
def __init__(self, device_pair_identifier: str, device_pair_info: Dict[str, Any]) -> None:
"""Construct a DevicePair object from simctl output.
device_pair_identifier: The unique identifier for this device pair.
device_pair_info: The dictionary representing the simctl output for a device pair.
"""
super().__init__(device_pair_info, SimulatorControlType.DEVICE_PAIR)
self.raw_info = device_pair_info
self.identifier = device_pair_identifier
self.watch_udid = device_pair_info["watch"]["udid"]
self.phone_udid = device_pair_info["phone"]["udid"]
def watch(self) -> None:
"""Return the device representing the watch in the pair."""
raise NotImplementedError("Function has not yet been implemented")
def phone(self) -> None:
"""Return the device representing the phone in the pair."""
raise NotImplementedError("Function has not yet been implemented")
def unpair(self) -> None:
"""Unpair a watch and phone pair."""
command = 'unpair "%s"' % (self.identifier,)
self._run_command(command)
def activate(self) -> None:
"""Activate a pair."""
command = 'pair_activate "%s"' % (self.identifier,)
self._run_command(command)
def __str__(self) -> str:
"""Return the string representation of the object."""
return self.identifier
def __repr__(self) -> str:
"""Return the string programmatic representation of the object."""
return str({"identifier": self.identifier, "raw_info": self.raw_info})
@staticmethod
def from_simctl_info(info: Dict[str, Any]) -> List["DevicePair"]:
"""Create a new device pair using the info from simctl."""
device_pairs = []
for device_pair_identifier, device_pair_info in info.items():
device_pairs.append(DevicePair(device_pair_identifier, device_pair_info))
return device_pairs
@staticmethod
def list_all() -> List["DevicePair"]:
"""Return all available device pairs."""
device_pair_info = SimulatorControlBase.list_type(SimulatorControlType.DEVICE_PAIR)
return DevicePair.from_simctl_info(device_pair_info)
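# Minimal usage sketch (assumes Xcode's simctl is installed and at least one
# watch/phone pair exists on this machine):
if __name__ == "__main__":
    for pair in DevicePair.list_all():
        print("{0}: watch={1} phone={2}".format(
            pair.identifier, pair.watch_udid, pair.phone_udid))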
| dalemyers/xcrun | isim/device_pair.py | Python | mit | 2,574 |
"""
Pandoc filter using panflute
"""
import panflute as pf
def prepare(doc):
pass
def action(elem, doc):
if isinstance(elem, pf.Element) and doc.format == 'latex':
pass
# return None -> element unchanged
# return [] -> delete element
def strong2underline(elem, doc):
if isinstance(elem, pf.Strong):
return pf.Underline(*elem.content)
def finalize(doc):
pass
def main(doc=None):
return pf.run_filters([action, strong2underline],
prepare=prepare,
finalize=finalize,
doc=doc)
if __name__ == '__main__':
main()
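# Typical invocation (a sketch; the file names are placeholders):
#
#     pandoc input.md --filter ./underline.py -o output.tex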
| sergiocorreia/panflute | tests/filters/underline.py | Python | bsd-3-clause | 643 |
import pytest
from unittest import mock
import os
from django.utils.timezone import now, timedelta
from awx.main.tasks import (
RunProjectUpdate, RunInventoryUpdate,
awx_isolated_heartbeat,
isolated_manager
)
from awx.main.models import (
ProjectUpdate, InventoryUpdate, InventorySource,
Instance, InstanceGroup
)
@pytest.fixture
def scm_revision_file(tmpdir_factory):
# Returns path to temporary testing revision file
revision_file = tmpdir_factory.mktemp('revisions').join('revision.txt')
with open(str(revision_file), 'w') as f:
f.write('1234567890123456789012345678901234567890')
return os.path.join(revision_file.dirname, 'revision.txt')
@pytest.mark.django_db
class TestDependentInventoryUpdate:
def test_dependent_inventory_updates_is_called(self, scm_inventory_source, scm_revision_file):
task = RunProjectUpdate()
task.revision_path = scm_revision_file
proj_update = scm_inventory_source.source_project.create_project_update()
with mock.patch.object(RunProjectUpdate, '_update_dependent_inventories') as inv_update_mck:
with mock.patch.object(RunProjectUpdate, 'release_lock'):
task.post_run_hook(proj_update, 'successful')
inv_update_mck.assert_called_once_with(proj_update, mock.ANY)
def test_no_unwanted_dependent_inventory_updates(self, project, scm_revision_file):
task = RunProjectUpdate()
task.revision_path = scm_revision_file
proj_update = project.create_project_update()
with mock.patch.object(RunProjectUpdate, '_update_dependent_inventories') as inv_update_mck:
with mock.patch.object(RunProjectUpdate, 'release_lock'):
task.post_run_hook(proj_update, 'successful')
assert not inv_update_mck.called
def test_dependent_inventory_updates(self, scm_inventory_source):
task = RunProjectUpdate()
scm_inventory_source.scm_last_revision = ''
proj_update = ProjectUpdate.objects.create(project=scm_inventory_source.source_project)
with mock.patch.object(RunInventoryUpdate, 'run') as iu_run_mock:
task._update_dependent_inventories(proj_update, [scm_inventory_source])
assert InventoryUpdate.objects.count() == 1
inv_update = InventoryUpdate.objects.first()
iu_run_mock.assert_called_once_with(inv_update.id)
assert inv_update.source_project_update_id == proj_update.pk
def test_dependent_inventory_project_cancel(self, project, inventory):
'''
Test that dependent inventory updates exhibit good behavior on cancel
of the source project update
'''
task = RunProjectUpdate()
proj_update = ProjectUpdate.objects.create(project=project)
kwargs = dict(
source_project=project,
source='scm',
source_path='inventory_file',
update_on_project_update=True,
inventory=inventory
)
is1 = InventorySource.objects.create(name="test-scm-inv", **kwargs)
is2 = InventorySource.objects.create(name="test-scm-inv2", **kwargs)
def user_cancels_project(pk):
ProjectUpdate.objects.all().update(cancel_flag=True)
with mock.patch.object(RunInventoryUpdate, 'run') as iu_run_mock:
iu_run_mock.side_effect = user_cancels_project
task._update_dependent_inventories(proj_update, [is1, is2])
# Verify that it bails after 1st update, detecting a cancel
assert is2.inventory_updates.count() == 0
iu_run_mock.assert_called_once()
class MockSettings:
AWX_ISOLATED_PERIODIC_CHECK = 60
CLUSTER_HOST_ID = 'tower_1'
@pytest.mark.django_db
class TestIsolatedManagementTask:
@pytest.fixture
def control_group(self):
return InstanceGroup.objects.create(name='alpha')
@pytest.fixture
def control_instance(self, control_group):
return control_group.instances.create(hostname='tower_1')
@pytest.fixture
def needs_updating(self, control_group):
ig = InstanceGroup.objects.create(name='thepentagon', controller=control_group)
inst = ig.instances.create(hostname='isolated', capacity=103)
inst.last_isolated_check = now() - timedelta(seconds=MockSettings.AWX_ISOLATED_PERIODIC_CHECK)
inst.save()
return ig
@pytest.fixture
def just_updated(self, control_group):
ig = InstanceGroup.objects.create(name='thepentagon', controller=control_group)
inst = ig.instances.create(hostname='isolated', capacity=103)
inst.last_isolated_check = now()
inst.save()
return inst
@pytest.fixture
def old_version(self, control_group):
ig = InstanceGroup.objects.create(name='thepentagon', controller=control_group)
inst = ig.instances.create(hostname='isolated-old', capacity=103)
inst.save()
return inst
def test_takes_action(self, control_instance, needs_updating):
original_isolated_instance = needs_updating.instances.all().first()
with mock.patch('awx.main.tasks.settings', MockSettings()):
with mock.patch.object(isolated_manager.IsolatedManager, 'health_check') as check_mock:
awx_isolated_heartbeat()
iso_instance = Instance.objects.get(hostname='isolated')
call_args, _ = check_mock.call_args
assert call_args[0][0] == iso_instance
assert iso_instance.last_isolated_check > original_isolated_instance.last_isolated_check
assert iso_instance.modified == original_isolated_instance.modified
def test_does_not_take_action(self, control_instance, just_updated):
with mock.patch('awx.main.tasks.settings', MockSettings()):
with mock.patch.object(isolated_manager.IsolatedManager, 'health_check') as check_mock:
awx_isolated_heartbeat()
iso_instance = Instance.objects.get(hostname='isolated')
check_mock.assert_not_called()
assert iso_instance.capacity == 103
| GoogleCloudPlatform/sap-deployment-automation | third_party/github.com/ansible/awx/awx/main/tests/functional/test_tasks.py | Python | apache-2.0 | 6,092 |
my_expr = True
# language=regexp
pattern = r'{my_e<caret>'
| siosio/intellij-community | python/testData/completion/fStringLikeCompletionDoesNotWorkInStringWithInjections.py | Python | apache-2.0 | 59 |
"""
Django settings for projetoCP project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from dj_database_url import parse as db_url
from decouple import config
from unipath import Path
BASE_DIR = Path(__file__).parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'v-%^_(17=$dyi6a%(h(*(95gyi!49voe%#vsxsdn4+3+7h0=+#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
STATIC_ROOT = 'staticfiles'
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'projetoCP.urls'
WSGI_APPLICATION = 'projetoCP.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': config(
'DATABASE_URL',
default='sqlite:///' + BASE_DIR.child('db.sqlite3'),
cast=db_url),
}
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
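# Example .env file consumed by decouple's config() above (a sketch; the URL
# is a placeholder, not a real credential):
#
#     DATABASE_URL=postgres://user:password@localhost:5432/projetocp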
| luispaulo8402/projetoCP | projetoCP/settings.py | Python | mit | 2,167 |
import codecs
from functools import wraps
import json
from urlparse import urljoin, urlunsplit
from bitcoin import decode_privkey, encode_privkey
import click
import requests
from requests.exceptions import ConnectionError, HTTPError
from blocks import TransientBlock, block_structure
import rlp
from transactions import Transaction, contract
import utils
from . import __version__
from .config import read_config
DEFAULT_GASPRICE = 10**12
DEFAULT_STARTGAS = 10000
class APIClient(object):
"""A client sending HTTP request to an :class:`~apiserver.APIServer`.
:param host: the hostname of the server
:param port: the server port to use
:param path: the api path prefix
"""
def __init__(self, host, port, path=''):
self.base_url = urlunsplit((
'http', '{0}:{1}'.format(host, port), path, '', ''))
def request(self, path, method='GET', data=None):
"""Send a request to the server.
:param path: path specifying the api command
:param method: the HTTP method to use ('GET', 'PUT', etc.)
:param data: the data to attach to the request
:returns: the server's JSON response deserialized to a python object
:raises: :exc:`~requests.exceptions.HTTPError` if the server reports an
error
:raises: :exc:`~requests.exceptions.ConnectionError` if setting up the
connection to the server failed
"""
url = urljoin(self.base_url, path)
response = requests.request(method, url, data=data)
response.raise_for_status()
return response.json()
def getaccount(self, address):
"""Request data associated with an account.
The returned `dict` will have the following keys:
- ``'nonce'``
- ``'balance'``
- ``'code'``
- ``'storage'``
:param address: the account's hex-encoded address
"""
return self.request('/accounts/{0}'.format(address))
def applytx(self, tx):
"""Send a transaction to the server
The server will validate the transaction, add it to its list of pending
transactions and further broadcast it to its peers.
:param tx: a :class:`~transactions.Transaction`
:returns: the response from the server
:raises: :exc:`~requests.exceptions.HTTPError` if the validation on the
server fails, e.g. due to a forged signature or an invalid
nonce (status code 400).
"""
txdata = tx.hex_serialize(True)
return self.request('/transactions/', 'PUT', txdata)['transactions'][0]
def getblock(self, id):
"""Request a certain block in the server's blockchain.
:param id: the block hash, the block number, or the hash of an
arbitrary transaction in the block
:raises: :exc:`~requests.exceptions.HTTPError` if the server can not
find the requested block (status code 404)
"""
response = self.request('/blocks/{0}'.format(id))
return response['blocks'][0]
def getchildren(self, block_hash):
"""For a given parent block, request the block hashes of its children.
:param block_hash: the hash of the parent block
:raises: :exc:`~requests.exceptions.HTTPError`: if the server can not
find a block with the given hash (status code 404)
"""
return self.request('/blocks/{0}/children'.format(block_hash))['children']
def gettx(self, tx_hash):
"""Request a specific transaction.
:param tx_hash: the hex-encoded transaction hash
:returns: a :class:`~transactions.Transaction`
:raises: :exc:`~requests.exceptions.HTTPError`: if the server does not
know about the requested transaction (status code 404)
"""
response = self.request('/transactions/{0}'.format(tx_hash))
tx_dict = response['transactions'][0]
tx = Transaction(int(tx_dict['nonce']),
int(tx_dict['gasprice']),
int(tx_dict['startgas']),
tx_dict['to'],
int(tx_dict['value']),
tx_dict['data'][2:],
int(tx_dict['v']),
int(tx_dict['r']),
int(tx_dict['s']))
print(tx_dict)
return tx
def getpending(self):
"""Request a list of pending transactions."""
response = self.request('/pending/')['transactions']
print(response)
txs = []
for tx_dict in response:
txs.append(Transaction(int(tx_dict['nonce']),
int(tx_dict['gasprice']),
int(tx_dict['startgas']),
tx_dict['to'],
int(tx_dict['value']),
tx_dict['data'][2:],
int(tx_dict['v']),
int(tx_dict['r']),
int(tx_dict['s'])))
return txs
def trace(self, tx_hash):
"""Request the trace left by a transaction during its processing.
:param tx_hash: the hex-encoded transaction hash
:raises: :exc:`~requests.exceptions.HTTPError` if the server can not
find the transaction (status code 404)
:returns: a list of dicts, expressing the single footprints
"""
res = self.request('/trace/{0}'.format(tx_hash))
return res['trace']
def dump(self, id):
"""Request a block including the corresponding world state.
:param id: either the block hash or the hash of one transaction in the
block
"""
res = self.request('/dump/{0}'.format(id))
return res
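# Usage sketch for APIClient (host and port are placeholders; a pyethereum
# node must be serving the API there):
#
#     client = APIClient('127.0.0.1', 30203)
#     print client.getaccount('0' * 40)['balance']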
def handle_connection_errors(f):
"""Decorator that handles ConnectionErrors and HTTPErrors by printing an
appropriate error message and exiting.
"""
@wraps(f)
def new_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except ConnectionError as e:
msg = ('Could not establish connection to server '
'{0}'.format(e.message))
raise click.ClickException(msg)
except HTTPError as e:
res = e.response
msg = 'HTTP request failed ({0} {1})'.format(res.status_code,
res.reason)
raise click.ClickException(msg)
return new_f
def pass_client(f):
"""Decorator that passes a :class:`~ethclient.APIClient` instance and
handles unsuccessful requests as well as failed connections by printing an
error message.
"""
raw_pass_client = click.make_pass_decorator(APIClient)
return raw_pass_client(handle_connection_errors(f))
class PrivateKey(click.ParamType):
"""A parameter type for private keys.
Inputs are accepted if they are valid private keys in either hex or WIF.
"""
name = 'private key'
def convert(self, value, param, ctx):
"""Convert the given private key to its integer representation."""
try:
return decode_privkey(value)
except Exception:
# unfortunately pybitcointools raises no more specific exception
self.fail('{} is not a valid private key'.format(value),
param, ctx)
class Binary(click.ParamType):
"""A parameter type for binary data encoded as a hexadecimal string.
Inputs are accepted if they are either bytearrays or contain only
hexadecimal digits (0-9, a-f).
"""
name = 'binary'
def __init__(self, size=None):
"""
:param size: the expected data size in bytes, or None denoting the lack
of a constraint (default).
"""
super(Binary, self).__init__()
self.size = size
def convert(self, value, param, ctx):
"""Convert a hex-string to a bytearray."""
if not isinstance(value, bytearray): # value is raw input
try:
binary = bytearray(codecs.decode(value, 'hex'))
except TypeError:
msg = 'ill hex encoding '
if len(value) % 2 == 1:
msg += '({} is odd length)'
else:
msg += '({} contains non-hexadecimal digits)'
self.fail(msg.format(value), param, ctx)
else:
if self.size and len(binary) != self.size: # check length
msg = 'invalid size (expected {0}, got {1} bytes)'
self.fail(msg.format(self.size, len(binary)))
else:
return binary
else: # value has been converted before
# this branch ensures idempotency as required by Click
return value
class Hash(Binary):
"""A parameter type for hashes, such as addresses, tx hashes, etc.
Inputs are checked for being well encoded in hex, but not converted to
bytearrays.
:param name: the name to use, defaulting to `"n-byte hash"` where `n`
denotes the hash's size.
:param size: the expected length of the hash in bytes (default 32)
"""
def __init__(self, name=None, size=32):
if name:
self.name = name
else:
self.name = '{0}-byte hash'.format(size)
if size <= 0:
raise ValueError('Hashes must be at least 1 byte long')
super(Hash, self).__init__(size)
def convert(self, value, param, ctx):
value = super(Hash, self).convert(value, param, ctx)
return codecs.encode(value, 'hex')
ADDRESS = Hash(size=20)
TXHASH = Hash(size=32)
BLOCKHASH = Hash(size=32)
PRIVKEY = PrivateKey()
NONNEG_INT = click.IntRange(min=0)
NONNEG_INT.name = 'integer'
POS_INT = click.IntRange(min=1)
POS_INT.name = 'integer'
nonce_option = click.option('--nonce', '-n', type=NONNEG_INT, default=0,
show_default=True,
help='the number of transactions already sent by '
'the sender\'s account')
gasprice_option = click.option('--gasprice', '-p', type=NONNEG_INT,
default=DEFAULT_GASPRICE, show_default=True,
help='the amount of ether paid for each unit '
'of gas consumed by the transaction')
startgas_option = click.option('--startgas', '-g', type=NONNEG_INT,
default=DEFAULT_STARTGAS, show_default=True,
help='the maximum number of gas units the '
'transaction is allowed to consume')
receiver_option = click.option('--to', '-t', type=ADDRESS, required=True,
help='the receiving address')
value_option = click.option('--value', '-v', type=NONNEG_INT, default=0,
show_default=True,
help='the amount of ether sent along with the '
'transaction')
data_option = click.option('--data', '-d', type=Binary(), default='',
help='additional hex-encoded data packed in the '
'transaction [default: empty]')
code_option = click.option('--code', '-c', type=Binary(), default='',
help='the EVM code in hex-encoding [default: '
'empty]')
privkey_option = click.option('--key', '-k', type=PRIVKEY, required=True,
help='the private key to sign with')
def print_version(ctx, param, value):
"""Callback for the version flag.
If the flag is set, print the version and exit. Otherwise do nothing.
"""
if not value or ctx.resilient_parsing:
return
click.echo('pyethclient {0}'.format(__version__))
ctx.exit()
@click.group()
@click.pass_context
@click.option('--version', is_flag=True, is_eager=True, expose_value=False,
callback=print_version)
@click.option('--host', '-H', help='API server host')
@click.option('--port', '-p', type=POS_INT, help='API server host port')
def ethclient(ctx, host, port):
"""pyethclient is collection of commands allowing the interaction with a
running pyethereum instance.
"""
config = read_config()
if not host:
host = config.get('api', 'listen_host')
if not port:
port = int(config.get('api', 'listen_port'))
path = config.get('api', 'api_path')
ctx.obj = APIClient(host, port, path)
@ethclient.command()
@click.argument('string')
def sha3(string):
"""Calculate the SHA3-256 hash of some input.
This command calculates the 256-bit SHA3 hash of STRING and prints the
result in hex-encoding. STRING is interpreted as a latin-1 encoded byte
array.
"""
try:
byte_array = string.encode('latin1')
except UnicodeEncodeError:
raise click.BadParameter('STRING must be encoded in latin-1')
else:
click.echo(utils.sha3(byte_array).encode('hex'))
@ethclient.command()
@click.argument('key', type=PRIVKEY)
def privtoaddr(key):
"""Derive an address from a private key.
KEY must either be a raw private key in hex encoding or a WIF string.
The resulting address will be printed in hex encoding.
"""
click.echo(utils.privtoaddr(encode_privkey(key, 'hex')))
@ethclient.command()
@nonce_option
@gasprice_option
@startgas_option
@receiver_option
@value_option
@data_option
def mktx(nonce, gasprice, startgas, to, value, data):
"""Assemble an unsigned transaction.
The result is the hex representation of the transaction in RLP encoding.
"""
tx = Transaction(nonce, gasprice, startgas, to, value,
str(data))
click.echo(tx.hex_serialize(False))
@ethclient.command()
@nonce_option
@gasprice_option
@startgas_option
@value_option
@code_option
def mkcontract(nonce, gasprice, startgas, value, code):
"""Assemble a contract creating transaction.
The result is the hex representation of the transaction in RLP encoding.
"""
ct = contract(nonce, gasprice, startgas, value, str(code))
click.echo(ct.hex_serialize(False))
@ethclient.command()
@click.argument('transaction', type=Binary())
@click.argument('key', type=PRIVKEY)
def signtx(transaction, key):
"""Sign a previously created transaction.
TRANSACTION must be the hex encoded transaction, as for instance created
using mktx or mkcontract. If it has already been signed before, its
signature will be replaced.
KEY must be the private key to sign with, in hexadecimal encoding or WIF.
The signed transaction will be printed in hex encoding.
"""
try:
tx = Transaction.deserialize(str(transaction))
except AssertionError:
raise click.BadParameter('Unable to deserialize TRANSACTION.')
tx.sign(encode_privkey(key, 'hex'))
click.echo(tx.hex_serialize(True))
def pecho(json_dict):
"""Pretty print a `dict`"""
click.echo(json.dumps(json_dict, indent=4))
@ethclient.command()
@pass_client
@click.argument('transaction', type=Binary())
def applytx(client, transaction):
"""Absorb a transaction into the next block.
This command sends a transaction to the server, which will presumably
validate it, include it in its memory pool, and further announce it to the
network. The server's response will be returned.
TRANSACTION must a signed transaction in hex-encoding.
"""
tx = Transaction.deserialize(str(transaction))
pecho(client.applytx(tx))
@ethclient.command()
@pass_client
@gasprice_option
@startgas_option
@receiver_option
@value_option
@data_option
@privkey_option
def quicktx(client, gasprice, startgas, to, value, data, key):
"""Create and finalize a transaction.
This command is a shortcut that chains getnonce, mktx, signtx, and applytx.
It returns the server's response.
"""
encoded_key = encode_privkey(key, 'hex')
nonce = int(client.getaccount(utils.privtoaddr(encoded_key))['nonce'])
tx = Transaction(nonce, gasprice, startgas, to, value, str(data))
tx.sign(encode_privkey(key, 'hex'))
pecho(client.applytx(tx))
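# Example invocation (a sketch; the address and key are placeholders):
#
#     pyethclient quicktx -t 3030303030303030303030303030303030303030 \
#         -v 1000 -k <hex-encoded-private-key>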
@ethclient.command()
@pass_client
@gasprice_option
@startgas_option
@value_option
@code_option
@privkey_option
def quickcontract(client, gasprice, startgas, value, code, key):
"""Create and finalize a contract.
This command is a shortcut that chains getnonce, mkcontract, signtx, and
applytx. In addition to the server's response, it returns the address of
the newly created contract.
"""
encoded_key = encode_privkey(key, 'hex')
sender = utils.privtoaddr(encoded_key)
nonce = int(client.getaccount(sender)['nonce'])
tx = contract(nonce, gasprice, startgas, value, str(code))
tx.sign(encoded_key)
response = client.applytx(tx)
pecho({
'address': tx.contract_address(),
'transaction': response})
@ethclient.command()
@pass_client
@click.argument('address', type=ADDRESS)
def getbalance(client, address):
"""Retrieve the balance of an account."""
click.echo(client.getaccount(address)['balance'])
@ethclient.command()
@pass_client
@click.argument('address', type=ADDRESS)
def getcode(client, address):
"""Print the EVM code of an account."""
click.echo(client.getaccount(address)['code'])
@ethclient.command()
@pass_client
@click.argument('address', type=ADDRESS)
def getnonce(client, address):
"""Return an account's nonce."""
click.echo(client.getaccount(address)['nonce'])
@ethclient.command()
@pass_client
@click.argument('address', type=ADDRESS)
def getstate(client, address):
"""Print an account's storage contents.
The output will be hex encoded. Non-contract accounts have empty storages.
"""
click.echo(client.getaccount(address)['storage'])
@ethclient.command()
@pass_client
@click.option('--txhash', '-t', type=TXHASH, default=None,
help='the hash of one transaction in the block')
@click.option('--blockhash', '-b', type=BLOCKHASH, default=None,
help='the hash of the block')
@click.option('--blocknumber', '-n', type=NONNEG_INT, default=None,
help='the block\'s number in the chain')
def getblock(client, txhash, blockhash, blocknumber):
"""Fetch a block from the block chain.
The block must either be specified by its block hash, its number in the
chain, or by a transaction included in the block.
"""
if sum(map(lambda p: p is None, (txhash, blockhash, blocknumber))) != 2:
raise click.BadParameter('Exactly one of the options --txhash, '
'--blockhash and --blocknumber must be '
'given.')
else:
pecho(client.getblock(txhash or blockhash or blocknumber))
@ethclient.command()
@pass_client
@click.argument('txhash', type=TXHASH)
def gettx(client, txhash):
"""Show a transaction from the block chain.
TXHASH must be the hex encoded hash of the transaction.
"""
pecho(client.gettx(txhash).to_dict())
@ethclient.command()
@pass_client
def getpending(client):
"""List all pending transactions."""
pecho([tx.to_dict() for tx in client.getpending()])
@ethclient.command()
@pass_client
@click.argument('txhash', type=TXHASH)
@click.option('--print/--json', 'print_', is_flag=True, default=True,
help='Display the trace human readably [default] or in JSON.')
def trace(client, txhash, print_):
"""Read the trace left by a transaction.
The transaction must be specified by its hash TXHASH.
"""
if print_:
out = []
for l in client.trace(txhash):
name, data = l.items()[0]
order = dict(pc=-2, op=-1, stackargs=1, data=2, code=3)
items = sorted(data.items(), key=lambda x: order.get(x[0], 0))
msg = ", ".join("%s=%s" % (k, v) for k, v in items)
out.append("%s: %s" % (name.ljust(15), msg))
click.echo('\n'.join(out))
else:
pecho(client.trace(txhash))
@ethclient.command()
@pass_client
@click.option('--blockhash', '-b', type=BLOCKHASH, default=None,
help='the hash of the block')
@click.option('--txhash', '-t', type=TXHASH, default=None,
help='the hash of one transaction in the block')
def dump(client, blockhash, txhash):
"""Dump the state of a block.
The block must be specified either by its hash or by a transaction included
into the block.
In addition to the result of getblock, this command also yields the state
of every account.
"""
if sum(map(lambda p: p is not None, (blockhash, txhash))) != 1:
raise click.BadParameter('Either --blockhash or --txhash must be '
'specified')
else:
pecho(client.dump(blockhash or txhash))
| jnnk/pyethereum | pyethereum/ethclient.py | Python | mit | 21,070 |
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup (
name='0-core-client',
version='1.1.0-alpha-4',
description='Zero-OS 0-core client',
long_description=long_description,
url='https://github.com/zero-os/0-core',
author='Muhamad Azmy',
author_email='muhamada@greenitglobe.com',
license='Apache 2.0',
namespace_packages=['zeroos'],
packages=find_packages(),
install_requires=['redis>=2.10.5'],
)
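# Installation sketch, using the package name declared above:
#
#     pip install 0-core-client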
| g8os/core0 | client/py-client/setup.py | Python | apache-2.0 | 696 |
#######################################################################
## gengcell.py
##
## Copyright 2011 Elizabeth Yip
##
## This file is part of AnStreetBump.
##
## AnStreetBump is free software: you can redistribute it and/or modify
## it under the terms of the Lesser GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## AnStreetBump is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## Lesser GNU General Public License for more details.
##
## You should have received a copy of the Lesser GNU General Public License
## long with AnStreetBump. If not, see <http://www.gnu.org/licenses/>.
##
#######################################################################
from numpy import zeros, ceil, floor, average, argsort
import math
from util import sortshrink
class gcell:
"""
self.llc is the lower-left corner of the cell.
self.csize is the size. These are vectors.
lind is the index of the cell in the list LevelC[level][...].
"""
def __init__(self, level, parent, llc, csize, lind):
self.level = level
self.parent = parent
self.llc = llc
self.csize = csize
self.lind = lind
self.adj = []
self.points = []
self.children = []
def FindCenter(A,points):
"""
Find the center of A[j,:], where j is in points, a subset of
{0,1,..., A.shape[0]-1].
"""
DA = zeros( (len(points),A.shape[1] ) )
center = zeros( (A.shape[1] ) )
for i in range( len(points) ):
DA[i,:]=A[points[i],:]
for i in range(A.shape[1]):
center[i] = average(DA[:,i])
return center
class cluster:
"""
The cluster is defined by the array self.cells for level at self.level.
"""
def __init__(self,level,cells,A,LevelC):
self.level = level
self.cells = cells
self.points = []
for i in range(len(cells)):
P = LevelC[level][cells[i]]
self.points = self.points + P.points
self.center = FindCenter(A,self.points)
def UpdateCluster(self,level,cells,A,LevelC):
self.cells = self.cells + cells
for i in range(len(cells)):
P = LevelC[level][cells[i]]
self.points = self.points + P.points
self.center = FindCenter(A,self.points)
def FindClusters(A,maxlevel,LevelC):
"""
The first cluster consists of the most populated cell and its neighbors.
To find the next cluster, look at the cells that have not been picked yet.
"""
lp = zeros( (len(LevelC[maxlevel])) ,int)
for i in range(len(LevelC[maxlevel])):
lp[i]= len( LevelC[maxlevel][i].points )
slp = argsort(lp)
cls = []
while sum(lp) > 0 :
cells = []
idd = slp[len(slp) - 1]
P=LevelC[maxlevel][idd]
for i in P.adj:
if lp[i] > 0 :
cells.append(i)
lp[i] = 0
cls.append(cluster(maxlevel,cells,A,LevelC))
slp = argsort(lp)
return cls
def BinaryNdim(ndim):
"""
returns the vertices of a unit cube of dimension ndim with
the lower left corner at the origin.
"""
t = zeros( (2**ndim, ndim) )
B = 2**ndim
for i in range(B):
s = str(bin(B+i))
ls = len(s)-1
for j in range(ndim):
t[i,j]=s[ls - j]
return t
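# For example, BinaryNdim(2) returns one vertex of the unit square per row:
# [0, 0], [1, 0], [0, 1], [1, 1].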
def binaryeval(iv):
" evaluate a binary number where the digits are stored in iv "
ind = 0
for i in range(iv.shape[0]):
ind = ind + iv[i]*2**i
return int(ind)
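# The digits in iv are little-endian, e.g. binaryeval(numpy.array([1, 0, 1])) == 5.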
def Branches(P,A,maxlevel,LevelC, asize = -1):
"""
P is the parent node of the tree,
A is input data for the tree
maxlevel is the 'leave' level of the tree
LevelC is a list of cells at a certain level
Default is to cut all sides by half.
If asize is set, the side is only cut if it is larger than asize.
"""
if P.level == maxlevel :
return
ndim = A.shape[1]
level = P.level + 1
csize = zeros( (ndim) )
if asize < 0 :
csize = P.csize / 2.
else :
for i in range(ndim):
if P.csize[i] > asize :
csize[i] = P.csize[i] / 2
else :
csize[i] = P.csize[i]
llc = P.llc
t = zeros( (2**ndim),int)
ic = 0
for i in range(len(P.points)) :
j = P.points[i]
v = A[j,:] - llc
iv = floor( v / csize)
it = binaryeval(iv)
if t[it] == 0 :
rv =iv * csize + llc
parent = P
iv = iv * (2**(maxlevel-level))
lind = len(LevelC[level])
C = gcell(level, parent, rv, csize, lind)
LevelC[level].append(C)
P.children.append(C)
ic = ic + 1
t[it] = ic
P.children[t[it]-1].points.append(j)
"""
recursive
"""
for i in range(len(P.children)):
    # pass asize down so the cut rule is applied consistently at every level
    Branches(P.children[i], A, maxlevel, LevelC, asize)
def GenPjlist(L1,Pi,LevelC):
"""
L1 is the parent level pf Pi.
Pj_list is a list of cells at the same level as Pi.
Each of these cells either have the same parent as Pi or has parent
adjacent to the parent of Pi
"""
Pia = Pi.parent
Pj_parent=[]
Pj_parent.append(Pia.lind)
for i in Pia.adj:
Pj_parent.append(i)
Pj_listT = []
for i in Pj_parent:
Pja = LevelC[L1][i]
for j in Pja.children:
Pj_listT.append(j.lind)
Pj_list = sortshrink(Pj_listT)
return Pj_list
def Adj(Pi,Pj,TC):
"""
Test whether Pi and Pj are adjacent
"""
A = False
if Pi.parent == Pj.parent:
    A = True
else:
    # Adjacent cells share at least one corner: compare every corner of
    # Pi against every corner of Pj (note Lj, not Li, in the inner loop).
    Li = list(Pi.llc + TC*Pi.csize)
    Lj = list(Pj.llc + TC*Pj.csize)
    for i in range(len(Li)):
        for j in range(len(Lj)):
            if max(abs(Li[i] - Lj[j])) < 1.e-6:
                return True
return A
def Adjacency(LevelC,maxlevel,TC):
"""
Generate the adjacency list for all the levels.
When considering cell Pi, only look at Pj such that either Pi and Pj have
the same parent, or their parents are adjacent.
"""
P = LevelC[0][0]
P.adj.append(0)
for i in range(len(P.children)):
Li = P.children[i].lind
for j in range(i+1,len(P.children)):
Lj = P.children[j].lind
P.children[i].adj.append(Lj)
P.children[j].adj.append(Li)
for L in range(2,maxlevel+1):
L1 = L - 1
for i in range(len(LevelC[L])):
Pi = LevelC[L][i]
Pj_list = GenPjlist(L1,Pi,LevelC)
for j in Pj_list :
Pj = LevelC[L][j]
if Adj(Pi,Pj,TC) :
Pi.adj.append(j)
def BinTree(A,asize,eps,bincut=True):
"""
Generalized Binary Tree
"""
ndim = A.shape[1]
TC = BinaryNdim(ndim)
csize = zeros( (ndim) )
llc = zeros( (ndim) )
for i in range(ndim):
llc[i] = min(A[:,i]) - eps
csize[i] = max(A[:,i]) + eps - llc[i]
npart = max(max(csize)/asize,1)
maxlevel = int(ceil(math.log(npart,2)))
LevelC = []
for i in range(maxlevel + 1):
    LevelC.append([])
level = 0
parent = None
lind = 0
P = gcell(level, parent, llc, csize, lind)
for i in range(A.shape[0]):
P.points.append(i)
LevelC[0].append(P)
if bincut :
Branches(P,A,maxlevel,LevelC)
else :
Branches(P,A,maxlevel,LevelC,asize)
Adjacency(LevelC,maxlevel,TC)
return maxlevel,LevelC
def points2cell(LevelC,level,N):
"""
Return the bin number which the point reside
"""
pt2c= zeros( (N), int)
for i in range( len(LevelC[level]) ):
P = LevelC[level][i]
for j in range( len(P.points) ):
k = P.points[j]
pt2c[k] = i
return pt2c
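# End-to-end sketch (hypothetical data): bin 2-D points into a tree, then map
# each point back to its leaf cell.
#
#     from numpy import random
#     A = random.rand(100, 2)
#     maxlevel, LevelC = BinTree(A, asize=0.25, eps=1.e-6)
#     pt2c = points2cell(LevelC, maxlevel, A.shape[0])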
| elyip/AnStreetBump | SRC/gengcell.py | Python | gpl-3.0 | 8,444 |
#!/usr/bin/env python
import sys
import yaml
import datetime
def main(dbfile, action, stamp):
"""Update a YAML file of build timestamps
Args:
dbfile: YAML file name
action: 'add', 'test', or 'rm'
stamp: name of stamp
"""
with open(dbfile, 'r') as f:
data = yaml.safe_load(f)
if data is None:
data = {}
if action == 'add':
data[stamp] = str(datetime.datetime.now())
elif action == 'rm':
if stamp in data:
del data[stamp]
elif action == 'test':
if stamp in data:
print("{0}: {1}".format(stamp, data[stamp]))
sys.exit(0)
else:
print("{0}: no stamp".format(stamp))
sys.exit(1)
with open(dbfile, 'w') as f:
f.write(yaml.dump(data, default_flow_style=False))
if __name__ == "__main__":
main(*sys.argv[1:])
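# CLI sketch (the file and stamp names are placeholders):
#
#     python check_stamp.py stamps.yml add openblas
#     python check_stamp.py stamps.yml test openblas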
| cornell-cs5220-f15/totient-pkg | configs/check_stamp.py | Python | mit | 890 |
from __future__ import print_function, division
from itertools import product
from sympy.core.sympify import (_sympify, sympify, converter,
SympifyError)
from sympy.core.basic import Basic
from sympy.core.expr import Expr
from sympy.core.singleton import Singleton, S
from sympy.core.evalf import EvalfMixin
from sympy.core.numbers import Float
from sympy.core.compatibility import (iterable, with_metaclass,
ordered, range, PY3)
from sympy.core.evaluate import global_evaluate
from sympy.core.function import FunctionClass
from sympy.core.mul import Mul
from sympy.core.relational import Eq
from sympy.core.symbol import Symbol, Dummy
from sympy.sets.contains import Contains
from sympy.utilities.misc import func_name, filldedent
from mpmath import mpi, mpf
from sympy.logic.boolalg import And, Or, Not, true, false
from sympy.utilities import subsets
class Set(Basic):
"""
The base class for any kind of set.
This is not meant to be used directly as a container of items. It does not
behave like the builtin ``set``; see :class:`FiniteSet` for that.
Real intervals are represented by the :class:`Interval` class and unions of
sets by the :class:`Union` class. The empty set is represented by the
:class:`EmptySet` class and available as a singleton as ``S.EmptySet``.
"""
is_number = False
is_iterable = False
is_interval = False
is_FiniteSet = False
is_Interval = False
is_ProductSet = False
is_Union = False
is_Intersection = None
is_EmptySet = None
is_UniversalSet = None
is_Complement = None
is_ComplexRegion = False
@staticmethod
def _infimum_key(expr):
"""
Return infimum (if possible) else S.Infinity.
"""
try:
infimum = expr.inf
assert infimum.is_comparable
except (NotImplementedError,
AttributeError, AssertionError, ValueError):
infimum = S.Infinity
return infimum
def union(self, other):
"""
Returns the union of 'self' and 'other'.
Examples
========
As a shortcut it is possible to use the '+' operator:
>>> from sympy import Interval, FiniteSet
>>> Interval(0, 1).union(Interval(2, 3))
[0, 1] U [2, 3]
>>> Interval(0, 1) + Interval(2, 3)
[0, 1] U [2, 3]
>>> Interval(1, 2, True, True) + FiniteSet(2, 3)
(1, 2] U {3}
Similarly it is possible to use the '-' operator for set differences:
>>> Interval(0, 2) - Interval(0, 1)
(1, 2]
>>> Interval(1, 3) - FiniteSet(2)
[1, 2) U (2, 3]
"""
return Union(self, other)
def intersect(self, other):
"""
Returns the intersection of 'self' and 'other'.
>>> from sympy import Interval
>>> Interval(1, 3).intersect(Interval(1, 2))
[1, 2]
>>> from sympy import imageset, Lambda, symbols, S
>>> n, m = symbols('n m')
>>> a = imageset(Lambda(n, 2*n), S.Integers)
>>> a.intersect(imageset(Lambda(m, 2*m + 1), S.Integers))
EmptySet()
"""
return Intersection(self, other)
def intersection(self, other):
"""
Alias for :meth:`intersect()`
"""
return self.intersect(other)
def _intersect(self, other):
"""
This function should only be used internally
self._intersect(other) returns a new, intersected set if self knows how
to intersect itself with other, otherwise it returns ``None``
When making a new set class you can be assured that other will not
be a :class:`Union`, :class:`FiniteSet`, or :class:`EmptySet`
Used within the :class:`Intersection` class
"""
return None
def is_disjoint(self, other):
"""
Returns True if 'self' and 'other' are disjoint
Examples
========
>>> from sympy import Interval
>>> Interval(0, 2).is_disjoint(Interval(1, 2))
False
>>> Interval(0, 2).is_disjoint(Interval(3, 4))
True
References
==========
.. [1] http://en.wikipedia.org/wiki/Disjoint_sets
"""
return self.intersect(other) == S.EmptySet
def isdisjoint(self, other):
"""
Alias for :meth:`is_disjoint()`
"""
return self.is_disjoint(other)
def _union(self, other):
"""
This function should only be used internally
self._union(other) returns a new, joined set if self knows how
to join itself with other, otherwise it returns ``None``.
It may also return a python set of SymPy Sets if they are somehow
simpler. If it does this it must be idempotent, i.e. the sets returned
must return ``None`` when _union'ed with each other.
Used within the :class:`Union` class
"""
return None
def complement(self, universe):
"""
The complement of 'self' w.r.t the given the universe.
Examples
========
>>> from sympy import Interval, S
>>> Interval(0, 1).complement(S.Reals)
(-oo, 0) U (1, oo)
>>> Interval(0, 1).complement(S.UniversalSet)
UniversalSet() \ [0, 1]
"""
return Complement(universe, self)
def _complement(self, other):
# this behaves as other - self
if isinstance(other, ProductSet):
# For each set consider it or it's complement
# We need at least one of the sets to be complemented
# Consider all 2^n combinations.
# We can conveniently represent these options easily using a
# ProductSet
# XXX: this doesn't work if the dimensions of the sets aren't the same.
# A - B is essentially the same as A if B has a different
# dimensionality than A
switch_sets = ProductSet(FiniteSet(o, o - s) for s, o in
zip(self.sets, other.sets))
product_sets = (ProductSet(*set) for set in switch_sets)
# Union of all combinations but this one
return Union(p for p in product_sets if p != other)
elif isinstance(other, Interval):
if isinstance(self, Interval) or isinstance(self, FiniteSet):
return Intersection(other, self.complement(S.Reals))
elif isinstance(other, Union):
return Union(o - self for o in other.args)
elif isinstance(other, Complement):
return Complement(other.args[0], Union(other.args[1], self), evaluate=False)
elif isinstance(other, EmptySet):
return S.EmptySet
elif isinstance(other, FiniteSet):
return FiniteSet(*[el for el in other if self.contains(el) != True])
def symmetric_difference(self, other):
return SymmetricDifference(self, other)
def _symmetric_difference(self, other):
return Union(Complement(self, other), Complement(other, self))
@property
def inf(self):
"""
The infimum of 'self'
Examples
========
>>> from sympy import Interval, Union
>>> Interval(0, 1).inf
0
>>> Union(Interval(0, 1), Interval(2, 3)).inf
0
"""
return self._inf
@property
def _inf(self):
raise NotImplementedError("(%s)._inf" % self)
@property
def sup(self):
"""
The supremum of 'self'
Examples
========
>>> from sympy import Interval, Union
>>> Interval(0, 1).sup
1
>>> Union(Interval(0, 1), Interval(2, 3)).sup
3
"""
return self._sup
@property
def _sup(self):
raise NotImplementedError("(%s)._sup" % self)
def contains(self, other):
"""
Returns True if 'other' is contained in 'self' as an element.
As a shortcut it is possible to use the 'in' operator:
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).contains(0.5)
True
>>> 0.5 in Interval(0, 1)
True
"""
other = sympify(other, strict=True)
ret = sympify(self._contains(other))
if ret is None:
ret = Contains(other, self, evaluate=False)
return ret
def _contains(self, other):
raise NotImplementedError("(%s)._contains(%s)" % (self, other))
def is_subset(self, other):
"""
Returns True if 'self' is a subset of 'other'.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 0.5).is_subset(Interval(0, 1))
True
>>> Interval(0, 1).is_subset(Interval(0, 1, left_open=True))
False
"""
if isinstance(other, Set):
return self.intersect(other) == self
else:
raise ValueError("Unknown argument '%s'" % other)
def issubset(self, other):
"""
Alias for :meth:`is_subset()`
"""
return self.is_subset(other)
def is_proper_subset(self, other):
"""
Returns True if 'self' is a proper subset of 'other'.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 0.5).is_proper_subset(Interval(0, 1))
True
>>> Interval(0, 1).is_proper_subset(Interval(0, 1))
False
"""
if isinstance(other, Set):
return self != other and self.is_subset(other)
else:
raise ValueError("Unknown argument '%s'" % other)
def is_superset(self, other):
"""
Returns True if 'self' is a superset of 'other'.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 0.5).is_superset(Interval(0, 1))
False
>>> Interval(0, 1).is_superset(Interval(0, 1, left_open=True))
True
"""
if isinstance(other, Set):
return other.is_subset(self)
else:
raise ValueError("Unknown argument '%s'" % other)
def issuperset(self, other):
"""
Alias for :meth:`is_superset()`
"""
return self.is_superset(other)
def is_proper_superset(self, other):
"""
Returns True if 'self' is a proper superset of 'other'.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).is_proper_superset(Interval(0, 0.5))
True
>>> Interval(0, 1).is_proper_superset(Interval(0, 1))
False
"""
if isinstance(other, Set):
return self != other and self.is_superset(other)
else:
raise ValueError("Unknown argument '%s'" % other)
def _eval_powerset(self):
raise NotImplementedError('Power set not defined for: %s' % self.func)
def powerset(self):
"""
Find the Power set of 'self'.
Examples
========
>>> from sympy import FiniteSet, EmptySet
>>> A = EmptySet()
>>> A.powerset()
{EmptySet()}
>>> A = FiniteSet(1, 2)
>>> a, b, c = FiniteSet(1), FiniteSet(2), FiniteSet(1, 2)
>>> A.powerset() == FiniteSet(a, b, c, EmptySet())
True
References
==========
.. [1] http://en.wikipedia.org/wiki/Power_set
"""
return self._eval_powerset()
@property
def measure(self):
"""
The (Lebesgue) measure of 'self'
Examples
========
>>> from sympy import Interval, Union
>>> Interval(0, 1).measure
1
>>> Union(Interval(0, 1), Interval(2, 3)).measure
2
"""
return self._measure
@property
def boundary(self):
"""
The boundary or frontier of a set
A point x is on the boundary of a set S if
1. x is in the closure of S.
I.e. Every neighborhood of x contains a point in S.
2. x is not in the interior of S.
I.e. There does not exist an open set centered on x contained
entirely within S.
These are the points on the outer rim of S. If S is open then these
points need not actually be contained within S.
For example, the boundary of an interval is its start and end points.
This is true regardless of whether or not the interval is open.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).boundary
{0, 1}
>>> Interval(0, 1, True, False).boundary
{0, 1}
"""
return self._boundary
@property
def is_open(self):
if not Intersection(self, self.boundary):
return True
# We can't confidently claim that an intersection exists
return None
@property
def is_closed(self):
return self.boundary.is_subset(self)
@property
def closure(self):
return self + self.boundary
@property
def interior(self):
return self - self.boundary
@property
def _boundary(self):
raise NotImplementedError()
def _eval_imageset(self, f):
from sympy.sets.fancysets import ImageSet
return ImageSet(f, self)
@property
def _measure(self):
raise NotImplementedError("(%s)._measure" % self)
def __add__(self, other):
return self.union(other)
def __or__(self, other):
return self.union(other)
def __and__(self, other):
return self.intersect(other)
def __mul__(self, other):
return ProductSet(self, other)
def __xor__(self, other):
return SymmetricDifference(self, other)
def __pow__(self, exp):
if not (sympify(exp).is_Integer and exp >= 0):
    raise ValueError("%s: Exponent must be a nonnegative Integer" % exp)
return ProductSet([self]*exp)
def __sub__(self, other):
return Complement(self, other)
def __contains__(self, other):
symb = sympify(self.contains(other))
if not (symb is S.true or symb is S.false):
raise TypeError('contains did not evaluate to a bool: %r' % symb)
return bool(symb)
class ProductSet(Set):
"""
Represents a Cartesian Product of Sets.
Returns a Cartesian product given several sets as either an iterable
or individual arguments.
Can use '*' operator on any sets for convenient shorthand.
Examples
========
>>> from sympy import Interval, FiniteSet, ProductSet
>>> I = Interval(0, 5); S = FiniteSet(1, 2, 3)
>>> ProductSet(I, S)
[0, 5] x {1, 2, 3}
>>> (2, 2) in ProductSet(I, S)
True
>>> Interval(0, 1) * Interval(0, 1) # The unit square
[0, 1] x [0, 1]
>>> coin = FiniteSet('H', 'T')
>>> set(coin**2)
set([(H, H), (H, T), (T, H), (T, T)])
Notes
=====
- Passes most operations down to the argument sets
- Flattens Products of ProductSets
References
==========
.. [1] http://en.wikipedia.org/wiki/Cartesian_product
"""
is_ProductSet = True
def __new__(cls, *sets, **assumptions):
def flatten(arg):
if isinstance(arg, Set):
if arg.is_ProductSet:
return sum(map(flatten, arg.args), [])
else:
return [arg]
elif iterable(arg):
return sum(map(flatten, arg), [])
raise TypeError("Input must be Sets or iterables of Sets")
sets = flatten(list(sets))
if EmptySet() in sets or len(sets) == 0:
return EmptySet()
if len(sets) == 1:
return sets[0]
return Basic.__new__(cls, *sets, **assumptions)
def _eval_Eq(self, other):
if not other.is_ProductSet:
return
if len(self.args) != len(other.args):
return false
return And(*(Eq(x, y) for x, y in zip(self.args, other.args)))
def _contains(self, element):
"""
'in' operator for ProductSets
Examples
========
>>> from sympy import Interval
>>> (2, 3) in Interval(0, 5) * Interval(0, 5)
True
>>> (10, 10) in Interval(0, 5) * Interval(0, 5)
False
Passes operation on to constituent sets
"""
try:
if len(element) != len(self.args):
return false
except TypeError: # maybe element isn't an iterable
return false
return And(*
[set.contains(item) for set, item in zip(self.sets, element)])
def _intersect(self, other):
"""
This function should only be used internally
See Set._intersect for docstring
"""
if not other.is_ProductSet:
return None
if len(other.args) != len(self.args):
return S.EmptySet
return ProductSet(a.intersect(b)
for a, b in zip(self.sets, other.sets))
def _union(self, other):
if not other.is_ProductSet:
return None
if len(other.args) != len(self.args):
return None
if self.args[0] == other.args[0]:
return self.args[0] * Union(ProductSet(self.args[1:]),
ProductSet(other.args[1:]))
if self.args[-1] == other.args[-1]:
return Union(ProductSet(self.args[:-1]),
ProductSet(other.args[:-1])) * self.args[-1]
return None
@property
def sets(self):
return self.args
@property
def _boundary(self):
return Union(ProductSet(b + b.boundary if i != j else b.boundary
for j, b in enumerate(self.sets))
for i, a in enumerate(self.sets))
@property
def is_iterable(self):
return all(set.is_iterable for set in self.sets)
def __iter__(self):
if self.is_iterable:
return product(*self.sets)
else:
raise TypeError("Not all constituent sets are iterable")
@property
def _measure(self):
measure = 1
for set in self.sets:
measure *= set.measure
return measure
def __len__(self):
return Mul(*[len(s) for s in self.args])
def __bool__(self):
return all([bool(s) for s in self.args])
__nonzero__ = __bool__
class Interval(Set, EvalfMixin):
"""
Represents a real interval as a Set.
Usage:
Returns an interval with end points "start" and "end".
For left_open=True (default left_open is False) the interval
will be open on the left. Similarly, for right_open=True the interval
will be open on the right.
Examples
========
>>> from sympy import Symbol, Interval
>>> Interval(0, 1)
[0, 1]
>>> Interval(0, 1, False, True)
[0, 1)
>>> Interval.Ropen(0, 1)
[0, 1)
>>> Interval.Lopen(0, 1)
(0, 1]
>>> Interval.open(0, 1)
(0, 1)
>>> a = Symbol('a', real=True)
>>> Interval(0, a)
[0, a]
Notes
=====
- Only real end points are supported
- Interval(a, b) with a > b will return the empty set
- Use the evalf() method to turn an Interval into an mpmath
'mpi' interval instance
References
==========
.. [1] http://en.wikipedia.org/wiki/Interval_%28mathematics%29
"""
is_Interval = True
def __new__(cls, start, end, left_open=False, right_open=False):
start = _sympify(start)
end = _sympify(end)
left_open = _sympify(left_open)
right_open = _sympify(right_open)
if not all(isinstance(a, (type(true), type(false)))
for a in [left_open, right_open]):
raise NotImplementedError(
"left_open and right_open can have only true/false values, "
"got %s and %s" % (left_open, right_open))
inftys = [S.Infinity, S.NegativeInfinity]
# Only allow real intervals (use symbols with 'is_real=True').
if not all(i.is_real is not False or i in inftys for i in (start, end)):
raise ValueError("Non-real intervals are not supported")
# evaluate if possible
if (end < start) == True:
return S.EmptySet
elif (end - start).is_negative:
return S.EmptySet
if end == start and (left_open or right_open):
return S.EmptySet
if end == start and not (left_open or right_open):
if start == S.Infinity or start == S.NegativeInfinity:
return S.EmptySet
return FiniteSet(end)
# Make sure infinite interval end points are open.
if start == S.NegativeInfinity:
left_open = true
if end == S.Infinity:
right_open = true
return Basic.__new__(cls, start, end, left_open, right_open)
@property
def start(self):
"""
The left end point of 'self'.
This property takes the same value as the 'inf' property.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).start
0
"""
return self._args[0]
_inf = left = start
@classmethod
def open(cls, a, b):
"""Return an interval including neither boundary."""
return cls(a, b, True, True)
@classmethod
def Lopen(cls, a, b):
"""Return an interval not including the left boundary."""
return cls(a, b, True, False)
@classmethod
def Ropen(cls, a, b):
"""Return an interval not including the right boundary."""
return cls(a, b, False, True)
@property
def end(self):
"""
The right end point of 'self'.
This property takes the same value as the 'sup' property.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1).end
1
"""
return self._args[1]
_sup = right = end
@property
def left_open(self):
"""
True if 'self' is left-open.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1, left_open=True).left_open
True
>>> Interval(0, 1, left_open=False).left_open
False
"""
return self._args[2]
@property
def right_open(self):
"""
True if 'self' is right-open.
Examples
========
>>> from sympy import Interval
>>> Interval(0, 1, right_open=True).right_open
True
>>> Interval(0, 1, right_open=False).right_open
False
"""
return self._args[3]
def _intersect(self, other):
"""
This function should only be used internally
See Set._intersect for docstring
"""
# We only know how to intersect with other intervals
if not other.is_Interval:
return None
# handle (-oo, oo)
infty = S.NegativeInfinity, S.Infinity
if self == Interval(*infty):
l, r = self.left, self.right
if l.is_real or l in infty or r.is_real or r in infty:
return other
# We can't intersect [0,3] with [x,6] -- we don't know if x>0 or x<0
if not self._is_comparable(other):
return None
empty = False
if self.start <= other.end and other.start <= self.end:
# Get topology right.
if self.start < other.start:
start = other.start
left_open = other.left_open
elif self.start > other.start:
start = self.start
left_open = self.left_open
else:
start = self.start
left_open = self.left_open or other.left_open
if self.end < other.end:
end = self.end
right_open = self.right_open
elif self.end > other.end:
end = other.end
right_open = other.right_open
else:
end = self.end
right_open = self.right_open or other.right_open
if end - start == 0 and (left_open or right_open):
empty = True
else:
empty = True
if empty:
return S.EmptySet
return Interval(start, end, left_open, right_open)
def _complement(self, other):
if other == S.Reals:
a = Interval(S.NegativeInfinity, self.start,
True, not self.left_open)
b = Interval(self.end, S.Infinity, not self.right_open, True)
return Union(a, b)
if isinstance(other, FiniteSet):
nums = [m for m in other.args if m.is_number]
if nums == []:
return None
return Set._complement(self, other)
def _union(self, other):
"""
This function should only be used internally
See Set._union for docstring
"""
if other.is_UniversalSet:
return S.UniversalSet
if other.is_Interval and self._is_comparable(other):
from sympy.functions.elementary.miscellaneous import Min, Max
# Non-overlapping intervals
end = Min(self.end, other.end)
start = Max(self.start, other.start)
if (end < start or
(end == start and (end not in self and end not in other))):
return None
else:
start = Min(self.start, other.start)
end = Max(self.end, other.end)
left_open = ((self.start != start or self.left_open) and
(other.start != start or other.left_open))
right_open = ((self.end != end or self.right_open) and
(other.end != end or other.right_open))
return Interval(start, end, left_open, right_open)
        # If self has open end points that are contained in other, fill them in.
        # This only applies to finite end points, since an interval never
        # contains oo or -oo.
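        # A hedged illustration: Union(Interval(0, 1, left_open=True),
        # FiniteSet(0)) takes this path and evaluates to the closed [0, 1].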
open_left_in_other_and_finite = (self.left_open and
sympify(other.contains(self.start)) is S.true and
self.start.is_finite)
open_right_in_other_and_finite = (self.right_open and
sympify(other.contains(self.end)) is S.true and
self.end.is_finite)
if open_left_in_other_and_finite or open_right_in_other_and_finite:
# Fill in my end points and return
open_left = self.left_open and self.start not in other
open_right = self.right_open and self.end not in other
new_self = Interval(self.start, self.end, open_left, open_right)
return set((new_self, other))
return None
@property
def _boundary(self):
finite_points = [p for p in (self.start, self.end)
if abs(p) != S.Infinity]
return FiniteSet(*finite_points)
def _contains(self, other):
if not isinstance(other, Expr) or (
other is S.Infinity or
other is S.NegativeInfinity or
other is S.NaN or
other is S.ComplexInfinity) or other.is_real is False:
return false
if self.start is S.NegativeInfinity and self.end is S.Infinity:
            if other.is_real is not None:
return other.is_real
if self.left_open:
expr = other > self.start
else:
expr = other >= self.start
if self.right_open:
expr = And(expr, other < self.end)
else:
expr = And(expr, other <= self.end)
return _sympify(expr)
def _eval_imageset(self, f):
from sympy.functions.elementary.miscellaneous import Min, Max
from sympy.solvers.solveset import solveset
from sympy.core.function import diff, Lambda
from sympy.series import limit
from sympy.calculus.singularities import singularities
# TODO: handle functions with infinitely many solutions (eg, sin, tan)
# TODO: handle multivariate functions
expr = f.expr
if len(expr.free_symbols) > 1 or len(f.variables) != 1:
return
var = f.variables[0]
if expr.is_Piecewise:
result = S.EmptySet
domain_set = self
for (p_expr, p_cond) in expr.args:
if p_cond is true:
intrvl = domain_set
else:
intrvl = p_cond.as_set()
intrvl = Intersection(domain_set, intrvl)
if p_expr.is_Number:
image = FiniteSet(p_expr)
else:
image = imageset(Lambda(var, p_expr), intrvl)
result = Union(result, image)
# remove the part which has been `imaged`
domain_set = Complement(domain_set, intrvl)
if domain_set.is_EmptySet:
break
return result
if not self.start.is_comparable or not self.end.is_comparable:
return
try:
sing = [x for x in singularities(expr, var)
if x.is_real and x in self]
except NotImplementedError:
return
if self.left_open:
_start = limit(expr, var, self.start, dir="+")
elif self.start not in sing:
_start = f(self.start)
if self.right_open:
_end = limit(expr, var, self.end, dir="-")
elif self.end not in sing:
_end = f(self.end)
if len(sing) == 0:
solns = list(solveset(diff(expr, var), var))
extr = [_start, _end] + [f(x) for x in solns
if x.is_real and x in self]
start, end = Min(*extr), Max(*extr)
left_open, right_open = False, False
if _start <= _end:
# the minimum or maximum value can occur simultaneously
# on both the edge of the interval and in some interior
# point
if start == _start and start not in solns:
left_open = self.left_open
if end == _end and end not in solns:
right_open = self.right_open
else:
if start == _end and start not in solns:
left_open = self.right_open
if end == _start and end not in solns:
right_open = self.left_open
return Interval(start, end, left_open, right_open)
else:
return imageset(f, Interval(self.start, sing[0],
self.left_open, True)) + \
Union(*[imageset(f, Interval(sing[i], sing[i + 1], True, True))
for i in range(0, len(sing) - 1)]) + \
imageset(f, Interval(sing[-1], self.end, True, self.right_open))
@property
def _measure(self):
return self.end - self.start
def to_mpi(self, prec=53):
return mpi(mpf(self.start._eval_evalf(prec)),
mpf(self.end._eval_evalf(prec)))
def _eval_evalf(self, prec):
return Interval(self.left._eval_evalf(prec),
self.right._eval_evalf(prec),
left_open=self.left_open, right_open=self.right_open)
def _is_comparable(self, other):
is_comparable = self.start.is_comparable
is_comparable &= self.end.is_comparable
is_comparable &= other.start.is_comparable
is_comparable &= other.end.is_comparable
return is_comparable
@property
def is_left_unbounded(self):
"""Return ``True`` if the left endpoint is negative infinity. """
return self.left is S.NegativeInfinity or self.left == Float("-inf")
@property
def is_right_unbounded(self):
"""Return ``True`` if the right endpoint is positive infinity. """
return self.right is S.Infinity or self.right == Float("+inf")
def as_relational(self, x):
"""Rewrite an interval in terms of inequalities and logic operators."""
x = sympify(x)
if self.right_open:
right = x < self.end
else:
right = x <= self.end
if self.left_open:
left = self.start < x
else:
left = self.start <= x
return And(left, right)
def _eval_Eq(self, other):
if not other.is_Interval:
if (other.is_Union or other.is_Complement or
other.is_Intersection or other.is_ProductSet):
return
return false
return And(Eq(self.left, other.left),
Eq(self.right, other.right),
self.left_open == other.left_open,
self.right_open == other.right_open)
class Union(Set, EvalfMixin):
"""
Represents a union of sets as a :class:`Set`.
Examples
========
>>> from sympy import Union, Interval
>>> Union(Interval(1, 2), Interval(3, 4))
[1, 2] U [3, 4]
The Union constructor will always try to merge overlapping intervals,
if possible. For example:
>>> Union(Interval(1, 2), Interval(2, 3))
[1, 3]
See Also
========
Intersection
References
==========
.. [1] http://en.wikipedia.org/wiki/Union_%28set_theory%29
"""
is_Union = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_evaluate[0])
# flatten inputs to merge intersections and iterables
args = list(args)
def flatten(arg):
if isinstance(arg, Set):
if arg.is_Union:
return sum(map(flatten, arg.args), [])
else:
return [arg]
if iterable(arg): # and not isinstance(arg, Set) (implicit)
return sum(map(flatten, arg), [])
raise TypeError("Input must be Sets or iterables of Sets")
args = flatten(args)
# Union of no sets is EmptySet
if len(args) == 0:
return S.EmptySet
# Reduce sets using known rules
if evaluate:
return Union.reduce(args)
args = list(ordered(args, Set._infimum_key))
return Basic.__new__(cls, *args)
@staticmethod
def reduce(args):
"""
Simplify a :class:`Union` using known rules
        We start with global rules like
'Merge all FiniteSets'
Then we iterate through all pairs and ask the constituent sets if they
can simplify themselves with any other constituent
"""
# ===== Global Rules =====
# Merge all finite sets
finite_sets = [x for x in args if x.is_FiniteSet]
if len(finite_sets) > 1:
a = (x for set in finite_sets for x in set)
finite_set = FiniteSet(*a)
args = [finite_set] + [x for x in args if not x.is_FiniteSet]
# ===== Pair-wise Rules =====
# Here we depend on rules built into the constituent sets
args = set(args)
new_args = True
while(new_args):
for s in args:
new_args = False
for t in args - set((s,)):
new_set = s._union(t)
                    # This returns None if s does not know how to merge with
                    # t. Returns the newly merged set otherwise
if new_set is not None:
if not isinstance(new_set, set):
new_set = set((new_set, ))
new_args = (args - set((s, t))).union(new_set)
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return Union(args, evaluate=False)
def _complement(self, universe):
# DeMorgan's Law
return Intersection(s.complement(universe) for s in self.args)
@property
def _inf(self):
        # We use Min so that inf is meaningful in combination with symbolic
        # interval end points.
from sympy.functions.elementary.miscellaneous import Min
return Min(*[set.inf for set in self.args])
@property
def _sup(self):
# We use Max so that sup is meaningful in combination with symbolic
# end points.
from sympy.functions.elementary.miscellaneous import Max
return Max(*[set.sup for set in self.args])
def _contains(self, other):
return Or(*[set.contains(other) for set in self.args])
@property
def _measure(self):
# Measure of a union is the sum of the measures of the sets minus
# the sum of their pairwise intersections plus the sum of their
# triple-wise intersections minus ... etc...
        # Sets is a collection of intersections and a set of elementary
        # sets which make up those intersections (called "sos" for set of sets)
        # An example element of this list might be:
# ( {A,B,C}, A.intersect(B).intersect(C) )
# Start with just elementary sets ( ({A}, A), ({B}, B), ... )
# Then get and subtract ( ({A,B}, (A int B), ... ) while non-zero
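        # A worked illustration (standard inclusion-exclusion) for two args:
        #   measure = m(A) + m(B)          (parity = +1)
        #             - m(A int B)         (parity = -1)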
sets = [(FiniteSet(s), s) for s in self.args]
measure = 0
parity = 1
while sets:
# Add up the measure of these sets and add or subtract it to total
measure += parity * sum(inter.measure for sos, inter in sets)
# For each intersection in sets, compute the intersection with every
# other set not already part of the intersection.
sets = ((sos + FiniteSet(newset), newset.intersect(intersection))
for sos, intersection in sets for newset in self.args
if newset not in sos)
# Clear out sets with no measure
sets = [(sos, inter) for sos, inter in sets if inter.measure != 0]
# Clear out duplicates
sos_list = []
sets_list = []
for set in sets:
if set[0] in sos_list:
continue
else:
sos_list.append(set[0])
sets_list.append(set)
sets = sets_list
# Flip Parity - next time subtract/add if we added/subtracted here
parity *= -1
return measure
@property
def _boundary(self):
def boundary_of_set(i):
""" The boundary of set i minus interior of all other sets """
b = self.args[i].boundary
for j, a in enumerate(self.args):
if j != i:
b = b - a.interior
return b
return Union(map(boundary_of_set, range(len(self.args))))
def _eval_imageset(self, f):
return Union(imageset(f, arg) for arg in self.args)
def as_relational(self, symbol):
"""Rewrite a Union in terms of equalities and logic operators. """
return Or(*[set.as_relational(symbol) for set in self.args])
@property
def is_iterable(self):
return all(arg.is_iterable for arg in self.args)
def _eval_evalf(self, prec):
try:
return Union(set._eval_evalf(prec) for set in self.args)
except Exception:
raise TypeError("Not all sets are evalf-able")
def __iter__(self):
import itertools
# roundrobin recipe taken from itertools documentation:
# https://docs.python.org/2/library/itertools.html#recipes
def roundrobin(*iterables):
"roundrobin('ABC', 'D', 'EF') --> A D E B F C"
# Recipe credited to George Sakkis
pending = len(iterables)
if PY3:
nexts = itertools.cycle(iter(it).__next__ for it in iterables)
else:
nexts = itertools.cycle(iter(it).next for it in iterables)
while pending:
try:
for next in nexts:
yield next()
except StopIteration:
pending -= 1
nexts = itertools.cycle(itertools.islice(nexts, pending))
if all(set.is_iterable for set in self.args):
return roundrobin(*(iter(arg) for arg in self.args))
else:
raise TypeError("Not all constituent sets are iterable")
class Intersection(Set):
"""
Represents an intersection of sets as a :class:`Set`.
Examples
========
>>> from sympy import Intersection, Interval
>>> Intersection(Interval(1, 3), Interval(2, 4))
[2, 3]
We often use the .intersect method
>>> Interval(1,3).intersect(Interval(2,4))
[2, 3]
See Also
========
Union
References
==========
.. [1] http://en.wikipedia.org/wiki/Intersection_%28set_theory%29
"""
is_Intersection = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_evaluate[0])
# flatten inputs to merge intersections and iterables
args = list(args)
def flatten(arg):
if isinstance(arg, Set):
if arg.is_Intersection:
return sum(map(flatten, arg.args), [])
else:
return [arg]
if iterable(arg): # and not isinstance(arg, Set) (implicit)
return sum(map(flatten, arg), [])
raise TypeError("Input must be Sets or iterables of Sets")
args = flatten(args)
if len(args) == 0:
return S.EmptySet
        # args can't be ordered for Partition; see issue #9608
if 'Partition' not in [type(a).__name__ for a in args]:
args = list(ordered(args, Set._infimum_key))
# Reduce sets using known rules
if evaluate:
return Intersection.reduce(args)
return Basic.__new__(cls, *args)
@property
def is_iterable(self):
return any(arg.is_iterable for arg in self.args)
@property
def _inf(self):
raise NotImplementedError()
@property
def _sup(self):
raise NotImplementedError()
def _eval_imageset(self, f):
return Intersection(imageset(f, arg) for arg in self.args)
def _contains(self, other):
return And(*[set.contains(other) for set in self.args])
def __iter__(self):
no_iter = True
for s in self.args:
if s.is_iterable:
no_iter = False
other_sets = set(self.args) - set((s,))
other = Intersection(other_sets, evaluate=False)
for x in s:
c = sympify(other.contains(x))
if c is S.true:
yield x
elif c is S.false:
pass
else:
yield c
if no_iter:
raise ValueError("None of the constituent sets are iterable")
@staticmethod
def _handle_finite_sets(args):
from sympy.core.logic import fuzzy_and, fuzzy_bool
from sympy.core.compatibility import zip_longest
from sympy.utilities.iterables import sift
sifted = sift(args, lambda x: x.is_FiniteSet)
fs_args = sifted.pop(True, [])
if not fs_args:
return
s = fs_args[0]
fs_args = fs_args[1:]
other = sifted.pop(False, [])
res = []
unk = []
for x in s:
c = fuzzy_and(fuzzy_bool(o.contains(x))
for o in fs_args + other)
if c:
res.append(x)
elif c is None:
unk.append(x)
else:
pass # drop arg
res = FiniteSet(
*res, evaluate=False) if res else S.EmptySet
if unk:
symbolic_s_list = [x for x in s if x.has(Symbol)]
non_symbolic_s = s - FiniteSet(
*symbolic_s_list, evaluate=False)
while fs_args:
v = fs_args.pop()
if all(i == j for i, j in zip_longest(
symbolic_s_list,
(x for x in v if x.has(Symbol)))):
# all the symbolic elements of `v` are the same
# as in `s` so remove the non-symbol containing
# expressions from `unk`, since they cannot be
# contained
for x in non_symbolic_s:
if x in unk:
unk.remove(x)
else:
# if only a subset of elements in `s` are
# contained in `v` then remove them from `v`
# and add this as a new arg
contained = [x for x in symbolic_s_list
if sympify(v.contains(x)) is S.true]
if contained != symbolic_s_list:
other.append(
v - FiniteSet(
*contained, evaluate=False))
else:
pass # for coverage
other_sets = Intersection(*other)
if not other_sets:
return S.EmptySet # b/c we use evaluate=False below
res += Intersection(
FiniteSet(*unk),
other_sets, evaluate=False)
return res
@staticmethod
def reduce(args):
"""
Return a simplified intersection by applying rules.
        We start with global rules like
'if any empty sets, return empty set' and 'distribute unions'.
Then we iterate through all pairs and ask the constituent sets if they
can simplify themselves with any other constituent
"""
from sympy.simplify.simplify import clear_coefficients
# ===== Global Rules =====
# If any EmptySets return EmptySet
if any(s.is_EmptySet for s in args):
return S.EmptySet
# Handle Finite sets
rv = Intersection._handle_finite_sets(args)
if rv is not None:
return rv
# If any of the sets are unions, return a Union of Intersections
for s in args:
if s.is_Union:
other_sets = set(args) - set((s,))
if len(other_sets) > 0:
other = Intersection(other_sets)
return Union(Intersection(arg, other) for arg in s.args)
else:
return Union(arg for arg in s.args)
for s in args:
if s.is_Complement:
args.remove(s)
other_sets = args + [s.args[0]]
return Complement(Intersection(*other_sets), s.args[1])
# At this stage we are guaranteed not to have any
# EmptySets, FiniteSets, or Unions in the intersection
# ===== Pair-wise Rules =====
# Here we depend on rules built into the constituent sets
args = set(args)
new_args = True
while(new_args):
for s in args:
new_args = False
for t in args - set((s,)):
new_set = s._intersect(t)
# This returns None if s does not know how to intersect
# with t. Returns the newly intersected set otherwise
if new_set is not None:
new_args = (args - set((s, t))).union(set((new_set, )))
break
if new_args:
args = new_args
break
if len(args) == 1:
return args.pop()
else:
return Intersection(args, evaluate=False)
def as_relational(self, symbol):
"""Rewrite an Intersection in terms of equalities and logic operators"""
return And(*[set.as_relational(symbol) for set in self.args])
class Complement(Set, EvalfMixin):
"""Represents the set difference or relative complement of a set with
another set.
`A - B = \{x \in A| x \\notin B\}`
Examples
========
>>> from sympy import Complement, FiniteSet
>>> Complement(FiniteSet(0, 1, 2), FiniteSet(1))
{0, 2}
See Also
=========
Intersection, Union
References
==========
.. [1] http://mathworld.wolfram.com/ComplementSet.html
"""
is_Complement = True
def __new__(cls, a, b, evaluate=True):
if evaluate:
return Complement.reduce(a, b)
return Basic.__new__(cls, a, b)
@staticmethod
def reduce(A, B):
"""
Simplify a :class:`Complement`.
"""
if B == S.UniversalSet or A.is_subset(B):
return EmptySet()
if isinstance(B, Union):
return Intersection(s.complement(A) for s in B.args)
result = B._complement(A)
        if result is not None:
return result
else:
return Complement(A, B, evaluate=False)
def _contains(self, other):
A = self.args[0]
B = self.args[1]
return And(A.contains(other), Not(B.contains(other)))
class EmptySet(with_metaclass(Singleton, Set)):
"""
Represents the empty set. The empty set is available as a singleton
as S.EmptySet.
Examples
========
>>> from sympy import S, Interval
>>> S.EmptySet
EmptySet()
>>> Interval(1, 2).intersect(S.EmptySet)
EmptySet()
See Also
========
UniversalSet
References
==========
.. [1] http://en.wikipedia.org/wiki/Empty_set
"""
is_EmptySet = True
is_FiniteSet = True
def _intersect(self, other):
return S.EmptySet
@property
def _measure(self):
return 0
def _contains(self, other):
return false
def as_relational(self, symbol):
return false
def __len__(self):
return 0
def _union(self, other):
return other
def __iter__(self):
return iter([])
def _eval_imageset(self, f):
return self
def _eval_powerset(self):
return FiniteSet(self)
@property
def _boundary(self):
return self
def _complement(self, other):
return other
def _symmetric_difference(self, other):
return other
class UniversalSet(with_metaclass(Singleton, Set)):
"""
Represents the set of all things.
The universal set is available as a singleton as S.UniversalSet
Examples
========
>>> from sympy import S, Interval
>>> S.UniversalSet
UniversalSet()
>>> Interval(1, 2).intersect(S.UniversalSet)
[1, 2]
See Also
========
EmptySet
References
==========
.. [1] http://en.wikipedia.org/wiki/Universal_set
"""
is_UniversalSet = True
def _intersect(self, other):
return other
def _complement(self, other):
return S.EmptySet
def _symmetric_difference(self, other):
return other
@property
def _measure(self):
return S.Infinity
def _contains(self, other):
return true
def as_relational(self, symbol):
return true
def _union(self, other):
return self
@property
def _boundary(self):
return EmptySet()
class FiniteSet(Set, EvalfMixin):
"""
Represents a finite set of discrete numbers
Examples
========
>>> from sympy import FiniteSet
>>> FiniteSet(1, 2, 3, 4)
{1, 2, 3, 4}
>>> 3 in FiniteSet(1, 2, 3, 4)
True
>>> members = [1, 2, 3, 4]
>>> FiniteSet(*members)
{1, 2, 3, 4}
References
==========
.. [1] http://en.wikipedia.org/wiki/Finite_set
"""
is_FiniteSet = True
is_iterable = True
def __new__(cls, *args, **kwargs):
evaluate = kwargs.get('evaluate', global_evaluate[0])
if evaluate:
args = list(map(sympify, args))
if len(args) == 0:
return EmptySet()
else:
args = list(map(sympify, args))
args = list(ordered(frozenset(tuple(args)), Set._infimum_key))
obj = Basic.__new__(cls, *args)
obj._elements = frozenset(args)
return obj
def _eval_Eq(self, other):
if not other.is_FiniteSet:
if (other.is_Union or other.is_Complement or
other.is_Intersection or other.is_ProductSet):
return
return false
if len(self) != len(other):
return false
return And(*(Eq(x, y) for x, y in zip(self.args, other.args)))
def __iter__(self):
return iter(self.args)
def _intersect(self, other):
"""
This function should only be used internally
See Set._intersect for docstring
"""
if isinstance(other, self.__class__):
return self.__class__(*(self._elements & other._elements))
return self.__class__(*[el for el in self if el in other])
def _complement(self, other):
if isinstance(other, Interval):
nums = sorted(m for m in self.args if m.is_number)
if other == S.Reals and nums != []:
syms = [m for m in self.args if m.is_Symbol]
# Reals cannot contain elements other than numbers and symbols.
intervals = [] # Build up a list of intervals between the elements
intervals += [Interval(S.NegativeInfinity, nums[0], True, True)]
for a, b in zip(nums[:-1], nums[1:]):
intervals.append(Interval(a, b, True, True)) # both open
intervals.append(Interval(nums[-1], S.Infinity, True, True))
if syms != []:
return Complement(Union(intervals, evaluate=False),
FiniteSet(*syms), evaluate=False)
else:
return Union(intervals, evaluate=False)
elif nums == []:
return None
elif isinstance(other, FiniteSet):
unk = []
for i in self:
c = sympify(other.contains(i))
if c is not S.true and c is not S.false:
unk.append(i)
unk = FiniteSet(*unk)
if unk == self:
return
not_true = []
for i in other:
c = sympify(self.contains(i))
if c is not S.true:
not_true.append(i)
return Complement(FiniteSet(*not_true), unk)
return Set._complement(self, other)
def _union(self, other):
"""
This function should only be used internally
See Set._union for docstring
"""
if other.is_FiniteSet:
return FiniteSet(*(self._elements | other._elements))
# If other set contains one of my elements, remove it from myself
if any(sympify(other.contains(x)) is S.true for x in self):
return set((
FiniteSet(*[x for x in self
if other.contains(x) != True]), other))
return None
def _contains(self, other):
"""
Tests whether an element, other, is in the set.
        Relies on Python's set class. This tests for object equality.
        All inputs are sympified.
Examples
========
>>> from sympy import FiniteSet
>>> 1 in FiniteSet(1, 2)
True
>>> 5 in FiniteSet(1, 2)
False
"""
r = false
for e in self._elements:
t = Eq(e, other, evaluate=True)
if isinstance(t, Eq):
t = t.simplify()
if t == true:
return t
elif t != false:
r = None
return r
def _eval_imageset(self, f):
return FiniteSet(*map(f, self))
@property
def _boundary(self):
return self
@property
def _inf(self):
from sympy.functions.elementary.miscellaneous import Min
return Min(*self)
@property
def _sup(self):
from sympy.functions.elementary.miscellaneous import Max
return Max(*self)
@property
def measure(self):
return 0
def __len__(self):
return len(self.args)
def as_relational(self, symbol):
"""Rewrite a FiniteSet in terms of equalities and logic operators. """
from sympy.core.relational import Eq
return Or(*[Eq(symbol, elem) for elem in self])
def compare(self, other):
return (hash(self) - hash(other))
def _eval_evalf(self, prec):
return FiniteSet(*[elem._eval_evalf(prec) for elem in self])
def _hashable_content(self):
return (self._elements,)
@property
def _sorted_args(self):
return tuple(ordered(self.args, Set._infimum_key))
def _eval_powerset(self):
return self.func(*[self.func(*s) for s in subsets(self.args)])
def __ge__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return other.is_subset(self)
def __gt__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return self.is_proper_superset(other)
def __le__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return self.is_subset(other)
def __lt__(self, other):
if not isinstance(other, Set):
raise TypeError("Invalid comparison of set with %s" % func_name(other))
return self.is_proper_subset(other)
converter[set] = lambda x: FiniteSet(*x)
converter[frozenset] = lambda x: FiniteSet(*x)
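# With these converters registered, plain Python sets and frozensets sympify
# to FiniteSet: e.g. sympify({1, 2, 3}) returns FiniteSet(1, 2, 3).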
class SymmetricDifference(Set):
"""Represents the set of elements which are in either of the
sets and not in their intersection.
Examples
========
>>> from sympy import SymmetricDifference, FiniteSet
>>> SymmetricDifference(FiniteSet(1, 2, 3), FiniteSet(3, 4, 5))
{1, 2, 4, 5}
See Also
========
Complement, Union
References
==========
.. [1] http://en.wikipedia.org/wiki/Symmetric_difference
"""
is_SymmetricDifference = True
def __new__(cls, a, b, evaluate=True):
if evaluate:
return SymmetricDifference.reduce(a, b)
return Basic.__new__(cls, a, b)
@staticmethod
def reduce(A, B):
result = B._symmetric_difference(A)
if result is not None:
return result
else:
return SymmetricDifference(A, B, evaluate=False)
def imageset(*args):
r"""
Return an image of the set under transformation ``f``.
If this function can't compute the image, it returns an
unevaluated ImageSet object.
.. math::
{ f(x) | x \in self }
Examples
========
>>> from sympy import S, Interval, Symbol, imageset, sin, Lambda
>>> from sympy.abc import x, y
>>> imageset(x, 2*x, Interval(0, 2))
[0, 4]
>>> imageset(lambda x: 2*x, Interval(0, 2))
[0, 4]
>>> imageset(Lambda(x, sin(x)), Interval(-2, 1))
ImageSet(Lambda(x, sin(x)), [-2, 1])
>>> imageset(sin, Interval(-2, 1))
ImageSet(Lambda(x, sin(x)), [-2, 1])
>>> imageset(lambda y: x + y, Interval(-2, 1))
ImageSet(Lambda(_x, _x + x), [-2, 1])
Expressions applied to the set of Integers are simplified
to show as few negatives as possible and linear expressions
are converted to a canonical form. If this is not desirable
then the unevaluated ImageSet should be used.
>>> imageset(x, -2*x + 5, S.Integers)
ImageSet(Lambda(x, 2*x + 1), Integers())
See Also
========
sympy.sets.fancysets.ImageSet
"""
from sympy.core import Lambda
from sympy.sets.fancysets import ImageSet
from sympy.geometry.util import _uniquely_named_symbol
if len(args) not in (2, 3):
raise ValueError('imageset expects 2 or 3 args, got: %s' % len(args))
set = args[-1]
if not isinstance(set, Set):
name = func_name(set)
raise ValueError(
'last argument should be a set, not %s' % name)
if len(args) == 3:
f = Lambda(*args[:2])
elif len(args) == 2:
f = args[0]
if isinstance(f, Lambda):
pass
elif (
isinstance(f, FunctionClass) # like cos
or func_name(f) == '<lambda>'
):
var = _uniquely_named_symbol(Symbol('x'), f(Dummy()))
expr = f(var)
f = Lambda(var, expr)
else:
raise TypeError(filldedent('''
expecting lambda, Lambda, or FunctionClass, not \'%s\'''' %
func_name(f)))
r = set._eval_imageset(f)
if isinstance(r, ImageSet):
f, set = r.args
if f.variables[0] == f.expr:
return set
if isinstance(set, ImageSet):
if len(set.lamda.variables) == 1 and len(f.variables) == 1:
return imageset(Lambda(set.lamda.variables[0],
f.expr.subs(f.variables[0], set.lamda.expr)),
set.base_set)
if r is not None:
return r
return ImageSet(f, set)
| antepsis/anteplahmacun | sympy/sets/sets.py | Python | bsd-3-clause | 61,756 |
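A quick, hedged check of the set behaviors documented above (assuming a SymPy installation matching this module; printed forms may differ slightly between versions):
from sympy import Interval, Union, Complement, FiniteSet, Symbol
x = Symbol('x', real=True)
# Overlapping intervals merge, as the Union docstring promises.
assert Union(Interval(1, 2), Interval(2, 3)) == Interval(1, 3)
# Intersection keeps the overlapping region, respecting end points.
assert Interval(1, 3).intersect(Interval(2, 4)) == Interval(2, 3)
# Relative complement drops the shared element, per the Complement docstring.
assert Complement(FiniteSet(0, 1, 2), FiniteSet(1)) == FiniteSet(0, 2)
# as_relational rewrites an interval as inequalities, e.g. And(0 < x, x <= 1).
print(Interval(0, 1, left_open=True).as_relational(x))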
if __name__ == "__main__":
with open("input.txt") as f:
res = 0
data = f.read()
flag_basement_first = False
for i, d in enumerate(data):
if d == '(':
res += 1
            elif d == ')':
                res -= 1
            if res == -1 and not flag_basement_first:
print '2. What is the position of the character that causes Santa to first enter the basement? %d' % (i+1)
flag_basement_first = True
print '1. To what floor do the instructions take Santa? %d' % res
| svagionitis/puzzles | adventofcode.com/2015/1/sol1.py | Python | mit | 581 |
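The same puzzle admits a more compact solution; below is a hedged Python 3 rewrite of the loop above (same input.txt; the next() call assumes Santa actually reaches the basement and raises StopIteration otherwise):
from itertools import accumulate
with open("input.txt") as f:
    data = f.read()
steps = [1 if c == '(' else -1 if c == ')' else 0 for c in data]
print('1. To what floor do the instructions take Santa? %d' % sum(steps))
print('2. First basement position: %d' %
      next(i for i, level in enumerate(accumulate(steps), start=1)
           if level == -1))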
# -*- coding: utf-8 -*-
from threading import Thread
from django.http import Http404, HttpResponseServerError, HttpResponse
from django.shortcuts import render_to_response
from django.utils import simplejson
def render_page(template):
'''
Render data to template with given file name.
'''
def _render(func):
def _render_data(request, *args, **kwargs):
data = func(request, *args, **kwargs)
if data['status_code'] == 200:
return render_to_response(template, data)
elif data['status_code'] == 404:
raise Http404
elif data['status_code'] == 503:
                return HttpResponseServerError()
return _render_data
return _render
def require_params_4_ajax(params, method = 'get'):
'''
Make sure given params exist in request.
'''
method = method.upper()
def _require(func):
def require(request, *args, **kwargs):
for param in params:
if (param not in getattr(request, method))\
or getattr(request, method)[param].strip() == '':
error = {
'status_code': 400,
'success': False,
                        'msg': '%s request is missing required parameters; please fill in the complete form' % method
}
return HttpResponse(
simplejson.dumps(error),
mimetype = 'text/json'
)
return HttpResponse(
simplejson.dumps(
func(request, *args, **kwargs)
),
mimetype = 'text/json'
)
return require
return _require
| JokerQyou/Quantic | nook/decorators.py | Python | mit | 1,795 |
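Neither decorator is exercised within this module; the following is a hedged usage sketch (the view names, template name and 'q' parameter are illustrative, not part of the code above):
@render_page('article_detail.html')
def article_detail(request, article_id):
    # The decorated view returns a plain dict; render_page renders it into
    # the template on 200 and raises Http404 on 404.
    return {'status_code': 200, 'article_id': article_id}
@require_params_4_ajax(['q'], method='get')
def search(request):
    # The body runs only when request.GET['q'] exists and is non-empty; the
    # returned dict is serialized to JSON by the decorator.
    return {'status_code': 200, 'success': True, 'results': []}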
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 8 11:47:19 2016
@author: ericgrimson
"""
ans = 0
neg_flag = False
x = int(input("Enter an integer: "))
if x < 0:
neg_flag = True
while ans**2 < x:
ans = ans + 1
if ans**2 == x:
print("Square root of", x, "is", ans)
else:
print(x, "is not a perfect square")
if neg_flag:
print("Just checking... did you mean", -x, "?")
| mkhuthir/learnPython | edX_mitX_6_00_1x/L3/sqrtExample.py | Python | mit | 396 |
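The exhaustive search above takes on the order of sqrt(x) iterations; a hedged alternative sketch of the same perfect-square check via integer bisection (not part of the course file, and it omits the negative-number hint):
x = int(input("Enter an integer: "))
low, high = 0, max(x, 1)
ans = (low + high) // 2
while ans**2 != x and low < high:
    if ans**2 < x:
        low = ans + 1
    else:
        high = ans - 1
    ans = (low + high) // 2
if ans**2 == x:
    print("Square root of", x, "is", ans)
else:
    print(x, "is not a perfect square")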
"""Test event player."""
from collections import namedtuple
from mpf.tests.MpfTestCase import MpfTestCase
class TestEventPlayer(MpfTestCase):
def get_config_file(self):
return 'test_event_player.yaml'
def get_machine_path(self):
return 'tests/machine_files/event_players/'
def test_load_and_play(self):
self.mock_event("event1")
self.mock_event("event2")
self.mock_event("event3")
self.post_event("play_express_single")
self.assertEqual(1, self._events['event1'])
self.assertEqual(0, self._events['event2'])
self.assertEqual(0, self._events['event3'])
self.mock_event("event1")
self.mock_event("event2")
self.mock_event("event3")
self.post_event("play_express_multiple")
self.assertEqual(1, self._events['event1'])
self.assertEqual(1, self._events['event2'])
self.assertEqual(0, self._events['event3'])
self.mock_event("event1")
self.mock_event("event2")
self.mock_event("event3")
self.post_event("play_single_list")
self.assertEqual(1, self._events['event1'])
self.assertEqual(0, self._events['event2'])
self.assertEqual(0, self._events['event3'])
self.mock_event("event1")
self.mock_event("event2")
self.mock_event("event3")
self.post_event("play_single_string")
self.assertEqual(1, self._events['event1'])
self.assertEqual(0, self._events['event2'])
self.assertEqual(0, self._events['event3'])
self.mock_event("event1")
self.mock_event("event2")
self.mock_event("event3")
self.post_event("play_multiple_list")
self.assertEqual(1, self._events['event1'])
self.assertEqual(1, self._events['event2'])
self.assertEqual(1, self._events['event3'])
self.mock_event("event1")
self.mock_event("event2")
self.mock_event("event3")
self.post_event("play_multiple_string")
self.assertEqual(1, self._events['event1'])
self.assertEqual(1, self._events['event2'])
self.assertEqual(1, self._events['event3'])
self.mock_event("event1")
self.mock_event("event2")
self.mock_event("event3")
self.post_event("play_multiple_args")
self.assertEqual(1, self._events['event1'])
self.assertEqual({"a": "b", "priority": 0}, self._last_event_kwargs['event1'])
self.assertEqual(1, self._events['event2'])
self.assertEqual({"priority": 0}, self._last_event_kwargs['event2'])
self.assertEqual(1, self._events['event3'])
self.assertEqual({"a": 1, "b": 2, "priority": 0}, self._last_event_kwargs['event3'])
self.mock_event("event1")
self.post_event("play_multiple_args2")
self.assertEqual({"a": "b", "c": "d", "priority": 0}, self._last_event_kwargs['event1'])
self.mock_event("event1")
self.mock_event("event2")
self.mock_event("event3")
self.machine.shows['test_event_show'].play(loops=0)
self.advance_time_and_run()
self.assertEqual(1, self._events['event1'])
self.assertEqual(1, self._events['event2'])
self.assertEqual(1, self._events['event3'])
def test_condition_and_priority(self):
self.mock_event("condition_ok")
self.mock_event("condition_ok2")
self.mock_event("priority_ok")
self.post_event("test_conditional")
self.assertEventNotCalled("condition_ok")
self.assertEventNotCalled("condition_ok2")
self.assertEventCalled("priority_ok")
arg_obj = namedtuple("Arg", ["abc"])
arg = arg_obj(1)
self.post_event_with_params("test_conditional", arg=arg)
self.assertEventCalled("condition_ok")
self.assertEventCalled("condition_ok2")
def test_handler_condition(self):
# test neither condition passing
self.mock_event("event_always")
self.mock_event("event_if_modeactive")
self.mock_event("event_if_modestopping")
self.assertEqual(0, self._events["event_always"])
self.assertEqual(0, self._events["event_if_modeactive"])
self.assertEqual(0, self._events["event_if_modestopping"])
self.post_event("test_conditional_handlers")
self.assertEqual(1, self._events["event_always"])
self.assertEqual(0, self._events["event_if_modeactive"])
self.assertEqual(0, self._events["event_if_modestopping"])
# test one condition passing
self.mock_event("event_always")
self.mock_event("event_if_modeactive")
self.mock_event("event_if_modestopping")
self.machine.modes["mode1"].start()
self.advance_time_and_run()
self.assertEqual(0, self._events["event_always"])
self.assertEqual(0, self._events["event_if_modeactive"])
self.assertEqual(0, self._events["event_if_modestopping"])
self.post_event("test_conditional_handlers")
self.assertEqual(1, self._events["event_always"])
self.assertEqual(1, self._events["event_if_modeactive"])
self.assertEqual(0, self._events["event_if_modestopping"])
# test both conditions passing
self.mock_event("event_always")
self.mock_event("event_if_modeactive")
self.mock_event("event_if_modestopping")
self.machine.modes["mode1"].stop()
self.assertEqual(0, self._events["event_always"])
self.assertEqual(0, self._events["event_if_modeactive"])
self.assertEqual(0, self._events["event_if_modestopping"])
self.post_event("test_conditional_handlers")
self.assertEqual(1, self._events["event_always"])
self.assertEqual(1, self._events["event_if_modeactive"])
self.assertEqual(1, self._events["event_if_modestopping"])
def test_event_time_delays(self):
self.mock_event('td1')
self.mock_event('td2')
self.post_event('test_time_delay1')
self.advance_time_and_run(1)
self.assertEventNotCalled('td1')
self.advance_time_and_run(1)
self.assertEventCalled('td1')
self.post_event('test_time_delay2')
self.advance_time_and_run(1)
self.assertEventNotCalled('td2')
self.advance_time_and_run(1)
self.assertEventCalled('td2')
def test_mode_condition(self):
self.mock_event('mode1_active')
self.mock_event('mode1_not_active')
self.assertFalse(self.machine.modes["mode1"].active)
self.post_event('test_conditional_mode')
self.assertEventNotCalled('mode1_active')
self.assertEventCalled('mode1_not_active')
self.mock_event('mode1_active')
self.mock_event('mode1_not_active')
self.machine.modes["mode1"].start()
self.advance_time_and_run()
self.post_event('test_conditional_mode')
self.assertTrue(self.machine.modes["mode1"].active)
self.assertEventCalled('mode1_active')
self.assertEventNotCalled('mode1_not_active')
def test_event_placeholder(self):
self.mock_event('my_event_None_123')
self.mock_event('my_event_hello_world_123')
self.post_event("play_placeholder_event")
self.assertEventCalled("my_event_None_123")
self.mock_event('my_event_None_123')
self.assertEventNotCalled("my_event_hello_world_123")
self.machine.variables.set_machine_var("test", "hello_world")
self.post_event("play_placeholder_event")
self.assertEventNotCalled("my_event_None_123")
self.assertEventCalled("my_event_hello_world_123")
def test_arg_placeholder(self):
self.mock_event('loaded_event_int')
self.mock_event('loaded_event_float')
self.mock_event('loaded_event_bool')
self.mock_event('loaded_event_string')
self.mock_event('loaded_event_notype')
self.machine.variables.set_machine_var("testint", 1234)
self.machine.variables.set_machine_var("testfloat", 12.34)
self.machine.variables.set_machine_var("testbool", True)
self.machine.variables.set_machine_var("teststring", "foobar")
self.machine.variables.set_machine_var("testnotype", "barfoo")
self.post_event("play_placeholder_args")
self.assertEqual({"foo": 1234, "priority": 0}, self._last_event_kwargs['loaded_event_int'])
self.assertEqual({"foo": 12.34, "priority": 0}, self._last_event_kwargs['loaded_event_float'])
self.assertEqual({"foo": True, "priority": 0}, self._last_event_kwargs['loaded_event_bool'])
self.assertEqual({"foo": "foobar", "priority": 0}, self._last_event_kwargs['loaded_event_string'])
self.assertEqual({"foo": "barfoo", "priority": 0}, self._last_event_kwargs['loaded_event_notype'])
def test_kwarg_placeholder(self):
self.mock_event('event_always')
self.mock_event('event_foobar')
self.mock_event('event_(name)')
self.post_event_with_params("play_event_with_kwargs", name="foobar")
self.assertEventCalled("event_always")
self.assertEventCalled("event_foobar")
self.assertEventNotCalled("event_(name)")
def test_value_kwarg_evaluation(self):
self.mock_event('event_with_param_kwargs')
self.post_event_with_params("play_event_with_param_kwargs", result="bar", initial=6)
self.assertEqual({"foo": "bar", "maths": 30, "priority": 0}, self._last_event_kwargs["event_with_param_kwargs"])
| missionpinball/mpf | mpf/tests/test_EventPlayer.py | Python | mit | 9,499 |
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs oldisim.
oldisim is a framework to support benchmarks that emulate Online Data-Intensive
(OLDI) workloads, such as web search and social networking. oldisim includes
sample workloads built on top of this framework.
With its default config, oldisim models an example search topology. A user query
is first processed by a front-end server, which eventually fans out the
query to a large number of leaf nodes. The latency is measured at the root of
the tree, and typically increases as the fan-out grows. oldisim reports a
scaling efficiency for a given topology. The scaling efficiency is defined
as queries per second (QPS) at the current fan-out, normalized to QPS at
fan-out 1 at the same (iso, i.e. equal) root latency.
Sample command line:
./pkb.py --benchmarks=oldisim --project='YOUR_PROJECT' --oldisim_num_leaves=4
--oldisim_fanout=1,2,3,4 --oldisim_latency_target=40
--oldisim_latency_metric=avg
The above command will build a tree with one root node and four leaf nodes. The
average latency target is 40ms. The root node will vary the fanout from 1 to 4
and measure the scaling efficiency.
"""
import logging
import re
import time
from perfkitbenchmarker import configs
from perfkitbenchmarker import flags
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import oldisim_dependencies
FLAGS = flags.FLAGS
flags.DEFINE_integer('oldisim_num_leaves', 4, 'number of leaf nodes',
lower_bound=1, upper_bound=64)
flags.DEFINE_list('oldisim_fanout', [],
'a list of fanouts to be tested. '
'a root can connect to a subset of leaf nodes (fanout). '
'the value of fanout has to be smaller than num_leaves.')
flags.DEFINE_enum('oldisim_latency_metric', 'avg',
['avg', '50p', '90p', '95p', '99p', '99.9p'],
'Allowable metrics for end-to-end latency')
flags.DEFINE_float('oldisim_latency_target', '30', 'latency target in ms')
NUM_DRIVERS = 1
NUM_ROOTS = 1
BENCHMARK_NAME = 'oldisim'
BENCHMARK_CONFIG = """
oldisim:
description: >
Run oldisim. Specify the number of leaf
nodes with --oldisim_num_leaves
vm_groups:
default:
vm_spec: *default_single_core
"""
def GetConfig(user_config):
"""Decide number of vms needed to run oldisim."""
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
config['vm_groups']['default']['vm_count'] = (FLAGS.oldisim_num_leaves
+ NUM_DRIVERS + NUM_ROOTS)
return config
def InstallAndBuild(vm):
"""Install and build oldisim on the target vm.
Args:
vm: A vm instance that runs oldisim.
"""
logging.info('prepare oldisim on %s', vm)
vm.Install('oldisim_dependencies')
def Prepare(benchmark_spec):
"""Install and build oldisim on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
leaf_vms = [vm for vm_idx, vm in enumerate(vms)
if vm_idx >= (NUM_DRIVERS + NUM_ROOTS)]
if vms:
vm_util.RunThreaded(InstallAndBuild, vms)
# Launch job on the leaf nodes.
leaf_server_bin = oldisim_dependencies.BinaryPath('LeafNode')
for vm in leaf_vms:
leaf_cmd = '%s --threads=%s' % (leaf_server_bin, vm.num_cpus)
vm.RemoteCommand('%s &> /dev/null &' % leaf_cmd)
def SetupRoot(root_vm, leaf_vms):
"""Connect a root node to a list of leaf nodes.
Args:
root_vm: A root vm instance.
leaf_vms: A list of leaf vm instances.
"""
fanout_args = ' '.join(['--leaf=%s' % i.internal_ip
for i in leaf_vms])
root_server_bin = oldisim_dependencies.BinaryPath('ParentNode')
root_cmd = '%s --threads=%s %s' % (root_server_bin, root_vm.num_cpus,
fanout_args)
logging.info('Root cmdline: %s', root_cmd)
root_vm.RemoteCommand('%s &> /dev/null &' % root_cmd)
def ParseOutput(oldisim_output):
"""Parses the output from oldisim.
Args:
oldisim_output: A string containing the text of oldisim output.
Returns:
A tuple of (peak_qps, peak_lat, target_qps, target_lat).
"""
re_peak = re.compile(r'peak qps = (?P<qps>\S+), latency = (?P<lat>\S+)')
re_target = re.compile(r'measured_qps = (?P<qps>\S+), latency = (?P<lat>\S+)')
for line in oldisim_output.splitlines():
match = re.search(re_peak, line)
if match:
peak_qps = float(match.group('qps'))
peak_lat = float(match.group('lat'))
target_qps = float(peak_qps)
target_lat = float(peak_lat)
continue
match = re.search(re_target, line)
if match:
target_qps = float(match.group('qps'))
target_lat = float(match.group('lat'))
return peak_qps, peak_lat, target_qps, target_lat
def RunLoadTest(benchmark_spec, fanout):
"""Run Loadtest for a given topology.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
fanout: Request is first processed by a root node, which then
fans out to a subset of leaf nodes.
Returns:
A tuple of (peak_qps, peak_lat, target_qps, target_lat).
"""
assert fanout <= FLAGS.oldisim_num_leaves, (
'The number of leaf nodes a root node connected to is defined by the '
'flag fanout. Its current value %s is bigger than the total number of '
'leaves %s.' % (fanout, FLAGS.oldisim_num_leaves))
vms = benchmark_spec.vms
driver_vms = []
root_vms = []
leaf_vms = []
for vm_index, vm in enumerate(vms):
if vm_index < NUM_DRIVERS:
driver_vms.append(vm)
elif vm_index < (NUM_DRIVERS + NUM_ROOTS):
root_vms.append(vm)
else:
leaf_vms.append(vm)
leaf_vms = leaf_vms[:fanout]
for root_vm in root_vms:
SetupRoot(root_vm, leaf_vms)
driver_vm = driver_vms[0]
driver_binary = oldisim_dependencies.BinaryPath('DriverNode')
launch_script = oldisim_dependencies.Path('workloads/search/search_qps.sh')
driver_args = ' '.join(['--server=%s' % i.internal_ip
for i in root_vms])
# Make sure server is up.
time.sleep(5)
driver_cmd = '%s -s %s:%s -t 30 -- %s %s --threads=%s --depth=16' % (
launch_script, FLAGS.oldisim_latency_metric, FLAGS.oldisim_latency_target,
driver_binary, driver_args, driver_vm.num_cpus)
logging.info('Driver cmdline: %s', driver_cmd)
stdout, _ = driver_vm.RemoteCommand(driver_cmd, should_log=True)
return ParseOutput(stdout)
def Run(benchmark_spec):
"""Run oldisim on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
results = []
qps_dict = dict()
vms = benchmark_spec.vms
vm = vms[0]
fanout_list = set([1, FLAGS.oldisim_num_leaves])
for fanout in map(int, FLAGS.oldisim_fanout):
if fanout > 1 and fanout < FLAGS.oldisim_num_leaves:
fanout_list.add(fanout)
metadata = {'num_cpus': vm.num_cpus}
metadata.update(vm.GetMachineTypeDict())
for fanout in sorted(fanout_list):
qps = RunLoadTest(benchmark_spec, fanout)[2]
qps_dict[fanout] = qps
if fanout == 1:
base_qps = qps
name = 'Scaling efficiency of %s leaves' % fanout
scaling_efficiency = round(min(qps_dict[fanout] / base_qps, 1), 2)
results.append(sample.Sample(name, scaling_efficiency, '', metadata))
return results
def Cleanup(benchmark_spec): # pylint: disable=unused-argument
"""Cleanup oldisim on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
for vm_index, vm in enumerate(vms):
if vm_index >= NUM_DRIVERS and vm_index < (NUM_DRIVERS + NUM_ROOTS):
vm.RemoteCommand('sudo pkill ParentNode')
elif vm_index >= (NUM_DRIVERS + NUM_ROOTS):
vm.RemoteCommand('sudo pkill LeafNode')
| meteorfox/PerfKitBenchmarker | perfkitbenchmarker/linux_benchmarks/oldisim_benchmark.py | Python | apache-2.0 | 8,628 |
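ParseOutput only understands the two line shapes fixed by its regexes; the snippet below is a hedged illustration that feeds it a synthetic sample (made-up numbers shaped to match re_peak and re_target) and applies the same scaling-efficiency arithmetic as Run():
from perfkitbenchmarker.linux_benchmarks.oldisim_benchmark import ParseOutput
sample = ('peak qps = 1200.0, latency = 25.0\n'
          'measured_qps = 1100.0, latency = 29.5\n')
peak_qps, peak_lat, target_qps, target_lat = ParseOutput(sample)
assert (peak_qps, target_qps) == (1200.0, 1100.0)
# Scaling efficiency as in Run(), with a hypothetical fanout-1 baseline:
base_qps = 1150.0
print(round(min(target_qps / base_qps, 1), 2))  # 0.96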
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the `AutotuneBuffers` rewrite."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.compat import compat
from tensorflow.python.data.experimental.ops import testing
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.platform import test
class InjectPrefetchTest(test_base.DatasetTestBase, parameterized.TestCase):
def _enable_autotune_buffers(self, dataset):
options = dataset_ops.Options()
options.experimental_optimization.autotune_buffers = True
return dataset.with_options(options)
@combinations.generate(test_base.default_test_combinations())
def testParallelMap(self):
dataset = dataset_ops.Dataset.range(100)
dataset = dataset.apply(
testing.assert_next(["ParallelMap", "Prefetch", "FiniteTake"]))
dataset = dataset.map(
lambda x: x + 1, num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset.take(50)
dataset = self._enable_autotune_buffers(dataset)
self.assertDatasetProduces(dataset, range(1, 51))
@combinations.generate(test_base.default_test_combinations())
def testMapAndBatch(self):
dataset = dataset_ops.Dataset.range(100)
dataset = dataset.apply(
testing.assert_next(["MapAndBatch", "Prefetch", "FiniteTake"]))
dataset = dataset.map(
lambda x: x + 1, num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset.batch(10)
dataset = dataset.take(5)
dataset = self._enable_autotune_buffers(dataset)
self.assertDatasetProduces(
dataset, [list(range(i + 1, i + 11)) for i in range(0, 50, 10)])
@combinations.generate(test_base.default_test_combinations())
def testParallelInterleave(self):
dataset = dataset_ops.Dataset.range(100)
parallel_interleave = "ParallelInterleaveV2"
if compat.forward_compatible(2020, 2, 20):
parallel_interleave = "ParallelInterleaveV3"
dataset = dataset.apply(
testing.assert_next([parallel_interleave, "Prefetch", "FiniteTake"]))
dataset = dataset.interleave(
lambda x: dataset_ops.Dataset.from_tensors(x + 1),
num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset.take(50)
dataset = self._enable_autotune_buffers(dataset)
self.assertDatasetProduces(dataset, range(1, 51))
@combinations.generate(test_base.default_test_combinations())
def testChainedParallelDatasets(self):
dataset = dataset_ops.Dataset.range(100)
parallel_interleave = "ParallelInterleaveV2"
if compat.forward_compatible(2020, 2, 20):
parallel_interleave = "ParallelInterleaveV3"
dataset = dataset.apply(
testing.assert_next([
"ParallelMap", "Prefetch", parallel_interleave, "Prefetch",
"MapAndBatch", "Prefetch", "FiniteTake"
]))
dataset = dataset.map(
lambda x: x + 1, num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset.interleave(
lambda x: dataset_ops.Dataset.from_tensors(x + 1),
num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset.map(
lambda x: x + 1, num_parallel_calls=dataset_ops.AUTOTUNE)
dataset = dataset.batch(1)
dataset = dataset.take(50)
dataset = self._enable_autotune_buffers(dataset)
self.assertDatasetProduces(dataset, [[i] for i in range(3, 53)])
@combinations.generate(test_base.default_test_combinations())
def testNoRegularMap(self):
dataset = dataset_ops.Dataset.range(100)
dataset = dataset.apply(testing.assert_next(["Map", "FiniteTake"]))
dataset = dataset.map(lambda x: x + 1).take(50)
dataset = self._enable_autotune_buffers(dataset)
self.assertDatasetProduces(dataset, range(1, 51))
if __name__ == "__main__":
test.main()
| jhseu/tensorflow | tensorflow/python/data/experimental/kernel_tests/optimization/inject_prefetch_test.py | Python | apache-2.0 | 4,584 |
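Outside the test harness, the same rewrite is enabled through the public tf.data options surface; a hedged sketch (assuming a TensorFlow build of roughly this vintage, where autotune_buffers is still experimental):
import tensorflow as tf
dataset = tf.data.Dataset.range(100)
dataset = dataset.map(lambda x: x + 1,
                      num_parallel_calls=tf.data.experimental.AUTOTUNE)
options = tf.data.Options()
# Mirrors _enable_autotune_buffers above: lets tf.data inject and tune buffers.
options.experimental_optimization.autotune_buffers = True
dataset = dataset.with_options(options)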
from __future__ import unicode_literals
import base64
import datetime
import hashlib
import json
import netrc
import os
import re
import socket
import sys
import time
import math
from ..compat import (
compat_cookiejar,
compat_cookies,
compat_etree_fromstring,
compat_getpass,
compat_http_client,
compat_os_name,
compat_str,
compat_urllib_error,
compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
from ..downloader.f4m import remove_encrypted_media
from ..utils import (
NO_DEFAULT,
age_restricted,
bug_reports_message,
clean_html,
compiled_regex_type,
determine_ext,
error_to_compat_str,
ExtractorError,
fix_xml_ampersands,
float_or_none,
int_or_none,
parse_iso8601,
RegexNotFoundError,
sanitize_filename,
sanitized_Request,
unescapeHTML,
unified_strdate,
unified_timestamp,
url_basename,
xpath_element,
xpath_text,
xpath_with_ns,
determine_protocol,
parse_duration,
mimetype2ext,
update_Request,
update_url_query,
parse_m3u8_attributes,
extract_attributes,
parse_codecs,
)
class InfoExtractor(object):
"""Information Extractor class.
Information extractors are the classes that, given a URL, extract
information about the video (or videos) the URL refers to. This
information includes the real video URL, the video title, author and
others. The information is stored in a dictionary which is then
passed to the YoutubeDL. The YoutubeDL processes this
information possibly downloading the video to the file system, among
other possible outcomes.
The type field determines the type of the result.
By far the most common value (and the default if _type is missing) is
"video", which indicates a single video.
For a video, the dictionaries must include the following fields:
id: Video identifier.
title: Video title, unescaped.
Additionally, it must contain either a formats entry or a url one:
formats: A list of dictionaries for each format available, ordered
from worst to best quality.
Potential fields:
* url Mandatory. The URL of the video file
* ext Will be calculated from URL if missing
* format A human-readable description of the format
("mp4 container with h264/opus").
Calculated from the format_id, width, height.
and format_note fields if missing.
* format_id A short description of the format
("mp4_h264_opus" or "19").
Technically optional, but strongly recommended.
* format_note Additional info about the format
("3D" or "DASH video")
* width Width of the video, if known
* height Height of the video, if known
* resolution Textual description of width and height
* tbr Average bitrate of audio and video in KBit/s
* abr Average audio bitrate in KBit/s
* acodec Name of the audio codec in use
* asr Audio sampling rate in Hertz
* vbr Average video bitrate in KBit/s
* fps Frame rate
* vcodec Name of the video codec in use
* container Name of the container format
* filesize The number of bytes, if known in advance
* filesize_approx An estimate for the number of bytes
* player_url SWF Player URL (used for rtmpdump).
* protocol The protocol that will be used for the actual
download, lower-case.
"http", "https", "rtsp", "rtmp", "rtmpe",
"m3u8", "m3u8_native" or "http_dash_segments".
* preference Order number of this format. If this field is
present and not None, the formats get sorted
by this field, regardless of all other values.
-1 for default (order by other properties),
-2 or smaller for less than default.
< -1000 to hide the format (if there is
another one which is strictly better)
* language Language code, e.g. "de" or "en-US".
* language_preference Is this in the language mentioned in
the URL?
10 if it's what the URL is about,
-1 for default (don't know),
-10 otherwise, other values reserved for now.
* quality Order number of the video quality of this
format, irrespective of the file format.
-1 for default (order by other properties),
-2 or smaller for less than default.
* source_preference Order number for this video source
(quality takes higher priority)
-1 for default (order by other properties),
-2 or smaller for less than default.
* http_headers A dictionary of additional HTTP headers
to add to the request.
* stretched_ratio If given and not 1, indicates that the
video's pixels are not square.
width : height ratio as float.
* no_resume The server does not support resuming the
(HTTP or RTMP) download. Boolean.
url: Final video URL.
ext: Video filename extension.
format: The video format, defaults to ext (used for --get-format)
player_url: SWF Player URL (used for rtmpdump).
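    A minimal single-video result might therefore look like this (hypothetical
    values, reusing the illustrative id/title mentioned below):
        {
            'id': '4234987',
            'title': 'Dancing naked mole rats',
            'url': 'https://example.com/video.mp4',
            'ext': 'mp4',
        }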
The following fields are optional:
alt_title: A secondary title of the video.
display_id An alternative identifier for the video, not necessarily
unique, but available before title. Typically, id is
something like "4234987", title "Dancing naked mole rats",
and display_id "dancing-naked-mole-rats"
thumbnails: A list of dictionaries, with the following entries:
* "id" (optional, string) - Thumbnail format ID
* "url"
* "preference" (optional, int) - quality of the image
* "width" (optional, int)
* "height" (optional, int)
* "resolution" (optional, string "{width}x{height"},
deprecated)
* "filesize" (optional, int)
thumbnail: Full URL to a video thumbnail image.
description: Full video description.
uploader: Full name of the video uploader.
license: License name the video is licensed under.
creator: The creator of the video.
release_date: The date (YYYYMMDD) when the video was released.
timestamp: UNIX timestamp of the moment the video became available.
upload_date: Video upload date (YYYYMMDD).
If not explicitly set, calculated from timestamp.
uploader_id: Nickname or id of the video uploader.
uploader_url: Full URL to a personal webpage of the video uploader.
location: Physical location where the video was filmed.
subtitles: The available subtitles as a dictionary in the format
{language: subformats}. "subformats" is a list sorted from
lower to higher preference, each element is a dictionary
with the "ext" entry and one of:
* "data": The subtitles file contents
* "url": A URL pointing to the subtitles file
"ext" will be calculated from URL if missing
automatic_captions: Like 'subtitles', used by the YoutubeIE for
automatically generated captions
duration: Length of the video in seconds, as an integer or float.
view_count: How many users have watched the video on the platform.
like_count: Number of positive ratings of the video
dislike_count: Number of negative ratings of the video
repost_count: Number of reposts of the video
    average_rating: Average rating given by users; the scale used depends on the webpage
comment_count: Number of comments on the video
comments: A list of comments, each with one or more of the following
properties (all but one of text or html optional):
* "author" - human-readable name of the comment author
* "author_id" - user ID of the comment author
* "id" - Comment ID
* "html" - Comment as HTML
* "text" - Plain text of the comment
* "timestamp" - UNIX timestamp of comment
* "parent" - ID of the comment this one is replying to.
Set to "root" to indicate that this is a
comment to the original video.
age_limit: Age restriction for the video, as an integer (years)
webpage_url: The URL to the video webpage, if given to youtube-dl it
should allow to get the same result again. (It will be set
by YoutubeDL if it's missing)
categories: A list of categories that the video falls in, for example
["Sports", "Berlin"]
tags: A list of tags assigned to the video, e.g. ["sweden", "pop music"]
is_live: True, False, or None (=unknown). Whether this video is an
ongoing live stream rather than a fixed-length video.
start_time: Time in seconds where the reproduction should start, as
specified in the URL.
end_time: Time in seconds where the reproduction should end, as
specified in the URL.
The following fields should only be used when the video belongs to some logical
chapter or section:
chapter: Name or title of the chapter the video belongs to.
chapter_number: Number of the chapter the video belongs to, as an integer.
chapter_id: Id of the chapter the video belongs to, as a unicode string.
The following fields should only be used when the video is an episode of some
series or programme:
series: Title of the series or programme the video episode belongs to.
season: Title of the season the video episode belongs to.
season_number: Number of the season the video episode belongs to, as an integer.
season_id: Id of the season the video episode belongs to, as a unicode string.
episode: Title of the video episode. Unlike mandatory video title field,
this field should denote the exact title of the video episode
without any kind of decoration.
episode_number: Number of the video episode within a season, as an integer.
episode_id: Id of the video episode, as a unicode string.
The following fields should only be used when the media is a track or a part of
a music album:
track: Title of the track.
track_number: Number of the track within an album or a disc, as an integer.
track_id: Id of the track (useful in case of custom indexing, e.g. 6.iii),
as a unicode string.
artist: Artist(s) of the track.
genre: Genre(s) of the track.
album: Title of the album the track belongs to.
album_type: Type of the album (e.g. "Demo", "Full-length", "Split", "Compilation", etc).
album_artist: List of all artists who appeared on the album (e.g.
"Ash Borer / Fell Voices" or "Various Artists", useful for splits
and compilations).
disc_number: Number of the disc or other physical medium the track belongs to,
as an integer.
release_year: Year (YYYY) when the album was released.
Unless mentioned otherwise, the fields should be Unicode strings.
Unless mentioned otherwise, None is equivalent to absence of information.
_type "playlist" indicates multiple videos.
There must be a key "entries", which is a list, an iterable, or a PagedList
object, each element of which is a valid dictionary by this specification.
Additionally, playlists can have "title", "description" and "id" attributes
with the same semantics as videos (see above).
_type "multi_video" indicates that there are multiple videos that
form a single show, for examples multiple acts of an opera or TV episode.
It must have an entries key like a playlist and contain all the keys
required for a video at the same time.
_type "url" indicates that the video must be extracted from another
location, possibly by a different extractor. Its only required key is:
"url" - the next URL to extract.
The key "ie_key" can be set to the class name (minus the trailing "IE",
e.g. "Youtube") if the extractor class is known in advance.
Additionally, the dictionary may have any properties of the resolved entity
known in advance, for example "title" if the title of the referred video is
known ahead of time.
_type "url_transparent" entities have the same specification as "url", but
indicate that the given additional information is more precise than the one
associated with the resolved URL.
This is useful when a site employs a video service that hosts the video and
its technical metadata, but that video service does not embed a useful
title, description etc.
Subclasses of this one should re-define the _real_initialize() and
_real_extract() methods and define a _VALID_URL regexp.
In most cases, they should also be added to the list of extractors.
Finally, the _WORKING attribute should be set to False for broken IEs
in order to warn the users and skip the tests.
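As an illustrative sketch (hypothetical URL scheme and field values, not
taken from any real extractor), a minimal subclass's _real_extract might
return a dictionary such as:

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'url': 'https://example.com/media/%s.mp4' % video_id,
        }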
"""
_ready = False
_downloader = None
_WORKING = True
def __init__(self, downloader=None):
"""Constructor. Receives an optional downloader."""
self._ready = False
self.set_downloader(downloader)
@classmethod
def suitable(cls, url):
"""Receives a URL and returns True if suitable for this IE."""
# This does not use has/getattr intentionally - we want to know whether
# we have cached the regexp for *this* class, whereas getattr would also
# match the superclass
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
return cls._VALID_URL_RE.match(url) is not None
@classmethod
def _match_id(cls, url):
if '_VALID_URL_RE' not in cls.__dict__:
cls._VALID_URL_RE = re.compile(cls._VALID_URL)
m = cls._VALID_URL_RE.match(url)
assert m
return m.group('id')
@classmethod
def working(cls):
"""Getter method for _WORKING."""
return cls._WORKING
def initialize(self):
"""Initializes an instance (authentication, etc)."""
if not self._ready:
self._real_initialize()
self._ready = True
def extract(self, url):
"""Extracts URL information and returns it in list of dicts."""
try:
self.initialize()
return self._real_extract(url)
except ExtractorError:
raise
except compat_http_client.IncompleteRead as e:
raise ExtractorError('A network error has occurred.', cause=e, expected=True)
except (KeyError, StopIteration) as e:
raise ExtractorError('An extractor error has occurred.', cause=e)
def set_downloader(self, downloader):
"""Sets the downloader for this IE."""
self._downloader = downloader
def _real_initialize(self):
"""Real initialization process. Redefine in subclasses."""
pass
def _real_extract(self, url):
"""Real extraction process. Redefine in subclasses."""
pass
@classmethod
def ie_key(cls):
"""A string for getting the InfoExtractor with get_info_extractor"""
return compat_str(cls.__name__[:-2])
@property
def IE_NAME(self):
return compat_str(type(self).__name__[:-2])
def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
""" Returns the response handle """
if note is None:
self.report_download_webpage(video_id)
elif note is not False:
if video_id is None:
self.to_screen('%s' % (note,))
else:
self.to_screen('%s: %s' % (video_id, note))
if isinstance(url_or_request, compat_urllib_request.Request):
url_or_request = update_Request(
url_or_request, data=data, headers=headers, query=query)
else:
if query:
url_or_request = update_url_query(url_or_request, query)
if data is not None or headers:
url_or_request = sanitized_Request(url_or_request, data, headers)
try:
return self._downloader.urlopen(url_or_request)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
if errnote is False:
return False
if errnote is None:
errnote = 'Unable to download webpage'
errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
if fatal:
raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
else:
self._downloader.report_warning(errmsg)
return False
def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}):
""" Returns a tuple (page content as string, URL handle) """
# Strip hashes from the URL (#1038)
if isinstance(url_or_request, (compat_str, str)):
url_or_request = url_or_request.partition('#')[0]
urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query)
if urlh is False:
assert not fatal
return False
content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
return (content, urlh)
@staticmethod
def _guess_encoding_from_content(content_type, webpage_bytes):
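# Detection order: charset from the Content-Type header, then an HTML
# <meta charset> declaration in the first 1024 bytes, then a UTF-16 BOM;
# fall back to UTF-8 otherwise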
m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
if m:
encoding = m.group(1)
else:
m = re.search(br'<meta[^>]+charset=[\'"]?([^\'")]+)[ /\'">]',
webpage_bytes[:1024])
if m:
encoding = m.group(1).decode('ascii')
elif webpage_bytes.startswith(b'\xff\xfe'):
encoding = 'utf-16'
else:
encoding = 'utf-8'
return encoding
def _webpage_read_content(self, urlh, url_or_request, video_id, note=None, errnote=None, fatal=True, prefix=None, encoding=None):
content_type = urlh.headers.get('Content-Type', '')
webpage_bytes = urlh.read()
if prefix is not None:
webpage_bytes = prefix + webpage_bytes
if not encoding:
encoding = self._guess_encoding_from_content(content_type, webpage_bytes)
if self._downloader.params.get('dump_intermediate_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
self.to_screen('Dumping request to ' + url)
dump = base64.b64encode(webpage_bytes).decode('ascii')
self._downloader.to_screen(dump)
if self._downloader.params.get('write_pages', False):
try:
url = url_or_request.get_full_url()
except AttributeError:
url = url_or_request
basen = '%s_%s' % (video_id, url)
if len(basen) > 240:
h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest()
basen = basen[:240 - len(h)] + h
raw_filename = basen + '.dump'
filename = sanitize_filename(raw_filename, restricted=True)
self.to_screen('Saving request to ' + filename)
# Working around MAX_PATH limitation on Windows (see
# http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
if compat_os_name == 'nt':
absfilepath = os.path.abspath(filename)
if len(absfilepath) > 259:
filename = '\\\\?\\' + absfilepath
with open(filename, 'wb') as outf:
outf.write(webpage_bytes)
try:
content = webpage_bytes.decode(encoding, 'replace')
except LookupError:
content = webpage_bytes.decode('utf-8', 'replace')
if ('<title>Access to this site is blocked</title>' in content and
'Websense' in content[:512]):
msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
blocked_iframe = self._html_search_regex(
r'<iframe src="([^"]+)"', content,
'Websense information URL', default=None)
if blocked_iframe:
msg += ' Visit %s for more details' % blocked_iframe
raise ExtractorError(msg, expected=True)
if '<title>The URL you requested has been blocked</title>' in content[:512]:
msg = (
'Access to this webpage has been blocked by Indian censorship. '
'Use a VPN or proxy server (with --proxy) to route around it.')
block_msg = self._html_search_regex(
r'</h1><p>(.*?)</p>',
content, 'block message', default=None)
if block_msg:
msg += ' (Message: "%s")' % block_msg.replace('\n', ' ')
raise ExtractorError(msg, expected=True)
return content
def _download_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, tries=1, timeout=5, encoding=None, data=None, headers={}, query={}):
""" Returns the data of the page as a string """
success = False
try_count = 0
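# Retry on IncompleteRead up to `tries` times, sleeping `timeout` seconds
# between attempts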
while success is False:
try:
res = self._download_webpage_handle(url_or_request, video_id, note, errnote, fatal, encoding=encoding, data=data, headers=headers, query=query)
success = True
except compat_http_client.IncompleteRead as e:
try_count += 1
if try_count >= tries:
raise e
self._sleep(timeout, video_id)
if res is False:
return res
else:
content, _ = res
return content
def _download_xml(self, url_or_request, video_id,
note='Downloading XML', errnote='Unable to download XML',
transform_source=None, fatal=True, encoding=None, data=None, headers={}, query={}):
"""Return the xml as an xml.etree.ElementTree.Element"""
xml_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal, encoding=encoding, data=data, headers=headers, query=query)
if xml_string is False:
return xml_string
if transform_source:
xml_string = transform_source(xml_string)
return compat_etree_fromstring(xml_string.encode('utf-8'))
def _download_json(self, url_or_request, video_id,
note='Downloading JSON metadata',
errnote='Unable to download JSON metadata',
transform_source=None,
fatal=True, encoding=None, data=None, headers={}, query={}):
json_string = self._download_webpage(
url_or_request, video_id, note, errnote, fatal=fatal,
encoding=encoding, data=data, headers=headers, query=query)
if (not fatal) and json_string is False:
return None
return self._parse_json(
json_string, video_id, transform_source=transform_source, fatal=fatal)
def _parse_json(self, json_string, video_id, transform_source=None, fatal=True):
if transform_source:
json_string = transform_source(json_string)
try:
return json.loads(json_string)
except ValueError as ve:
errmsg = '%s: Failed to parse JSON ' % video_id
if fatal:
raise ExtractorError(errmsg, cause=ve)
else:
self.report_warning(errmsg + str(ve))
def report_warning(self, msg, video_id=None):
idstr = '' if video_id is None else '%s: ' % video_id
self._downloader.report_warning(
'[%s] %s%s' % (self.IE_NAME, idstr, msg))
def to_screen(self, msg):
"""Print msg to screen, prefixing it with '[ie_name]'"""
self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg))
def report_extraction(self, id_or_name):
"""Report information extraction."""
self.to_screen('%s: Extracting information' % id_or_name)
def report_download_webpage(self, video_id):
"""Report webpage download."""
self.to_screen('%s: Downloading webpage' % video_id)
def report_age_confirmation(self):
"""Report attempt to confirm age."""
self.to_screen('Confirming age')
def report_login(self):
"""Report attempt to log in."""
self.to_screen('Logging in')
@staticmethod
def raise_login_required(msg='This video is only available for registered users'):
raise ExtractorError(
'%s. Use --username and --password or --netrc to provide account credentials.' % msg,
expected=True)
@staticmethod
def raise_geo_restricted(msg='This video is not available from your location due to geo restriction'):
raise ExtractorError(
'%s. You might want to use --proxy to workaround.' % msg,
expected=True)
# Methods for following #608
@staticmethod
def url_result(url, ie=None, video_id=None, video_title=None):
"""Returns a URL that points to a page that should be processed"""
# TODO: ie should be the class used for getting the info
video_info = {'_type': 'url',
'url': url,
'ie_key': ie}
if video_id is not None:
video_info['id'] = video_id
if video_title is not None:
video_info['title'] = video_title
return video_info
@staticmethod
def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None):
"""Returns a playlist"""
video_info = {'_type': 'playlist',
'entries': entries}
if playlist_id:
video_info['id'] = playlist_id
if playlist_title:
video_info['title'] = playlist_title
if playlist_description:
video_info['description'] = playlist_description
return video_info
def _search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Perform a regex search on the given string, using a single or a list of
patterns returning the first matching group.
In case of failure return a default value or raise a WARNING or a
RegexNotFoundError, depending on fatal, specifying the field name.
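Illustrative use (hypothetical pattern and page contents):
    uploader = self._search_regex(
        r'by <b>([^<]+)</b>', webpage, 'uploader', fatal=False)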
"""
if isinstance(pattern, (str, compat_str, compiled_regex_type)):
mobj = re.search(pattern, string, flags)
else:
for p in pattern:
mobj = re.search(p, string, flags)
if mobj:
break
if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty():
_name = '\033[0;34m%s\033[0m' % name
else:
_name = name
if mobj:
if group is None:
# return the first matching group
return next(g for g in mobj.groups() if g is not None)
else:
return mobj.group(group)
elif default is not NO_DEFAULT:
return default
elif fatal:
raise RegexNotFoundError('Unable to extract %s' % _name)
else:
self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message())
return None
def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None):
"""
Like _search_regex, but strips HTML tags and unescapes entities.
"""
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
if res:
return clean_html(res).strip()
else:
return res
def _get_netrc_login_info(self, netrc_machine=None):
username = None
password = None
netrc_machine = netrc_machine or self._NETRC_MACHINE
if self._downloader.params.get('usenetrc', False):
try:
info = netrc.netrc().authenticators(netrc_machine)
if info is not None:
username = info[0]
password = info[2]
else:
raise netrc.NetrcParseError('No authenticators for %s' % netrc_machine)
except (IOError, netrc.NetrcParseError) as err:
self._downloader.report_warning('parsing .netrc: %s' % error_to_compat_str(err))
return (username, password)
def _get_login_info(self):
"""
Get the login info as (username, password)
It will look in the netrc file using the _NETRC_MACHINE value
If there's no info available, return (None, None)
"""
if self._downloader is None:
return (None, None)
username = None
password = None
downloader_params = self._downloader.params
# Attempt to use provided username and password or .netrc data
if downloader_params.get('username') is not None:
username = downloader_params['username']
password = downloader_params['password']
else:
username, password = self._get_netrc_login_info()
return (username, password)
def _get_tfa_info(self, note='two-factor verification code'):
"""
Get the two-factor authentication info
TODO: asking the user will be required for SMS/phone verification;
currently this just uses the command line option.
If there's no info available, return None
"""
if self._downloader is None:
return None
downloader_params = self._downloader.params
if downloader_params.get('twofactor') is not None:
return downloader_params['twofactor']
return compat_getpass('Type %s and press [Return]: ' % note)
# Helper functions for extracting OpenGraph info
@staticmethod
def _og_regexes(prop):
content_re = r'content=(?:"([^"]+?)"|\'([^\']+?)\'|\s*([^\s"\'=<>`]+?))'
property_re = (r'(?:name|property)=(?:\'og:%(prop)s\'|"og:%(prop)s"|\s*og:%(prop)s\b)'
% {'prop': re.escape(prop)})
template = r'<meta[^>]+?%s[^>]+?%s'
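# Cover both attribute orders, e.g. <meta property="og:title" content="...">
# as well as <meta content="..." property="og:title">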
return [
template % (property_re, content_re),
template % (content_re, property_re),
]
@staticmethod
def _meta_regex(prop):
return r'''(?isx)<meta
(?=[^>]+(?:itemprop|name|property|id|http-equiv)=(["\']?)%s\1)
[^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop)
def _og_search_property(self, prop, html, name=None, **kargs):
if not isinstance(prop, (list, tuple)):
prop = [prop]
if name is None:
name = 'OpenGraph %s' % prop[0]
og_regexes = []
for p in prop:
og_regexes.extend(self._og_regexes(p))
escaped = self._search_regex(og_regexes, html, name, flags=re.DOTALL, **kargs)
if escaped is None:
return None
return unescapeHTML(escaped)
def _og_search_thumbnail(self, html, **kargs):
return self._og_search_property('image', html, 'thumbnail URL', fatal=False, **kargs)
def _og_search_description(self, html, **kargs):
return self._og_search_property('description', html, fatal=False, **kargs)
def _og_search_title(self, html, **kargs):
return self._og_search_property('title', html, **kargs)
def _og_search_video_url(self, html, name='video url', secure=True, **kargs):
regexes = self._og_regexes('video') + self._og_regexes('video:url')
if secure:
regexes = self._og_regexes('video:secure_url') + regexes
return self._html_search_regex(regexes, html, name, **kargs)
def _og_search_url(self, html, **kargs):
return self._og_search_property('url', html, **kargs)
def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs):
if not isinstance(name, (list, tuple)):
name = [name]
if display_name is None:
display_name = name[0]
return self._html_search_regex(
[self._meta_regex(n) for n in name],
html, display_name, fatal=fatal, group='content', **kwargs)
def _dc_search_uploader(self, html):
return self._html_search_meta('dc.creator', html, 'uploader')
def _rta_search(self, html):
# See http://www.rtalabel.org/index.php?content=howtofaq#single
if re.search(r'(?ix)<meta\s+name="rating"\s+'
r' content="RTA-5042-1996-1400-1577-RTA"',
html):
return 18
return 0
def _media_rating_search(self, html):
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
rating = self._html_search_meta('rating', html)
if not rating:
return None
RATING_TABLE = {
'safe for kids': 0,
'general': 8,
'14 years': 14,
'mature': 17,
'restricted': 19,
}
return RATING_TABLE.get(rating.lower())
def _family_friendly_search(self, html):
# See http://schema.org/VideoObject
family_friendly = self._html_search_meta('isFamilyFriendly', html)
if not family_friendly:
return None
RATING_TABLE = {
'1': 0,
'true': 0,
'0': 18,
'false': 18,
}
return RATING_TABLE.get(family_friendly.lower())
def _twitter_search_player(self, html):
return self._html_search_meta('twitter:player', html,
'twitter card player')
def _search_json_ld(self, html, video_id, expected_type=None, **kwargs):
json_ld = self._search_regex(
r'(?s)<script[^>]+type=(["\'])application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>',
html, 'JSON-LD', group='json_ld', **kwargs)
default = kwargs.get('default', NO_DEFAULT)
if not json_ld:
return default if default is not NO_DEFAULT else {}
# JSON-LD may be malformed and thus `fatal` should be respected.
# At the same time `default` may be passed that assumes `fatal=False`
# for _search_regex. Let's simulate the same behavior here as well.
fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False
return self._json_ld(json_ld, video_id, fatal=fatal, expected_type=expected_type)
def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None):
if isinstance(json_ld, compat_str):
json_ld = self._parse_json(json_ld, video_id, fatal=fatal)
if not json_ld:
return {}
info = {}
if not isinstance(json_ld, (list, tuple, dict)):
return info
if isinstance(json_ld, dict):
json_ld = [json_ld]
for e in json_ld:
if e.get('@context') == 'http://schema.org':
item_type = e.get('@type')
if expected_type is not None and expected_type != item_type:
return info
if item_type == 'TVEpisode':
info.update({
'episode': unescapeHTML(e.get('name')),
'episode_number': int_or_none(e.get('episodeNumber')),
'description': unescapeHTML(e.get('description')),
})
part_of_season = e.get('partOfSeason')
if isinstance(part_of_season, dict) and part_of_season.get('@type') == 'TVSeason':
info['season_number'] = int_or_none(part_of_season.get('seasonNumber'))
part_of_series = e.get('partOfSeries') or e.get('partOfTVSeries')
if isinstance(part_of_series, dict) and part_of_series.get('@type') == 'TVSeries':
info['series'] = unescapeHTML(part_of_series.get('name'))
elif item_type == 'Article':
info.update({
'timestamp': parse_iso8601(e.get('datePublished')),
'title': unescapeHTML(e.get('headline')),
'description': unescapeHTML(e.get('articleBody')),
})
elif item_type == 'VideoObject':
info.update({
'url': e.get('contentUrl'),
'title': unescapeHTML(e.get('name')),
'description': unescapeHTML(e.get('description')),
'thumbnail': e.get('thumbnailUrl'),
'duration': parse_duration(e.get('duration')),
'timestamp': unified_timestamp(e.get('uploadDate')),
'filesize': float_or_none(e.get('contentSize')),
'tbr': int_or_none(e.get('bitrate')),
'width': int_or_none(e.get('width')),
'height': int_or_none(e.get('height')),
})
break
return dict((k, v) for k, v in info.items() if v is not None)
@staticmethod
def _hidden_inputs(html):
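# Strip HTML comments first so commented-out markup is ignored, then
# collect name/value pairs from hidden (and submit) <input> elements,
# e.g. CSRF tokens that login forms expect to be echoed back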
html = re.sub(r'<!--(?:(?!<!--).)*-->', '', html)
hidden_inputs = {}
for input in re.findall(r'(?i)<input([^>]+)>', html):
if not re.search(r'type=(["\'])(?:hidden|submit)\1', input):
continue
name = re.search(r'(?:name|id)=(["\'])(?P<value>.+?)\1', input)
if not name:
continue
value = re.search(r'value=(["\'])(?P<value>.*?)\1', input)
if not value:
continue
hidden_inputs[name.group('value')] = value.group('value')
return hidden_inputs
def _form_hidden_inputs(self, form_id, html):
form = self._search_regex(
r'(?is)<form[^>]+?id=(["\'])%s\1[^>]*>(?P<form>.+?)</form>' % form_id,
html, '%s form' % form_id, group='form')
return self._hidden_inputs(form)
def _sort_formats(self, formats, field_preference=None):
if not formats:
raise ExtractorError('No video formats found')
for f in formats:
# Automatically determine tbr when missing based on abr and vbr (improves
# formats sorting in some cases)
if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None:
f['tbr'] = f['abr'] + f['vbr']
def _formats_key(f):
# TODO remove the following workaround
from ..utils import determine_ext
if not f.get('ext') and 'url' in f:
f['ext'] = determine_ext(f['url'])
if isinstance(field_preference, (list, tuple)):
return tuple(
f.get(field)
if f.get(field) is not None
else ('' if field == 'format_id' else -1)
for field in field_preference)
preference = f.get('preference')
if preference is None:
preference = 0
if f.get('ext') in ['f4f', 'f4m']: # Not yet supported
preference -= 0.5
protocol = f.get('protocol') or determine_protocol(f)
proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1)
if f.get('vcodec') == 'none': # audio only
preference -= 50
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus']
else:
ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a']
ext_preference = 0
try:
audio_ext_preference = ORDER.index(f['ext'])
except ValueError:
audio_ext_preference = -1
else:
if f.get('acodec') == 'none': # video only
preference -= 40
if self._downloader.params.get('prefer_free_formats'):
ORDER = ['flv', 'mp4', 'webm']
else:
ORDER = ['webm', 'flv', 'mp4']
try:
ext_preference = ORDER.index(f['ext'])
except ValueError:
ext_preference = -1
audio_ext_preference = 0
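# Sort key tuple, most significant field first; absent values fall back
# to -1 (or '' for format_id) so they sort below any real value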
return (
preference,
f.get('language_preference') if f.get('language_preference') is not None else -1,
f.get('quality') if f.get('quality') is not None else -1,
f.get('tbr') if f.get('tbr') is not None else -1,
f.get('filesize') if f.get('filesize') is not None else -1,
f.get('vbr') if f.get('vbr') is not None else -1,
f.get('height') if f.get('height') is not None else -1,
f.get('width') if f.get('width') is not None else -1,
proto_preference,
ext_preference,
f.get('abr') if f.get('abr') is not None else -1,
audio_ext_preference,
f.get('fps') if f.get('fps') is not None else -1,
f.get('filesize_approx') if f.get('filesize_approx') is not None else -1,
f.get('source_preference') if f.get('source_preference') is not None else -1,
f.get('format_id') if f.get('format_id') is not None else '',
)
formats.sort(key=_formats_key)
def _check_formats(self, formats, video_id):
if formats:
formats[:] = filter(
lambda f: self._is_valid_url(
f['url'], video_id,
item='%s video format' % f.get('format_id') if f.get('format_id') else 'video'),
formats)
@staticmethod
def _remove_duplicate_formats(formats):
format_urls = set()
unique_formats = []
for f in formats:
if f['url'] not in format_urls:
format_urls.add(f['url'])
unique_formats.append(f)
formats[:] = unique_formats
def _is_valid_url(self, url, video_id, item='video'):
url = self._proto_relative_url(url, scheme='http:')
# For now assume non HTTP(S) URLs always valid
if not (url.startswith('http://') or url.startswith('https://')):
return True
try:
self._request_webpage(url, video_id, 'Checking %s URL' % item)
return True
except ExtractorError as e:
if isinstance(e.cause, compat_urllib_error.URLError):
self.to_screen(
'%s: %s URL is invalid, skipping' % (video_id, item))
return False
raise
def http_scheme(self):
""" Either "http:" or "https:", depending on the user's preferences """
return (
'http:'
if self._downloader.params.get('prefer_insecure', False)
else 'https:')
def _proto_relative_url(self, url, scheme=None):
if url is None:
return url
if url.startswith('//'):
if scheme is None:
scheme = self.http_scheme()
return scheme + url
else:
return url
def _sleep(self, timeout, video_id, msg_template=None):
if msg_template is None:
msg_template = '%(video_id)s: Waiting for %(timeout)s seconds'
msg = msg_template % {'video_id': video_id, 'timeout': timeout}
self.to_screen(msg)
time.sleep(timeout)
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
manifest = self._download_xml(
manifest_url, video_id, 'Downloading f4m manifest',
'Unable to download f4m manifest',
# Some manifests may be malformed, e.g. prosiebensat1 generated manifests
# (see https://github.com/rg3/youtube-dl/issues/6215#issuecomment-121704244)
transform_source=transform_source,
fatal=fatal)
if manifest is False:
return []
return self._parse_f4m_formats(
manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id)
def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None,
transform_source=lambda s: fix_xml_ampersands(s).strip(),
fatal=True, m3u8_id=None):
# currently youtube-dl cannot decode the playerVerificationChallenge as Akamai uses Adobe Alchemy
akamai_pv = manifest.find('{http://ns.adobe.com/f4m/1.0}pv-2.0')
if akamai_pv is not None and ';' in akamai_pv.text:
playerVerificationChallenge = akamai_pv.text.split(';')[0]
if playerVerificationChallenge.strip() != '':
return []
formats = []
manifest_version = '1.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/1.0}media')
if not media_nodes:
manifest_version = '2.0'
media_nodes = manifest.findall('{http://ns.adobe.com/f4m/2.0}media')
# Remove unsupported DRM protected media from final formats
# rendition (see https://github.com/rg3/youtube-dl/issues/8573).
media_nodes = remove_encrypted_media(media_nodes)
if not media_nodes:
return formats
base_url = xpath_text(
manifest, ['{http://ns.adobe.com/f4m/1.0}baseURL', '{http://ns.adobe.com/f4m/2.0}baseURL'],
'base URL', default=None)
if base_url:
base_url = base_url.strip()
bootstrap_info = xpath_element(
manifest, ['{http://ns.adobe.com/f4m/1.0}bootstrapInfo', '{http://ns.adobe.com/f4m/2.0}bootstrapInfo'],
'bootstrap info', default=None)
for i, media_el in enumerate(media_nodes):
tbr = int_or_none(media_el.attrib.get('bitrate'))
width = int_or_none(media_el.attrib.get('width'))
height = int_or_none(media_el.attrib.get('height'))
format_id = '-'.join(filter(None, [f4m_id, compat_str(i if tbr is None else tbr)]))
# If <bootstrapInfo> is present, the specified f4m is a
# stream-level manifest, and only set-level manifests may refer to
# external resources. See section 11.4 and section 4 of F4M spec
if bootstrap_info is None:
media_url = None
# @href is introduced in 2.0, see section 11.6 of F4M spec
if manifest_version == '2.0':
media_url = media_el.attrib.get('href')
if media_url is None:
media_url = media_el.attrib.get('url')
if not media_url:
continue
manifest_url = (
media_url if media_url.startswith('http://') or media_url.startswith('https://')
else ((base_url or '/'.join(manifest_url.split('/')[:-1])) + '/' + media_url))
# If media_url is itself a f4m manifest do the recursive extraction
# since bitrates in parent manifest (this one) and media_url manifest
# may differ leading to inability to resolve the format by requested
# bitrate in f4m downloader
ext = determine_ext(manifest_url)
if ext == 'f4m':
f4m_formats = self._extract_f4m_formats(
manifest_url, video_id, preference=preference, f4m_id=f4m_id,
transform_source=transform_source, fatal=fatal)
# Sometimes stream-level manifest contains single media entry that
# does not contain any quality metadata (e.g. http://matchtv.ru/#live-player).
# At the same time parent's media entry in set-level manifest may
# contain it. We will copy it from parent in such cases.
if len(f4m_formats) == 1:
f = f4m_formats[0]
f.update({
'tbr': f.get('tbr') or tbr,
'width': f.get('width') or width,
'height': f.get('height') or height,
'format_id': f.get('format_id') if not tbr else format_id,
})
formats.extend(f4m_formats)
continue
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
manifest_url, video_id, 'mp4', preference=preference,
m3u8_id=m3u8_id, fatal=fatal))
continue
formats.append({
'format_id': format_id,
'url': manifest_url,
'ext': 'flv' if bootstrap_info is not None else None,
'tbr': tbr,
'width': width,
'height': height,
'preference': preference,
})
return formats
def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None):
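# A catch-all entry pointing at the playlist URL itself, ranked well below
# any parsed rendition so it is only used when nothing better is available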
return {
'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])),
'url': m3u8_url,
'ext': ext,
'protocol': 'm3u8',
'preference': preference - 100 if preference else -100,
'resolution': 'multiple',
'format_note': 'Quality selection URL',
}
def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None,
entry_protocol='m3u8', preference=None,
m3u8_id=None, note=None, errnote=None,
fatal=True, live=False):
formats = [self._m3u8_meta_format(m3u8_url, ext, preference, m3u8_id)]
format_url = lambda u: (
u
if re.match(r'^https?://', u)
else compat_urlparse.urljoin(m3u8_url, u))
res = self._download_webpage_handle(
m3u8_url, video_id,
note=note or 'Downloading m3u8 information',
errnote=errnote or 'Failed to download m3u8 information',
fatal=fatal)
if res is False:
return []
m3u8_doc, urlh = res
m3u8_url = urlh.geturl()
# We should try extracting formats only from master playlists [1], i.e.
# playlists that describe available qualities. On the other hand media
# playlists [2] should be returned as is since they contain just the media
# without qualities renditions.
# Fortunately, master playlist can be easily distinguished from media
# playlist based on particular tags availability. As of [1, 2] master
# playlist tags MUST NOT appear in a media playlist and vice versa.
# As of [3] #EXT-X-TARGETDURATION tag is REQUIRED for every media playlist
# and MUST NOT appear in master playlist thus we can clearly detect media
# playlist with this criterion.
# 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.4
# 2. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3
# 3. https://tools.ietf.org/html/draft-pantos-http-live-streaming-17#section-4.3.3.1
if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is
return [{
'url': m3u8_url,
'format_id': m3u8_id,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
}]
last_info = None
for line in m3u8_doc.splitlines():
if line.startswith('#EXT-X-STREAM-INF:'):
last_info = parse_m3u8_attributes(line)
elif line.startswith('#EXT-X-MEDIA:'):
media = parse_m3u8_attributes(line)
media_type = media.get('TYPE')
if media_type in ('VIDEO', 'AUDIO'):
media_url = media.get('URI')
if media_url:
format_id = []
for v in (media.get('GROUP-ID'), media.get('NAME')):
if v:
format_id.append(v)
formats.append({
'format_id': '-'.join(format_id),
'url': format_url(media_url),
'language': media.get('LANGUAGE'),
'vcodec': 'none' if media_type == 'AUDIO' else None,
'ext': ext,
'protocol': entry_protocol,
'preference': preference,
})
elif line.startswith('#') or not line.strip():
continue
else:
if last_info is None:
formats.append({'url': format_url(line)})
continue
tbr = int_or_none(last_info.get('AVERAGE-BANDWIDTH') or last_info.get('BANDWIDTH'), scale=1000)
format_id = []
if m3u8_id:
format_id.append(m3u8_id)
# Bandwidth of live streams may differ over time thus making
# format_id unpredictable. So it's better to keep provided
# format_id intact.
if not live:
# Although the specification does not mention the NAME attribute for
# EXT-X-STREAM-INF, it may still sometimes be present
stream_name = last_info.get('NAME')
format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats)))
f = {
'format_id': '-'.join(format_id),
'url': format_url(line.strip()),
'tbr': tbr,
'ext': ext,
'fps': float_or_none(last_info.get('FRAME-RATE')),
'protocol': entry_protocol,
'preference': preference,
}
resolution = last_info.get('RESOLUTION')
if resolution:
width_str, height_str = resolution.split('x')
f['width'] = int(width_str)
f['height'] = int(height_str)
# Unified Streaming Platform
mobj = re.search(
r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url'])
if mobj:
abr, vbr = mobj.groups()
abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000)
f.update({
'vbr': vbr,
'abr': abr,
})
f.update(parse_codecs(last_info.get('CODECS')))
formats.append(f)
last_info = {}
return formats
@staticmethod
def _xpath_ns(path, namespace=None):
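# Qualify each path component with the given XML namespace, e.g.
# _xpath_ns('./head/meta', 'ns') -> './{ns}head/{ns}meta'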
if not namespace:
return path
out = []
for c in path.split('/'):
if not c or c == '.':
out.append(c)
else:
out.append('{%s}%s' % (namespace, c))
return '/'.join(out)
def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
if smil is False:
assert not fatal
return []
namespace = self._parse_smil_namespace(smil)
return self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
smil = self._download_smil(smil_url, video_id, fatal=fatal)
if smil is False:
return {}
return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
def _download_smil(self, smil_url, video_id, fatal=True, transform_source=None):
return self._download_xml(
smil_url, video_id, 'Downloading SMIL file',
'Unable to download SMIL file', fatal=fatal, transform_source=transform_source)
def _parse_smil(self, smil, smil_url, video_id, f4m_params=None):
namespace = self._parse_smil_namespace(smil)
formats = self._parse_smil_formats(
smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
subtitles = self._parse_smil_subtitles(smil, namespace=namespace)
video_id = os.path.splitext(url_basename(smil_url))[0]
title = None
description = None
upload_date = None
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
name = meta.attrib.get('name')
content = meta.attrib.get('content')
if not name or not content:
continue
if not title and name == 'title':
title = content
elif not description and name in ('description', 'abstract'):
description = content
elif not upload_date and name == 'date':
upload_date = unified_strdate(content)
thumbnails = [{
'id': image.get('type'),
'url': image.get('src'),
'width': int_or_none(image.get('width')),
'height': int_or_none(image.get('height')),
} for image in smil.findall(self._xpath_ns('.//image', namespace)) if image.get('src')]
return {
'id': video_id,
'title': title or video_id,
'description': description,
'upload_date': upload_date,
'thumbnails': thumbnails,
'formats': formats,
'subtitles': subtitles,
}
def _parse_smil_namespace(self, smil):
return self._search_regex(
r'(?i)^{([^}]+)?}smil$', smil.tag, 'namespace', default=None)
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
base = smil_url
for meta in smil.findall(self._xpath_ns('./head/meta', namespace)):
b = meta.get('base') or meta.get('httpBase')
if b:
base = b
break
formats = []
rtmp_count = 0
http_count = 0
m3u8_count = 0
srcs = []
media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
for medium in media:
src = medium.get('src')
if not src or src in srcs:
continue
srcs.append(src)
bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
width = int_or_none(medium.get('width'))
height = int_or_none(medium.get('height'))
proto = medium.get('proto')
ext = medium.get('ext')
src_ext = determine_ext(src)
streamer = medium.get('streamer') or base
if proto == 'rtmp' or streamer.startswith('rtmp'):
rtmp_count += 1
formats.append({
'url': streamer,
'play_path': src,
'ext': 'flv',
'format_id': 'rtmp-%d' % (rtmp_count if bitrate is None else bitrate),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
if transform_rtmp_url:
streamer, src = transform_rtmp_url(streamer, src)
formats[-1].update({
'url': streamer,
'play_path': src,
})
continue
src_url = src if src.startswith('http') else compat_urlparse.urljoin(base, src)
src_url = src_url.strip()
if proto == 'm3u8' or src_ext == 'm3u8':
m3u8_formats = self._extract_m3u8_formats(
src_url, video_id, ext or 'mp4', m3u8_id='hls', fatal=False)
if len(m3u8_formats) == 1:
m3u8_count += 1
m3u8_formats[0].update({
'format_id': 'hls-%d' % (m3u8_count if bitrate is None else bitrate),
'tbr': bitrate,
'width': width,
'height': height,
})
formats.extend(m3u8_formats)
continue
if src_ext == 'f4m':
f4m_url = src_url
if not f4m_params:
f4m_params = {
'hdcore': '3.2.0',
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
continue
if src_url.startswith('http') and self._is_valid_url(src, video_id):
http_count += 1
formats.append({
'url': src_url,
'ext': ext or src_ext or 'flv',
'format_id': 'http-%d' % (bitrate or http_count),
'tbr': bitrate,
'filesize': filesize,
'width': width,
'height': height,
})
continue
return formats
def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
urls = []
subtitles = {}
for num, textstream in enumerate(smil.findall(self._xpath_ns('.//textstream', namespace))):
src = textstream.get('src')
if not src or src in urls:
continue
urls.append(src)
ext = textstream.get('ext') or mimetype2ext(textstream.get('type')) or determine_ext(src)
lang = textstream.get('systemLanguage') or textstream.get('systemLanguageName') or textstream.get('lang') or subtitles_lang
subtitles.setdefault(lang, []).append({
'url': src,
'ext': ext,
})
return subtitles
def _extract_xspf_playlist(self, playlist_url, playlist_id, fatal=True):
xspf = self._download_xml(
playlist_url, playlist_id, 'Downloading xspf playlist',
'Unable to download xspf manifest', fatal=fatal)
if xspf is False:
return []
return self._parse_xspf(xspf, playlist_id)
def _parse_xspf(self, playlist, playlist_id):
NS_MAP = {
'xspf': 'http://xspf.org/ns/0/',
's1': 'http://static.streamone.nl/player/ns/0',
}
entries = []
for track in playlist.findall(xpath_with_ns('./xspf:trackList/xspf:track', NS_MAP)):
title = xpath_text(
track, xpath_with_ns('./xspf:title', NS_MAP), 'title', default=playlist_id)
description = xpath_text(
track, xpath_with_ns('./xspf:annotation', NS_MAP), 'description')
thumbnail = xpath_text(
track, xpath_with_ns('./xspf:image', NS_MAP), 'thumbnail')
duration = float_or_none(
xpath_text(track, xpath_with_ns('./xspf:duration', NS_MAP), 'duration'), 1000)
formats = [{
'url': location.text,
'format_id': location.get(xpath_with_ns('s1:label', NS_MAP)),
'width': int_or_none(location.get(xpath_with_ns('s1:width', NS_MAP))),
'height': int_or_none(location.get(xpath_with_ns('s1:height', NS_MAP))),
} for location in track.findall(xpath_with_ns('./xspf:location', NS_MAP))]
self._sort_formats(formats)
entries.append({
'id': playlist_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'duration': duration,
'formats': formats,
})
return entries
def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, formats_dict={}):
res = self._download_webpage_handle(
mpd_url, video_id,
note=note or 'Downloading MPD manifest',
errnote=errnote or 'Failed to download MPD manifest',
fatal=fatal)
if res is False:
return []
mpd, urlh = res
mpd_base_url = re.match(r'https?://.+/', urlh.geturl()).group()
return self._parse_mpd_formats(
compat_etree_fromstring(mpd.encode('utf-8')), mpd_id, mpd_base_url, formats_dict=formats_dict)
def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', formats_dict={}):
"""
Parse formats from MPD manifest.
References:
1. MPEG-DASH Standard, ISO/IEC 23009-1:2014(E),
http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
"""
if mpd_doc.get('type') == 'dynamic':
return []
namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
def _add_ns(path):
return self._xpath_ns(path, namespace)
def is_drm_protected(element):
return element.find(_add_ns('ContentProtection')) is not None
def extract_multisegment_info(element, ms_parent_info):
ms_info = ms_parent_info.copy()
segment_list = element.find(_add_ns('SegmentList'))
if segment_list is not None:
segment_urls_e = segment_list.findall(_add_ns('SegmentURL'))
if segment_urls_e:
ms_info['segment_urls'] = [segment.attrib['media'] for segment in segment_urls_e]
initialization = segment_list.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
else:
segment_template = element.find(_add_ns('SegmentTemplate'))
if segment_template is not None:
start_number = segment_template.get('startNumber')
if start_number:
ms_info['start_number'] = int(start_number)
segment_timeline = segment_template.find(_add_ns('SegmentTimeline'))
if segment_timeline is not None:
s_e = segment_timeline.findall(_add_ns('S'))
if s_e:
ms_info['total_number'] = 0
ms_info['s'] = []
for s in s_e:
r = int(s.get('r', 0))
ms_info['total_number'] += 1 + r
ms_info['s'].append({
't': int(s.get('t', 0)),
# @d is mandatory (see [1, 5.3.9.6.2, Table 17, page 60])
'd': int(s.attrib['d']),
'r': r,
})
else:
timescale = segment_template.get('timescale')
if timescale:
ms_info['timescale'] = int(timescale)
segment_duration = segment_template.get('duration')
if segment_duration:
ms_info['segment_duration'] = int(segment_duration)
media_template = segment_template.get('media')
if media_template:
ms_info['media_template'] = media_template
initialization = segment_template.get('initialization')
if initialization:
ms_info['initialization_url'] = initialization
else:
initialization = segment_template.find(_add_ns('Initialization'))
if initialization is not None:
ms_info['initialization_url'] = initialization.attrib['sourceURL']
return ms_info
mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
formats = []
for period in mpd_doc.findall(_add_ns('Period')):
period_duration = parse_duration(period.get('duration')) or mpd_duration
period_ms_info = extract_multisegment_info(period, {
'start_number': 1,
'timescale': 1,
})
for adaptation_set in period.findall(_add_ns('AdaptationSet')):
if is_drm_protected(adaptation_set):
continue
adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
for representation in adaptation_set.findall(_add_ns('Representation')):
if is_drm_protected(representation):
continue
representation_attrib = adaptation_set.attrib.copy()
representation_attrib.update(representation.attrib)
# According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
mime_type = representation_attrib['mimeType']
content_type = mime_type.split('/')[0]
if content_type == 'text':
# TODO implement WebVTT downloading
pass
elif content_type == 'video' or content_type == 'audio':
base_url = ''
for element in (representation, adaptation_set, period, mpd_doc):
base_url_e = element.find(_add_ns('BaseURL'))
if base_url_e is not None:
base_url = base_url_e.text + base_url
if re.match(r'^https?://', base_url):
break
if mpd_base_url and not re.match(r'^https?://', base_url):
if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
mpd_base_url += '/'
base_url = mpd_base_url + base_url
representation_id = representation_attrib.get('id')
lang = representation_attrib.get('lang')
url_el = representation.find(_add_ns('BaseURL'))
filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
f = {
'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
'url': base_url,
'ext': mimetype2ext(mime_type),
'width': int_or_none(representation_attrib.get('width')),
'height': int_or_none(representation_attrib.get('height')),
'tbr': int_or_none(representation_attrib.get('bandwidth'), 1000),
'asr': int_or_none(representation_attrib.get('audioSamplingRate')),
'fps': int_or_none(representation_attrib.get('frameRate')),
'vcodec': 'none' if content_type == 'audio' else representation_attrib.get('codecs'),
'acodec': 'none' if content_type == 'video' else representation_attrib.get('codecs'),
'language': lang if lang not in ('mul', 'und', 'zxx', 'mis') else None,
'format_note': 'DASH %s' % content_type,
'filesize': filesize,
}
representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
if 'segment_urls' not in representation_ms_info and 'media_template' in representation_ms_info:
if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
segment_duration = float(representation_ms_info['segment_duration']) / float(representation_ms_info['timescale'])
representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
media_template = representation_ms_info['media_template']
media_template = media_template.replace('$RepresentationID$', representation_id)
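# Convert DASH template identifiers into Python %-format specifiers,
# e.g. '$Number$' -> '%(Number)d' and '$Number%05d$' -> '%(Number)05d'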
media_template = re.sub(r'\$(Number|Bandwidth|Time)\$', r'%(\1)d', media_template)
media_template = re.sub(r'\$(Number|Bandwidth|Time)%([^$]+)\$', r'%(\1)\2', media_template)
media_template = media_template.replace('$$', '$')
# As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
# can't be used at the same time
if '%(Number' in media_template:
representation_ms_info['segment_urls'] = [
media_template % {
'Number': segment_number,
'Bandwidth': representation_attrib.get('bandwidth'),
}
for segment_number in range(
representation_ms_info['start_number'],
representation_ms_info['total_number'] + representation_ms_info['start_number'])]
else:
representation_ms_info['segment_urls'] = []
segment_time = 0
def add_segment_url():
representation_ms_info['segment_urls'].append(
media_template % {
'Time': segment_time,
'Bandwidth': representation_attrib.get('bandwidth'),
}
)
for num, s in enumerate(representation_ms_info['s']):
segment_time = s.get('t') or segment_time
add_segment_url()
for r in range(s.get('r', 0)):
segment_time += s['d']
add_segment_url()
segment_time += s['d']
if 'segment_urls' in representation_ms_info:
f.update({
'segment_urls': representation_ms_info['segment_urls'],
'protocol': 'http_dash_segments',
})
if 'initialization_url' in representation_ms_info:
initialization_url = representation_ms_info['initialization_url'].replace('$RepresentationID$', representation_id)
f.update({
'initialization_url': initialization_url,
})
if not f.get('url'):
f['url'] = initialization_url
try:
existing_format = next(
fo for fo in formats
if fo['format_id'] == representation_id)
except StopIteration:
full_info = formats_dict.get(representation_id, {}).copy()
full_info.update(f)
formats.append(full_info)
else:
existing_format.update(f)
else:
self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
return formats
def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8'):
def absolute_url(video_url):
return compat_urlparse.urljoin(base_url, video_url)
def parse_content_type(content_type):
if not content_type:
return {}
ctr = re.search(r'(?P<mimetype>[^/]+/[^;]+)(?:;\s*codecs="?(?P<codecs>[^"]+))?', content_type)
if ctr:
mimetype, codecs = ctr.groups()
f = parse_codecs(codecs)
f['ext'] = mimetype2ext(mimetype)
return f
return {}
def _media_formats(src, cur_media_type):
full_url = absolute_url(src)
if determine_ext(full_url) == 'm3u8':
is_plain_url = False
formats = self._extract_m3u8_formats(
full_url, video_id, ext='mp4',
entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id)
else:
is_plain_url = True
formats = [{
'url': full_url,
'vcodec': 'none' if cur_media_type == 'audio' else None,
}]
return is_plain_url, formats
entries = []
for media_tag, media_type, media_content in re.findall(r'(?s)(<(?P<tag>video|audio)[^>]*>)(.*?)</(?P=tag)>', webpage):
media_info = {
'formats': [],
'subtitles': {},
}
media_attributes = extract_attributes(media_tag)
src = media_attributes.get('src')
if src:
_, formats = _media_formats(src, media_type)
media_info['formats'].extend(formats)
media_info['thumbnail'] = media_attributes.get('poster')
if media_content:
for source_tag in re.findall(r'<source[^>]+>', media_content):
source_attributes = extract_attributes(source_tag)
src = source_attributes.get('src')
if not src:
continue
is_plain_url, formats = _media_formats(src, media_type)
if is_plain_url:
f = parse_content_type(source_attributes.get('type'))
f.update(formats[0])
media_info['formats'].append(f)
else:
media_info['formats'].extend(formats)
for track_tag in re.findall(r'<track[^>]+>', media_content):
track_attributes = extract_attributes(track_tag)
kind = track_attributes.get('kind')
if not kind or kind == 'subtitles':
src = track_attributes.get('src')
if not src:
continue
lang = track_attributes.get('srclang') or track_attributes.get('lang') or track_attributes.get('label')
media_info['subtitles'].setdefault(lang, []).append({
'url': absolute_url(src),
})
if media_info['formats']:
entries.append(media_info)
return entries
def _extract_akamai_formats(self, manifest_url, video_id):
formats = []
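# Akamai serves parallel HDS and HLS renditions; derive each manifest URL
# from the other by swapping the /i/ (HLS) and /z/ (HDS) path segments
# and the manifest filename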
f4m_url = re.sub(r'(https?://.+?)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
formats.extend(self._extract_f4m_formats(
update_url_query(f4m_url, {'hdcore': '3.7.0'}),
video_id, f4m_id='hds', fatal=False))
m3u8_url = re.sub(r'(https?://.+?)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
return formats
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
res = int_or_none(v, **kwargs)
if 'get_attr' in kwargs:
print(getattr(v, kwargs['get_attr']))
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _float(self, v, name, fatal=False, **kwargs):
res = float_or_none(v, **kwargs)
if res is None:
msg = 'Failed to extract %s: Could not parse value %r' % (name, v)
if fatal:
raise ExtractorError(msg)
else:
self._downloader.report_warning(msg)
return res
def _set_cookie(self, domain, name, value, expire_time=None):
cookie = compat_cookiejar.Cookie(
0, name, value, None, None, domain, None,
None, '/', True, False, expire_time, '', None, None, None)
self._downloader.cookiejar.set_cookie(cookie)
def _get_cookies(self, url):
""" Return a compat_cookies.SimpleCookie with the cookies for the url """
req = sanitized_Request(url)
self._downloader.cookiejar.add_cookie_header(req)
return compat_cookies.SimpleCookie(req.get_header('Cookie'))
def get_testcases(self, include_onlymatching=False):
t = getattr(self, '_TEST', None)
if t:
assert not hasattr(self, '_TESTS'), \
'%s has _TEST and _TESTS' % type(self).__name__
tests = [t]
else:
tests = getattr(self, '_TESTS', [])
for t in tests:
if not include_onlymatching and t.get('only_matching', False):
continue
t['name'] = type(self).__name__[:-len('IE')]
yield t
def is_suitable(self, age_limit):
""" Test whether the extractor is generally suitable for the given
age limit (i.e. pornographic sites are not, all others usually are) """
any_restricted = False
for tc in self.get_testcases(include_onlymatching=False):
if tc.get('playlist', []):
tc = tc['playlist'][0]
is_restricted = age_restricted(
tc.get('info_dict', {}).get('age_limit'), age_limit)
if not is_restricted:
return True
any_restricted = any_restricted or is_restricted
return not any_restricted
def extract_subtitles(self, *args, **kwargs):
if (self._downloader.params.get('writesubtitles', False) or
self._downloader.params.get('listsubtitles')):
return self._get_subtitles(*args, **kwargs)
return {}
def _get_subtitles(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
""" Merge subtitle items for one language. Items with duplicated URLs
will be dropped. """
list1_urls = set([item['url'] for item in subtitle_list1])
ret = list(subtitle_list1)
ret.extend([item for item in subtitle_list2 if item['url'] not in list1_urls])
return ret
@classmethod
def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
""" Merge two subtitle dictionaries, language by language. """
ret = dict(subtitle_dict1)
for lang in subtitle_dict2:
ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
return ret
def extract_automatic_captions(self, *args, **kwargs):
if (self._downloader.params.get('writeautomaticsub', False) or
self._downloader.params.get('listsubtitles')):
return self._get_automatic_captions(*args, **kwargs)
return {}
def _get_automatic_captions(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def mark_watched(self, *args, **kwargs):
if (self._downloader.params.get('mark_watched', False) and
(self._get_login_info()[0] is not None or
self._downloader.params.get('cookiefile') is not None)):
self._mark_watched(*args, **kwargs)
def _mark_watched(self, *args, **kwargs):
raise NotImplementedError('This method must be implemented by subclasses')
def geo_verification_headers(self):
headers = {}
geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
if geo_verification_proxy:
headers['Ytdl-request-proxy'] = geo_verification_proxy
return headers
class SearchInfoExtractor(InfoExtractor):
"""
Base class for paged search queries extractors.
They accept URLs in the format _SEARCH_KEY(|all|[0-9]):{query}
Instances should define _SEARCH_KEY and _MAX_RESULTS.
"""
@classmethod
def _make_valid_url(cls):
return r'%s(?P<prefix>|[1-9][0-9]*|all):(?P<query>[\s\S]+)' % cls._SEARCH_KEY
@classmethod
def suitable(cls, url):
return re.match(cls._make_valid_url(), url) is not None
def _real_extract(self, query):
mobj = re.match(self._make_valid_url(), query)
if mobj is None:
raise ExtractorError('Invalid search query "%s"' % query)
prefix = mobj.group('prefix')
query = mobj.group('query')
if prefix == '':
return self._get_n_results(query, 1)
elif prefix == 'all':
return self._get_n_results(query, self._MAX_RESULTS)
else:
n = int(prefix)
if n <= 0:
raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
elif n > self._MAX_RESULTS:
self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
n = self._MAX_RESULTS
return self._get_n_results(query, n)
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
return self._SEARCH_KEY
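# A minimal illustrative subclass (not part of youtube-dl): the class name and
# the _SEARCH_KEY/_MAX_RESULTS values are invented to show the contract the
# SearchInfoExtractor docstring describes above. url_result() and
# playlist_result() are existing InfoExtractor helpers.
class ExampleSearchIE(SearchInfoExtractor):
    _SEARCH_KEY = 'examplesearch'  # matches e.g. "examplesearch3:kittens"
    _MAX_RESULTS = 50
    def _get_n_results(self, query, n):
        # Build n dummy entries; a real extractor would query the site here.
        entries = [self.url_result('http://example.com/video/%d' % i)
                   for i in range(n)]
        return self.playlist_result(entries, playlist_title=query)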
| mcepl/youtube-dl | youtube_dl/extractor/common.py | Python | unlicense | 89,946 |
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class PlaybookState:
'''
A helper class, which keeps track of the task iteration
state for a given playbook. This is used in the PlaybookIterator
class on a per-host basis.
'''
def __init__(self, parent_iterator):
self._parent_iterator = parent_iterator
self._cur_play = 0
self._task_list = None
self._cur_task_pos = 0
self._done = False
def next(self, peek=False):
'''
Determines and returns the next available task from the playbook,
advancing through the list of plays as it goes.
'''
task = None
# we save these locally so that we can peek at the next task
# without updating the internal state of the iterator
cur_play = self._cur_play
task_list = self._task_list
cur_task_pos = self._cur_task_pos
while True:
# when we hit the end of the playbook entries list, we set a flag
# and return None to indicate we're there
# FIXME: accessing the entries and parent iterator playbook members
# should be done through accessor functions
if self._done or cur_play > len(self._parent_iterator._playbook._entries) - 1:
self._done = True
return None
# initialize the task list by calling the .compile() method
# on the play, which will call compile() for all child objects
if task_list is None:
task_list = self._parent_iterator._playbook._entries[cur_play].compile()
# if we've hit the end of this plays task list, move on to the next
# and reset the position values for the next iteration
if cur_task_pos > len(task_list) - 1:
cur_play += 1
task_list = None
cur_task_pos = 0
continue
else:
# FIXME: do tag/conditional evaluation here and advance
# the task position if it should be skipped without
# returning a task
task = task_list[cur_task_pos]
cur_task_pos += 1
# Skip the task if it is the member of a role which has already
# been run, unless the role allows multiple executions
if task._role:
# FIXME: this should all be done via member functions
# instead of direct access to internal variables
if task._role.has_run() and not task._role._metadata._allow_duplicates:
continue
# Break out of the while loop now that we have our task
break
# If we're not just peeking at the next task, save the internal state
if not peek:
self._cur_play = cur_play
self._task_list = task_list
self._cur_task_pos = cur_task_pos
return task
class PlaybookIterator:
'''
The main iterator class, which keeps the state of the playbook
on a per-host basis using the above PlaybookState class.
'''
def __init__(self, inventory, log_manager, playbook):
self._playbook = playbook
self._log_manager = log_manager
self._host_entries = dict()
self._first_host = None
# build the per-host dictionary of playbook states
for host in inventory.get_hosts():
if self._first_host is None:
self._first_host = host
self._host_entries[host.get_name()] = PlaybookState(parent_iterator=self)
def get_next_task(self, peek=False):
''' returns the next task for host[0] '''
return self._host_entries[self._first_host.get_name()].next(peek=peek)
def get_next_task_for_host(self, host, peek=False):
''' fetch the next task for the given host '''
if host.get_name() not in self._host_entries:
raise AnsibleError("invalid host specified for playbook iteration")
return self._host_entries[host.get_name()].next(peek=peek)
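# Illustrative usage sketch (not part of the original file): collect every task
# the iterator yields for one host. `inventory`, `log_manager` and `playbook`
# are assumed to be already-constructed v2 objects of the expected types.
def walk_tasks_for_host(inventory, log_manager, playbook, host):
    iterator = PlaybookIterator(inventory, log_manager, playbook)
    tasks = []
    task = iterator.get_next_task_for_host(host)
    while task is not None:
        tasks.append(task)
        task = iterator.get_next_task_for_host(host)
    return tasks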
| wulczer/ansible | v2/ansible/executor/playbook_iterator.py | Python | gpl-3.0 | 4,905 |
#!/usr/bin/python
#coding=utf-8
'''
@author: sheng
@license:
'''
import unittest
from meridian.acupoints import dabao41
class TestDabao41Functions(unittest.TestCase):
def setUp(self):
pass
def test_xxx(self):
pass
if __name__ == '__main__':
unittest.main()
| sinotradition/meridian | meridian/tst/acupoints/test_dabao41.py | Python | apache-2.0 | 295 |
#!/usr/bin/env python
## Google APIs
# apiclient
from apiclient.discovery import build
# Oauth2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
# Python Built-in Libraries
import httplib2
import traceback
__author__ = "Colin Su <littleq@tagtoo.org>"
# The API version is needed both inside the dict and to build gce_url, so
# define it once up front (the original literal tried to reference
# default_settings from within its own definition, and was missing commas).
GCE_API_VERSION = 'v1'
default_settings = {
    'instance_settings': {
        'zone': 'us-central1-a',
        'image': 'debian',
        'images': {
            'debian': 'debian-7-wheezy-v20131119',
            'centos': 'centos-6-v20131119'
        },
        'machine_type': 'debian',
        'instance_name_prefix': 'tagtoo-'
    },
    'project_settings': {
        'gce_scopes': [
            'https://www.googleapis.com/auth/compute'
        ],
        'gce_api_version': GCE_API_VERSION,
        'gce_url': 'https://www.googleapis.com/compute/%s/projects/' % GCE_API_VERSION,
        'oauth2_storage': 'oauth2.dat',
        'client_secrets': 'client_secrets.json',
        'project_id': '',
    }
}
settings = default_settings.copy()
try:
from custom_settings import custom_settings
except:
custom_settings = None
if custom_settings:
print "Custom settings detected!"
settings.update(custom_settings)
class GCEService(object):
def __init__(self, project_settings, auth_http, project_id, blocking=True):
self.project_settings = project_settings
        self.service = build('compute', self.project_settings['gce_api_version'])
        self.auth_http = auth_http or self.get_authed_http()
        self.project_id = project_id
        self.project_url = "%s%s" % (self.project_settings['gce_url'], self.project_id)
self.blocking = blocking
def get_authed_http(self):
"""
Get authed http client
"""
flow = flow_from_clientsecrets(self.project_settings['client_secrets'], scope=self.project_settings['gce_scopes'])
storage = Storage(self.project_settings['oauth2_storage'])
credentials = storage.get()
if not credentials or credentials.invalid:
credentials = run(flow, storage)
http = httplib2.Http()
auth_http = credentials.authorize(http)
return auth_http
    def execute(self, request):
        try:
            response = request.execute(self.auth_http)
        except Exception:
            print 'unexpected error occurred.'
            traceback.print_exc()
            return
return response
    def list_instances(self):
        request = self.service.instances().list(project=self.project_id)
        # NOTE: the v1 instances().list call also expects a zone argument;
        # a real request would need one supplied by the caller.
        return self.execute(request)
class GCEInstance(object):
def __init__(self, service, settings, instance_name=None):
self.gce_service = service
self.instance_settings = settings
self.instance_name = instance_name
if __name__ == '__main__':
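    # Hedged usage sketch; the original file ended at the bare line above.
    # Assumes client_secrets.json / oauth2.dat exist and that project_id has
    # been filled in (e.g. via custom_settings).
    gce = GCEService(settings['project_settings'],
                     auth_http=None,  # __init__ falls back to get_authed_http()
                     project_id=settings['project_settings']['project_id'])
    gce.list_instances()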
| Tagtoo/cookie_atm | gce_operation.py | Python | mit | 2,794 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('inicio', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='perfiles',
name='rol',
),
]
| acs-um/gestion-turnos | apps/inicio/migrations/0002_remove_perfiles_rol.py | Python | mit | 339 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from marshmallow_mongoengine.schema import (
SchemaOpts,
ModelSchema,
)
from marshmallow_mongoengine.conversion.fields import (
register_field,
register_field_builder
)
from marshmallow_mongoengine.convert import (
ModelConverter,
fields_for_model,
convert_field,
field_for,
)
from marshmallow_mongoengine.exceptions import ModelConversionError
__version__ = '0.7.7'
__license__ = 'MIT'
__all__ = [
'ModelSchema',
'SchemaOpts',
'ModelConverter',
'fields_for_model',
'ModelConversionError',
'convert_field',
'field_for',
'fields',
'register_field_builder',
'register_field',
]
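# Illustrative usage sketch (not part of this module), kept as a comment so the
# package import stays side-effect free; the Document class and its field are
# invented:
#     import mongoengine as me
#     class Task(me.Document):
#         title = me.StringField()
#     class TaskSchema(ModelSchema):
#         class Meta:
#             model = Task
#     TaskSchema().dump(Task(title='hello'))  # serializes the document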
| touilleMan/marshmallow-mongoengine | marshmallow_mongoengine/__init__.py | Python | mit | 758 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-11-15 18:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('places', '0007_auto_20161114_2203'),
]
operations = [
migrations.AddField(
model_name='place',
name='code',
field=models.CharField(blank=True, max_length=40, null=True, unique=True),
),
]
| RESTfactory/presence | places/migrations/0008_place_code.py | Python | gpl-3.0 | 481 |
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.postgres.search import SearchVector, SearchQuery, SearchRank
from django.core.urlresolvers import reverse
from django.db import models
from github import UnknownObjectException
from social.apps.django_app.default.models import UserSocialAuth
from documents.tasks.wiki_processor import process_wiki
from interface.utils import get_github
from interface.path_processor import PathProcessor
class UserProxy(User):
class Meta:
proxy = True
def get_auth(self):
try:
data = UserSocialAuth.objects.filter(user=self).values_list('extra_data')[0][0]
except:
return None
username = data['login']
password = data['access_token']
return (username, password)
class Repo(models.Model):
user = models.ForeignKey(UserProxy, related_name='repos')
full_name = models.TextField(unique=True)
webhook_id = models.IntegerField(null=True, blank=True)
is_private = models.BooleanField(default=True)
wiki_branch = models.TextField(default='master')
created_at = models.DateTimeField(auto_now_add=True)
class Meta:
ordering = ['full_name']
def __str__(self):
return self.full_name
def get_absolute_url(self):
return reverse('repo_detail', kwargs={'full_name': self.full_name})
@property
def clone_url(self):
return 'https://github.com/{}.git'.format(self.full_name)
def delete(self, *args, **kwargs):
self.remove_webhook()
return super(Repo, self).delete(*args, **kwargs)
def remove_webhook(self):
if not settings.DEBUG:
g = get_github(self.user)
grepo = g.get_repo(self.full_name)
try:
hook = grepo.get_hook(self.webhook_id)
hook.delete()
except UnknownObjectException:
pass
self.webhook_id = None
self.save()
def user_is_collaborator(self, user):
if not user.is_authenticated():
return False
if self.user == user or user.is_staff:
return True
g = get_github(user)
grepo = g.get_repo(self.full_name)
guser = g.get_user(user.username)
return grepo.has_in_collaborators(guser)
def add_webhook(self, request):
if settings.DEBUG:
self.webhook_id = 123
else:
g = get_github(self.user)
grepo = g.get_repo(self.full_name)
hook = grepo.create_hook(
'web',
{
'content_type': 'json',
'url': request.build_absolute_uri(reverse('hooksgithub')),
'secret': settings.WEBHOOK_SECRET
},
events=['push'],
active=True
)
self.webhook_id = hook.id
self.save()
@property
def directory(self):
path_processor = PathProcessor(self.full_name, is_directory=True)
return path_processor.repo_disk_path
def enqueue(self, file_change=None):
file_change = file_change or {}
process_wiki.delay(self.id, file_change)
def get_folder_contents(self, path, documents):
folders = []
docs = []
for document in documents:
doc_path = document.path
if path != '/':
doc_path = doc_path.replace(path, '')
if not doc_path.startswith('/'):
doc_path = '/{}'.format(doc_path)
if doc_path == '/':
docs.append(document.filename)
else:
first_seg = doc_path.split('/', maxsplit=2)[1]
if first_seg:
folder_name = '{}/'.format(first_seg)
if folder_name not in folders:
folders.append(folder_name)
folders = sorted(folders)
docs = sorted(docs)
folders.extend(docs)
return folders
| ZeroCater/Eyrie | interface/models.py | Python | mit | 4,056 |
'''
Animations tests
================
'''
import unittest
from time import time, sleep
from kivy.animation import Animation, AnimationTransition
from kivy.uix.widget import Widget
from functools import partial
from kivy.clock import Clock
class AnimationTestCase(unittest.TestCase):
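    # This helper busy-waits in small increments and ticks Kivy's Clock on each
    # pass, so scheduled animation callbacks actually run during the "sleep".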
def sleep(self, t):
start = time()
while time() < start + t:
sleep(.01)
Clock.tick()
def setUp(self):
self.a = Animation(x=100, d=1, t='out_bounce')
self.w = Widget()
def test_start_animation(self):
self.a.start(self.w)
self.sleep(1)
self.assertAlmostEqual(self.w.x, 100)
def test_stop_animation(self):
self.a.start(self.w)
self.sleep(.5)
self.a.stop(self.w)
self.assertNotAlmostEqual(self.w.x, 100)
self.assertNotAlmostEqual(self.w.x, 0)
def test_stop_all(self):
self.a.start(self.w)
self.sleep(.5)
Animation.stop_all(self.w)
def test_stop_all_2(self):
self.a.start(self.w)
self.sleep(.5)
Animation.stop_all(self.w, 'x')
def test_duration(self):
self.assertEqual(self.a.duration, 1)
def test_transition(self):
self.assertEqual(self.a.transition, AnimationTransition.out_bounce)
def test_animated_properties(self):
self.assertEqual(self.a.animated_properties['x'], 100)
| nuigroup/kivy | kivy/tests/test_animations.py | Python | lgpl-3.0 | 1,392 |
#<pycode(py_diskio)>
def enumerate_system_files(subdir, fname, callback):
"""Similar to enumerate_files() however it searches inside IDA directory or its subdirectories"""
return enumerate_files(idadir(subdir), fname, callback)
#</pycode(py_diskio)>
| nihilus/src | pywraps/py_diskio.py | Python | bsd-3-clause | 258 |
# -*- coding: utf-8 -*-
import time, random, string, os
import urllib, urllib2
from PIL import Image
def urlformat(unformaturl):
host = 'http://w.duoting.fm'
url = '%s%s' % (host, unformaturl)
return url
def urllocalformat(unformaturl):
host = 'http://api.duoting.fm'
url = '%s%s' % (host, unformaturl)
return url
def handleurl(result, field):
if result[0].has_key(field):
for obj in result:
obj[field] = urlformat(obj[field])
return result
def timeformat(unformattime):
'''
    Utility function: convert a 13-digit timestamp into a formatted time string
'''
TIMESTAMP = '%Y-%m-%d %H:%M:%S'
unformattime = float(unformattime)
formattime = time.strftime(TIMESTAMP, time.localtime(unformattime))
return formattime
def timeformat2(unformattime):
'''
    Utility function: convert a 13-digit timestamp into a formatted time string
'''
TIMESTAMP = '%Y-%m-%d %H:%M:%S'
unformattime = float(unformattime / 100)
formattime = time.strftime(TIMESTAMP, time.localtime(unformattime))
return formattime
def strip_tags(html):
from HTMLParser import HTMLParser
html = html.strip()
html = html.strip("\n")
result = []
parse = HTMLParser()
parse.handle_data = result.append
parse.feed(html)
parse.close()
return "".join(result)
def get_current_time():
import time
t = time.time()
return t
def pwdmd5(pwd):
"""
    Hashing function (MD5)
"""
import hashlib
hash_md5 = hashlib.md5(pwd).hexdigest()
return hash_md5
def dateformat(unformatdate):
return int(time.mktime(time.strptime(unformatdate, '%Y-%m-%d %H:%M:%S')))
import re
##Strip the tags out of HTML
#Removes tags and similar markup from the HTML string.
#@param htmlstr HTML string.
def filter_tags(htmlstr):
    #filter out CDATA first
    re_cdata=re.compile('//<!\[CDATA\[[^>]*//\]\]>',re.I) #match CDATA
    re_script=re.compile('<\s*script[^>]*>[^<]*<\s*/\s*script\s*>',re.I)#Script
    re_style=re.compile('<\s*style[^>]*>[^<]*<\s*/\s*style\s*>',re.I)#style
    re_br=re.compile('<br\s*?/?>')#handle line breaks
    re_p=re.compile('</p\s*?/?>')#handle paragraph breaks
    re_h=re.compile('</?\w+[^>]*>')#HTML tags
    re_comment=re.compile('<!--[^>]*-->')#HTML comments
    s=re_cdata.sub('',htmlstr)#strip CDATA
    s=re_script.sub('',s) #strip SCRIPT
    s=re_style.sub('',s)#strip style
    s=re_br.sub('\n',s)#convert <br> into newlines
    s=re_h.sub('',s) #strip HTML tags
    s=re_comment.sub('',s)#strip HTML comments
    #collapse redundant blank lines
    blank_line=re.compile('\n+')
    s=blank_line.sub('\n',s)
    s=replaceCharEntity(s)#replace entities
return s
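# Illustrative example (not in the original file): tags are stripped and <br>
# becomes a newline, e.g. filter_tags('<p>Hi<br/>there</p>') returns 'Hi\nthere'.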
##Replace common HTML character entities.
#Substitutes normal characters for the special character entities in HTML.
#You can add new entities to CHAR_ENTITIES to handle more HTML character entities.
#@param htmlstr HTML string.
def replaceCharEntity(htmlstr):
CHAR_ENTITIES={'nbsp':' ','160':' ',
'lt':'<','60':'<',
'gt':'>','62':'>',
'amp':'&','38':'&',
'quot':'"','34':'"',}
re_charEntity=re.compile(r'&#?(?P<name>\w+);')
sz=re_charEntity.search(htmlstr)
while sz:
        entity=sz.group()#the full entity, e.g. &gt;
        key=sz.group('name')#the entity with &; stripped, e.g. gt for &gt;
try:
htmlstr=re_charEntity.sub(CHAR_ENTITIES[key],htmlstr,1)
sz=re_charEntity.search(htmlstr)
except KeyError:
            #substitute an empty string instead
htmlstr=re_charEntity.sub('',htmlstr,1)
sz=re_charEntity.search(htmlstr)
return htmlstr
def getdirectoryname():
return time.strftime('%Y-%m-%d', time.localtime())
def getfilename(filename):
filelist = filename.split('.')
randomstr = ''.join(random.sample(string.ascii_letters + string.digits, 16))
return '%s.%s' % (randomstr, filelist[1])
def allowed_file(filename, allowed_extensions):
return '.' in filename and filename.split('.', 1)[1] in allowed_extensions
def thumb_pic(picname, tx):
dirname = os.path.dirname(picname)
basename = 'thumb-%s' % os.path.basename(picname)
target_img = os.path.join(dirname, basename)
img = Image.open(picname)
(x, y) = img.size
x_t = tx
y_t = y * x_t / x
try:
img.resize((x_t, y_t), Image.ANTIALIAS).save(target_img)
return basename
except Exception, e:
return False
| rsj217/dtapi | dtapi/api/libs/utils.py | Python | gpl-2.0 | 4,366 |
# Configuration file for jupyter-notebook.
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# JupyterApp(Application) configuration
#------------------------------------------------------------------------------
## Base class for Jupyter applications
## Answer yes to any prompts.
#c.JupyterApp.answer_yes = False
## Full path of a config file.
#c.JupyterApp.config_file = ''
## Specify a config file to load.
#c.JupyterApp.config_file_name = ''
## Generate default config file.
#c.JupyterApp.generate_config = False
#------------------------------------------------------------------------------
# NotebookApp(JupyterApp) configuration
#------------------------------------------------------------------------------
## Set the Access-Control-Allow-Credentials: true header
#c.NotebookApp.allow_credentials = False
## Set the Access-Control-Allow-Origin header
#
# Use '*' to allow any origin to access your server.
#
# Takes precedence over allow_origin_pat.
#c.NotebookApp.allow_origin = ''
## Use a regular expression for the Access-Control-Allow-Origin header
#
# Requests from an origin matching the expression will get replies with:
#
# Access-Control-Allow-Origin: origin
#
# where `origin` is the origin of the request.
#
# Ignored if allow_origin is set.
#c.NotebookApp.allow_origin_pat = ''
## Allow password to be changed at login for the notebook server.
#
# While logging in with a token, the notebook server UI will give the user the
# opportunity to enter a new password at the same time, which will replace the
# token login mechanism.
#
# This can be set to False to prevent changing the password from the UI/API.
#c.NotebookApp.allow_password_change = True
## Allow requests where the Host header doesn't point to a local server
#
# By default, requests get a 403 forbidden response if the 'Host' header shows
# that the browser thinks it's on a non-local domain. Setting this option to
# True disables this check.
#
# This protects against 'DNS rebinding' attacks, where a remote web server
# serves you a page and then changes its DNS to send later requests to a local
# IP, bypassing same-origin checks.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are allowed as local, along
# with hostnames configured in local_hostnames.
#c.NotebookApp.allow_remote_access = False
## Whether to allow the user to run the notebook as root.
#c.NotebookApp.allow_root = False
## DEPRECATED use base_url
#c.NotebookApp.base_project_url = '/'
## The base URL for the notebook server.
#
# Leading and trailing slashes can be omitted, and will automatically be added.
#c.NotebookApp.base_url = '/'
## Specify what command to use to invoke a web browser when opening the notebook.
# If not specified, the default browser will be determined by the `webbrowser`
# standard library module, which allows setting of the BROWSER environment
# variable to override it.
#c.NotebookApp.browser = ''
## The full path to an SSL/TLS certificate file.
#c.NotebookApp.certfile = ''
## The full path to a certificate authority certificate for SSL/TLS client
# authentication.
#c.NotebookApp.client_ca = ''
## The config manager class to use
#c.NotebookApp.config_manager_class = 'notebook.services.config.manager.ConfigManager'
## The notebook manager class to use.
#c.NotebookApp.contents_manager_class = 'notebook.services.contents.largefilemanager.LargeFileManager'
## Extra keyword arguments to pass to `set_secure_cookie`. See tornado's
# set_secure_cookie docs for details.
#c.NotebookApp.cookie_options = {}
## The random bytes used to secure cookies. By default this is a new random
# number every time you start the Notebook. Set it to a value in a config file
# to enable logins to persist across server sessions.
#
# Note: Cookie secrets should be kept private, do not share config files with
# cookie_secret stored in plaintext (you can read the value from a file).
#c.NotebookApp.cookie_secret = b''
## The file where the cookie secret is stored.
#c.NotebookApp.cookie_secret_file = ''
## Override URL shown to users.
#
# Replace actual URL, including protocol, address, port and base URL, with the
# given value when displaying URL to the users. Do not change the actual
# connection URL. If authentication token is enabled, the token is added to the
# custom URL automatically.
#
# This option is intended to be used when the URL to display to the user cannot
# be determined reliably by the Jupyter notebook server (proxified or
# containerized setups for example).
#c.NotebookApp.custom_display_url = ''
## The default URL to redirect to from `/`
#c.NotebookApp.default_url = '/tree'
## Disable cross-site-request-forgery protection
#
# Jupyter notebook 4.3.1 introduces protection from cross-site request
# forgeries, requiring API requests to either:
#
# - originate from pages served by this server (validated with XSRF cookie and
# token), or - authenticate with a token
#
# Some anonymous compute resources still desire the ability to run code,
# completely without authentication. These services can disable all
# authentication and security checks, with the full knowledge of what that
# implies.
#c.NotebookApp.disable_check_xsrf = False
## Whether to enable MathJax for typesetting math/TeX
#
# MathJax is the javascript library Jupyter uses to render math/LaTeX. It is
# very large, so you may want to disable it if you have a slow internet
# connection, or for offline use of the notebook.
#
# When disabled, equations etc. will appear as their untransformed TeX source.
#c.NotebookApp.enable_mathjax = True
## extra paths to look for Javascript notebook extensions
#c.NotebookApp.extra_nbextensions_path = []
## handlers that should be loaded at higher priority than the default services
#c.NotebookApp.extra_services = []
## Extra paths to search for serving static files.
#
# This allows adding javascript/css to be available from the notebook server
# machine, or overriding individual files in the IPython
#c.NotebookApp.extra_static_paths = []
## Extra paths to search for serving jinja templates.
#
# Can be used to override templates from notebook.templates.
#c.NotebookApp.extra_template_paths = []
##
#c.NotebookApp.file_to_run = ''
## Deprecated: Use minified JS file or not, mainly used during dev to avoid JS
# recompilation
#c.NotebookApp.ignore_minified_js = False
## (bytes/sec) Maximum rate at which stream output can be sent on iopub before
# they are limited.
c.NotebookApp.iopub_data_rate_limit = 500000000
## (msgs/sec) Maximum rate at which messages can be sent on iopub before they are
# limited.
#c.NotebookApp.iopub_msg_rate_limit = 1000
## The IP address the notebook server will listen on.
#c.NotebookApp.ip = 'localhost'
## Supply extra arguments that will be passed to Jinja environment.
#c.NotebookApp.jinja_environment_options = {}
## Extra variables to supply to jinja templates when rendering.
#c.NotebookApp.jinja_template_vars = {}
## The kernel manager class to use.
#c.NotebookApp.kernel_manager_class = 'notebook.services.kernels.kernelmanager.MappingKernelManager'
## The kernel spec manager class to use. Should be a subclass of
# `jupyter_client.kernelspec.KernelSpecManager`.
#
# The Api of KernelSpecManager is provisional and might change without warning
# between this version of Jupyter and the next stable one.
#c.NotebookApp.kernel_spec_manager_class = 'jupyter_client.kernelspec.KernelSpecManager'
## The full path to a private key file for usage with SSL/TLS.
#c.NotebookApp.keyfile = ''
## Hostnames to allow as local when allow_remote_access is False.
#
# Local IP addresses (such as 127.0.0.1 and ::1) are automatically accepted as
# local as well.
#c.NotebookApp.local_hostnames = ['localhost']
## The login handler class to use.
#c.NotebookApp.login_handler_class = 'notebook.auth.login.LoginHandler'
## The logout handler class to use.
#c.NotebookApp.logout_handler_class = 'notebook.auth.logout.LogoutHandler'
## The MathJax.js configuration file that is to be used.
#c.NotebookApp.mathjax_config = 'TeX-AMS-MML_HTMLorMML-full,Safe'
## A custom url for MathJax.js. Should be in the form of a case-sensitive url to
# MathJax, for example: /static/components/MathJax/MathJax.js
#c.NotebookApp.mathjax_url = ''
## Dict of Python modules to load as notebook server extensions. Entry values can
# be used to enable and disable the loading of the extensions. The extensions
# will be loaded in alphabetical order.
#c.NotebookApp.nbserver_extensions = {}
## The directory to use for notebooks and kernels.
#c.NotebookApp.notebook_dir = 'data'
## Whether to open in a browser after starting. The specific browser used is
# platform dependent and determined by the python standard library `webbrowser`
# module, unless it is overridden using the --browser (NotebookApp.browser)
# configuration option.
#c.NotebookApp.open_browser = True
## Hashed password to use for web authentication.
#
# To generate, type in a python/IPython shell:
#
# from notebook.auth import passwd; passwd()
#
# The string should be of the form type:salt:hashed-password.
#c.NotebookApp.password = ''
## Forces users to use a password for the Notebook server. This is useful in a
# multi user environment, for instance when everybody in the LAN can access each
# other's machine through ssh.
#
# In such a case, serving the notebook server on localhost is not secure since
# any user can connect to the notebook server via ssh.
#c.NotebookApp.password_required = False
## The port the notebook server will listen on.
#c.NotebookApp.port = 8888
## The number of additional ports to try if the specified port is not available.
#c.NotebookApp.port_retries = 50
## DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
#c.NotebookApp.pylab = 'disabled'
## If True, display a button in the dashboard to quit (shutdown the notebook
# server).
#c.NotebookApp.quit_button = True
## (sec) Time window used to check the message and data rate limits.
#c.NotebookApp.rate_limit_window = 3
## Reraise exceptions encountered loading server extensions?
#c.NotebookApp.reraise_server_extension_failures = False
## DEPRECATED use the nbserver_extensions dict instead
#c.NotebookApp.server_extensions = []
## The session manager class to use.
#c.NotebookApp.session_manager_class = 'notebook.services.sessions.sessionmanager.SessionManager'
## Shut down the server after N seconds with no kernels or terminals running and
# no activity. This can be used together with culling idle kernels
# (MappingKernelManager.cull_idle_timeout) to shutdown the notebook server when
# it's not in use. This is not precisely timed: it may shut down up to a minute
# later. 0 (the default) disables this automatic shutdown.
#c.NotebookApp.shutdown_no_activity_timeout = 0
## Supply SSL options for the tornado HTTPServer. See the tornado docs for
# details.
#c.NotebookApp.ssl_options = {}
## Supply overrides for terminado. Currently only supports "shell_command".
#c.NotebookApp.terminado_settings = {}
## Set to False to disable terminals.
#
# This does *not* make the notebook server more secure by itself. Anything the
# user can do in a terminal, they can also do in a notebook.
#
# Terminals may also be automatically disabled if the terminado package is not
# available.
#c.NotebookApp.terminals_enabled = True
## Token used for authenticating first-time connections to the server.
#
# When no password is enabled, the default is to generate a new, random token.
#
# Setting to an empty string disables authentication altogether, which is NOT
# RECOMMENDED.
#c.NotebookApp.token = '<generated>'
## Supply overrides for the tornado.web.Application that the Jupyter notebook
# uses.
#c.NotebookApp.tornado_settings = {}
## Whether or not to trust X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL.
#c.NotebookApp.trust_xheaders = False
## DEPRECATED, use tornado_settings
#c.NotebookApp.webapp_settings = {}
## Specify Where to open the notebook on startup. This is the `new` argument
# passed to the standard library method `webbrowser.open`. The behaviour is not
# guaranteed, but depends on browser support. Valid values are:
#
# - 2 opens a new tab,
# - 1 opens a new window,
# - 0 opens in an existing window.
#
# See the `webbrowser.open` documentation for details.
#c.NotebookApp.webbrowser_open_new = 2
## Set the tornado compression options for websocket connections.
#
# This value will be returned from
# :meth:`WebSocketHandler.get_compression_options`. None (default) will disable
# compression. A dict (even an empty one) will enable compression.
#
# See the tornado docs for WebSocketHandler.get_compression_options for details.
#c.NotebookApp.websocket_compression_options = None
## The base URL for websockets, if it differs from the HTTP server (hint: it
# almost certainly doesn't).
#
# Should be in the form of an HTTP origin: ws[s]://hostname[:port]
#c.NotebookApp.websocket_url = ''
#------------------------------------------------------------------------------
# ConnectionFileMixin(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Mixin for configurable classes that work with connection files
## JSON file in which to store connection info [default: kernel-<pid>.json]
#
# This file will contain the IP, ports, and authentication key needed to connect
# clients to this kernel. By default, this file will be created in the security
# dir of the current profile, but can be specified by absolute path.
#c.ConnectionFileMixin.connection_file = ''
## set the control (ROUTER) port [default: random]
#c.ConnectionFileMixin.control_port = 0
## set the heartbeat port [default: random]
#c.ConnectionFileMixin.hb_port = 0
## set the iopub (PUB) port [default: random]
#c.ConnectionFileMixin.iopub_port = 0
## Set the kernel's IP address [default localhost]. If the IP address is
# something other than localhost, then Consoles on other machines will be able
# to connect to the Kernel, so be careful!
#c.ConnectionFileMixin.ip = ''
## set the shell (ROUTER) port [default: random]
#c.ConnectionFileMixin.shell_port = 0
## set the stdin (ROUTER) port [default: random]
#c.ConnectionFileMixin.stdin_port = 0
##
#c.ConnectionFileMixin.transport = 'tcp'
#------------------------------------------------------------------------------
# KernelManager(ConnectionFileMixin) configuration
#------------------------------------------------------------------------------
## Manages a single kernel in a subprocess on this host.
#
# This version starts kernels with Popen.
## Should we autorestart the kernel if it dies.
#c.KernelManager.autorestart = True
## DEPRECATED: Use kernel_name instead.
#
# The Popen Command to launch the kernel. Override this if you have a custom
# kernel. If kernel_cmd is specified in a configuration file, Jupyter does not
# pass any arguments to the kernel, because it cannot make any assumptions about
# the arguments that the kernel understands. In particular, this means that the
# kernel does not receive the option --debug if it is given on the Jupyter command
# line.
#c.KernelManager.kernel_cmd = []
## Time to wait for a kernel to terminate before killing it, in seconds.
#c.KernelManager.shutdown_wait_time = 5.0
#------------------------------------------------------------------------------
# Session(Configurable) configuration
#------------------------------------------------------------------------------
## Object for handling serialization and sending of messages.
#
# The Session object handles building messages and sending them with ZMQ sockets
# or ZMQStream objects. Objects can communicate with each other over the
# network via Session objects, and only need to work with the dict-based IPython
# message spec. The Session will handle serialization/deserialization, security,
# and metadata.
#
# Sessions support configurable serialization via packer/unpacker traits, and
# signing with HMAC digests via the key/keyfile traits.
#
# Parameters ----------
#
# debug : bool
# whether to trigger extra debugging statements
# packer/unpacker : str : 'json', 'pickle' or import_string
# importstrings for methods to serialize message parts. If just
# 'json' or 'pickle', predefined JSON and pickle packers will be used.
# Otherwise, the entire importstring must be used.
#
# The functions must accept at least valid JSON input, and output *bytes*.
#
# For example, to use msgpack:
# packer = 'msgpack.packb', unpacker='msgpack.unpackb'
# pack/unpack : callables
# You can also set the pack/unpack callables for serialization directly.
# session : bytes
# the ID of this Session object. The default is to generate a new UUID.
# username : unicode
# username added to message headers. The default is to ask the OS.
# key : bytes
# The key used to initialize an HMAC signature. If unset, messages
# will not be signed or checked.
# keyfile : filepath
# The file containing a key. If this is set, `key` will be initialized
# to the contents of the file.
## Threshold (in bytes) beyond which an object's buffer should be extracted to
# avoid pickling.
#c.Session.buffer_threshold = 1024
## Whether to check PID to protect against calls after fork.
#
# This check can be disabled if fork-safety is handled elsewhere.
#c.Session.check_pid = True
## Threshold (in bytes) beyond which a buffer should be sent without copying.
#c.Session.copy_threshold = 65536
## Debug output in the Session
#c.Session.debug = False
## The maximum number of digests to remember.
#
# The digest history will be culled when it exceeds this value.
#c.Session.digest_history_size = 65536
## The maximum number of items for a container to be introspected for custom
# serialization. Containers larger than this are pickled outright.
#c.Session.item_threshold = 64
## execution key, for signing messages.
#c.Session.key = b''
## path to file containing execution key.
#c.Session.keyfile = ''
## Metadata dictionary, which serves as the default top-level metadata dict for
# each message.
#c.Session.metadata = {}
## The name of the packer for serializing messages. Should be one of 'json',
# 'pickle', or an import name for a custom callable serializer.
#c.Session.packer = 'json'
## The UUID identifying this session.
#c.Session.session = ''
## The digest scheme used to construct the message signatures. Must have the form
# 'hmac-HASH'.
#c.Session.signature_scheme = 'hmac-sha256'
## The name of the unpacker for unserializing messages. Only used with custom
# functions for `packer`.
#c.Session.unpacker = 'json'
## Username for the Session. Default is your system username.
#c.Session.username = 'nben'
#------------------------------------------------------------------------------
# MultiKernelManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for managing multiple kernels.
## The name of the default kernel to start
#c.MultiKernelManager.default_kernel_name = 'python3'
## The kernel manager class. This is configurable to allow subclassing of the
# KernelManager for customized behavior.
#c.MultiKernelManager.kernel_manager_class = 'jupyter_client.ioloop.IOLoopKernelManager'
#------------------------------------------------------------------------------
# MappingKernelManager(MultiKernelManager) configuration
#------------------------------------------------------------------------------
## A KernelManager that handles notebook mapping and HTTP error handling
## Whether messages from kernels whose frontends have disconnected should be
# buffered in-memory.
#
# When True (default), messages are buffered and replayed on reconnect, avoiding
# lost messages due to interrupted connectivity.
#
# Disable if long-running kernels will produce too much output while no
# frontends are connected.
#c.MappingKernelManager.buffer_offline_messages = True
## Whether to consider culling kernels which are busy. Only effective if
# cull_idle_timeout > 0.
#c.MappingKernelManager.cull_busy = False
## Whether to consider culling kernels which have one or more connections. Only
# effective if cull_idle_timeout > 0.
#c.MappingKernelManager.cull_connected = False
## Timeout (in seconds) after which a kernel is considered idle and ready to be
# culled. Values of 0 or lower disable culling. Very short timeouts may result
# in kernels being culled for users with poor network connections.
#c.MappingKernelManager.cull_idle_timeout = 0
## The interval (in seconds) on which to check for idle kernels exceeding the
# cull timeout value.
#c.MappingKernelManager.cull_interval = 300
## Timeout for giving up on a kernel (in seconds).
#
# On starting and restarting kernels, we check whether the kernel is running and
# responsive by sending kernel_info_requests. This sets the timeout in seconds
# for how long the kernel can take before being presumed dead. This affects the
# MappingKernelManager (which handles kernel restarts) and the
# ZMQChannelsHandler (which handles the startup).
#c.MappingKernelManager.kernel_info_timeout = 60
##
#c.MappingKernelManager.root_dir = ''
#------------------------------------------------------------------------------
# ContentsManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## Base class for serving files and directories.
#
# This serves any text or binary file, as well as directories, with special
# handling for JSON notebook documents.
#
# Most APIs take a path argument, which is always an API-style unicode path, and
# always refers to a directory.
#
# - unicode, not url-escaped
# - '/'-separated
# - leading and trailing '/' will be stripped
# - if unspecified, path defaults to '',
# indicating the root path.
## Allow access to hidden files
#c.ContentsManager.allow_hidden = False
##
#c.ContentsManager.checkpoints = None
##
#c.ContentsManager.checkpoints_class = 'notebook.services.contents.checkpoints.Checkpoints'
##
#c.ContentsManager.checkpoints_kwargs = {}
## handler class to use when serving raw file requests.
#
# Default is a fallback that talks to the ContentsManager API, which may be
# inefficient, especially for large files.
#
# Local files-based ContentsManagers can use a StaticFileHandler subclass, which
# will be much more efficient.
#
# Access to these files should be Authenticated.
#c.ContentsManager.files_handler_class = 'notebook.files.handlers.FilesHandler'
## Extra parameters to pass to files_handler_class.
#
# For example, StaticFileHandlers generally expect a `path` argument specifying
# the root directory from which to serve files.
#c.ContentsManager.files_handler_params = {}
## Glob patterns to hide in file and directory listings.
#c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~']
## Python callable or importstring thereof
#
# To be called on a contents model prior to save.
#
# This can be used to process the structure, such as removing notebook outputs
# or other side effects that should not be saved.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(path=path, model=model, contents_manager=self)
#
# - model: the model to be saved. Includes file contents.
# Modifying this dict will affect the file that is stored.
# - path: the API path of the save destination
# - contents_manager: this ContentsManager instance
#c.ContentsManager.pre_save_hook = None
##
#c.ContentsManager.root_dir = '/'
## The base name used when creating untitled directories.
#c.ContentsManager.untitled_directory = 'Untitled Folder'
## The base name used when creating untitled files.
#c.ContentsManager.untitled_file = 'untitled'
## The base name used when creating untitled notebooks.
#c.ContentsManager.untitled_notebook = 'Untitled'
#------------------------------------------------------------------------------
# FileManagerMixin(Configurable) configuration
#------------------------------------------------------------------------------
## Mixin for ContentsAPI classes that interact with the filesystem.
#
# Provides facilities for reading, writing, and copying both notebooks and
# generic files.
#
# Shared by FileContentsManager and FileCheckpoints.
#
# Note ---- Classes using this mixin must provide the following attributes:
#
# root_dir : unicode
#     A directory against which API-style paths are to be resolved.
#
# log : logging.Logger
## By default notebooks are saved to disk in a temporary file which, if
# successfully written, then replaces the old one. This procedure, namely
# 'atomic_writing', causes some bugs on file systems without operation order
# enforcement (like some networked fs). If set to False, the new notebook is
# written directly over the old one, which could fail (eg: full filesystem or
# quota).
#c.FileManagerMixin.use_atomic_writing = True
#------------------------------------------------------------------------------
# FileContentsManager(FileManagerMixin,ContentsManager) configuration
#------------------------------------------------------------------------------
## If True (default), deleting files will send them to the platform's
# trash/recycle bin, where they can be recovered. If False, deleting files
# really deletes them.
#c.FileContentsManager.delete_to_trash = True
## Python callable or importstring thereof
#
# to be called on the path of a file just saved.
#
# This can be used to process the file on disk, such as converting the notebook
# to a script or HTML via nbconvert.
#
# It will be called as (all arguments passed by keyword)::
#
# hook(os_path=os_path, model=model, contents_manager=instance)
#
# - path: the filesystem path to the file just written
# - model: the model representing the file
# - contents_manager: this ContentsManager instance
#c.FileContentsManager.post_save_hook = None
##
#c.FileContentsManager.root_dir = ''
## DEPRECATED, use post_save_hook. Will be removed in Notebook 5.0
#c.FileContentsManager.save_script = False
#------------------------------------------------------------------------------
# NotebookNotary(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## A class for computing and verifying notebook signatures.
## The hashing algorithm used to sign notebooks.
#c.NotebookNotary.algorithm = 'sha256'
## The sqlite file in which to store notebook signatures. By default, this will
# be in your Jupyter data directory. You can set it to ':memory:' to disable
# sqlite writing to the filesystem.
#c.NotebookNotary.db_file = ''
## The secret key with which notebooks are signed.
#c.NotebookNotary.secret = b''
## The file where the secret key is stored.
#c.NotebookNotary.secret_file = ''
## A callable returning the storage backend for notebook signatures. The default
# uses an SQLite database.
#c.NotebookNotary.store_factory = traitlets.Undefined
#------------------------------------------------------------------------------
# KernelSpecManager(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## If there is no Python kernelspec registered and the IPython kernel is
# available, ensure it is added to the spec list.
#c.KernelSpecManager.ensure_native_kernel = True
## The kernel spec class. This is configurable to allow subclassing of the
# KernelSpecManager for customized behavior.
#c.KernelSpecManager.kernel_spec_class = 'jupyter_client.kernelspec.KernelSpec'
## Whitelist of allowed kernel names.
#
# By default, all installed kernels are allowed.
#c.KernelSpecManager.whitelist = set()
| noahbenson/neuropythy | docker/jupyter_notebook_config.py | Python | agpl-3.0 | 28,716 |
"""Memebot views"""
import os
from django.views.generic.simple import direct_to_template
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from gruntle.memebot.decorators import login_or_apikey_required
from gruntle.memebot.models import UserProfile, Link
from gruntle.memebot.feeds import get_feed_names, get_feeds
from gruntle.memebot.forms import CheckLinkForm
from gruntle.memebot.utils import text
@login_required
def view_index(request):
"""Site index"""
return direct_to_template(request, 'memebot/index.html', {})
@login_required
def view_scores(request):
"""View scoreboard"""
profiles = UserProfile.objects.get_by_score()
return direct_to_template(request, 'memebot/scores.html', {'profiles': profiles})
@login_required
def browse_links(request):
"""Browse all links"""
try:
page = int(request.GET.get('page'))
except StandardError:
page = 1
try:
per_page = int(request.GET.get('per_page'))
except StandardError:
per_page = settings.BROWSE_LINKS_PER_PAGE
start = (page - 1) * per_page
end = start + per_page
links = Link.objects.all()
if not text.boolean(request.GET.get('disabled')):
links = links.exclude(state='disabled')
links = links.order_by('-created')[start:end]
return direct_to_template(request, 'memebot/browse.html', {'links': links})
def _get_link(publish_id, **kwargs):
"""Helper function to get published links or raise 404"""
return get_object_or_404(Link, publish_id=int(publish_id), state='published', **kwargs)
@login_required
def check_link(request):
"""Page to allow user to enter a URL to check its status"""
if request.method == 'POST':
form = CheckLinkForm(request.POST)
if form.is_valid():
return HttpResponseRedirect(reverse('memebot-view-link', args=[form.cleaned_data['link'].publish_id]))
else:
form = CheckLinkForm()
return direct_to_template(request, 'memebot/check-link.html', {'form': form})
@login_required
def view_link(request, publish_id):
"""Info about a link, TBD"""
return direct_to_template(request, 'memebot/view-link.html', {'link': _get_link(publish_id)})
##############
### PUBLIC ###
##############
def view_link_content(request, publish_id):
"""View generic published content that is cached locally"""
link = _get_link(publish_id, content__isnull=False)
return HttpResponse(link.content, link.content_type)
def view_rss_index(request):
"""Index of available RSS feeds"""
feeds = [(name, feed.description) for name, feed in get_feeds()]
return direct_to_template(request, 'memebot/rss-index.html', {'feeds': feeds})
def view_rss(request, name):
"""View RSS feed"""
if name not in get_feed_names():
raise Http404
feed_file = os.path.join(settings.FEED_DIR, name + '.xml')
if not os.path.exists(feed_file):
raise Http404
with open(feed_file, 'r') as fp:
return HttpResponse(fp.read(), 'text/xml; charset=' + settings.FEED_ENCODING)
| ToxicFrog/lancow | contrib/django-memebot/gruntle/memebot/views/memebot.py | Python | gpl-3.0 | 3,251 |
grid = [
[ 8, 2, 22, 97, 38, 15, 0, 40, 0, 75, 4, 5, 7, 78, 52, 12, 50, 77, 91, 8],
[49, 49, 99, 40, 17, 81, 18, 57, 60, 87, 17, 40, 98, 43, 69, 48, 4, 56, 62, 0],
[81, 49, 31, 73, 55, 79, 14, 29, 93, 71, 40, 67, 53, 88, 30, 3, 49, 13, 36, 65],
[52, 70, 95, 23, 4, 60, 11, 42, 69, 24, 68, 56, 1, 32, 56, 71, 37, 2, 36, 91],
[22, 31, 16, 71, 51, 67, 63, 89, 41, 92, 36, 54, 22, 40, 40, 28, 66, 33, 13, 80],
[24, 47, 32, 60, 99, 3, 45, 2, 44, 75, 33, 53, 78, 36, 84, 20, 35, 17, 12, 50],
[32, 98, 81, 28, 64, 23, 67, 10, 26, 38, 40, 67, 59, 54, 70, 66, 18, 38, 64, 70],
[67, 26, 20, 68, 2, 62, 12, 20, 95, 63, 94, 39, 63, 8, 40, 91, 66, 49, 94, 21],
[24, 55, 58, 5, 66, 73, 99, 26, 97, 17, 78, 78, 96, 83, 14, 88, 34, 89, 63, 72],
[21, 36, 23, 9, 75, 0, 76, 44, 20, 45, 35, 14, 0, 61, 33, 97, 34, 31, 33, 95],
[78, 17, 53, 28, 22, 75, 31, 67, 15, 94, 3, 80, 4, 62, 16, 14, 9, 53, 56, 92],
[16, 39, 5, 42, 96, 35, 31, 47, 55, 58, 88, 24, 0, 17, 54, 24, 36, 29, 85, 57],
[86, 56, 0, 48, 35, 71, 89, 7, 5, 44, 44, 37, 44, 60, 21, 58, 51, 54, 17, 58],
[19, 80, 81, 68, 5, 94, 47, 69, 28, 73, 92, 13, 86, 52, 17, 77, 4, 89, 55, 40],
[ 4, 52, 8, 83, 97, 35, 99, 16, 7, 97, 57, 32, 16, 26, 26, 79, 33, 27, 98, 66],
[88, 36, 68, 87, 57, 62, 20, 72, 3, 46, 33, 67, 46, 55, 12, 32, 63, 93, 53, 69],
[ 4, 42, 16, 73, 38, 25, 39, 11, 24, 94, 72, 18, 8, 46, 29, 32, 40, 62, 76, 36],
[20, 69, 36, 41, 72, 30, 23, 88, 34, 62, 99, 69, 82, 67, 59, 85, 74, 4, 36, 16],
[20, 73, 35, 29, 78, 31, 90, 1, 74, 31, 49, 71, 48, 86, 81, 16, 23, 57, 5, 54],
[ 1, 70, 54, 71, 83, 51, 54, 69, 16, 92, 33, 48, 61, 43, 52, 1, 89, 19, 67, 48]
]
largestProduct = 0
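# Check the four directions that can hold a run of four adjacent cells:
# left-to-right rows, top-to-bottom columns, down-right diagonals, and
# down-left diagonals.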
for row in xrange(0,len(grid)):
for i in xrange(0,len(grid[row])-3):
if grid[row][i]*grid[row][i+1]*grid[row][i+2]*grid[row][i+3] > largestProduct:
largestProduct = grid[row][i]*grid[row][i+1]*grid[row][i+2]*grid[row][i+3]
for row in xrange(0,len(grid)-3):
for i in xrange(0,len(grid[row])):
if grid[row][i]*grid[row+1][i]*grid[row+2][i]*grid[row+3][i] > largestProduct:
largestProduct = grid[row][i]*grid[row+1][i]*grid[row+2][i]*grid[row+3][i]
for row in xrange(0,len(grid)-3):
for i in xrange(0,len(grid[row])-3):
if grid[row][i]*grid[row+1][i+1]*grid[row+2][i+2]*grid[row+3][i+3] > largestProduct:
largestProduct = grid[row][i]*grid[row+1][i+1]*grid[row+2][i+2]*grid[row+3][i+3]
for row in xrange(0,len(grid)-3):
for i in xrange(3,len(grid[row])):
if grid[row][i]*grid[row+1][i-1]*grid[row+2][i-2]*grid[row+3][i-3] > largestProduct:
largestProduct = grid[row][i]*grid[row+1][i-1]*grid[row+2][i-2]*grid[row+3][i-3]
print largestProduct
| smileytechguy/sample_projects | product_in_a_grid/python/product_in_a_grid.py | Python | mit | 2,677 |
# This file is part of FNPDjango, licensed under GNU Affero GPLv3 or later.
# Copyright © Fundacja Nowoczesna Polska. See README.md for more information.
#
from django.conf import settings
from django.test import TestCase
class UtilsSettingsTestCase(TestCase):
def test_lazy_ugettext_lazy(self):
self.assertEqual(str(settings.TEST_LAZY_UGETTEXT_LAZY),
"Lazy setting.")
| fnp/fnpdjango | tests/tests/test_utils_settings.py | Python | agpl-3.0 | 398 |
#!/usr/bin/env python
import numpy as np
import rospy as rp
import cv2
import time
from sensor_msgs.msg import CompressedImage
from cv_bridge import CvBridge, CvBridgeError
from _init_paths import cfg
import caffe
__author__ = 'kazuto1011'
TOPIC_NAME = "/camera/image/compressed"
class CNNClassifier:
def __init__(self):
rp.loginfo("Initialization")
cv2.namedWindow("rgb", cv2.CV_WINDOW_AUTOSIZE)
# initialize a classifier
caffe.set_device(cfg.gpuNum)
caffe.set_mode_gpu()
self.classifier = caffe.Classifier(cfg.path.caffe.prototxt, cfg.path.caffe.caffemodel)
# load synset_words
self.categories = np.loadtxt(cfg.path.caffe.synset_words, str, delimiter="\t")
self.bridge = CvBridge()
self.rgb_subscriber = rp.Subscriber(TOPIC_NAME, CompressedImage, self.classify, queue_size=1)
def classify(self, image):
# convert CompressedImage to numpy array
np_arr = np.fromstring(image.data, np.uint8)
rgb_image = cv2.imdecode(np_arr, cv2.CV_LOAD_IMAGE_COLOR)
# prediction
start = time.time()
predictions = self.classifier.predict([rgb_image], oversample=False)
index = np.argmax(predictions)
rp.loginfo("%.2f s. " % (time.time() - start) + self.categories[index])
cv2.imshow("rgb", rgb_image)
cv2.waitKey(30)
class NodeMain:
def __init__(self):
rp.init_node('server_caffe', anonymous=False)
rp.on_shutdown(self.shutdown)
CNNClassifier()
rp.spin()
cv2.destroyAllWindows()
@staticmethod
def shutdown():
rp.loginfo("Shutting down")
if __name__ == '__main__':
try:
NodeMain()
except rp.ROSInterruptException:
rp.loginfo("Terminated")
| kazuto1011/rcnn-server | tms_ss_cnn/nodes/caffe/server_caffe.py | Python | mit | 1,790 |
"""
When you need a wall.
"""
VERSION = (1, 2, 0)
def get_version():
"""Returns the version as a string."""
return '.'.join(map(str, VERSION))
| GermanoGuerrini/django-bricks | djangobricks/__init__.py | Python | mit | 154 |
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from tempfile import NamedTemporaryFile
from airflow.contrib.hooks.gcs_hook import (GoogleCloudStorageHook,
_parse_gcs_url)
from airflow.contrib.operators.s3_list_operator import S3ListOperator
from airflow.exceptions import AirflowException
from airflow.hooks.S3_hook import S3Hook
from airflow.utils.decorators import apply_defaults
class S3ToGoogleCloudStorageOperator(S3ListOperator):
"""
Synchronizes an S3 key, possibly a prefix, with a Google Cloud Storage
destination path.
:param bucket: The S3 bucket where to find the objects. (templated)
:type bucket: str
:param prefix: Prefix string which filters objects whose name begin with
such prefix. (templated)
:type prefix: str
:param delimiter: the delimiter marks key hierarchy. (templated)
:type delimiter: str
:param aws_conn_id: The source S3 connection
:type aws_conn_id: str
:param verify: Whether or not to verify SSL certificates for S3 connection.
By default SSL certificates are verified.
You can provide the following values:
- ``False``: do not validate SSL certificates. SSL will still be used
(unless use_ssl is False), but SSL certificates will not be
verified.
        - ``path/to/cert/bundle.pem``: A filename of the CA cert bundle to use.
You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
:type verify: bool or str
:param dest_gcs_conn_id: The destination connection ID to use
when connecting to Google Cloud Storage.
:type dest_gcs_conn_id: str
:param dest_gcs: The destination Google Cloud Storage bucket and prefix
where you want to store the files. (templated)
:type dest_gcs: str
:param delegate_to: The account to impersonate, if any.
For this to work, the service account making the request must have
domain-wide delegation enabled.
:type delegate_to: str
:param replace: Whether you want to replace existing destination files
or not.
:type replace: bool
**Example**:
.. code-block:: python
s3_to_gcs_op = S3ToGoogleCloudStorageOperator(
task_id='s3_to_gcs_example',
bucket='my-s3-bucket',
prefix='data/customers-201804',
dest_gcs_conn_id='google_cloud_default',
dest_gcs='gs://my.gcs.bucket/some/customers/',
replace=False,
dag=my-dag)
Note that ``bucket``, ``prefix``, ``delimiter`` and ``dest_gcs`` are
templated, so you can use variables in them if you wish.
"""
template_fields = ('bucket', 'prefix', 'delimiter', 'dest_gcs')
ui_color = '#e09411'
@apply_defaults
def __init__(self,
bucket,
prefix='',
delimiter='',
aws_conn_id='aws_default',
verify=None,
dest_gcs_conn_id=None,
dest_gcs=None,
delegate_to=None,
replace=False,
*args,
**kwargs):
super(S3ToGoogleCloudStorageOperator, self).__init__(
bucket=bucket,
prefix=prefix,
delimiter=delimiter,
aws_conn_id=aws_conn_id,
*args,
**kwargs)
self.dest_gcs_conn_id = dest_gcs_conn_id
self.dest_gcs = dest_gcs
self.delegate_to = delegate_to
self.replace = replace
self.verify = verify
if dest_gcs and not self._gcs_object_is_directory(self.dest_gcs):
self.log.info(
'Destination Google Cloud Storage path is not a valid '
'"directory", define a path that ends with a slash "/" or '
'leave it empty for the root of the bucket.')
raise AirflowException('The destination Google Cloud Storage path '
'must end with a slash "/" or be empty.')
def execute(self, context):
# use the super method to list all the files in an S3 bucket/key
files = super(S3ToGoogleCloudStorageOperator, self).execute(context)
gcs_hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=self.dest_gcs_conn_id,
delegate_to=self.delegate_to)
if not self.replace:
# if we are not replacing -> list all files in the GCS bucket
# and only keep those files which are present in
# S3 and not in Google Cloud Storage
bucket_name, object_prefix = _parse_gcs_url(self.dest_gcs)
existing_files_prefixed = gcs_hook.list(
bucket_name, prefix=object_prefix)
existing_files = []
if existing_files_prefixed:
                # Drop the object prefix entry itself; it only marks an empty "directory" placeholder
if object_prefix in existing_files_prefixed:
existing_files_prefixed.remove(object_prefix)
# Remove the object prefix from all object string paths
for f in existing_files_prefixed:
if f.startswith(object_prefix):
existing_files.append(f[len(object_prefix):])
else:
existing_files.append(f)
files = list(set(files) - set(existing_files))
if len(files) > 0:
self.log.info(
'%s files are going to be synced: %s.', len(files), files
)
else:
self.log.info(
'There are no new files to sync. Have a nice day!')
if files:
hook = S3Hook(aws_conn_id=self.aws_conn_id, verify=self.verify)
for file in files:
# GCS hook builds its own in-memory file so we have to create
# and pass the path
file_object = hook.get_key(file, self.bucket)
with NamedTemporaryFile(mode='wb', delete=True) as f:
file_object.download_fileobj(f)
f.flush()
dest_gcs_bucket, dest_gcs_object_prefix = _parse_gcs_url(
self.dest_gcs)
                    # dest_gcs_object_prefix always ends with a '/', because
                    # that is enforced at instantiation time
dest_gcs_object = dest_gcs_object_prefix + file
# Sync is sequential and the hook already logs too much
# so skip this for now
# self.log.info(
# 'Saving file {0} from S3 bucket {1} in GCS bucket {2}'
# ' as object {3}'.format(file, self.bucket,
# dest_gcs_bucket,
# dest_gcs_object))
gcs_hook.upload(dest_gcs_bucket, dest_gcs_object, f.name)
self.log.info(
"All done, uploaded %d files to Google Cloud Storage",
len(files))
else:
self.log.info(
                'In sync, no files needed to be uploaded to Google Cloud '
                'Storage')
return files
# Following functionality may be better suited in
# airflow/contrib/hooks/gcs_hook.py
@staticmethod
def _gcs_object_is_directory(object):
bucket, blob = _parse_gcs_url(object)
return len(blob) == 0 or blob.endswith('/')
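# A sketch of the helper's behaviour, assuming _parse_gcs_url splits a
# "gs://bucket/blob" URL into a (bucket, blob) tuple as used above:
# >>> S3ToGoogleCloudStorageOperator._gcs_object_is_directory('gs://b/dir/')
# True
# >>> S3ToGoogleCloudStorageOperator._gcs_object_is_directory('gs://b')
# True
# >>> S3ToGoogleCloudStorageOperator._gcs_object_is_directory('gs://b/file.csv')
# False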
| r39132/airflow | airflow/contrib/operators/s3_to_gcs_operator.py | Python | apache-2.0 | 8,370 |
# -*- coding: utf-8 -*-
# Home made test
KX= 1000 # Spring constant
KY= 2000 # Spring constant
KZ= 3000 # Spring constant
FX= 1 # Force magnitude
FY= 2
FZ= 3
import xc_base
import geom
import xc
from solution import predefined_solutions
from model import predefined_spaces
from materials import typical_materials
# Model definition
# Problem type
feProblem= xc.FEProblem()
preprocessor= feProblem.getPreprocessor
nodes= preprocessor.getNodeHandler
modelSpace= predefined_spaces.StructuralMechanics3D(nodes)
nodes.defaultTag= 1 #First node number.
nod= nodes.newNodeXYZ(1,1,1)
# Define materials
kx= typical_materials.defElasticMaterial(preprocessor, "kx",KX)
ky= typical_materials.defElasticMaterial(preprocessor, "ky",KY)
kz= typical_materials.defElasticMaterial(preprocessor, "kz",KZ)
fixedNode, newElemen= modelSpace.setBearing(nod.tag,["kx","ky","kz"])
# Constraints
constraints= preprocessor.getBoundaryCondHandler
#
spc= constraints.newSPConstraint(1,3,0.0) # Node 1
spc= constraints.newSPConstraint(1,4,0.0)
spc= constraints.newSPConstraint(1,5,0.0)
# Loads definition
loadHandler= preprocessor.getLoadHandler
lPatterns= loadHandler.getLoadPatterns
#Load modulation.
ts= lPatterns.newTimeSeries("constant_ts","ts")
lPatterns.currentTimeSeries= "ts"
#Load case definition
lp0= lPatterns.newLoadPattern("default","0")
lp0.newNodalLoad(1,xc.Vector([FX,FY,FZ,0,0,0]))
#We add the load case to domain.
lPatterns.addToDomain("0")
# Solution
analisis= predefined_solutions.simple_static_linear(feProblem)
result= analisis.analyze(1)
nodes.calculateNodalReactions(True,1e-7)
nod1= nodes.getNode(1)
deltax= nod1.getDisp[0]
deltay= nod1.getDisp[1]
deltaz= nod1.getDisp[2]
RX= fixedNode.getReaction[0]
RY= fixedNode.getReaction[1]
RZ= fixedNode.getReaction[2]
ratio1= -RX/FX
ratio2= (KX*deltax)/FX
ratio3= -RY/FY
ratio4= (KY*deltay)/FY
ratio5= -RZ/FZ
ratio6= (KZ*deltaz)/FZ
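# Each bearing direction behaves as an independent linear spring, so the exact
# solution is delta = F/K per axis with reaction R = -F; every ratio above
# should therefore equal 1 (within the 1e-5 tolerance checked below).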
'''
print "RX= ",RX
print "dx= ",deltax
print "RY= ",RY
print "dy= ",deltay
print "RZ= ",RZ
print "dz= ",deltaz
print "ratio1= ",(ratio1)
print "ratio2= ",(ratio2)
print "ratio3= ",(ratio3)
print "ratio4= ",(ratio4)
print "ratio5= ",(ratio5)
print "ratio6= ",(ratio6)
'''
import os
from miscUtils import LogMessages as lmsg
fname= os.path.basename(__file__)
if (abs(ratio1-1.0)<1e-5) & (abs(ratio2-1.0)<1e-5) & (abs(ratio3-1.0)<1e-5) & (abs(ratio4-1.0)<1e-5) & (abs(ratio5-1.0)<1e-5) & (abs(ratio6-1.0)<1e-5) :
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
| lcpt/xc | verif/tests/constraints/test_elastic_bearing_01.py | Python | gpl-3.0 | 2,538 |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from compas.topology import connected_components
__all__ = [
'network_disconnected_nodes',
'network_disconnected_edges',
'network_explode'
]
def network_disconnected_nodes(network):
"""Get the disconnected node groups in a network.
Parameters
----------
network : :class:`~compas.datastructures.Network`
A network.
Returns
-------
list[list[hashable]]
The list of disconnected node groups.
"""
return connected_components(network.adjacency)
def network_disconnected_edges(network):
"""Get the disconnected edge groups in a network.
Parameters
----------
network : :class:`~compas.datastructures.Network`
A network.
Returns
-------
list[list[tuple[hashable, hashable]]]
The list of disconnected edge groups.
"""
components = network_disconnected_nodes(network)
return [[(u, v) for u in component for v in network.neighbors(u) if u < v] for component in components]
def network_explode(network, cls=None):
"""Explode a network into its connected components.
Parameters
----------
    network : :class:`~compas.datastructures.Network`
        A network.
    cls : Type[:class:`~compas.datastructures.Network`], optional
        The class of the exploded parts; defaults to ``type(network)``.
Returns
-------
list[:class:`~compas.datastructures.Network`]
The list of exploded network parts.
"""
if cls is None:
cls = type(network)
exploded_networks = []
parts = network_disconnected_edges(network)
for part in parts:
keys = list(set([key for edge in part for key in edge]))
nodes = [network.node_coordinates(key) for key in keys]
key_index = {key: index for index, key in enumerate(keys)}
edges = [(key_index[u], key_index[v]) for u, v in part]
exploded_networks.append(cls.from_nodes_and_edges(nodes, edges))
return exploded_networks
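# A minimal usage sketch (node coordinates and edges are illustrative only;
# Network.from_nodes_and_edges is the same constructor used above):
if __name__ == "__main__":
    from compas.datastructures import Network

    nodes = [(0, 0, 0), (1, 0, 0), (5, 5, 0), (6, 5, 0)]
    edges = [(0, 1), (2, 3)]  # two disconnected components
    network = Network.from_nodes_and_edges(nodes, edges)
    print(len(network_explode(network)))  # expected: 2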
| compas-dev/compas | src/compas/datastructures/network/explode.py | Python | mit | 1,947 |
import math
import re
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.urls import reverse_lazy
from django.contrib.contenttypes.fields import GenericRelation
from planotrabalho.models import PlanoTrabalho
from planotrabalho.models import PlanoDeCultura
from planotrabalho.models import Componente
from planotrabalho.models import ConselhoDeCultura
from planotrabalho.models import OrgaoGestor2
from planotrabalho.models import LISTA_SITUACAO_ARQUIVO
from gestao.models import Diligencia
from planotrabalho.models import FundoDeCultura
from adesao.managers import SistemaManager
from adesao.managers import HistoricoManager
from datetime import date
from adesao.middleware import get_current_user
from itertools import tee
from django.db import connection
LISTA_ESTADOS_PROCESSO = (
('0', 'Aguardando preenchimento dos dados cadastrais'),
('1', 'Aguardando envio da documentação'),
('2', 'Aguardando renovação da adesão'),
('3', 'Diligência Documental'),
('4', 'Aguardando análise do Plano de Trabalho'),
('5', 'Diligência Documental'),
('6', 'Publicado no DOU'),
('7', 'Acordo de Cooperação e Termo de Adesão aprovados'),
)
LISTA_TIPOS_FUNCIONARIOS = (
(0, 'Gestor de Cultura'),
(1, 'Responsável'),
(2, 'Gestor'),)
UFS = {
12: "AC",
27: "AL",
13: "AM",
16: "AP",
29: "BA",
23: "CE",
53: "DF",
32: "ES",
52: "GO",
21: "MA",
31: "MG",
50: "MS",
51: "MT",
15: "PA",
25: "PB",
26: "PE",
22: "PI",
41: "PR",
33: "RJ",
24: "RN",
11: "RO",
14: "RR",
43: "RS",
42: "SC",
28: "SE",
35: "SP",
17: "TO"
}
REGIOES = {
'1': "Norte",
'2': "Nordeste",
'3': "Sudeste",
'4': "Sul",
'5': "Centro Oeste",
}
# Create your models here.
class Uf(models.Model):
codigo_ibge = models.IntegerField(primary_key=True)
sigla = models.CharField(max_length=2)
nome_uf = models.CharField(max_length=100)
def __str__(self):
return self.sigla
class Meta:
ordering = ['sigla']
class EnteFederado(models.Model):
cod_ibge = models.IntegerField(_('Código IBGE'))
nome = models.CharField(_("Nome do EnteFederado"), max_length=300)
gentilico = models.CharField(_("Gentilico"), max_length=300, null=True, blank=True)
mandatario = models.CharField(_("Nome do Mandataio"), max_length=300, null=True, blank=True)
territorio = models.DecimalField(_("Área territorial - km²"), max_digits=15, decimal_places=3)
populacao = models.IntegerField(_("População Estimada - pessoas"))
densidade = models.DecimalField(_("Densidade demográfica - hab/km²"), null=True, blank=True, max_digits=10,
decimal_places=2)
idh = models.DecimalField(_("IDH / IDHM"), max_digits=10, decimal_places=3, null=True, blank=True)
receita = models.IntegerField(_("Receitas realizadas - R$ (×1000)"), null=True, blank=True)
despesas = models.IntegerField(_("Despesas empenhadas - R$ (×1000)"), null=True, blank=True)
pib = models.DecimalField(_("PIB per capita - R$"), max_digits=10, decimal_places=2)
latitude = models.FloatField(null=True, blank=True)
longitude = models.FloatField(null=True, blank=True)
def __str__(self):
uf = UFS.get(self.cod_ibge, UFS.get(int(str(self.cod_ibge)[:2])))
digits = int(math.log10(self.cod_ibge)) + 1
if digits > 2 or self.cod_ibge == 53:
return f"{self.nome}/{uf}"
return f"Estado de {self.nome} ({uf})"
def get_regiao(self):
digito = str(self.cod_ibge)[0]
regiao = REGIOES[digito]
return regiao
def faixa_populacional(self):
if self.populacao <= 5000:
faixa = "Até 5.000"
elif self.populacao <= 10000:
faixa = "De 5.001 até 10.000"
elif self.populacao <= 20000:
faixa = "De 10.001 até 20.000"
elif self.populacao <= 50000:
faixa = "De 20.001 até 50.000"
elif self.populacao <= 100000:
faixa = "De 50.001 até 100.000"
elif self.populacao <= 500000:
faixa = "De 100.001 até 500.000"
else:
faixa = "Acima de 500.000"
return faixa
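    # Example: an ente with populacao=7500 falls in the "De 5.001 até 10.000" band.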
@property
def is_municipio(self):
digits = int(math.log10(self.cod_ibge)) + 1
if digits > 2:
return True
return False
@property
def sigla(self):
if self.is_municipio is False and self.cod_ibge != 53:
            uf = re.search(r'\(([A-Z]+)\)', self.__str__())[0]
            return re.search(r'[A-Z]+', uf)[0]
        return re.search(r'(\/[A-Z]*)', self.__str__())[0][1:]
class Meta:
indexes = [models.Index(fields=['cod_ibge']), ]
class Cidade(models.Model):
codigo_ibge = models.IntegerField(unique=True)
uf = models.ForeignKey('Uf',
to_field='codigo_ibge',
on_delete=models.CASCADE)
nome_municipio = models.CharField(max_length=100)
lat = models.FloatField()
lng = models.FloatField()
def __str__(self):
return self.nome_municipio
class Meta:
ordering = ['nome_municipio']
class Municipio(models.Model):
localizacao = models.CharField(max_length=50, blank=True)
numero_processo = models.CharField(max_length=50, blank=True)
cpf_prefeito = models.CharField(
max_length=14,
verbose_name='CPF')
nome_prefeito = models.CharField(max_length=255)
cnpj_prefeitura = models.CharField(
max_length=18,
verbose_name='CNPJ')
rg_prefeito = models.CharField(max_length=50, verbose_name='RG')
orgao_expeditor_rg = models.CharField(max_length=50)
estado_expeditor = models.ForeignKey('Uf',
related_name='estado_expeditor',
on_delete=models.CASCADE)
endereco = models.CharField(max_length=255)
complemento = models.CharField(max_length=255, default='', blank=True)
cep = models.CharField(max_length=10)
bairro = models.CharField(max_length=50)
estado = models.ForeignKey('Uf', on_delete=models.CASCADE)
cidade = models.ForeignKey('Cidade', on_delete=models.CASCADE,
null=True, blank=True)
telefone_um = models.CharField(max_length=100)
telefone_dois = models.CharField(max_length=25, blank=True)
telefone_tres = models.CharField(max_length=25, blank=True)
endereco_eletronico = models.URLField(max_length=255, blank=True, null=True)
email_institucional_prefeito = models.EmailField()
termo_posse_prefeito = models.FileField(
upload_to='termo_posse',
max_length=255,
blank=True,
null=True)
rg_copia_prefeito = models.FileField(
upload_to='rg_copia',
max_length=255,
blank=True,
null=True)
cpf_copia_prefeito = models.FileField(
upload_to='cpf_copia',
max_length=255,
blank=True,
null=True)
def __str__(self):
return self.cnpj_prefeitura
class Meta:
unique_together = ('cidade', 'estado')
class Responsavel(models.Model):
cpf_responsavel = models.CharField(
max_length=14,
verbose_name='CPF')
rg_responsavel = models.CharField(max_length=25, verbose_name='RG')
orgao_expeditor_rg = models.CharField(max_length=50)
estado_expeditor = models.ForeignKey('Uf', on_delete=models.CASCADE)
nome_responsavel = models.CharField(max_length=100)
cargo_responsavel = models.CharField(max_length=100)
instituicao_responsavel = models.CharField(max_length=100)
telefone_um = models.CharField(max_length=25)
telefone_dois = models.CharField(max_length=25, blank=True)
telefone_tres = models.CharField(max_length=25, blank=True)
email_institucional_responsavel = models.EmailField()
def __str__(self):
return self.cpf_responsavel
class Secretario(models.Model):
cpf_secretario = models.CharField(
max_length=14,
verbose_name='CPF')
rg_secretario = models.CharField(max_length=25, verbose_name='RG')
orgao_expeditor_rg = models.CharField(max_length=50)
estado_expeditor = models.ForeignKey('Uf', on_delete=models.CASCADE)
nome_secretario = models.CharField(max_length=100)
cargo_secretario = models.CharField(max_length=100)
instituicao_secretario = models.CharField(max_length=100)
telefone_um = models.CharField(max_length=25)
telefone_dois = models.CharField(max_length=25, blank=True)
telefone_tres = models.CharField(max_length=25, blank=True)
email_institucional_secretario = models.EmailField()
def __str__(self):
return self.cpf_secretario
class Usuario(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
nome_usuario = models.CharField(max_length=100)
municipio = models.OneToOneField('Municipio', on_delete=models.CASCADE,
blank=True, null=True)
responsavel = models.OneToOneField('Responsavel', on_delete=models.CASCADE,
blank=True, null=True)
secretario = models.OneToOneField('Secretario', on_delete=models.CASCADE,
blank=True, null=True)
plano_trabalho = models.OneToOneField(
'planotrabalho.PlanoTrabalho',
on_delete=models.CASCADE,
blank=True,
null=True)
estado_processo = models.CharField(
max_length=1,
choices=LISTA_ESTADOS_PROCESSO,
default='0')
data_publicacao_acordo = models.DateField(blank=True, null=True)
link_publicacao_acordo = models.CharField(max_length=200, blank=True, null=True)
processo_sei = models.CharField(max_length=100, blank=True, null=True)
codigo_ativacao = models.CharField(max_length=12, unique=True)
data_cadastro = models.DateTimeField(auto_now_add=True)
prazo = models.IntegerField(default=2)
email_pessoal = models.EmailField(blank=True, null=True)
def __str__(self):
return self.user.username
def limpa_cadastrador(self):
"""
        Clear the changed cadastrador's references in the PlanoTrabalho,
        Secretario, Responsavel and Municipio tables.
"""
self.plano_trabalho = None
self.municipio = None
self.responsavel = None
self.secretario = None
self.user.save()
self.save()
def transfere_propriedade(self, propriedade, valor):
"""
        Transfer a given value to a property of this Usuario instance.
"""
setattr(self, propriedade, valor)
def recebe_permissoes_sistema_cultura(self, usuario):
"""
        Take over another user's PlanoTrabalho, Municipio, Secretario,
        Responsavel, DataPublicacaoAcordo and EstadoProcesso.
"""
propriedades = ("plano_trabalho", "municipio", "secretario",
"responsavel", "data_publicacao_acordo", "data_publicacao_retificacao",
"estado_processo")
for propriedade in propriedades:
valor = getattr(usuario, propriedade, None)
self.transfere_propriedade(propriedade, valor)
usuario.limpa_cadastrador()
self.save()
def save(self, *args, **kwargs):
if self.pk:
if self.estado_processo == '6' and self.plano_trabalho is None:
self.plano_trabalho = PlanoTrabalho.objects.create()
super(Usuario, self).save(*args, **kwargs)
class Historico(models.Model):
usuario = models.ForeignKey('Usuario', on_delete=models.CASCADE)
situacao = models.CharField(
max_length=1,
choices=LISTA_ESTADOS_PROCESSO,
blank=True,
null=True)
data_alteracao = models.DateTimeField(auto_now_add=True)
arquivo = models.FileField(upload_to='historico', blank=True, null=True)
descricao = models.TextField(blank=True, null=True)
class Sede(models.Model):
localizacao = models.CharField(max_length=50, blank=True)
cnpj = models.CharField(
max_length=18,
verbose_name='CNPJ')
endereco = models.TextField()
complemento = models.CharField(max_length=255, default='', blank=True)
cep = models.CharField(max_length=10)
bairro = models.CharField(max_length=50)
telefone_um = models.CharField(max_length=100)
telefone_dois = models.CharField(max_length=25, blank=True)
telefone_tres = models.CharField(max_length=25, blank=True)
endereco_eletronico = models.URLField(max_length=255, blank=True, null=True)
def __str__(self):
return self.cnpj
class Funcionario(models.Model):
cpf = models.CharField(
max_length=14,
verbose_name='CPF')
rg = models.CharField(max_length=50, verbose_name='RG')
orgao_expeditor_rg = models.CharField(max_length=50)
estado_expeditor = models.ForeignKey('Uf',
on_delete=models.CASCADE,
choices=UFS.items())
nome = models.CharField(max_length=100)
cargo = models.CharField(max_length=100, null=True, blank=True)
instituicao = models.CharField(max_length=100, null=True, blank=True)
telefone_um = models.CharField(max_length=50)
telefone_dois = models.CharField(max_length=50, blank=True)
telefone_tres = models.CharField(max_length=50, blank=True)
email_institucional = models.EmailField()
email_pessoal = models.EmailField(null=True, blank=True)
tipo_funcionario = models.IntegerField(
choices=LISTA_TIPOS_FUNCIONARIOS,
default='0')
estado_endereco = models.ForeignKey('Uf',
related_name='funcionario_estado_endereco',
on_delete=models.CASCADE,
choices=UFS.items(),
null=True)
endereco = models.CharField(max_length=255, null=True)
complemento = models.CharField(max_length=255, default='', blank=True)
cep = models.CharField(max_length=10, null=True)
bairro = models.CharField(max_length=50, null=True)
def __str__(self):
return self.cpf
class Gestor(Funcionario):
termo_posse = models.FileField(
upload_to='termo_posse',
max_length=255,
blank=True,
null=True)
rg_copia = models.FileField(
upload_to='rg_copia',
max_length=255,
blank=True,
null=True)
cpf_copia = models.FileField(
upload_to='cpf_copia',
max_length=255,
blank=True,
null=True)
class SistemaCultura(models.Model):
"""
    Entity that represents a Sistema de Cultura (culture system).
"""
oficio_cadastrador = models.FileField(
upload_to='oficio_cadastrador',
max_length=255,
null=True)
oficio_prorrogacao_prazo = models.FileField(
upload_to='oficio_prorrogacao_prazo',
max_length=255,
null=True)
cadastrador = models.ForeignKey("Usuario", on_delete=models.SET_NULL, null=True, related_name="sistema_cultura")
ente_federado = models.ForeignKey("EnteFederado", on_delete=models.SET_NULL, null=True)
data_criacao = models.DateTimeField(default=timezone.now)
legislacao = models.ForeignKey(Componente, on_delete=models.SET_NULL, null=True, related_name="legislacao")
orgao_gestor = models.ForeignKey(OrgaoGestor2, on_delete=models.SET_NULL, null=True, related_name="orgao_gestor")
fundo_cultura = models.ForeignKey(FundoDeCultura, on_delete=models.SET_NULL, null=True,
related_name="fundo_cultura")
conselho = models.ForeignKey(ConselhoDeCultura, on_delete=models.SET_NULL, null=True, related_name="conselho")
plano = models.ForeignKey(PlanoDeCultura, on_delete=models.SET_NULL, null=True, related_name="plano")
gestor_cultura = models.ForeignKey(Funcionario, on_delete=models.SET_NULL, null=True,
related_name="sistema_cultura_gestor_cultura")
gestor = models.ForeignKey(Gestor, on_delete=models.SET_NULL, null=True)
sede = models.ForeignKey(Sede, on_delete=models.SET_NULL, null=True)
estado_processo = models.CharField(
max_length=1,
choices=LISTA_ESTADOS_PROCESSO,
default='0')
data_publicacao_acordo = models.DateField(blank=True, null=True)
data_publicacao_retificacao = models.DateField(blank=True, null=True)
link_publicacao_acordo = models.CharField(max_length=200, blank=True, null=True)
link_publicacao_retificacao = models.CharField(max_length=200, blank=True, null=True)
processo_sei = models.CharField(max_length=100, blank=True, null=True)
numero_processo = models.CharField(max_length=50, null=True, blank=True)
localizacao = models.CharField(_("Localização do Processo"), max_length=10, blank=True, null=True)
justificativa = models.TextField(_("Justificativa"), blank=True, null=True)
diligencia = models.ForeignKey("gestao.DiligenciaSimples", on_delete=models.SET_NULL,
related_name="sistema_cultura", blank=True, null=True)
prazo = models.IntegerField(default=2)
conferencia_nacional = models.BooleanField(blank=True, default=False)
alterado_em = models.DateTimeField("Alterado em", default=timezone.now)
alterado_por = models.ForeignKey("Usuario", on_delete=models.SET_NULL, null=True, related_name="sistemas_alterados")
objects = models.Manager()
sistema = SistemaManager()
historico = HistoricoManager()
class Meta:
ordering = ['ente_federado__nome', 'ente_federado', '-alterado_em']
def get_absolute_url(self):
url = reverse_lazy("gestao:detalhar", kwargs={"cod_ibge": self.ente_federado.cod_ibge})
return url
def get_componentes_diligencias(self, componente=None, arquivo='arquivo'):
diligencias_componentes = []
if componente:
componentes = [componente]
else:
componentes = ['legislacao', 'orgao_gestor',
'plano', 'conselho', 'fundo_cultura']
for componente in componentes:
componente = getattr(self, componente)
if arquivo != 'arquivo':
componente = getattr(componente, arquivo)
if componente and componente.diligencia:
componente.historico_diligencia = componente.diligencia.history.all()
diligencias_componentes.append(componente)
return diligencias_componentes
def atualiza_relacoes_reversas(self, anterior):
for field in anterior._meta.get_fields():
if field.auto_created and not field.concrete:
objetos = getattr(anterior, field.name)
for objeto in objetos.all():
objeto.sistema_cultura = self
objeto.save()
def historico_cadastradores(self):
sistemas = SistemaCultura.historico.ente(self.ente_federado.cod_ibge)
sistema_base = sistemas.first()
historico_cadastradores = [sistema_base]
for sistema in sistemas:
if sistema.cadastrador != sistema_base.cadastrador:
historico_cadastradores.append(sistema)
sistema_base = sistema
return historico_cadastradores
def get_situacao_componentes(self):
"""
        Return a dict with the display situation of each component and CNPJ proof of a SistemaCultura.
"""
        componentes = ('legislacao', 'orgao_gestor', 'orgao_gestor_cnpj', 'fundo_cultura',
                       'fundo_cultura_cnpj', 'conselho', 'plano', 'metas')
        objetos = (getattr(self, componente, None) for componente in componentes)
        situacoes = {componente: objeto.get_situacao_display() for (componente, objeto) in zip(componentes, objetos)
                     if objeto is not None}
comp = {}
if self.orgao_gestor:
if self.orgao_gestor.comprovante_cnpj:
comp.update({'orgao_gestor_cnpj' : LISTA_SITUACAO_ARQUIVO[self.orgao_gestor.comprovante_cnpj.situacao][1]} )
if self.fundo_cultura:
if self.fundo_cultura.comprovante_cnpj:
comp.update({'fundo_cultura_cnpj' : LISTA_SITUACAO_ARQUIVO[self.fundo_cultura.comprovante_cnpj.situacao][1]} )
if self.plano:
if self.plano.metas:
comp.update({'metas' : LISTA_SITUACAO_ARQUIVO[self.plano.metas.situacao][1]} )
if comp:
situacoes.update(comp)
return situacoes
def compara_valores(self, obj_anterior, fields):
"""
        Yield, for each given field, whether its value matches between the previous object and this one.
"""
return (getattr(obj_anterior, field.attname) == getattr(self, field.attname) for field in
fields)
def compara_fks(self, obj_anterior, fields):
comparacao_fk = True
for field in fields:
if field.get_internal_type() == 'ForeignKey':
objeto_fk_anterior = getattr(obj_anterior, field.name)
objeto_fk_atual = getattr(self, field.name)
if objeto_fk_anterior and objeto_fk_atual:
                    for related_field in field.related_model._meta.fields[1:]:
                        objeto_fk_anterior_value = getattr(objeto_fk_anterior, related_field.name)
                        objeto_fk_atual_value = getattr(objeto_fk_atual, related_field.name)
if objeto_fk_anterior_value != objeto_fk_atual_value:
comparacao_fk = False
break
if not comparacao_fk:
break
return comparacao_fk
def get_estado_processo_display(self):
estado_index = int(self.estado_processo)
return LISTA_ESTADOS_PROCESSO[estado_index][1]
def save(self, *args, **kwargs):
"""
        Persist a new SistemaCultura row whenever any information changed,
        keeping the previous row as history (copy-on-write versioning).
"""
if self.pk:
fields = self._meta.fields[1:-1]
anterior = SistemaCultura.objects.get(pk=self.pk)
comparacao_fk = True
if all(self.compara_valores(anterior, fields)):
comparacao_fk = self.compara_fks(anterior, fields)
            if False in self.compara_valores(anterior, fields) or not comparacao_fk:
self.pk = None
self.alterado_em = timezone.now()
self.alterado_por = get_current_user()
super().save(*args, **kwargs)
self.atualiza_relacoes_reversas(anterior)
else:
super().save(*args, **kwargs)
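    # A sketch of the resulting copy-on-write behaviour (illustrative values):
    #   sistema = SistemaCultura.objects.get(pk=1)
    #   sistema.processo_sei = '999'
    #   sistema.save()  # persisted as a new row; the old row remains as history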
def has_not_diligencias_enviadas_aprovadas(self):
query = '''SELECT COUNT(ad_sc.id) <= 0
FROM adesao_sistemacultura ad_sc
JOIN planotrabalho_componente pt_cl
ON pt_cl.arquivocomponente2_ptr_id = ad_sc.legislacao_id
JOIN planotrabalho_arquivocomponente2 pt_acl
ON pt_acl.id = pt_cl.arquivocomponente2_ptr_id
AND pt_acl.situacao IN (2, 3)
JOIN planotrabalho_componente pt_cp
ON pt_cp.arquivocomponente2_ptr_id = ad_sc.plano_id
JOIN planotrabalho_arquivocomponente2 pt_acp
ON pt_acp.id = pt_cp.arquivocomponente2_ptr_id
AND pt_acp.situacao IN (2, 3)
JOIN planotrabalho_componente pt_cc
ON pt_cc.arquivocomponente2_ptr_id = ad_sc.conselho_id
JOIN planotrabalho_arquivocomponente2 pt_acc
ON pt_acc.id = pt_cc.arquivocomponente2_ptr_id
AND pt_acc.situacao IN (2, 3)
JOIN planotrabalho_componente pt_cf
ON pt_cf.arquivocomponente2_ptr_id = ad_sc.fundo_cultura_id
JOIN planotrabalho_arquivocomponente2 pt_acf
ON pt_acf.id = pt_cf.arquivocomponente2_ptr_id
AND pt_acf.situacao IN (2, 3)
JOIN planotrabalho_componente pt_co
ON pt_co.arquivocomponente2_ptr_id = ad_sc.orgao_gestor_id
JOIN planotrabalho_arquivocomponente2 pt_aco
ON pt_aco.id = pt_co.arquivocomponente2_ptr_id
AND pt_aco.situacao IN (2, 3)
WHERE ad_sc.ente_federado_id = %s
AND ad_sc.diligencia_id IS NOT NULL'''
cursor = connection.cursor()
cursor.execute(query, [self.ente_federado.id])
row = cursor.fetchone()
return row[0]
class BaseSolicitacao(models.Model):
"""
    Abstract base for requests (solicitações) registered for an ente federado.
"""
class Meta:
abstract = True
STATUS = (
('0', 'Pendente de Análise'),
('1', 'Aprovado'),
('2', 'Rejeitado'),
)
ente_federado = models.ForeignKey("EnteFederado", on_delete=models.SET_NULL, null=True)
alterado_por = models.ForeignKey('Usuario', on_delete=models.SET_NULL, null=True, related_name="%(class)s_alterado_por")
status = models.CharField(max_length=1, choices=STATUS, default='0', blank=True, null=True)
alterado_em = models.DateTimeField("Alterado em", default=timezone.now)
oficio = models.FileField(upload_to='oficio', max_length=255, null=True)
laudo = models.TextField(blank=True, null=True)
avaliador = models.ForeignKey('Usuario', on_delete=models.SET_NULL, null=True, related_name="%(class)s_avaliador")
data_analise = models.DateTimeField("Data de Análise", blank=True, null=True)
def save(self, *args, **kwargs):
"""
        Save the instance, refreshing alterado_em (and setting alterado_por on creation).
"""
if self.pk:
self.alterado_em = timezone.now()
super().save(*args, **kwargs)
else:
self.alterado_em = timezone.now()
self.alterado_por = get_current_user()
super().save(*args, **kwargs)
def get_estado_processo_display(self):
estado_index = int(self.status)
return self.STATUS[estado_index][1]
def get_ente_federao_id(self):
return str(self.ente_federado_id)
def get_ente_federao_cod_ibge(self):
        return str(self.ente_federado.cod_ibge)
def __str__(self):
return "Solicitação de "+str(self.ente_federado)
class SolicitacaoDeAdesao(BaseSolicitacao):
def __str__(self):
return "Solicitação de Adesão de "+str(self.ente_federado)
class SolicitacaoDeTrocaDeCadastrador(BaseSolicitacao):
def __str__(self):
return "Solicitação de Troca de Cadastrador de "+str(self.ente_federado)
| culturagovbr/sistema-nacional-cultura | adesao/models.py | Python | agpl-3.0 | 26,998 |
import json
import os
from datetime import datetime
from django.core.files.base import ContentFile
from django.core.files.storage import get_storage_class
from django.forms.models import model_to_dict
storage = get_storage_class()()
IP_DENY_LIST = """
-- Mozilla Network
ip_address NOT LIKE '63.245.208.%' AND
ip_address NOT LIKE '63.245.209.%' AND
ip_address NOT LIKE '63.245.21%' AND
ip_address NOT LIKE '63.245.220.%' AND
ip_address NOT LIKE '63.245.221.%' AND
ip_address NOT LIKE '63.245.222.%' AND
ip_address NOT LIKE '63.245.223.%' AND
-- Not sure, but grepped an hour of logs, nothing.
ip_address NOT LIKE '180.92.184.%' AND
-- CN adm
ip_address NOT LIKE '59.151.50%' AND
ip_address NOT IN (
-- Not sure
'72.26.221.66',
'72.26.221.67',
-- white hat
'209.10.217.226',
-- CN lbs
'223.202.6.11',
'223.202.6.12',
'223.202.6.13',
'223.202.6.14',
'223.202.6.15',
'223.202.6.16',
'223.202.6.17',
'223.202.6.18',
'223.202.6.19',
'223.202.6.20'
)
"""
def get_date_from_file(filepath, sep):
"""Get the date from the file, which should be the first col."""
with open(filepath) as f:
line = f.readline()
try:
return line.split(sep)[0]
except IndexError:
return None
def serialize_stats(model):
"""Return the stats from the model ready to write to a file."""
data = model_to_dict(model)
del data['id'] # No need for the model's ID at all (eg: UpdateCount).
return json.dumps(data)
def save_stats_to_file(model):
"""Save the given model to a file on the disc."""
model_name = model._meta.model_name
date = datetime.strptime(model.date, '%Y-%m-%d')
path = u'{addon_id}/{date.year}/{date.month:02}/'.format(
addon_id=model.addon_id, date=date)
name_tpl = u'{date.year}_{date.month:02}_{date.day:02}_{model_name}.json'
name = name_tpl.format(date=date, model_name=model_name)
filepath = os.path.join(path, name)
storage.save(filepath, ContentFile(serialize_stats(model)))
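# Example of the key produced for an UpdateCount row with addon_id=3615 and
# date='2016-01-25' (illustrative values, following the templates above):
#   3615/2016/01/2016_01_25_updatecount.json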
| harikishen/addons-server | src/olympia/stats/management/commands/__init__.py | Python | bsd-3-clause | 2,174 |
# -*- coding: utf-8 -*-
# Licensed to Anthony Shaw (anthonyshaw@apache.org) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import requests
from pluralsight.exceptions import PluralsightApiException
from .invites import InvitesClient
from .users import UsersClient
from .teams import TeamsClient
BASE_URL = "https://app.pluralsight.com/plans/api/license/v1/{0}"
class LicensingAPIClient(object):
"""
The licensing API client
"""
def __init__(self, plan, api_key):
"""
Instantiate a new reports API client
:param plan: The plan name
:type plan: ``str``
:param api_key: The API token (from the pluralsight team)
:type api_key: ``str``
"""
self._plan = plan
self._api_key = api_key
self.base_url = BASE_URL.format(plan)
self.session = requests.Session()
self.session.headers.update(
{"Accept": "application/json", "Authorization": "Token {0}".format(api_key)}
)
self.invites = InvitesClient(self)
self.users = UsersClient(self)
self.teams = TeamsClient(self)
def get(self, uri, params=None):
try:
result = self.session.get(
"{0}/{1}".format(self.base_url, uri), params=params
)
result.raise_for_status()
return result.json()
except requests.HTTPError as e:
raise PluralsightApiException(e.response.text, uri)
def post(self, uri, data=None):
try:
result = self.session.post("{0}/{1}".format(self.base_url, uri), json=data)
result.raise_for_status()
return result.json()
except requests.HTTPError as e:
raise PluralsightApiException(e.response.text)
def put(self, uri, data=None):
try:
result = self.session.put("{0}/{1}".format(self.base_url, uri), json=data)
result.raise_for_status()
except requests.HTTPError as e:
raise PluralsightApiException(e.response.text)
def delete(self, uri):
try:
result = self.session.delete("{0}/{1}".format(self.base_url, uri))
result.raise_for_status()
except requests.HTTPError as e:
raise PluralsightApiException(e.response.text)
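# A minimal usage sketch; the plan name and token are placeholders, and
# "invites" is assumed here to be a valid licensing endpoint for illustration:
if __name__ == "__main__":
    client = LicensingAPIClient("my-plan", "API_TOKEN")
    # Sub-clients are exposed as client.invites, client.users and client.teams;
    # client.get() issues a raw GET against the plan's licensing API.
    print(client.get("invites"))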
| tonybaloney/pluralsight | pluralsight/licensing/client.py | Python | apache-2.0 | 2,994 |
from django.contrib import admin
from djforms.giving.models import PaverContact, DonationContact
from djforms.processors.models import Order
class OrderInline(admin.TabularInline):
model = DonationContact.order.through
extra = 3
class DonationContactAdmin(admin.ModelAdmin):
model = DonationContact
exclude = ('second_name','previous_name','salutation')
raw_id_fields = ('order',)
list_max_show_all = 500
list_per_page = 500
#list_max_show_all = 2000
#list_per_page = 2000
ordering = [
'-created_at','last_name','city','state','postal_code',
'anonymous'
]
search_fields = ('last_name','phone','city','state','postal_code')
#inlines = [OrderInline,]
list_display = (
'last_name','first_name','order_cc_name','created_at','email','phone',
'address1','address2','city','state','postal_code','class_of','relation',
'spouse','spouse_class','honouring','matching_company','order_promo',
'order_cycle','order_payments','order_start_date','order_transid',
'order_status','order_total','order_comments','opt_in','anonymous'
)
def order_cc_name(self, obj):
try:
name = obj.order.all()[0].cc_name
        except Exception:
name = None
return name
order_cc_name.short_description = 'CC Name'
def order_promo(self, obj):
try:
promo = obj.order.all()[0].promotion
        except Exception:
promo = None
return promo
order_promo.short_description = 'Campaign'
def order_status(self, obj):
try:
stat = obj.order.all()[0].status
        except Exception:
stat = None
return stat
order_status.short_description = 'Transaction status'
def order_transid(self, obj):
try:
tid = obj.order.all()[0].transid
        except Exception:
tid = None
return tid
order_transid.short_description = 'Transaction ID'
def order_total(self, obj):
try:
tid = obj.order.all()[0].total
        except Exception:
tid = None
return tid
order_total.short_description = 'Donation'
def order_cycle(self, obj):
try:
cycle = obj.order.all()[0].cycle
        except Exception:
cycle = None
return cycle
order_cycle.short_description = 'Interval'
def order_payments(self, obj):
try:
payments = obj.order.all()[0].payments
        except Exception:
payments = None
return payments
order_payments.short_description = 'Duration'
def order_start_date(self, obj):
try:
sdate = obj.order.all()[0].start_date
        except Exception:
sdate = None
return sdate
order_start_date.short_description = 'Start Date'
def order_comments(self, obj):
try:
com = obj.order.all()[0].comments
        except Exception:
com = None
return com
order_comments.short_description = 'Designation'
def save_model(self, request, obj, form, change):
if change:
obj.updated_by = request.user
obj.save()
class PaverContactAdmin(DonationContactAdmin):
exclude = ('second_name',)
ordering = (
'-created_at','last_name','city','state','postal_code',
)
list_display = (
'last_name','first_name','order_cc_name','created_at','email','phone',
'address1','address2','city','state','postal_code',
'order_promo', 'order_cycle','order_payments','order_start_date',
'order_transid','order_status','order_total','order_comments'
)
pass
admin.site.register(PaverContact, PaverContactAdmin)
admin.site.register(DonationContact, DonationContactAdmin)
| carthagecollege/django-djforms | djforms/giving/admin.py | Python | unlicense | 3,871 |
#!/usr/bin/python3
def min_max_avg(values):
    """Return the minimum, maximum and average of a non-empty sequence."""
    smallest = values[0]
    largest = values[0]
    total = values[0]
    for x in values[1:]:
        if x < smallest:
            smallest = x
        if x > largest:
            largest = x
        total += x
    return smallest, largest, total / len(values)
print(min_max_avg(range(0, 100000)))
| nonZero/demos-python | src/exercises/basic/min_max_average/solution3.py | Python | gpl-3.0 | 294 |
"""Support for sensors through the SmartThings cloud API."""
from collections import namedtuple
from typing import Optional, Sequence
from pysmartthings import Attribute, Capability
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
ENERGY_KILO_WATT_HOUR,
MASS_KILOGRAMS,
POWER_WATT,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
Map = namedtuple("map", "attribute name default_unit device_class")
CAPABILITY_TO_SENSORS = {
Capability.activity_lighting_mode: [
Map(Attribute.lighting_mode, "Activity Lighting Mode", None, None)
],
Capability.air_conditioner_mode: [
Map(Attribute.air_conditioner_mode, "Air Conditioner Mode", None, None)
],
Capability.air_quality_sensor: [
Map(Attribute.air_quality, "Air Quality", "CAQI", None)
],
Capability.alarm: [Map(Attribute.alarm, "Alarm", None, None)],
Capability.audio_volume: [Map(Attribute.volume, "Volume", "%", None)],
Capability.battery: [Map(Attribute.battery, "Battery", "%", DEVICE_CLASS_BATTERY)],
Capability.body_mass_index_measurement: [
Map(Attribute.bmi_measurement, "Body Mass Index", "kg/m^2", None)
],
Capability.body_weight_measurement: [
Map(Attribute.body_weight_measurement, "Body Weight", MASS_KILOGRAMS, None)
],
Capability.carbon_dioxide_measurement: [
Map(Attribute.carbon_dioxide, "Carbon Dioxide Measurement", "ppm", None)
],
Capability.carbon_monoxide_detector: [
Map(Attribute.carbon_monoxide, "Carbon Monoxide Detector", None, None)
],
Capability.carbon_monoxide_measurement: [
Map(Attribute.carbon_monoxide_level, "Carbon Monoxide Measurement", "ppm", None)
],
Capability.dishwasher_operating_state: [
Map(Attribute.machine_state, "Dishwasher Machine State", None, None),
Map(Attribute.dishwasher_job_state, "Dishwasher Job State", None, None),
Map(
Attribute.completion_time,
"Dishwasher Completion Time",
None,
DEVICE_CLASS_TIMESTAMP,
),
],
Capability.dryer_mode: [Map(Attribute.dryer_mode, "Dryer Mode", None, None)],
Capability.dryer_operating_state: [
Map(Attribute.machine_state, "Dryer Machine State", None, None),
Map(Attribute.dryer_job_state, "Dryer Job State", None, None),
Map(
Attribute.completion_time,
"Dryer Completion Time",
None,
DEVICE_CLASS_TIMESTAMP,
),
],
Capability.dust_sensor: [
Map(Attribute.fine_dust_level, "Fine Dust Level", None, None),
Map(Attribute.dust_level, "Dust Level", None, None),
],
Capability.energy_meter: [
Map(Attribute.energy, "Energy Meter", ENERGY_KILO_WATT_HOUR, None)
],
Capability.equivalent_carbon_dioxide_measurement: [
Map(
Attribute.equivalent_carbon_dioxide_measurement,
"Equivalent Carbon Dioxide Measurement",
"ppm",
None,
)
],
Capability.formaldehyde_measurement: [
Map(Attribute.formaldehyde_level, "Formaldehyde Measurement", "ppm", None)
],
Capability.illuminance_measurement: [
Map(Attribute.illuminance, "Illuminance", "lux", DEVICE_CLASS_ILLUMINANCE)
],
Capability.infrared_level: [
Map(Attribute.infrared_level, "Infrared Level", "%", None)
],
Capability.media_input_source: [
Map(Attribute.input_source, "Media Input Source", None, None)
],
Capability.media_playback_repeat: [
Map(Attribute.playback_repeat_mode, "Media Playback Repeat", None, None)
],
Capability.media_playback_shuffle: [
Map(Attribute.playback_shuffle, "Media Playback Shuffle", None, None)
],
Capability.media_playback: [
Map(Attribute.playback_status, "Media Playback Status", None, None)
],
Capability.odor_sensor: [Map(Attribute.odor_level, "Odor Sensor", None, None)],
Capability.oven_mode: [Map(Attribute.oven_mode, "Oven Mode", None, None)],
Capability.oven_operating_state: [
Map(Attribute.machine_state, "Oven Machine State", None, None),
Map(Attribute.oven_job_state, "Oven Job State", None, None),
Map(Attribute.completion_time, "Oven Completion Time", None, None),
],
Capability.oven_setpoint: [
Map(Attribute.oven_setpoint, "Oven Set Point", None, None)
],
Capability.power_meter: [Map(Attribute.power, "Power Meter", POWER_WATT, None)],
Capability.power_source: [Map(Attribute.power_source, "Power Source", None, None)],
Capability.refrigeration_setpoint: [
Map(
Attribute.refrigeration_setpoint,
"Refrigeration Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.relative_humidity_measurement: [
Map(
Attribute.humidity,
"Relative Humidity Measurement",
"%",
DEVICE_CLASS_HUMIDITY,
)
],
Capability.robot_cleaner_cleaning_mode: [
Map(
Attribute.robot_cleaner_cleaning_mode,
"Robot Cleaner Cleaning Mode",
None,
None,
)
],
Capability.robot_cleaner_movement: [
Map(Attribute.robot_cleaner_movement, "Robot Cleaner Movement", None, None)
],
Capability.robot_cleaner_turbo_mode: [
Map(Attribute.robot_cleaner_turbo_mode, "Robot Cleaner Turbo Mode", None, None)
],
Capability.signal_strength: [
Map(Attribute.lqi, "LQI Signal Strength", None, None),
Map(Attribute.rssi, "RSSI Signal Strength", None, None),
],
Capability.smoke_detector: [Map(Attribute.smoke, "Smoke Detector", None, None)],
Capability.temperature_measurement: [
Map(
Attribute.temperature,
"Temperature Measurement",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.thermostat_cooling_setpoint: [
Map(
Attribute.cooling_setpoint,
"Thermostat Cooling Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.thermostat_fan_mode: [
Map(Attribute.thermostat_fan_mode, "Thermostat Fan Mode", None, None)
],
Capability.thermostat_heating_setpoint: [
Map(
Attribute.heating_setpoint,
"Thermostat Heating Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.thermostat_mode: [
Map(Attribute.thermostat_mode, "Thermostat Mode", None, None)
],
Capability.thermostat_operating_state: [
Map(
Attribute.thermostat_operating_state,
"Thermostat Operating State",
None,
None,
)
],
Capability.thermostat_setpoint: [
Map(
Attribute.thermostat_setpoint,
"Thermostat Setpoint",
None,
DEVICE_CLASS_TEMPERATURE,
)
],
Capability.three_axis: [],
Capability.tv_channel: [Map(Attribute.tv_channel, "Tv Channel", None, None)],
Capability.tvoc_measurement: [
Map(Attribute.tvoc_level, "Tvoc Measurement", "ppm", None)
],
Capability.ultraviolet_index: [
Map(Attribute.ultraviolet_index, "Ultraviolet Index", None, None)
],
Capability.voltage_measurement: [
Map(Attribute.voltage, "Voltage Measurement", "V", None)
],
Capability.washer_mode: [Map(Attribute.washer_mode, "Washer Mode", None, None)],
Capability.washer_operating_state: [
Map(Attribute.machine_state, "Washer Machine State", None, None),
Map(Attribute.washer_job_state, "Washer Job State", None, None),
Map(
Attribute.completion_time,
"Washer Completion Time",
None,
DEVICE_CLASS_TIMESTAMP,
),
],
}
UNITS = {"C": TEMP_CELSIUS, "F": TEMP_FAHRENHEIT}
THREE_AXIS_NAMES = ["X Coordinate", "Y Coordinate", "Z Coordinate"]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Platform uses config entry setup."""
pass
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add binary sensors for a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]
sensors = []
for device in broker.devices.values():
for capability in broker.get_assigned(device.device_id, "sensor"):
if capability == Capability.three_axis:
sensors.extend(
[
SmartThingsThreeAxisSensor(device, index)
for index in range(len(THREE_AXIS_NAMES))
]
)
else:
maps = CAPABILITY_TO_SENSORS[capability]
sensors.extend(
[
SmartThingsSensor(
device, m.attribute, m.name, m.default_unit, m.device_class
)
for m in maps
]
)
async_add_entities(sensors)
def get_capabilities(capabilities: Sequence[str]) -> Optional[Sequence[str]]:
"""Return all capabilities supported if minimum required are present."""
return [
capability for capability in CAPABILITY_TO_SENSORS if capability in capabilities
]
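# Example sketch: only capabilities known to this platform survive the filter.
# >>> get_capabilities([Capability.battery, 'some_unknown_capability'])
# [Capability.battery]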
class SmartThingsSensor(SmartThingsEntity):
"""Define a SmartThings Sensor."""
def __init__(
self, device, attribute: str, name: str, default_unit: str, device_class: str
):
"""Init the class."""
super().__init__(device)
self._attribute = attribute
self._name = name
self._device_class = device_class
self._default_unit = default_unit
@property
def name(self) -> str:
"""Return the name of the binary sensor."""
return f"{self._device.label} {self._name}"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self._device.device_id}.{self._attribute}"
@property
def state(self):
"""Return the state of the sensor."""
return self._device.status.attributes[self._attribute].value
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
unit = self._device.status.attributes[self._attribute].unit
return UNITS.get(unit, unit) if unit else self._default_unit
class SmartThingsThreeAxisSensor(SmartThingsEntity):
"""Define a SmartThings Three Axis Sensor."""
def __init__(self, device, index):
"""Init the class."""
super().__init__(device)
self._index = index
@property
def name(self) -> str:
"""Return the name of the binary sensor."""
return "{} {}".format(self._device.label, THREE_AXIS_NAMES[self._index])
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return "{}.{}".format(self._device.device_id, THREE_AXIS_NAMES[self._index])
@property
def state(self):
"""Return the state of the sensor."""
three_axis = self._device.status.attributes[Attribute.three_axis].value
try:
return three_axis[self._index]
except (TypeError, IndexError):
return None
| leppa/home-assistant | homeassistant/components/smartthings/sensor.py | Python | apache-2.0 | 11,668 |
from . import config_params
from . import thermometer
from . import util
import subprocess
import shlex
import os
import collections
import logging
logger = logging.getLogger(__name__)
def _iterate_command_output(command):
process = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL, stdin=subprocess.DEVNULL,
universal_newlines=True)
for line in process.stdout:
yield line
if process.wait():
raise RuntimeError("Command {} failed with return code {}".format(_list_to_shell(command),
process.returncode))
def _list_to_shell(l):
return " ".join(shlex.quote(x) for x in l)
class Harddrive(thermometer.Thermometer, config_params.Configurable):
_params = [
("path", None, "Device file of the disk. For example /dev/sda"),
("stat_path", "", "Path for reading activity statistics (/sys/block/<path basename>/stat). "
"If empty (the default), gets automatically assigned."),
("name", "", "Optional name that wil appear in status output if present."),
("spindown_time", 0, "After how long inactivity should the disk spin down (seconds). "
"This value will be rounded to the nearest update interval, "
"if zero, the drive will not be spun down by this sctipt."),
("measure_in_idle", False, "Selects whether to keep measuring temperature even when the drive is idle."),
]
def __init__(self, parent, params):
self.process_params(params)
if not len(self.stat_path):
self.stat_path = "/sys/block/{}/stat".format(os.path.basename(self.path))
self._previous_stat = None
self._spindown_timeout = util.TimeoutHelper(self.spindown_time)
self._cached_temperature = None
self._cached_spinning = None
self._cached_iops = None
def get_temperature(self):
command = ["smartctl", "-A", self.path]
        for line in _iterate_command_output(command):
split = line.split()
if len(split) < 10:
continue
try:
id_number = int(split[0])
except ValueError:
continue
            if id_number == 194:  # SMART attribute "Temperature_Celsius"
                return int(split[9])
            elif id_number == 190:  # SMART attribute "Airflow_Temperature_Cel"
                return int(split[9])
raise RuntimeError("Didn't find temperature in output of {}".format(_list_to_shell(command)))
def spindown(self):
logger.info("Spinning down hard drive %s", self.name)
subprocess.check_call(["hdparm", "-y", self.path],
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
def is_spinning(self):
command = ["hdparm", "-C", self.path]
        for line in _iterate_command_output(command):
split = line.split(":")
if len(split) >= 2 and split[0].strip() == "drive state is":
state = split[1].strip()
if state == "unknown":
raise RuntimeError("Hdparm reported unknown state for " + self.path)
elif state == "active/idle":
return True
else:
return False
raise RuntimeError("Didn't find drive state in output of {}".format(_list_to_shell(command)))
def _get_stat(self):
with open(self.stat_path, "r") as fp:
return tuple(map(int, fp.read().split()))
def _get_io(self):
stat = self._get_stat()
had_io = stat != self._previous_stat
if self._previous_stat is None:
ops = 0
else:
            # fields 0 and 4 of /sys/block/<dev>/stat count completed reads and writes
            ops = stat[0] - self._previous_stat[0] + stat[4] - self._previous_stat[4]
self._previous_stat = stat
return had_io, ops
def get_cached_temperature(self):
return self._cached_temperature
def get_cached_activity(self):
return (int(self._cached_spinning), self._cached_iops)
def _get_temp_safe(self):
""" Return temperature, is_spinning tuple."""
is_spinning = self.is_spinning()
if is_spinning or self.measure_in_idle:
temperature = self.get_temperature()
else:
temperature = None
return temperature, is_spinning
def init(self):
temperature, is_spinning = self._get_temp_safe()
self._previous_stat = self._get_stat()
self._cached_temperature = temperature
self._cached_spinning = is_spinning
self._cached_iops = 0
def update(self, dt):
temperature, is_spinning = self._get_temp_safe()
had_io, ops = self._get_io()
if is_spinning and self.spindown_time > 0:
if had_io:
self._spindown_timeout.reset()
elif self._spindown_timeout(dt):
self.spindown()
self._cached_temperature = temperature
self._cached_spinning = is_spinning
self._cached_iops = ops / dt
logger.debug("Harddrive {} {}°C (target {}°C), {:.1g} iops{}".format(self.name,
self._cached_temperature,
self.target_temperature,
self._cached_iops,
", spinning" if self._cached_spinning else ""))
return {"type": self.__class__.__name__,
"temperature": self._cached_temperature,
"target_temperature": self.target_temperature,
"iops": self._cached_iops,
"spinning": self._cached_spinning}
| bluecube/pysystemfan | pysystemfan/harddrive.py | Python | mit | 5,952 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'c+#8x$+zfq00i(z@6_81ht9i-0dcp53iyl_bp$mr^f22k600i*'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'register',
'inventory',
'corsheaders',
)
MIDDLEWARE_CLASSES = (
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'orthosie.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'orthosie.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'orthosie/orthosie.db'),
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
STATICFILES_DIRS = (
os.path.join(os.path.dirname(__file__), '../static').replace('\\', '/'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TAX = .075  # sales tax rate (7.5%)
PRINTER = '/dev/null'
RECEIPT_HEADER = ['Header 1', 'Header 2']
RECEIPT_FOOTER = ['Footer 1', 'Footer 2']
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
CORS_ORIGIN_WHITELIST = (
    'localhost:3000',  # trailing comma makes this a one-element tuple
)
| kkoci/orthosie | orthosie/settings.py | Python | gpl-3.0 | 2,988 |
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unittest
import urllib
import filters
import jinja2
def linkify(inp, commit):
return str(filters.do_linkify_stacktrace(
inp, commit, 'kubernetes/kubernetes'))
class HelperTest(unittest.TestCase):
def test_timestamp(self):
self.assertEqual(
'<span class="timestamp" data-epoch="1461100940">'
'2016-04-19 21:22</span>',
filters.do_timestamp(1461100940))
def test_duration(self):
for duration, expected in {
3.56: '3.56s',
13.6: '13s',
78.2: '1m18s',
60 * 62 + 3: '1h2m',
}.iteritems():
self.assertEqual(expected, filters.do_duration(duration))
def test_linkify_safe(self):
self.assertEqual('<a>',
linkify('<a>', '3'))
def test_linkify(self):
linked = linkify(
"/go/src/k8s.io/kubernetes/test/example.go:123", 'VERSION')
self.assertIn('<a href="https://github.com/kubernetes/kubernetes/blob/'
'VERSION/test/example.go#L123">', linked)
def test_linkify_trailing(self):
linked = linkify(
" /go/src/k8s.io/kubernetes/test/example.go:123 +0x1ad", 'VERSION')
self.assertIn('github.com', linked)
def test_linkify_unicode(self):
# Check that Unicode characters pass through cleanly.
linked = filters.do_linkify_stacktrace(u'\u883c', 'VERSION', '')
self.assertEqual(linked, u'\u883c')
def test_maybe_linkify(self):
for inp, expected in [
(3, 3),
({"a": "b"}, {"a": "b"}),
("", ""),
("whatever", "whatever"),
("http://example.com",
jinja2.Markup('<a href="http://example.com">http://example.com</a>')),
("http://&",
jinja2.Markup('<a href="http://&">http://&</a>')),
]:
self.assertEqual(filters.do_maybe_linkify(inp), expected)
def test_slugify(self):
self.assertEqual('k8s-test-foo', filters.do_slugify('[k8s] Test Foo'))
def test_testcmd(self):
for name, expected in (
('k8s.io/kubernetes/pkg/api/errors TestErrorNew',
'go test -v k8s.io/kubernetes/pkg/api/errors -run TestErrorNew$'),
('[k8s.io] Proxy [k8s.io] works',
"go run hack/e2e.go -v --test --test_args='--ginkgo.focus="
"Proxy\\s\\[k8s\\.io\\]\\sworks$'"),
('//pkg/foo/bar:go_default_test',
'bazel test //pkg/foo/bar:go_default_test'),
('verify typecheck', 'make verify WHAT=typecheck')):
print 'test name:', name
self.assertEqual(filters.do_testcmd(name), expected)
def test_classify_size(self):
self.assertEqual(filters.do_classify_size(
{'labels': {'size/FOO': 1}}), 'FOO')
self.assertEqual(filters.do_classify_size(
{'labels': {}, 'additions': 70, 'deletions': 20}), 'M')
def test_render_status_basic(self):
payload = {'status': {'ci': ['pending', '', '']}}
self.assertEqual(str(filters.do_render_status(payload, '')),
'<span class="text-pending octicon octicon-primitive-dot" title="pending tests">'
'</span>Pending')
def test_render_status_complex(self):
def expect(payload, expected, user=''):
# strip the excess html from the result down to the text class,
# the opticon class, and the rendered text
result = str(filters.do_render_status(payload, user))
result = re.sub(r'<span class="text-|octicon octicon-| title="[^"]*"|</span>',
'', result)
result = result.replace('">', ' ')
self.assertEqual(result, expected)
statuses = lambda *xs: {str(n): [x, '', ''] for n, x in enumerate(xs)}
expect({'status': {}}, 'Pending')
expect({'status': statuses('pending')}, 'pending primitive-dot Pending')
expect({'status': statuses('failure')}, 'failure x Pending')
expect({'status': statuses('success')}, 'success check Pending')
expect({'status': statuses('pending', 'success')}, 'pending primitive-dot Pending')
expect({'status': statuses('failure', 'pending', 'success')}, 'failure x Pending')
expect({'status': {'ci': ['success', '', ''],
'Submit Queue': ['pending', '', 'does not have LGTM']}}, 'success check Pending')
expect({'status': {'ci': ['success', '', ''],
'tide': ['pending', '', '']}}, 'success check Pending')
expect({'status': {'ci': ['success', '', ''],
'code-review/reviewable': ['pending', '', '10 files left']}}, 'success check Pending')
expect({'status': {'ci': ['success', '', '']}, 'labels': ['lgtm']}, 'success check LGTM')
expect({'attn': {'foo': 'Needs Rebase'}}, 'Needs Rebase', user='foo')
expect({'attn': {'foo': 'Needs Rebase'}, 'labels': {'lgtm'}}, 'LGTM', user='foo')
expect({'author': 'u', 'labels': ['lgtm']}, 'LGTM', 'u')
expect({'author': 'b', 'labels': ['lgtm'], 'approvers': ['u'],
'attn': {'u': 'needs approval'}},
'Needs Approval', 'u')
def test_tg_url(self):
self.assertEqual(
filters.do_tg_url('a#b'),
'https://testgrid.k8s.io/a#b')
self.assertEqual(
filters.do_tg_url('a#b', '[low] test'),
'https://testgrid.k8s.io/a#b&include-filter-by-regex=%s' %
urllib.quote('^Overall$|\\[low\\]\\ test'))
def test_gcs_browse_url(self):
self.assertEqual(
filters.do_gcs_browse_url('/k8s/foo'),
'http://gcsweb.k8s.io/gcs/k8s/foo/')
self.assertEqual(
filters.do_gcs_browse_url('/k8s/bar/'),
'http://gcsweb.k8s.io/gcs/k8s/bar/')
def test_pod_name(self):
self.assertEqual(filters.do_parse_pod_name("start pod 'client-c6671' to"), 'client-c6671')
self.assertEqual(filters.do_parse_pod_name('tripod "blah"'), '')
# exercise pathological case
self.assertEqual(filters.do_parse_pod_name('abcd pode ' * 10000), '')
if __name__ == '__main__':
unittest.main()
| jlowdermilk/test-infra | gubernator/filters_test.py | Python | apache-2.0 | 6,840 |
import numpy as np
import pandas as pd
from darkcore import *
df = pd.DataFrame(np.random.randn(3, 3))
class MyApp(Darkcore):
def get_data(self, params):
return df
def get_chart(self, params):
ax = self.get_data(params).plot(figsize=(4, 3))
return ax
if __name__ == "__main__":
app = MyApp('Sample App', use_CDN=True,
                contents=[TabPanel(name='tabgroup',
                          contents=[Tab(id='tab1', name='Tab1', contents='get_data'),
                                    Tab(id='tab2', name='Tab2', contents='get_chart')])])
app.run(port=5024)
| sinhrks/darkcore | examples/simpleapp.py | Python | bsd-3-clause | 603 |
'''Test cases for qInstallMsgHandler'''
import unittest
import sys
from PySide2.QtCore import *
param = []
def handler(msgt, msg):
global param
param = [msgt, msg.strip()]
def handleruseless(msgt, msg):
pass
class QInstallMsgHandlerTest(unittest.TestCase):
def tearDown(self):
# Ensure that next test will have a clear environment
qInstallMsgHandler(None)
def testNone(self):
ret = qInstallMsgHandler(None)
self.assertEqual(ret, None)
def testRet(self):
ret = qInstallMsgHandler(None)
self.assertEqual(ret, None)
refcount = sys.getrefcount(handleruseless)
retNone = qInstallMsgHandler(handleruseless)
self.assertEqual(sys.getrefcount(handleruseless), refcount + 1)
rethandler = qInstallMsgHandler(None)
self.assertEqual(rethandler, handleruseless)
del rethandler
self.assertEqual(sys.getrefcount(handleruseless), refcount)
def testHandler(self):
rethandler = qInstallMsgHandler(handler)
qDebug("Test Debug")
self.assertEqual(param[0], QtDebugMsg)
self.assertEqual(param[1], "Test Debug")
qWarning("Test Warning")
self.assertEqual(param[0], QtWarningMsg)
self.assertEqual(param[1], "Test Warning")
qCritical("Test Critical")
self.assertEqual(param[0], QtCriticalMsg)
self.assertEqual(param[1], "Test Critical")
if __name__ == '__main__':
unittest.main()
| BadSingleton/pyside2 | tests/QtCore/qinstallmsghandler_test.py | Python | lgpl-2.1 | 1,482 |
from .crp import CRP
from .bah import BAH
from .anticor import Anticor
from .corn import CORN
from .bcrp import BCRP
from .cwmr import CWMR
from .olmar import OLMAR
from .pamr import PAMR
from .rmr import RMR
from .up import UP
from .wmamr import WMAMR
from .ons import ONS
from .kelly import Kelly
from .eg import EG
from .bnn import BNN
from .dynamic_crp import DynamicCRP
from .best_so_far import BestSoFar
from .best_markowitz import BestMarkowitz
from .mpt import MPT
| greenlin/universal-portfolios | universal/algos/__init__.py | Python | mit | 492 |
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from cryptography import utils
from cryptography.exceptions import (
InvalidTag, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.primitives import ciphers, constant_time
from cryptography.hazmat.primitives.ciphers import modes
from cryptography.hazmat.primitives.ciphers.modes import (
CFB, CFB8, CTR, OFB
)
@utils.register_interface(ciphers.CipherContext)
class _CipherContext(object):
def __init__(self, backend, cipher, mode, operation):
self._backend = backend
self._cipher = cipher
self._mode = mode
self._operation = operation
# There is a bug in CommonCrypto where block ciphers do not raise
# kCCAlignmentError when finalizing if you supply non-block aligned
# data. To work around this we need to keep track of the block
# alignment ourselves, but only for alg+mode combos that require
# block alignment. OFB, CFB, and CTR make a block cipher algorithm
# into a stream cipher so we don't need to track them (and thus their
# block size is effectively 1 byte just like OpenSSL/CommonCrypto
# treat RC4 and other stream cipher block sizes).
# This bug has been filed as rdar://15589470
self._bytes_processed = 0
if (isinstance(cipher, ciphers.BlockCipherAlgorithm) and not
isinstance(mode, (OFB, CFB, CFB8, CTR))):
self._byte_block_size = cipher.block_size // 8
else:
self._byte_block_size = 1
registry = self._backend._cipher_registry
try:
cipher_enum, mode_enum = registry[type(cipher), type(mode)]
except KeyError:
raise UnsupportedAlgorithm(
"cipher {0} in {1} mode is not supported "
"by this backend.".format(
cipher.name, mode.name if mode else mode),
_Reasons.UNSUPPORTED_CIPHER
)
ctx = self._backend._ffi.new("CCCryptorRef *")
ctx = self._backend._ffi.gc(ctx, self._backend._release_cipher_ctx)
if isinstance(mode, modes.ModeWithInitializationVector):
iv_nonce = mode.initialization_vector
elif isinstance(mode, modes.ModeWithNonce):
iv_nonce = mode.nonce
else:
iv_nonce = self._backend._ffi.NULL
if isinstance(mode, CTR):
mode_option = self._backend._lib.kCCModeOptionCTR_BE
else:
mode_option = 0
res = self._backend._lib.CCCryptorCreateWithMode(
operation,
mode_enum, cipher_enum,
self._backend._lib.ccNoPadding, iv_nonce,
cipher.key, len(cipher.key),
self._backend._ffi.NULL, 0, 0, mode_option, ctx)
self._backend._check_cipher_response(res)
self._ctx = ctx
def update(self, data):
# Count bytes processed to handle block alignment.
self._bytes_processed += len(data)
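        # CCCryptorUpdate may also flush bytes buffered from an earlier call,
        # so the output buffer below is sized one block minus one byte larger
        # than the input.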
buf = self._backend._ffi.new(
"unsigned char[]", len(data) + self._byte_block_size - 1)
outlen = self._backend._ffi.new("size_t *")
res = self._backend._lib.CCCryptorUpdate(
self._ctx[0], data, len(data), buf,
len(data) + self._byte_block_size - 1, outlen)
self._backend._check_cipher_response(res)
return self._backend._ffi.buffer(buf)[:outlen[0]]
def finalize(self):
# Raise error if block alignment is wrong.
if self._bytes_processed % self._byte_block_size:
raise ValueError(
"The length of the provided data is not a multiple of "
"the block length."
)
buf = self._backend._ffi.new("unsigned char[]", self._byte_block_size)
outlen = self._backend._ffi.new("size_t *")
res = self._backend._lib.CCCryptorFinal(
self._ctx[0], buf, len(buf), outlen)
self._backend._check_cipher_response(res)
self._backend._release_cipher_ctx(self._ctx)
return self._backend._ffi.buffer(buf)[:outlen[0]]
@utils.register_interface(ciphers.AEADCipherContext)
@utils.register_interface(ciphers.AEADEncryptionContext)
class _GCMCipherContext(object):
def __init__(self, backend, cipher, mode, operation):
self._backend = backend
self._cipher = cipher
self._mode = mode
self._operation = operation
self._tag = None
registry = self._backend._cipher_registry
try:
cipher_enum, mode_enum = registry[type(cipher), type(mode)]
except KeyError:
raise UnsupportedAlgorithm(
"cipher {0} in {1} mode is not supported "
"by this backend.".format(
cipher.name, mode.name if mode else mode),
_Reasons.UNSUPPORTED_CIPHER
)
ctx = self._backend._ffi.new("CCCryptorRef *")
ctx = self._backend._ffi.gc(ctx, self._backend._release_cipher_ctx)
self._ctx = ctx
res = self._backend._lib.CCCryptorCreateWithMode(
operation,
mode_enum, cipher_enum,
self._backend._lib.ccNoPadding,
self._backend._ffi.NULL,
cipher.key, len(cipher.key),
self._backend._ffi.NULL, 0, 0, 0, self._ctx)
self._backend._check_cipher_response(res)
res = self._backend._lib.CCCryptorGCMAddIV(
self._ctx[0],
mode.initialization_vector,
len(mode.initialization_vector)
)
self._backend._check_cipher_response(res)
# CommonCrypto has a bug where calling update without at least one
# call to authenticate_additional_data will result in null byte output
# for ciphertext. The following empty byte string call prevents the
# issue, which is present in at least 10.8 and 10.9.
# Filed as rdar://18314544
self.authenticate_additional_data(b"")
def update(self, data):
buf = self._backend._ffi.new("unsigned char[]", len(data))
args = (self._ctx[0], data, len(data), buf)
if self._operation == self._backend._lib.kCCEncrypt:
res = self._backend._lib.CCCryptorGCMEncrypt(*args)
else:
res = self._backend._lib.CCCryptorGCMDecrypt(*args)
self._backend._check_cipher_response(res)
return self._backend._ffi.buffer(buf)[:]
def finalize(self):
# CommonCrypto has a yet another bug where you must make at least one
# call to update. If you pass just AAD and call finalize without a call
# to update you'll get null bytes for tag. The following update call
# prevents this issue, which is present in at least 10.8 and 10.9.
# Filed as rdar://18314580
self.update(b"")
tag_size = self._cipher.block_size // 8
tag_buf = self._backend._ffi.new("unsigned char[]", tag_size)
tag_len = self._backend._ffi.new("size_t *", tag_size)
res = self._backend._lib.CCCryptorGCMFinal(
self._ctx[0], tag_buf, tag_len
)
self._backend._check_cipher_response(res)
self._backend._release_cipher_ctx(self._ctx)
self._tag = self._backend._ffi.buffer(tag_buf)[:]
if (self._operation == self._backend._lib.kCCDecrypt and
not constant_time.bytes_eq(
self._tag[:len(self._mode.tag)], self._mode.tag
)):
raise InvalidTag
return b""
def authenticate_additional_data(self, data):
res = self._backend._lib.CCCryptorGCMAddAAD(
self._ctx[0], data, len(data)
)
self._backend._check_cipher_response(res)
tag = utils.read_only_property("_tag")
| hipnusleo/laserjet | resource/pypi/cryptography-1.7.1/src/cryptography/hazmat/backends/commoncrypto/ciphers.py | Python | apache-2.0 | 8,139 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import tvm
from tvm import tir
from tvm.script import tir as T
# fmt: off
@T.prim_func
def primfunc_global_allocates(placeholder_144: T.handle, placeholder_145: T.handle, placeholder_146: T.handle, T_cast_48: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "fused_nn_conv2d_add_cast_fixed_point_multiply_clip_cast_cast_13", "tir.noalias": True})
placeholder_147 = T.match_buffer(placeholder_144, [1, 14, 14, 512], dtype="int16", elem_offset=0, align=128, offset_factor=1)
placeholder_148 = T.match_buffer(placeholder_145, [3, 3, 512, 1], dtype="int16", elem_offset=0, align=128, offset_factor=1)
placeholder_149 = T.match_buffer(placeholder_146, [1, 1, 1, 512], dtype="int32", elem_offset=0, align=128, offset_factor=1)
T_cast_49 = T.match_buffer(T_cast_48, [1, 14, 14, 512], dtype="int16", elem_offset=0, align=128, offset_factor=1)
# body
PaddedInput_22 = T.allocate([131072], "int16", "global")
DepthwiseConv2d_9 = T.allocate([100352], "int32", "global")
for i1_29, i2_39, i3_40 in T.grid(16, 16, 512):
PaddedInput_22[(((i1_29*8192) + (i2_39*512)) + i3_40)] = T.if_then_else(((((1 <= i1_29) and (i1_29 < 15)) and (1 <= i2_39)) and (i2_39 < 15)), T.load("int16", placeholder_147.data, ((((i1_29*7168) + (i2_39*512)) + i3_40) - 7680)), T.int16(0), dtype="int16")
for i_9, j_9, c_9 in T.grid(14, 14, 512):
DepthwiseConv2d_9[(((i_9*7168) + (j_9*512)) + c_9)] = 0
for di_9, dj_9 in T.grid(3, 3):
DepthwiseConv2d_9[(((i_9*7168) + (j_9*512)) + c_9)] = (T.load("int32", DepthwiseConv2d_9, (((i_9*7168) + (j_9*512)) + c_9)) + (T.load("int16", PaddedInput_22, (((((i_9*8192) + (di_9*8192)) + (j_9*512)) + (dj_9*512)) + c_9)).astype("int32")*T.load("int16", placeholder_148.data, (((di_9*1536) + (dj_9*512)) + c_9)).astype("int32")))
for ax1_27, ax2_28, ax3_30 in T.grid(14, 14, 512):
DepthwiseConv2d_9[(((ax1_27*7168) + (ax2_28*512)) + ax3_30)] = (T.load("int32", DepthwiseConv2d_9, (((ax1_27*7168) + (ax2_28*512)) + ax3_30)) + T.load("int32", placeholder_149.data, ax3_30))
for i1_30, i2_40, i3_41 in T.grid(14, 14, 512):
DepthwiseConv2d_9[(((i1_30*7168) + (i2_40*512)) + i3_41)] = T.q_multiply_shift(T.load("int32", DepthwiseConv2d_9, (((i1_30*7168) + (i2_40*512)) + i3_41)), 1269068532, 31, -4, dtype="int32")
for i1_31, i2_41, i3_42 in T.grid(14, 14, 512):
DepthwiseConv2d_9[(((i1_31*7168) + (i2_41*512)) + i3_42)] = T.max(T.max(T.load("int32", DepthwiseConv2d_9, (((i1_31*7168) + (i2_41*512)) + i3_42)), 255), 0)
for ax1_28, ax2_29, ax3_31 in T.grid(14, 14, 512):
PaddedInput_22[(((ax1_28*7168) + (ax2_29*512)) + ax3_31)] = T.load("int32", DepthwiseConv2d_9, (((ax1_28*7168) + (ax2_29*512)) + ax3_31)).astype("uint8")
for ax1_29, ax2_30, ax3_32 in T.grid(14, 14, 512):
T_cast_49.data[(((ax1_29*7168) + (ax2_30*512)) + ax3_32)] = T.load("uint8", PaddedInput_22, (((ax1_29*7168) + (ax2_30*512)) + ax3_32)).astype("int16")
# fmt: on
# fmt: off
@T.prim_func
def primfunc_local_allocates(placeholder_162: T.handle, placeholder_163: T.handle, placeholder_164: T.handle, T_cast_76: T.handle) -> None:
# function attr dict
T.func_attr({"global_symbol": "fused_nn_conv2d_add_cast_fixed_point_multiply_clip_cast_cast_9", "tir.noalias": True})
placeholder_165 = T.match_buffer(placeholder_162, [1, 14, 14, 512], dtype="int16", elem_offset=0, align=128, offset_factor=1)
placeholder_166 = T.match_buffer(placeholder_163, [3, 3, 512, 1], dtype="int16", elem_offset=0, align=128, offset_factor=1)
placeholder_167 = T.match_buffer(placeholder_164, [1, 1, 1, 512], dtype="int32", elem_offset=0, align=128, offset_factor=1)
T_cast_77 = T.match_buffer(T_cast_76, [1, 14, 14, 512], dtype="int16", elem_offset=0, align=128, offset_factor=1)
# body
PaddedInput_25 = T.allocate([1, 16, 16, 512], "int16", "global")
for i1_35, i2_46, i3_47 in T.grid(16, 16, 512):
PaddedInput_25[(((i1_35*8192) + (i2_46*512)) + i3_47)] = T.if_then_else(((((1 <= i1_35) and (i1_35 < 15)) and (1 <= i2_46)) and (i2_46 < 15)), T.load("int16", placeholder_165.data, ((((i1_35*7168) + (i2_46*512)) + i3_47) - 7680)), T.int16(0), dtype="int16")
T_add_11 = T.allocate([1, 14, 14, 512], "int32", "global")
with T.allocate([1, 14, 14, 512], "int32", "global") as DepthwiseConv2d_11:
for i_11, j_11, c_11 in T.grid(14, 14, 512):
DepthwiseConv2d_11[(((i_11*7168) + (j_11*512)) + c_11)] = 0
for di_11, dj_11 in T.grid(3, 3):
DepthwiseConv2d_11[(((i_11*7168) + (j_11*512)) + c_11)] = (T.load("int32", DepthwiseConv2d_11, (((i_11*7168) + (j_11*512)) + c_11)) + (T.load("int16", PaddedInput_25, (((((i_11*8192) + (di_11*8192)) + (j_11*512)) + (dj_11*512)) + c_11)).astype("int32")*T.load("int16", placeholder_166.data, (((di_11*1536) + (dj_11*512)) + c_11)).astype("int32")))
for ax1_44, ax2_45, ax3_47 in T.grid(14, 14, 512):
T_add_11[(((ax1_44*7168) + (ax2_45*512)) + ax3_47)] = (T.load("int32", DepthwiseConv2d_11, (((ax1_44*7168) + (ax2_45*512)) + ax3_47)) + T.load("int32", placeholder_167.data, ax3_47))
compute_22 = T.allocate([1, 14, 14, 512], "int32", "global")
with T.allocate([1, 14, 14, 512], "int32", "global") as T_cast_78:
for ax1_45, ax2_46, ax3_48 in T.grid(14, 14, 512):
T_cast_78[(((ax1_45*7168) + (ax2_46*512)) + ax3_48)] = T.load("int32", T_add_11, (((ax1_45*7168) + (ax2_46*512)) + ax3_48))
for i1_36, i2_47, i3_48 in T.grid(14, 14, 512):
compute_22[(((i1_36*7168) + (i2_47*512)) + i3_48)] = T.q_multiply_shift(T.load("int32", T_cast_78, (((i1_36*7168) + (i2_47*512)) + i3_48)), 1948805937, 31, -5, dtype="int32")
T_cast_79 = T.allocate([1, 14, 14, 512], "uint8", "global")
with T.allocate([1, 14, 14, 512], "int32", "global") as compute_23:
for i1_37, i2_48, i3_49 in T.grid(14, 14, 512):
compute_23[(((i1_37*7168) + (i2_48*512)) + i3_49)] = T.max(T.max(T.load("int32", compute_22, (((i1_37*7168) + (i2_48*512)) + i3_49)), 255), 0)
for ax1_46, ax2_47, ax3_49 in T.grid(14, 14, 512):
T_cast_79[(((ax1_46*7168) + (ax2_47*512)) + ax3_49)] = T.load("int32", compute_23, (((ax1_46*7168) + (ax2_47*512)) + ax3_49)).astype("uint8")
for ax1_47, ax2_48, ax3_50 in T.grid(14, 14, 512):
T_cast_77.data[(((ax1_47*7168) + (ax2_48*512)) + ax3_50)] = T.load("uint8", T_cast_79, (((ax1_47*7168) + (ax2_48*512)) + ax3_50)).astype("int16")
# fmt: on
@pytest.mark.parametrize("alignment_and_size", [(1, 663552), (10, 663560)])
def test_global_allocates(alignment_and_size):
alignment = alignment_and_size[0]
size = alignment_and_size[1]
primfunc = primfunc_global_allocates
assert tvm.tir.analysis.calculate_workspace_bytes(primfunc, alignment) == size
@pytest.mark.parametrize("alignment_and_size", [(1, 1566720), (100, 1567100)])
def test_local_allocates(alignment_and_size):
alignment = alignment_and_size[0]
size = alignment_and_size[1]
primfunc = primfunc_local_allocates
assert tvm.tir.analysis.calculate_workspace_bytes(primfunc, alignment) == size
if __name__ == "__main__":
    # the tests are parametrized, so run them through pytest instead of
    # calling them directly
    pytest.main([__file__])
| Laurawly/tvm-1 | tests/python/unittest/test_tir_analysis_calculate_workspace.py | Python | apache-2.0 | 8,017 |
import logging
import os
from django.conf import settings
from django.core.management.base import BaseCommand
from django.template.loader import get_template
logger = logging.getLogger("zentral.server.base.management.commands.build_custom_error_pages")
class Command(BaseCommand):
help = 'Build custom error pages'
errors = (
(403, "Forbidden"),
(404, "Not Found"),
(500, "Internal Server Error"),
(502, "Bad Gateway"),
(503, "Service Unavailable"),
(504, "Gateway Timeout"),
)
def handle(self, *args, **options):
template = get_template("custom_error_page.html")
basedir = os.path.join(settings.STATIC_ROOT, "custom_error_pages")
os.makedirs(basedir, exist_ok=True)
for status_code, message in self.errors:
page_content = template.render({
"status_code": status_code,
"message": message,
})
with open(os.path.join(basedir, f"{status_code}.html"), "w") as f:
f.write(page_content)
| zentralopensource/zentral | server/base/management/commands/build_custom_error_pages.py | Python | apache-2.0 | 1,054 |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
__author__ = "Sergi Blanch-Torne"
__copyright__ = "Copyright 2015, CELLS / ALBA Synchrotron"
__license__ = "GPLv3+"
from ..ctliaux import defaultConfigurations
from ..ctlienums import doSave
import os
from taurus.external.qt import QtGui, Qt
from taurus.qt.qtgui.container import TaurusWidget
from taurus.qt.qtgui.util.ui import UILoadable
import traceback
@UILoadable(with_ui="ui")
class CompomentsWindow(TaurusWidget):
_attrSet = {'1': ['ATT2_u', 'GUN_Cathode_u', 'GUN_CDB_u', 'TU_u', 'A0_u',
'LLRF_u'],
'2': ['IP1_u', 'IP2_u', 'IP3_u', 'IP4_u', 'IP5_u', 'IP6_u',
'IP7_u', 'IP8_u', 'IP9_u', 'HVG1_u', 'HVG2_u', 'HVG3_u',
'HVG4_u', 'HVG5_u', 'IPC1_u', 'IPC2_u', 'IPC3_u',
'IPC4_u', 'IPC5_u'],
'3': ['BC1F_u', 'BC2F_u', 'GL_u', 'SL1F_u', 'SL2F_u',
'SL3F_u', 'SL4F_u', 'QT1F_u', 'QT2F_u', 'QT1H_u',
'QT2V_u'],
'k': ['ka_tube_u', 'ka_thyratron_u', 'ka_3GHz_RFampli_u',
'ka_DCps_thyratron_u', 'ka_HVps_u', 'ka_IP_controller',
'ka_fcoil1_u', 'ka_fcoil2_u', 'ka_fcoil3_u']}
def __init__(self, parent=None, name=None, designMode=False):
try:
self.__name = name.__name__
except:
self.__name = "CompomentsWindow"
try:
super(CompomentsWindow, self).__init__()
except Exception as e:
self.warning("[%s]__init__(): Parent exception!\n%s"
% (self.__name, e))
self.traceback()
try:
self.debug("[%s]__init__()" % (self.__name))
basePath = os.path.dirname(__file__)
if len(basePath) == 0:
basePath = '.'
self.loadUi(filename="compomentsWindow.ui",
path=basePath+"/ui")
self.showActiveAttrs()
self.setupConfigurationAttrs()
except Exception as e:
self.warning("[%s]__init__(): Widget exception! %s"
% (self.__name, e))
traceback.print_exc()
self.traceback()
# __init__
def showActiveAttrs(self):
self.ui.activeAttributes.setWithButtons(False)
modelLst = []
        plcs = sorted(self._attrSet.keys())
for plc in plcs:
if plc == 'k':
modelLst += self.buildActiveAttrList4Device(4,
self._attrSet[plc])
modelLst += self.buildActiveAttrList4Device(5,
self._attrSet[plc])
else:
modelLst += self.buildActiveAttrList4Device(int(plc),
self._attrSet[plc])
self.ui.activeAttributes.setModel(modelLst)
Qt.QObject.connect(self.ui.saveButton, Qt.SIGNAL("clicked(bool)"),
self._saveAction)
def buildActiveAttrList4Device(self, number, lst):
argout = []
for element in lst:
argout.append('li/ct/plc%d/%s_meaning' % (number, element))
return argout
def setupConfigurationAttrs(self):
self.ui.componentSelection.addItems(self.buildComboBoxList())
Qt.QObject.connect(self.ui.componentSelection,
Qt.SIGNAL("currentIndexChanged(QString)"),
self.configurationChange)
self.configurationChange(self.ui.componentSelection.currentText())
def buildComboBoxList(self):
lst = []
        for plc in sorted(self._attrSet.keys()):
            lst += self._attrSet[plc]
return lst
def configurationChange(self, newSelection):
modelsLst = []
for plc in self._attrSet.keys():
if newSelection in self._attrSet[plc]:
if plc == 'k':
modelsLst = self.buildConfigurationSet(4, newSelection)
modelsLst += self.buildConfigurationSet(5, newSelection)
else:
modelsLst = self.buildConfigurationSet(int(plc),
newSelection)
self.ui.componentAttributes.setModel(modelsLst)
def buildConfigurationSet(self, number, element):
return ['li/ct/plc%d/%s_active' % (number, element),
'li/ct/plc%d/%s_options' % (number, element),
'li/ct/plc%d/%s_numeric' % (number, element),
'li/ct/plc%d/%s_meaning' % (number, element)]
def _saveAction(self):
fileName = str(QtGui.QFileDialog.getSaveFileName(self, "Select File",
defaultConfigurations,
"CSV (*.csv)"))
doSave(fileName)
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
compomentsWindow = CompomentsWindow()
compomentsWindow.show()
sys.exit(app.exec_())
| srgblnch/LinacGUI | ctli/widgets/componentswindow.py | Python | gpl-3.0 | 6,014 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
# EVEX.128.66.0F38.W1 70 /r
# VPSHLDVW xmm1{k1}{z}, xmm2, xmm3/m128
myEVEX = EVEX('EVEX.128.66.0F38.W1')
Buffer = bytes.fromhex('{}700e'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x70)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpshldvw')
assert_equal(myDisasm.repr(), 'vpshldvw xmm25, xmm16, xmmword ptr [r14]')
# EVEX.256.66.0F38.W1 70 /r
# VPSHLDVW ymm1{k1}{z}, ymm2, ymm3/m256
myEVEX = EVEX('EVEX.256.66.0F38.W1')
Buffer = bytes.fromhex('{}700e'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x70)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpshldvw')
assert_equal(myDisasm.repr(), 'vpshldvw ymm25, ymm16, ymmword ptr [r14]')
# EVEX.512.66.0F38.W1 70 /r
# VPSHLDVW zmm1{k1}{z}, zmm2, zmm3/m512
myEVEX = EVEX('EVEX.512.66.0F38.W1')
Buffer = bytes.fromhex('{}700e'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x70)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpshldvw')
assert_equal(myDisasm.repr(), 'vpshldvw zmm25, zmm16, zmmword ptr [r14]')
| 0vercl0k/rp | src/third_party/beaengine/tests/0f3870.py | Python | mit | 2,248 |
import json
"""
Transforms CSV abalone data into JSON abalone data
"""
# Sex { M, F, I}
# Length real
# Diameter real
# Height real
# Whole weight real
# Shucked weight real
# Viscera weight real
# Shell weight real
# Class_Rings integer
def toAbaloneDictionary(
sex, length, diameter, height,
wholeWeight, shuckedWeight,
visceraWeight, shellWeight, rings):
return locals()
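# For example, a row in the UCI abalone CSV format such as
# "M,0.455,0.365,0.095,0.514,0.2245,0.101,0.15,15" (an illustrative record,
# not taken from this repository's data file) maps to:
# {'sex': 'M', 'length': 0.455, 'diameter': 0.365, 'height': 0.095,
#  'wholeWeight': 0.514, 'shuckedWeight': 0.2245, 'visceraWeight': 0.101,
#  'shellWeight': 0.15, 'rings': 15}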
lines = []
def if_number_then_number(e):
try:
return int(e)
except:
try:
return float(e)
except:
return e
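# e.g. if_number_then_number('15') -> 15, if_number_then_number('0.455')
# -> 0.455, and non-numeric strings such as 'M' are returned unchanged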
with open('./data/abalone.csv', 'r') as f:
while True:
line = f.readline()
if line == '':
break
columns = map(if_number_then_number, line.replace('\n', '').split(','))
        lines.append(toAbaloneDictionary(*columns))
with open('./data/abalone.json', 'w') as f:
json.dump(lines, f)
| AKST/Data-mining | scripts/jsonify.py | Python | mit | 900 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mergedialog.ui'
#
# Created: Fri May 30 09:15:18 2014
# by: PyQt4 UI code generator 4.9.6
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MergeDialog(object):
def setupUi(self, MergeDialog):
MergeDialog.setObjectName(_fromUtf8("MergeDialog"))
MergeDialog.resize(835, 778)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MergeDialog.sizePolicy().hasHeightForWidth())
MergeDialog.setSizePolicy(sizePolicy)
MergeDialog.setMinimumSize(QtCore.QSize(0, 0))
self.verticalLayout_4 = QtGui.QVBoxLayout(MergeDialog)
self.verticalLayout_4.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.Diffs = QtGui.QWidget(MergeDialog)
self.Diffs.setObjectName(_fromUtf8("Diffs"))
self.verticalLayout = QtGui.QVBoxLayout(self.Diffs)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.groupBox = QtGui.QGroupBox(self.Diffs)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.horizontalLayout = QtGui.QHBoxLayout(self.groupBox)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.branchToMergeLabel = QtGui.QLabel(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.branchToMergeLabel.sizePolicy().hasHeightForWidth())
self.branchToMergeLabel.setSizePolicy(sizePolicy)
self.branchToMergeLabel.setObjectName(_fromUtf8("branchToMergeLabel"))
self.horizontalLayout.addWidget(self.branchToMergeLabel)
self.branchToMergeBox = QtGui.QComboBox(self.groupBox)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.branchToMergeBox.sizePolicy().hasHeightForWidth())
self.branchToMergeBox.setSizePolicy(sizePolicy)
self.branchToMergeBox.setMaximumSize(QtCore.QSize(300, 1000))
self.branchToMergeBox.setObjectName(_fromUtf8("branchToMergeBox"))
self.horizontalLayout.addWidget(self.branchToMergeBox)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout.addItem(spacerItem)
self.viewChangesButton = QtGui.QPushButton(self.groupBox)
self.viewChangesButton.setObjectName(_fromUtf8("viewChangesButton"))
self.horizontalLayout.addWidget(self.viewChangesButton)
self.mergeButton = QtGui.QToolButton(self.groupBox)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon/merge-24.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.mergeButton.setIcon(icon)
self.mergeButton.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.mergeButton.setObjectName(_fromUtf8("mergeButton"))
self.horizontalLayout.addWidget(self.mergeButton)
self.verticalLayout.addWidget(self.groupBox)
self.splitterChanges = QtGui.QSplitter(self.Diffs)
self.splitterChanges.setOrientation(QtCore.Qt.Horizontal)
self.splitterChanges.setObjectName(_fromUtf8("splitterChanges"))
self.changesTree = QtGui.QTreeWidget(self.splitterChanges)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.changesTree.sizePolicy().hasHeightForWidth())
self.changesTree.setSizePolicy(sizePolicy)
self.changesTree.setMinimumSize(QtCore.QSize(200, 200))
self.changesTree.setMaximumSize(QtCore.QSize(232131, 321321))
self.changesTree.setAlternatingRowColors(True)
self.changesTree.setUniformRowHeights(True)
self.changesTree.setObjectName(_fromUtf8("changesTree"))
self.splitter = QtGui.QSplitter(self.splitterChanges)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName(_fromUtf8("splitter"))
self.attributesTable = QtGui.QTableWidget(self.splitter)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.attributesTable.sizePolicy().hasHeightForWidth())
self.attributesTable.setSizePolicy(sizePolicy)
self.attributesTable.setMinimumSize(QtCore.QSize(250, 100))
self.attributesTable.setMaximumSize(QtCore.QSize(1000, 150))
self.attributesTable.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
self.attributesTable.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.attributesTable.setProperty("showDropIndicator", False)
self.attributesTable.setDragDropOverwriteMode(False)
self.attributesTable.setAlternatingRowColors(False)
self.attributesTable.setSelectionMode(QtGui.QAbstractItemView.NoSelection)
self.attributesTable.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.attributesTable.setShowGrid(True)
self.attributesTable.setGridStyle(QtCore.Qt.DotLine)
self.attributesTable.setWordWrap(False)
self.attributesTable.setObjectName(_fromUtf8("attributesTable"))
self.attributesTable.setColumnCount(4)
self.attributesTable.setRowCount(0)
item = QtGui.QTableWidgetItem()
font = QtGui.QFont()
font.setBold(True)
font.setWeight(75)
font.setKerning(True)
item.setFont(font)
item.setBackground(QtGui.QColor(0, 0, 0, 0))
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setForeground(brush)
self.attributesTable.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter | QtCore.Qt.AlignCenter)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
item.setFont(font)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setForeground(brush)
self.attributesTable.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignTop)
font = QtGui.QFont()
font.setBold(False)
font.setWeight(50)
item.setFont(font)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setForeground(brush)
self.attributesTable.setHorizontalHeaderItem(2, item)
item = QtGui.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter | QtCore.Qt.AlignCenter)
font = QtGui.QFont()
font.setBold(False)
font.setItalic(False)
font.setWeight(50)
item.setFont(font)
brush = QtGui.QBrush(QtGui.QColor(0, 0, 0))
brush.setStyle(QtCore.Qt.SolidPattern)
item.setForeground(brush)
self.attributesTable.setHorizontalHeaderItem(3, item)
self.attributesTable.horizontalHeader().setCascadingSectionResizes(True)
self.attributesTable.horizontalHeader().setDefaultSectionSize(135)
self.attributesTable.horizontalHeader().setMinimumSectionSize(30)
self.attributesTable.horizontalHeader().setStretchLastSection(True)
self.attributesTable.verticalHeader().setVisible(False)
self.attributesTable.verticalHeader().setCascadingSectionResizes(False)
self.attributesTable.verticalHeader().setStretchLastSection(False)
self.widget = QtGui.QWidget(self.splitter)
self.widget.setObjectName(_fromUtf8("widget"))
self.verticalLayout_12 = QtGui.QVBoxLayout(self.widget)
self.verticalLayout_12.setMargin(0)
self.verticalLayout_12.setObjectName(_fromUtf8("verticalLayout_12"))
self.horizontalLayout_11 = QtGui.QHBoxLayout()
self.horizontalLayout_11.setObjectName(_fromUtf8("horizontalLayout_11"))
self.label_10 = QtGui.QLabel(self.widget)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.horizontalLayout_11.addWidget(self.label_10)
self.showOursCheck = QtGui.QCheckBox(self.widget)
self.showOursCheck.setChecked(True)
self.showOursCheck.setObjectName(_fromUtf8("showOursCheck"))
self.horizontalLayout_11.addWidget(self.showOursCheck)
self.showTheirsCheck = QtGui.QCheckBox(self.widget)
self.showTheirsCheck.setChecked(True)
self.showTheirsCheck.setObjectName(_fromUtf8("showTheirsCheck"))
self.horizontalLayout_11.addWidget(self.showTheirsCheck)
self.showOriginCheck = QtGui.QCheckBox(self.widget)
self.showOriginCheck.setChecked(True)
self.showOriginCheck.setObjectName(_fromUtf8("showOriginCheck"))
self.horizontalLayout_11.addWidget(self.showOriginCheck)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_11.addItem(spacerItem1)
self.label_4 = QtGui.QLabel(self.widget)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.horizontalLayout_11.addWidget(self.label_4)
self.baseMapCombo = QtGui.QComboBox(self.widget)
self.baseMapCombo.setObjectName(_fromUtf8("baseMapCombo"))
self.baseMapCombo.addItem(_fromUtf8(""))
self.baseMapCombo.addItem(_fromUtf8(""))
self.baseMapCombo.addItem(_fromUtf8(""))
self.horizontalLayout_11.addWidget(self.baseMapCombo)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_11.addItem(spacerItem2)
self.zoomButton = QtGui.QToolButton(self.widget)
self.zoomButton.setText(_fromUtf8(""))
icon1 = QtGui.QIcon()
icon1.addPixmap(QtGui.QPixmap(_fromUtf8(":/icon/zoom-extent.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.zoomButton.setIcon(icon1)
self.zoomButton.setObjectName(_fromUtf8("zoomButton"))
self.horizontalLayout_11.addWidget(self.zoomButton)
self.verticalLayout_12.addLayout(self.horizontalLayout_11)
self.canvasWidget = QtGui.QWidget(self.widget)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.canvasWidget.sizePolicy().hasHeightForWidth())
self.canvasWidget.setSizePolicy(sizePolicy)
self.canvasWidget.setMinimumSize(QtCore.QSize(0, 300))
self.canvasWidget.setObjectName(_fromUtf8("canvasWidget"))
self.verticalLayout_12.addWidget(self.canvasWidget)
self.horizontalLayout_14 = QtGui.QHBoxLayout()
self.horizontalLayout_14.setObjectName(_fromUtf8("horizontalLayout_14"))
self.verticalLayout_12.addLayout(self.horizontalLayout_14)
self.verticalLayout.addWidget(self.splitterChanges)
self.verticalLayout_4.addWidget(self.Diffs)
self.buttonBox = QtGui.QDialogButtonBox(MergeDialog)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Close)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayout_4.addWidget(self.buttonBox)
self.retranslateUi(MergeDialog)
QtCore.QMetaObject.connectSlotsByName(MergeDialog)
def retranslateUi(self, MergeDialog):
MergeDialog.setWindowTitle(_translate("MergeDialog", "Merge", None))
self.groupBox.setTitle(_translate("MergeDialog", "Merge from", None))
self.branchToMergeLabel.setText(_translate("MergeDialog", "Branch to merge onto current one:", None))
self.viewChangesButton.setText(_translate("MergeDialog", "View changes before merging >>", None))
self.mergeButton.setText(_translate("MergeDialog", "Merge", None))
self.changesTree.headerItem().setText(0, _translate("MergeDialog", "List of Changes", None))
self.attributesTable.setSortingEnabled(True)
item = self.attributesTable.horizontalHeaderItem(0)
item.setText(_translate("MergeDialog", "ATTRIBUTES", None))
item = self.attributesTable.horizontalHeaderItem(1)
item.setText(_translate("MergeDialog", "Local", None))
item = self.attributesTable.horizontalHeaderItem(2)
item.setText(_translate("MergeDialog", "To merge", None))
item = self.attributesTable.horizontalHeaderItem(3)
item.setText(_translate("MergeDialog", "Common ancestor", None))
self.label_10.setText(_translate("MergeDialog", "Show:", None))
self.showOursCheck.setText(_translate("MergeDialog", "Local", None))
self.showTheirsCheck.setText(_translate("MergeDialog", "To Merge", None))
self.showOriginCheck.setText(_translate("MergeDialog", "Common ancestor", None))
self.label_4.setText(_translate("MergeDialog", "Base Map:", None))
self.baseMapCombo.setItemText(0, _translate("MergeDialog", "None", None))
self.baseMapCombo.setItemText(1, _translate("MergeDialog", "OpenStreetMap", None))
self.baseMapCombo.setItemText(2, _translate("MergeDialog", "Google Maps", None))
import geogigclient_resources_rc
| boundlessgeo/qgis-geogig-plugin | geogig/ui/mergedialog.py | Python | gpl-2.0 | 14,704 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Boa:Frame:schoolnet_main
# JNMaster / schoolnet / shrimp entry point, main UI
#
# Copyright (C) 2011 Wang Xuerui <idontknw.wang@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import wx
from gingerprawn.api import logger
logger.install()
from gingerprawn.api import cooker
# i18n placeholder
_ = lambda x: x
SHRIMP_MINVER = (0, 1, 0, )
SHRIMP_PLATFORM = ('all', )
SHRIMP_INFO = {
# Network Settings
'name': u'\u7f51\u7edc\u8bbe\u7f6e',
'ver': '0.1.0',
'author': [u'xenon',
],
'desc': '<desc here>',
'copyr': u'(C) 2011 Wang Xuerui',
'lic': u'''\
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
''',
}
from schoolnet_icon import SHRIMP_ICON
_SHRIMP_ARGS = None
__SELF_FRAME = None
def shrimp_init():
logdebug('schoolnet init routine')
pass
def shrimp_threadproc(args):
global _SHRIMP_ARGS
_SHRIMP_ARGS = args
reason = args[0]
if reason == 'autostart':
# starting with OS, do nothing
waitqueue = args[1]
# If all shrimp behave well, it's impossible to block here
# Simply put something to indicate that we're done.
waitqueue.put('schoolnet')
return
# GUI init should take place in the main thread
wx.CallAfter(_APP_OBJECT._On_ShrimpInit, create)
def shrimp_down(just_querying=False):
if just_querying:
ret = wx.MessageBox('r u sure?', 'schoolnet', wx.YES_NO)
if ret == wx.YES:
logdebug('shutdown request approved')
return True
else:
logdebug('shutdown request declined')
return False
# not kidding, we have to go now
loginfo('teardown initiated')
wx.CallAfter(__SELF_FRAME.Destroy)
def create(parent):
global __SELF_FRAME
__SELF_FRAME = schoolnet_main(parent)
return __SELF_FRAME
[wxID_SCHOOLNET_MAIN, wxID_SCHOOLNET_MAINBTNEXIT,
] = [wx.NewId() for _init_ctrls in range(2)]
class schoolnet_main(wx.Frame):
def _init_ctrls(self, prnt):
# generated method, don't edit
wx.Frame.__init__(self, id=wxID_SCHOOLNET_MAIN, name='', parent=prnt,
size=wx.Size(400, 400), style=wx.DEFAULT_FRAME_STYLE,
title=u'schoolnet')
self.SetClientSize(wx.Size(384, 362))
self.Bind(wx.EVT_CLOSE, self.OnSchoolnet_mainClose)
self.btnExit = wx.Button(id=wxID_SCHOOLNET_MAINBTNEXIT,
label=_(u'\u9000\u51fa'), name=u'btnExit', parent=self,
pos=wx.Point(120, 16), size=wx.Size(75, 24), style=0)
self.btnExit.Bind(wx.EVT_BUTTON, self.OnBtnExitButton,
id=wxID_SCHOOLNET_MAINBTNEXIT)
def __init__(self, parent):
self._init_ctrls(parent)
def OnSchoolnet_mainClose(self, event):
loginfo('window close event, initiating subshrimp shutdown')
ok_to_shutdown = cooker.query_shutdown('schoolnet')
if ok_to_shutdown:
cooker.bring_down_shrimp('schoolnet')
event.Skip()
else:
event.Veto() # VETO the wx shutdown!
def OnBtnExitButton(self, event):
self.Close()
event.Skip()
# vi:ai:et:ts=4 sw=4 sts=4 fenc=utf-8
| xen0n/gingerprawn | gingerprawn/shrimp/schoolnet/schoolnet_main.py | Python | gpl-3.0 | 4,365 |
from __future__ import absolute_import
from collections import OrderedDict
from pybloom import BloomFilter
from os import path, makedirs
from celery import Task
from gevent.queue import Queue
from arachne.celery import celery
import errno, sys
## these directories are used to output results from scripts and errors,
## we specify the directories on the root of the virtualenv
pwd = path.split(path.split(sys.argv[0])[0])[0]
out_dir = path.join(pwd, 'var', 'out')
err_dir = path.join(pwd, 'var', 'err')
def make_path(path):
try:
makedirs(path)
except OSError as e:
## makedirs will throw an exception if the dir we're trying to make already exists
## if it is this particular exception, we ignore it
if e.errno != errno.EEXIST:
raise
## returns a dictionary with keys and values filled for parameters of a URL
def parse_params(p):
params = OrderedDict()
for keyval in p.split('&'):
kv = keyval.split('=')
key = kv[0]
val = '' if len(kv) == 1 else kv[1]
params[key] = val
return params
## given a scheme+netloc and a dictionary of parameters as returned by parse_params
## returns a fully qualified url with parameters in GET request format
def gen_url(url, p):
params = ''
for key,val in p.items():
params += '&{}={}'.format(key,val)
## the [1:] removes the leading &
return '{}?{}'.format(url, params[1:])
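## e.g. parse_params('a=1&b=2') -> OrderedDict([('a', '1'), ('b', '2')]), and
## gen_url('http://host/page', parse_params('a=1&b=2')) rebuilds the original
## 'http://host/page?a=1&b=2' (hypothetical URL, shown for illustration)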
## for each parameter, add to the value the change argument
## optionally we don't keep the initial value
def replace_params(url, change, keep_original=True):
if url.count('?') != 1:
return []
link, param = url.split('?')
params = parse_params(param)
new_urls = []
for key,val in params.iteritems():
copy = OrderedDict(params)
copy[key] = change
if keep_original:
copy[key] += val
new_urls.append(gen_url(link, copy))
return new_urls
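## e.g. replace_params('http://host/p?a=1&b=2', 'X', keep_original=False)
## -> ['http://host/p?a=X&b=2', 'http://host/p?a=1&b=X']
## (hypothetical URL, shown for illustration)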
def strip_open(in_file):
with open(in_file, 'r+') as f:
return [x.strip() for x in f.readlines()]
def get_out_name(name, job_name, out_name):
if out_name:
return out_name
if job_name:
return name + '-' + job_name
return name
## this is mainly a placeholder, since in production we would be running celery
## on multiple machines, it does not make sense to write out to a file.
## ideally we want to have a database connection and write to a table for each script
## however, since this is mainly proof of concept, there is no need to set up a database,
## though this structure allows for a rather easy implementation of that
@celery.task()
def celery_output(item, name, job_name, out_name):
out_file = get_out_name(name, job_name, out_name)
if type(item) == list:
to_write = item
else:
to_write = [item]
with open(path.join(out_dir, out_file), 'a+') as f:
for item in to_write:
print >> f, item
return to_write
## this is just a subclass of the BloomFilter that adds an append method to be
## interoperable with code that is used to lists instead of bloom filters. the reason
## we use bloom filters instead of a list for celery code is that serializing and sending
## a list is a lot more expensive than a bf which when gzipped makes a big difference
class BloomFilter(BloomFilter):
def append(self, key):
return self.add(key)
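## illustrative use of the append alias; pybloom's add() returns True when
## the key was already present, so this can double as a dedupe check:
## bf = BloomFilter(capacity=100000, error_rate=0.001)
## bf.append('http://host/page') # False on first insertion, True afterwards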
## the standard gevent queue subclassed to allow for callbacks on enqueueing and with
## support for boolean checking, length, and the extend method for lists
class Queue(Queue):
def __init__(self, callback=None):
super(Queue, self).__init__()
self.callback = callback
def __nonzero__(self):
return not self.empty()
def __len__(self):
return self.qsize()
def check_callback(self, item):
if self.callback:
self.callback(item)
def append(self, item):
self.put(item)
self.check_callback(item)
def extend(self, items):
for item in items:
self.put(item)
self.check_callback(item)
def popleft(self):
        return self.get()
| gzzo/arachne | arachne/utils.py | Python | gpl-2.0 | 4,274 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('trans', '0045_auto_20150916_1007'),
]
operations = [
migrations.CreateModel(
name='Billing',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
],
),
migrations.CreateModel(
name='Plan',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=100)),
('price', models.IntegerField()),
('limit_strings', models.IntegerField()),
('limit_languages', models.IntegerField()),
('limit_repositories', models.IntegerField()),
('limit_projects', models.IntegerField()),
],
options={
'ordering': ['name'],
},
),
migrations.AddField(
model_name='billing',
name='plan',
field=models.ForeignKey(to='billing.Plan'),
),
migrations.AddField(
model_name='billing',
name='projects',
field=models.ManyToManyField(to='trans.Project', blank=True),
),
migrations.AddField(
model_name='billing',
name='user',
field=models.OneToOneField(to=settings.AUTH_USER_MODEL),
),
]
| miumok98/weblate | weblate/billing/migrations/0001_initial.py | Python | gpl-3.0 | 1,726 |
from distutils.core import setup
setup(name='distributed_exploration',
version='1.0',
      packages=['distributed_exploration']
)
| pabloriera/distributed_exploration | setup.py | Python | gpl-2.0 | 145 |
"""
Pure Python discrete time PID controllers
"""
from __future__ import division
import time
from builtins import object
from past.utils import old_div
import zmq
# todo: add ZMQ connections to PIDs without one
class PID_V(object):
"""
Discrete PID control: the so-called `velocity` pid_controller
"""
def __init__(self, p=1, i=1, d=1, set_point=1, set_point_max=1, set_point_min=-1, saturate_max=None,
saturate_min=None, zmq_connection=None):
self._zmq = zmq_connection
self.kp = p
self.ki = i
self.kd = d
self._Ti = self.kp/self.ki
self._Td = self.kd/self.kp
self.saturate_max = saturate_max
self.saturate_min = saturate_min
self.set_point_max = set_point_max
self.set_point_min = set_point_min
self.set_point = set_point
self.dt = 0
self.prev_t = 0
self.ut = 0.0
self.ut_1 = 0.0
self.et = 0.0
self.et_1 = 0.0
        self.et_2 = 0.0
        # telemetry payload published over the optional ZMQ connection,
        # mirroring the structure used by the PID class below
        self._z_data = {
            "name": "PID_V",
            "data": {
                "P": 0.0,
                "I": 0.0,
                "D": 0.0,
                "E": 0.0,
                "SP": 0.0,
                "OUT": 0.0,
            }
        }
def update(self, feedback):
"""
Calculate PID output value for given reference input and feedback
:param feedback: state of the plant
"""
""" @todo: make dt a property to prevent it from being seen as a zero"""
self.dt = (time.time() - self.prev_t)
if self.dt == 0:
self.dt = 1e-3
self.prev_t = time.time()
self.et = self.set_point - feedback
self.ut = self.ut_1 + self.kp * (
(1 + old_div(self.dt, self._Ti) + old_div(self._Td, self.dt)) * self.et - (1 + 2 * self._Td / self.dt)
* self.et_1 + old_div((self._Td * self.et_2), self.dt))
self.et_2 = self.et_1
self.et_1 = self.et
self.ut_1 = self.ut
self._z_data["data"]["P"] = 0.0
self._z_data["data"]["I"] = 0.0
self._z_data["data"]["D"] = 0.0
self._z_data["data"]["E"] = self.et
self._z_data["data"]["SP"] = self.set_point
self._z_data["data"]["OUT"] = PID
if self._zmq:
try:
self._zmq.send_json(self._z_data, zmq.NOBLOCK)
except zmq.error.Again:
pass
return self.ut
"""
@property
def kp(self):
return self._kp
@kp.setter
def kp(self, p):
self._kp = p
self._Ti = self._kp / self.ki
self._Td = self.kd / self._kp
@property
def ki(self):
return self._ki
@ki.setter
def ki(self, val):
self._ki = val
self._Ti = self.kp / self._ki
self._Td = self.kd / self.kp
@property
def kd(self):
return self._kd
@kd.setter
def kd(self, d):
self._kd = d
self._Ti = self.kp / self.ki
self._Td = self._kd / self.kp
"""
@property
def set_point(self):
return self._set_point
@set_point.setter
def set_point(self, set_point):
if set_point > self.set_point_max:
self._set_point = self.set_point_max
elif set_point < self.set_point_min:
self._set_point = self.set_point_min
else:
self._set_point = set_point
# @todo: ask the professor about this
self.ut = 0
self.ut_1 = 0
@property
def ut(self):
return self._ut
@ut.setter
def ut(self, ut):
self._ut = ut
if self.saturate_max or self.saturate_min:
if self.saturate_max and ut > self.saturate_max:
self._ut = self.saturate_max
elif self.saturate_min and ut < self.saturate_min:
self._ut = self.saturate_min
class PID(object):
def __init__(self, name="N/A", p=1.0, i=0.0, d=10.0, Derivator=0, Integrator=0, Integrator_max=300,
Integrator_min=-200, set_point=0.0, power=1.0, zmq_connection=None):
self._zmq = zmq_connection
self.Kp = p
self.Ki = i
self.Kd = d
self.Derivator = Derivator
self.power = power
self.Integrator = Integrator
self.Integrator_max = Integrator_max
self.Integrator_min = Integrator_min
self.last_error = 0.0
self.last_value = 0.0
self.set_point = set_point
self.error = 0.0
self._z_data = {
"name": name,
"data": {
"P": 0.1,
"I": 0.1,
"D": 0.0,
"E": 0.0,
"SP": 0.0,
}
}
def update(self, current_value):
"""
Calculate PID output value for given reference input and feedback
"""
self.error = self.set_point - current_value
self.P_value = self.Kp * self.error
if (self.last_value >= current_value):
change = self.error - self.last_error
else:
change = 0.0
        self.I_value = self.Integrator * self.Ki
# self.D_value = self.Kd * ( self.error - self.Derivator)
self.D_value = self.Kd * change
self.Derivator = self.error
self.Integrator += self.error
if self.Integrator > self.Integrator_max:
self.Integrator = self.Integrator_max
elif self.Integrator < self.Integrator_min:
self.Integrator = self.Integrator_min
self.last_error = self.error
self.last_value = current_value
PID = self.P_value + self.I_value + self.D_value
self._z_data["data"]["P"] = self.P_value
self._z_data["data"]["I"] = self.I_value
self._z_data["data"]["D"] = self.D_value
self._z_data["data"]["E"] = self.error
self._z_data["data"]["SP"] = self.set_point
self._z_data["data"]["OUT"] = PID
if self._zmq:
try:
self._zmq.send_json(self._z_data, zmq.NOBLOCK)
except zmq.error.Again:
pass
return PID
def set_point(self, set_point):
"""Initilize the setpoint of PID"""
self.set_point = set_point
self.Integrator = 0
self.Derivator = 0
class PID_RP(object):
def __init__(self, name="N/A", p=1.0, i=0.0, d=10.0, Derivator=0, Integrator=0,
Integrator_max=20000, Integrator_min=-20000, set_point=0.0,
power=1.0, zmq_connection=None):
self._zmq = zmq_connection
self.Kp = p
self.Ki = i
self.Kd = d
self.name = name
self.Derivator = Derivator
self.power = power
self.Integrator = Integrator
self.Integrator_max = Integrator_max
self.Integrator_min = Integrator_min
self.last_error = 0.0
self.last_value = 0.0
self.set_point = set_point
self.error = 0.0
self.prev_t = 0
self._z_data = {
"name": name,
"data": {
"P": 0.0,
"I": 0.0,
"D": 0.0,
"E": 0.0,
"SP": 0.0,
"OUT": 0.0
}
}
def reset_dt(self):
self.prev_t = time.time()
def update(self, current_value):
"""
Calculate PID output value for given reference input and feedback
"""
        now = time.time()
        dt = now - self.prev_t
        self.prev_t = now
        if dt <= 0.0:
            # Guard the division in the D term below; call reset_dt() before
            # the first update so dt reflects the real loop interval.
            dt = 1e-6
self.error = self.set_point - current_value
self.P_value = self.Kp * self.error
change = self.error - self.last_error
self.I_value = self.Integrator * self.Ki * dt
# self.D_value = self.Kd * ( self.error - self.Derivator)
self.D_value = self.Kd * change / dt
self.Derivator = self.error
self.Integrator += self.error
if self.Integrator > self.Integrator_max:
self.Integrator = self.Integrator_max
elif self.Integrator < self.Integrator_min:
self.Integrator = self.Integrator_min
self.last_error = self.error
self.last_value = current_value
# print "{}: P={}, I={}, D={}".format(self.name, self.P_value, self.I_value, self.D_value)
PID = self.P_value + self.I_value + self.D_value
self._z_data["data"]["P"] = self.P_value
self._z_data["data"]["I"] = self.I_value
self._z_data["data"]["D"] = self.D_value
self._z_data["data"]["E"] = self.error
self._z_data["data"]["SP"] = self.set_point
self._z_data["data"]["OUT"] = PID
if self._zmq:
try:
self._zmq.send_json(self._z_data, zmq.NOBLOCK)
except zmq.error.Again:
pass
return PID
    def update_set_point(self, set_point):
        """
        Initialize the setpoint of the PID and reset its memory.
        A method literally named ``set_point`` would be shadowed by the
        instance attribute of the same name assigned in ``__init__``.
        """
        self.set_point = set_point
        self.Integrator = 0
        self.Derivator = 0
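if __name__ == "__main__":
    # Minimal usage sketch (an illustration, not part of the controller API):
    # the gains, setpoint, and 0.01 s loop period are made-up values, and the
    # toy "plant" below merely stands in for real sensor/actuator I/O.
    import time as _time
    ctrl = PID_RP(name="demo", p=0.5, i=0.1, d=0.05, set_point=1.0)
    ctrl.reset_dt()           # seed prev_t so the first dt is meaningful
    _time.sleep(0.01)
    measurement = 0.0
    for _ in range(5):
        output = ctrl.update(measurement)
        measurement += 0.1 * output   # toy plant: integrate the control output
        _time.sleep(0.01)
        print("u=%.4f y=%.4f" % (output, measurement))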
| friend0/tower | tower/controllers/pid.py | Python | isc | 8,867 |
from tests.fail_random import fail_random | griddynamics/bunch | tests/functional/bunches/light_failed/__init__.py | Python | gpl-3.0 | 41 |
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import glob
class Aspa(MakefilePackage):
"""A fundamental premise in ExMatEx is that scale-bridging performed in
heterogeneous MPMD materials science simulations will place important
demands upon the exascale ecosystem that need to be identified and
quantified.
"""
homepage = "http://www.exmatex.org/aspa.html"
git = "https://github.com/exmatex/ASPA.git"
tags = ['proxy-app']
version('master', branch='master')
variant('mpi', default=True, description='Build with MPI Support')
depends_on('lapack')
depends_on('blas')
depends_on('mpi', when='+mpi')
depends_on('hdf5')
patch('fix_common_errors.patch')
@property
def build_targets(self):
        targets = [
            '--directory=exec',
            '--file=Makefile',
            'LIBS={0} {1} {2}'.format(self.spec['lapack'].libs.ld_flags,
                                      self.spec['blas'].libs.ld_flags,
                                      self.spec['hdf5'].libs.ld_flags),
        ]
        # MPI is an optional variant, so only reach for the wrapper compiler
        # when it is enabled; otherwise fall back to Spack's C++ wrapper.
        if '+mpi' in self.spec:
            targets.append('CXX={0}'.format(self.spec['mpi'].mpicxx))
        else:
            targets.append('CXX={0}'.format(spack_cxx))
        return targets
def install(self, spec, prefix):
mkdirp(prefix.bin)
mkdirp(prefix.doc)
mkdirp(prefix.input)
install('exec/aspa', prefix.bin)
install('exec/README', prefix.doc)
install('exec/aspa.inp', prefix.input)
install('exec/kriging_model_centers.txt', prefix.input)
install('exec/point_data.txt', prefix.input)
install('exec/value_data.txt', prefix.input)
for files in glob.glob('doc/*.*'):
install(files, prefix.doc)
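# Hedged usage note (illustration only): with this recipe visible to a Spack
# installation, typical builds would be driven from the command line as
#   spack install aspa        # default build with the +mpi variant
#   spack install aspa~mpi    # serial build that skips the MPI dependency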
| rspavel/spack | var/spack/repos/builtin/packages/aspa/package.py | Python | lgpl-2.1 | 1,827 |
# Natural Language Toolkit: Rude Chatbot
#
# Copyright (C) 2001-2013 NLTK Project
# Author: Peter Spiller <pspiller@csse.unimelb.edu.au>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function
from .util import Chat, reflections
pairs = (
(r'We (.*)',
("What do you mean, 'we'?",
"Don't include me in that!",
"I wouldn't be so sure about that.")),
(r'You should (.*)',
("Don't tell me what to do, buddy.",
"Really? I should, should I?")),
(r'You\'re(.*)',
("More like YOU'RE %1!",
"Hah! Look who's talking.",
"Come over here and tell me I'm %1.")),
(r'You are(.*)',
("More like YOU'RE %1!",
"Hah! Look who's talking.",
"Come over here and tell me I'm %1.")),
(r'I can\'t(.*)',
("You do sound like the type who can't %1.",
"Hear that splashing sound? That's my heart bleeding for you.",
"Tell somebody who might actually care.")),
(r'I think (.*)',
("I wouldn't think too hard if I were you.",
"You actually think? I'd never have guessed...")),
(r'I (.*)',
("I'm getting a bit tired of hearing about you.",
"How about we talk about me instead?",
"Me, me, me... Frankly, I don't care.")),
(r'How (.*)',
("How do you think?",
"Take a wild guess.",
"I'm not even going to dignify that with an answer.")),
(r'What (.*)',
("Do I look like an encyclopedia?",
"Figure it out yourself.")),
(r'Why (.*)',
("Why not?",
"That's so obvious I thought even you'd have already figured it out.")),
(r'(.*)shut up(.*)',
("Make me.",
"Getting angry at a feeble NLP assignment? Somebody's losing it.",
"Say that again, I dare you.")),
(r'Shut up(.*)',
("Make me.",
"Getting angry at a feeble NLP assignment? Somebody's losing it.",
"Say that again, I dare you.")),
(r'Hello(.*)',
("Oh good, somebody else to talk to. Joy.",
"'Hello'? How original...")),
(r'(.*)',
("I'm getting bored here. Become more interesting.",
"Either become more thrilling or get lost, buddy.",
"Change the subject before I die of fatal boredom."))
)
rude_chatbot = Chat(pairs, reflections)
def rude_chat():
print("Talk to the program by typing in plain English, using normal upper-")
print('and lower-case letters and punctuation. Enter "quit" when done.')
print('='*72)
print("I suppose I should say hello.")
rude_chatbot.converse()
def demo():
rude_chat()
if __name__ == "__main__":
demo()
| bbengfort/TextBlob | textblob/nltk/chat/rude.py | Python | mit | 2,703 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from nose.tools import raises
import tvm
import pickle as pkl
def test_schedule_create():
m = tvm.var('m')
n = tvm.var('n')
l = tvm.var('l')
A = tvm.placeholder((m, l), name='A')
B = tvm.placeholder((n, l), name='B')
AA = tvm.compute((m, l), lambda i, j: A[i, j])
T = tvm.compute((m, n, l), lambda i, j, k: AA(i, k) * B(j, k))
s = tvm.create_schedule(T.op)
s[AA].set_scope("shared")
xo, xi = s[T].split(T.op.axis[0], factor=10)
xi1, xi2 = s[T].split(xi, factor=2)
s[AA].compute_at(s[T], xi1)
xo, xi = s[AA].split(AA.op.axis[0], factor=10)
s[T].reorder(xi2, xi1)
assert T.op.axis[1] in s[T].leaf_iter_vars
# save load json
json_str = tvm.save_json(s)
s_loaded = tvm.load_json(json_str)
assert isinstance(s_loaded, tvm.schedule.Schedule)
assert(str(s_loaded.outputs[0].body) == str(s.outputs[0].body))
# pickle unpickle
dump = pkl.dumps(s)
s_loaded = pkl.loads(dump)
assert isinstance(s_loaded, tvm.schedule.Schedule)
assert(str(s_loaded.outputs[0].body) == str(s.outputs[0].body))
def test_reorder():
m = tvm.var('m')
A = tvm.placeholder((m,), name='A')
T = tvm.compute(m, lambda i: A[i+1])
s = tvm.create_schedule(T.op)
xo, xi = s[T].split(T.op.axis[0], factor=10)
xi1, xi2 = s[T].split(xi, factor=2)
order = (xi2, xi1, xo)
assert tuple(s[T].leaf_iter_vars) != order
s[T].reorder(*order)
assert tuple(s[T].leaf_iter_vars) == order
try:
# pass duplicate IterVar
# must raise an error
s[T].reorder(xi2, xi1, xi2)
assert False
except tvm.TVMError:
pass
def test_split():
m = tvm.var('m')
A = tvm.placeholder((m,), name='A')
T = tvm.compute((m,), lambda i: A[i])
s = tvm.create_schedule(T.op)
xo, xi = s[T].split(T.op.axis[0], factor=10)
assert tuple(s[T].leaf_iter_vars) == (xo, xi)
def test_tile():
m = tvm.var('m')
n = tvm.var('n')
A = tvm.placeholder((m, n), name='A')
T = tvm.compute((m, n), lambda i, j: A[i, j])
s = tvm.create_schedule(T.op)
xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
assert tuple(s[T].leaf_iter_vars) == (xo, yo, xi, yi)
def test_fuse():
m = tvm.var('m')
n = tvm.var('n')
A = tvm.placeholder((m, n), name='A')
T = tvm.compute((m, n), lambda i, j: A[i, j])
s = tvm.create_schedule(T.op)
xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
fused = s[T].fuse(xo, yo)
assert any(isinstance(x, tvm.schedule.Fuse) for x in s[T].relations)
assert tuple(s[T].leaf_iter_vars) == (fused, xi, yi)
def test_singleton():
A = tvm.placeholder((), name='A')
T = tvm.compute((), lambda : A() + 1)
s = tvm.create_schedule(T.op)
fused = s[T].fuse()
assert any(isinstance(x, tvm.schedule.Singleton) for x in s[T].relations)
assert tuple(s[T].leaf_iter_vars) == (fused,)
dump = pkl.dumps(s)
s_loaded = pkl.loads(dump)
assert isinstance(s_loaded, tvm.schedule.Schedule)
def test_vectorize():
m = tvm.var('m')
n = tvm.var('n')
A = tvm.placeholder((m, n), name='A')
T = tvm.compute((m, n), lambda i, j: A[i, j])
s = tvm.create_schedule(T.op)
xo, yo, xi, yi = s[T].tile(T.op.axis[0], T.op.axis[1], x_factor=10, y_factor=5)
s[T].vectorize(yi)
s[T].unroll(xi)
UNROLL = tvm.schedule.IterVar.Unrolled
VECTORIZE = tvm.schedule.IterVar.Vectorized
assert s[T].iter_var_attrs[xi].iter_type == UNROLL
assert s[T].iter_var_attrs[yi].iter_type == VECTORIZE
@raises(Exception)
def test_vectorize_commreduce():
V = tvm.placeholder((128,), name='V')
ax = tvm.reduce_axis((0, 128), name='ax')
O = tvm.compute((1,), lambda _: tvm.sum(V[ax], axis=[ax]))
s = tvm.create_schedule(O.op)
s[O].vectorize(ax) # should throw here
def test_pragma():
m = 100
A = tvm.placeholder((m,), name='A')
T = tvm.compute((m,), lambda i: A[i])
s = tvm.create_schedule(T.op)
xo, xi = s[T].split(T.op.axis[0], factor=10)
s[T].pragma(xo, "pragma1")
s[T].pragma(xi, "vectorize")
VECTORIZE = tvm.schedule.IterVar.Vectorized
assert s[T].iter_var_attrs[xo].pragma_keys[0].value == "pragma1"
assert s[T].iter_var_attrs[xi].iter_type == VECTORIZE
def test_rfactor():
n = tvm.var('n')
k1 = tvm.reduce_axis((0, n), name="k1")
k2 = tvm.reduce_axis((0, n), name="k2")
A = tvm.placeholder((n, n, n), name='A')
B = tvm.compute((n, ), lambda i: tvm.sum(A[i, k1, k2], axis=[k1, k2]))
# normal schedule
s = tvm.create_schedule(B.op)
BF = s.rfactor(B, k1)
assert(tuple(BF.shape) == (n, n))
assert(set(BF.op.body[0].axis) == set([k2]))
assert(s[B].op.body[0].axis[0].dom.extent == n)
assert(len(s[B].all_iter_vars) == 2)
    # schedule with split
s = tvm.create_schedule(B.op)
ko, ki = s[B].split(k1, factor=4)
xo, xi = s[B].split(B.op.axis[0], factor=8)
BF = s.rfactor(B, ki)
assert(BF.shape[0].value == 4)
assert(BF.shape[1] == n)
assert(BF.op.body[0].axis[0] == k2)
assert(BF.op.body[0].axis[1].var == ko.var)
assert(s[B].op.body[0].axis[0].dom.extent.value == 4)
# schedule with factor_axis
s = tvm.create_schedule(B.op)
ko, ki = s[B].split(k1, factor=4)
xo, xi = s[B].split(B.op.axis[0], factor=8)
BF = s.rfactor(B, ki, 1)
assert(n == BF.shape[0])
assert(BF.shape[1].value == 4)
assert(BF.op.body[0].axis[0] == k2)
assert(BF.op.body[0].axis[1].var == ko.var)
assert(s[B].op.body[0].axis[0].dom.extent.value == 4)
def test_tensor_intrin():
n = 16
x = tvm.placeholder((n,), name='x')
y = tvm.placeholder((n,), name='y')
z = tvm.compute(x.shape, lambda i: x[i] + y[i], name='z')
def intrin_func(ins, outs):
assert(isinstance(ins[0], tvm.schedule.Buffer))
assert(ins[0].shape[0].value == n)
return tvm.call_packed("vadd", ins[0].data, outs[0].data, ins[0].shape[0])
intrin = tvm.decl_tensor_intrin(z.op, intrin_func)
assert intrin.op == z.op
assert intrin.reduce_init is None
assert tuple(intrin.inputs) == tuple(z.op.input_tensors)
assert(intrin.buffers[0].shape[0].value == n)
m = 32
x = tvm.placeholder((m,), name='x')
y = tvm.placeholder((m,), name='y')
z = tvm.compute(x.shape, lambda i: x[i] + y[i], name='z')
s = tvm.create_schedule(z.op)
xo, xi = s[z].split(z.op.axis[0], factor=n)
s[z].tensorize(xi, intrin)
assert(s[z].iter_var_attrs[xi].tensor_intrin == intrin)
assert(s[z].iter_var_attrs[xi].iter_type == tvm.schedule.IterVar.Tensorized)
def test_tensor_intrin_scalar_params():
n = tvm.var("n")
x = tvm.placeholder((n,), name='x')
v = tvm.var("v")
w = tvm.var("w")
z = tvm.compute((n,), lambda i: x[i]*v + w, name='z')
def intrin_func(ins, outs, sp):
assert(isinstance(ins[0], tvm.schedule.Buffer))
assert(ins[0].shape[0] == n)
assert(sp[0] == v)
assert(sp[1] == w)
return tvm.call_packed("hw_func", ins[0].data, outs[0].data, sp[0], sp[1])
with tvm.build_config(offset_factor=1):
intrin = tvm.decl_tensor_intrin(z.op, intrin_func, scalar_params=[v, w])
assert intrin.op == z.op
assert intrin.reduce_init is None
assert tuple(intrin.inputs) == tuple(z.op.input_tensors)
assert(intrin.buffers[0].shape[0] == n)
assert tuple(intrin.scalar_params) == tuple((v, w))
A = tvm.placeholder((10,10), name='A')
# Pass scalar inputs to the TensorIntrin, interleaved with tensor inputs
C = tvm.compute((10,10), lambda i, j: intrin(i*i, A[i, j], i+j), name="C")
s = tvm.create_schedule(C.op)
stmt = tvm.lower(s, [A, C], simple_mode=True)
assert isinstance(stmt.body.body.body, tvm.stmt.Evaluate)
assert len(stmt.body.body.body.value.args) == 5
assert str(stmt.body.body.body.value.args[3]) == "(i*i)"
assert str(stmt.body.body.body.value.args[4]) == "(i + j)"
if __name__ == "__main__":
test_singleton()
test_pragma()
test_tensor_intrin()
test_tensor_intrin_scalar_params()
test_rfactor()
test_schedule_create()
test_reorder()
test_tile()
test_split()
test_fuse()
test_vectorize()
test_vectorize_commreduce()
| mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/tests/python/unittest/test_lang_schedule.py | Python | apache-2.0 | 9,054 |
#!/usr/bin/env python3.5
from libs.git_wrapper import GitManager
from libs.utils import generic_setup, parse_args, explode
def clone_repos(logger, settings, dry_run=False):
for repo in settings["repositories"]:
protocol = repo[0]
host = repo[1]
user = repo[2]
repository = repo[3]
destination = explode(*repo[4])
logger.info('Cloning repo "%s" into "%s".' % (
GitManager.get_github_path(
user=user,
repo=repository,
host=host,
protocol=protocol),
destination))
if not dry_run:
GitManager.clone_repo(user=user,
repository=repository,
destination=destination,
protocol=protocol,
host=host)
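# Hedged example (illustration only): clone_repos() expects each entry of
# settings["repositories"] to be a 5-tuple of (protocol, host, user,
# repository, destination path parts consumed by explode()); every value
# below is a made-up placeholder.
_EXAMPLE_SETTINGS = {
    "repositories": [
        ("https", "github.com", "example-user", "example-repo",
         ("~", "repos", "example-repo")),
    ],
}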
def main():
args = parse_args(required_root_path=True)
logger, settings = generic_setup(args['root_path'])
clone_repos(logger, settings, dry_run=args['dry_run'])
if __name__ == '__main__':
main()
| xyder/dotfiles | utils/installer/tools/install_repos.py | Python | mit | 1,106 |
import numpy
from palm.util import n_choose_k
from palm.route_collection import RouteCollectionFactory
class Route(object):
'''
A generic route class for AggregatedKineticModels. Routes represent
transitions between states in such models.
Parameters
----------
id_str : string
Identifier string for this route.
start_state_id, end_state_id : string
The identifier strings for the states that are connected by this route.
rate_id : string
A string that denotes the rate law that governs this route.
multiplicity : int
A combinatoric factor for this route, which is determined by
the number of fluorophores in the initial microstate of
the transition.
'''
def __init__(self, id_str, start_state_id, end_state_id, rate_id,
multiplicity):
self.id = id_str
self.start_state_id = start_state_id
self.end_state_id = end_state_id
self.rate_id = rate_id
self.multiplicity = multiplicity
def __str__(self):
my_str = "%s %s %s %s %d" % (
self.id, self.start_state_id, self.end_state_id,
self.rate_id, self.multiplicity)
return my_str
def get_id(self):
return self.id
def get_start_state(self):
return self.start_state_id
def get_end_state(self):
return self.end_state_id
def get_multiplicity(self):
return self.multiplicity
def as_dict(self):
return {'start_state':self.start_state_id,
'end_state':self.end_state_id,
'rate_id':self.rate_id,
'multiplicity':self.multiplicity}
class SingleDarkRouteMapperFactory(object):
"""
This factory class creates a route mapper for
a blink model with one dark state.
Attributes
----------
transition_factory : class
A factory class for transitions, which are simply used as
helper objects when enumerating all of the routes.
Parameters
----------
parameter_set : ParameterSet
route_factory : class, optional
A class for making Route objects.
max_A : int, optional
Number of fluorophores that can be simultaneously active.
"""
def __init__(self, parameter_set, route_factory=Route, max_A=5):
super(SingleDarkRouteMapperFactory, self).__init__()
self.parameter_set = parameter_set
self.route_factory = route_factory
self.max_A = max_A
self.transition_factory = SingleDarkTransition
def create_route_mapper(self):
"""
Creates a method that builds a RouteCollection, made up of
all possible routes in the model.
Returns
-------
map_routes : callable f(state_collection)
A method that builds a RouteCollection.
"""
activation = self.transition_factory(
-1, 1, 0, 0, {'I':1}, 'ka')
blinking = self.transition_factory(
0, -1, 1, 0, {'A':1}, 'kd')
recovery = self.transition_factory(
0, 1, -1, 0, {'D':1}, 'kr')
bleaching = self.transition_factory(
0, -1, 0, 1, {'A':1}, 'kb')
allowed_transitions_list = [activation, blinking, recovery, bleaching]
def map_routes(state_collection):
"""
Build a route collection, based on the states in `state_collection`.
Parameters
----------
state_collection : StateCollection
States for a model with one dark state.
Returns
-------
route_collection : RouteCollection
"""
rc_factory = RouteCollectionFactory()
for start_id, start_state in state_collection.iter_states():
route_iterator = self._enumerate_allowed_transitions(
start_state, allowed_transitions_list)
for end_id, transition in route_iterator:
rate_id = transition.rate_id
multiplicity = transition.compute_multiplicity(start_state)
route_id = "%s__%s" % (start_id, end_id)
new_route = self.route_factory(route_id, start_id, end_id,
rate_id, multiplicity)
rc_factory.add_route(new_route)
route_collection = rc_factory.make_route_collection()
return route_collection
return map_routes
def _enumerate_allowed_transitions(self, start_state,
allowed_transitions_list):
"""
Iterate through possible transitions and filter out those
that lead to invalid states.
Parameters
----------
start_state : State
Enumerate transitions that lead from this state to other states.
allowed_transitions_list : list
Possible transitions from `start_state` to other states.
        Yields
        ------
end_id : string
Transition will lead from `start_state` to the state with this id.
transition : SingleDarkTransition
"""
for transition in allowed_transitions_list:
I2 = start_state['I'] + transition.get_dPop('I')
A2 = start_state['A'] + transition.get_dPop('A')
D2 = start_state['D'] + transition.get_dPop('D')
B2 = start_state['B'] + transition.get_dPop('B')
end_state_array = numpy.array([I2, A2, D2, B2])
no_negative_pop = len(numpy.where(end_state_array < 0)[0]) == 0
if A2 <= self.max_A and no_negative_pop and\
transition.is_allowed(start_state):
end_id = "%d_%d_%d_%d" % (I2, A2, D2, B2)
yield end_id, transition
class SingleDarkTransition(object):
"""
A helper class for SingleDarkRouteMapperFactory. Represents information
about a transition between two states.
Attributes
----------
dPop_dict : dict
The change in populations of microstates for this transition.
Parameters
----------
dI, dA, dD, dB : int
Changes in microstate populations. The little 'd' here means 'delta'.
reacting_species_dict : dict
The stoichiometry of the reactants for this transition.
If the transition is I goes to A. Then `reacting_species_dict` will
be a dictionary like this {'I':1}.
rate_id : string
A string that denotes the rate law that governs this transition.
"""
def __init__(self, dI, dA, dD, dB, reacting_species_dict, rate_id):
self.dPop_dict = {'I':dI, 'A':dA, 'D':dD, 'B':dB}
self.reacting_species_dict = reacting_species_dict
self.rate_id = rate_id
    def __str__(self):
        # rate_id identifies the transition; the class defines no `label` attribute
        return "%s %d_%d_%d_%d" % (self.rate_id,
                                   self.dPop_dict['I'],
                                   self.dPop_dict['A'],
                                   self.dPop_dict['D'],
                                   self.dPop_dict['B'])
def get_dPop(self, species_label):
return self.dPop_dict[species_label]
def is_allowed(self, state):
"""
Determine whether state can undergo this transition,
based on whether the state has the requisite reactants
to undergo the transition.
Parameters
----------
state : State
Starting state for the transition.
Returns
-------
is_transition_allowed : bool
"""
is_transition_allowed = True
for rs in self.reacting_species_dict.iterkeys():
num_reactants = self.reacting_species_dict[rs]
if rs == 'I':
species_starting_pop = state['I']
elif rs == 'A':
species_starting_pop = state['A']
elif rs == 'D':
species_starting_pop = state['D']
elif rs == 'B':
species_starting_pop = state['B']
if species_starting_pop < num_reactants:
# we need at least num_reactants for the transition
is_transition_allowed = False
break
return is_transition_allowed
def compute_multiplicity(self, start_state):
return 10**self.compute_log_combinatoric_factor(start_state)
def compute_log_combinatoric_factor(self, start_state):
"""
Compute combinatoric factor for this transition,
which is based on the population of the reactant
species (microstate) and the stoichiometry of
the transition.
Parameters
----------
        start_state : State
Returns
-------
log_combinatoric_factor : float
Log base 10 combinatoric factor.
"""
# reacting_species_id = I, A, D, or B
reacting_species_id = self.reacting_species_dict.keys()[0]
n = start_state[reacting_species_id]
k = abs(self.reacting_species_dict[reacting_species_id])
combinatoric_factor = n_choose_k(n,k)
log_combinatoric_factor = numpy.log10(combinatoric_factor)
return log_combinatoric_factor
class DoubleDarkRouteMapperFactory(object):
"""
This factory class creates a route mapper for
a blink model with two dark states.
Attributes
----------
transition_factory : class
A factory class for transitions, which are simply used as
helper objects when enumerating all of the routes.
Parameters
----------
parameter_set : ParameterSet
route_factory : class, optional
A class for making Route objects.
max_A : int, optional
Number of fluorophores that can be simultaneously active.
"""
def __init__(self, parameter_set, route_factory=Route, max_A=5):
super(DoubleDarkRouteMapperFactory, self).__init__()
self.parameter_set = parameter_set
self.route_factory = route_factory
self.max_A = max_A
self.transition_factory = DoubleDarkTransition
def create_route_mapper(self):
"""
Creates a method that builds a RouteCollection, made up of
all possible routes in the model.
Returns
-------
map_routes : callable f(state_collection)
A method that builds a RouteCollection.
"""
activation = self.transition_factory(
-1, 1, 0, 0, 0, {'I':1}, 'ka')
blinking1 = self.transition_factory(
0, -1, 1, 0, 0, {'A':1}, 'kd1')
recovery1 = self.transition_factory(
0, 1, -1, 0, 0, {'D1':1}, 'kr1')
blinking2 = self.transition_factory(
0, -1, 0, 1, 0, {'A':1}, 'kd2')
recovery2 = self.transition_factory(
0, 1, 0, -1, 0, {'D2':1}, 'kr2')
bleaching = self.transition_factory(
0, -1, 0, 0, 1, {'A':1}, 'kb')
allowed_transitions_list = [activation, blinking1, recovery1,
blinking2, recovery2, bleaching]
def map_routes(state_collection):
"""
Build a route collection, based on the states in `state_collection`.
Parameters
----------
state_collection : StateCollection
States for a model with two dark states.
Returns
-------
route_collection : RouteCollection
"""
rc_factory = RouteCollectionFactory()
for start_id, start_state in state_collection.iter_states():
route_iterator = self._enumerate_allowed_transitions(
start_state, allowed_transitions_list)
for end_id, transition in route_iterator:
rate_id = transition.rate_id
multiplicity = transition.compute_multiplicity(start_state)
route_id = "%s__%s" % (start_id, end_id)
new_route = self.route_factory(route_id, start_id, end_id,
rate_id, multiplicity)
rc_factory.add_route(new_route)
route_collection = rc_factory.make_route_collection()
return route_collection
return map_routes
def _enumerate_allowed_transitions(self, start_state,
allowed_transitions_list):
"""
Iterate through possible transitions and filter out those
that lead to invalid states.
Parameters
----------
start_state : State
Enumerate transitions that lead from this state to other states.
allowed_transitions_list : list
Possible transitions from `start_state` to other states.
        Yields
        ------
        end_id : string
            Transition will lead from `start_state` to the state with this id.
        transition : DoubleDarkTransition
"""
for transition in allowed_transitions_list:
end_I = start_state['I'] + transition.get_dPop('I')
end_A = start_state['A'] + transition.get_dPop('A')
end_D1 = start_state['D1'] + transition.get_dPop('D1')
end_D2 = start_state['D2'] + transition.get_dPop('D2')
end_B = start_state['B'] + transition.get_dPop('B')
end_state_array = numpy.array([end_I, end_A, end_D1, end_D2, end_B])
no_negative_pop = len(numpy.where(end_state_array < 0)[0]) == 0
if end_A <= self.max_A and no_negative_pop and\
transition.is_allowed(start_state):
end_id = "%d_%d_%d_%d_%d" % (end_I, end_A, end_D1, end_D2, end_B)
yield end_id, transition
class DoubleDarkTransition(object):
"""
A helper class for DoubleDarkRouteMapperFactory. Represents information
about a transition between two states.
Attributes
----------
dPop_dict : dict
The change in populations of microstates for this transition.
Parameters
----------
dI, dA, dD1, dD2, dB : int
Changes in microstate populations. The little 'd' here means 'delta'.
reacting_species_dict : dict
The stoichiometry of the reactants for this transition.
If the transition is I goes to A. Then `reacting_species_dict` will
be a dictionary like this {'I':1}.
rate_id : string
A string that denotes the rate law that governs this route.
"""
def __init__(self, dI, dA, dD1, dD2, dB, reacting_species_dict, rate_id):
self.dPop_dict = {'I':dI, 'A':dA, 'D1':dD1, 'D2':dD2, 'B':dB}
self.reacting_species_dict = reacting_species_dict
self.rate_id = rate_id
    def __str__(self):
        # rate_id identifies the transition; the class defines no `label` attribute
        return "%s %d_%d_%d_%d_%d" % (self.rate_id,
                                      self.dPop_dict['I'],
                                      self.dPop_dict['A'],
                                      self.dPop_dict['D1'],
                                      self.dPop_dict['D2'],
                                      self.dPop_dict['B'])
def get_dPop(self, species_label):
return self.dPop_dict[species_label]
def is_allowed(self, start_state):
"""
Determine whether state can undergo this transition,
based on whether the state has the requisite reactants
to undergo the transition.
Parameters
----------
        start_state : State
Starting state for the transition.
Returns
-------
is_transition_allowed : bool
"""
return_value = True
for rs in self.reacting_species_dict.iterkeys():
num_reactants = self.reacting_species_dict[rs]
if rs == 'I':
species_starting_pop = start_state['I']
elif rs == 'A':
species_starting_pop = start_state['A']
elif rs == 'D1':
species_starting_pop = start_state['D1']
elif rs == 'D2':
species_starting_pop = start_state['D2']
elif rs == 'B':
species_starting_pop = start_state['B']
if species_starting_pop < num_reactants:
# we need at least num_reactants for the transition
return_value = False
break
return return_value
def compute_multiplicity(self, start_state):
return 10**self.compute_log_combinatoric_factor(start_state)
def compute_log_combinatoric_factor(self, start_state):
"""
Compute combinatoric factor for this transition,
which is based on the population of the reactant
species (microstate) and the stoichiometry of
the transition.
Parameters
----------
        start_state : State
Returns
-------
log_combinatoric_factor : float
Log base 10 combinatoric factor.
"""
# reacting_species_id = I, A, D1, D2, or B
reacting_species_id = self.reacting_species_dict.keys()[0]
n = start_state[reacting_species_id]
k = abs(self.reacting_species_dict[reacting_species_id])
combinatoric_factor = n_choose_k(n,k)
return numpy.log10(combinatoric_factor)
class ConnectedDarkRouteMapperFactory(object):
"""
This factory class creates a route mapper for
a blink model with two, connected dark states.
Attributes
----------
transition_factory : class
A factory class for transitions, which are simply used as
helper objects when enumerating all of the routes.
Parameters
----------
parameter_set : ParameterSet
route_factory : class, optional
A class for making Route objects.
max_A : int, optional
Number of fluorophores that can be simultaneously active.
"""
def __init__(self, parameter_set, route_factory=Route, max_A=5):
super(ConnectedDarkRouteMapperFactory, self).__init__()
self.parameter_set = parameter_set
self.route_factory = route_factory
self.max_A = max_A
self.transition_factory = DoubleDarkTransition
def create_route_mapper(self):
"""
Creates a method that builds a RouteCollection, made up of
all possible routes in the model.
Returns
-------
map_routes : callable f(state_collection)
A method that builds a RouteCollection.
"""
activation = self.transition_factory(
-1, 1, 0, 0, 0, {'I':1}, 'ka')
blinking1 = self.transition_factory(
0, -1, 1, 0, 0, {'A':1}, 'kd1')
recovery1 = self.transition_factory(
0, 1, -1, 0, 0, {'D1':1}, 'kr1')
blinking2 = self.transition_factory(
0, 0, -1, 1, 0, {'D1':1}, 'kd2')
recovery2 = self.transition_factory(
0, 0, 1, -1, 0, {'D2':1}, 'kr2b')
bleaching = self.transition_factory(
0, -1, 0, 0, 1, {'A':1}, 'kb')
allowed_transitions_list = [activation, blinking1, recovery1,
blinking2, recovery2, bleaching]
def map_routes(state_collection):
"""
Build a route collection, based on the states in `state_collection`.
Parameters
----------
state_collection : StateCollection
States for a model with two dark states.
Returns
-------
route_collection : RouteCollection
"""
rc_factory = RouteCollectionFactory()
for start_id, start_state in state_collection.iter_states():
route_iterator = self._enumerate_allowed_transitions(
start_state, allowed_transitions_list)
for end_id, transition in route_iterator:
rate_id = transition.rate_id
multiplicity = transition.compute_multiplicity(start_state)
route_id = "%s__%s" % (start_id, end_id)
new_route = self.route_factory(route_id, start_id, end_id,
rate_id, multiplicity)
rc_factory.add_route(new_route)
route_collection = rc_factory.make_route_collection()
return route_collection
return map_routes
def _enumerate_allowed_transitions(self, start_state,
allowed_transitions_list):
"""
Iterate through possible transitions and filter out those
that lead to invalid states.
Parameters
----------
start_state : State
Enumerate transitions that lead from this state to other states.
allowed_transitions_list : list
Possible transitions from `start_state` to other states.
        Yields
        ------
        end_id : string
            Transition will lead from `start_state` to the state with this id.
        transition : DoubleDarkTransition
"""
for transition in allowed_transitions_list:
end_I = start_state['I'] + transition.get_dPop('I')
end_A = start_state['A'] + transition.get_dPop('A')
end_D1 = start_state['D1'] + transition.get_dPop('D1')
end_D2 = start_state['D2'] + transition.get_dPop('D2')
end_B = start_state['B'] + transition.get_dPop('B')
end_state_array = numpy.array([end_I, end_A, end_D1, end_D2, end_B])
no_negative_pop = len(numpy.where(end_state_array < 0)[0]) == 0
if end_A <= self.max_A and no_negative_pop and\
transition.is_allowed(start_state):
end_id = "%d_%d_%d_%d_%d" % (end_I, end_A, end_D1, end_D2, end_B)
yield end_id, transition
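if __name__ == '__main__':
    # Hedged worked example (illustration only): the multiplicity of a
    # blinking transition A -> D is C(n, 1) for n active fluorophores,
    # so a state with three active fluorophores yields a factor of 3.
    blink = SingleDarkTransition(0, -1, 1, 0, {'A': 1}, 'kd')
    start_state = {'I': 2, 'A': 3, 'D': 0, 'B': 0}
    print(blink.is_allowed(start_state))            # True: an A is available
    print(blink.compute_multiplicity(start_state))  # ~3.0, i.e. 10**log10(C(3,1))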
| milapour/palm | palm/blink_route_mapper.py | Python | bsd-2-clause | 22,106 |
#Copyright ReportLab Europe Ltd. 2000-2016
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/xpreformatted.py
__version__='3.3.0'
__doc__='''A 'rich preformatted text' widget allowing internal markup'''
from reportlab.lib import PyFontify
from reportlab.platypus.paragraph import Paragraph, cleanBlockQuotedText, _handleBulletWidth, \
ParaLines, _getFragWords, stringWidth, getAscentDescent, imgVRange, imgNormV
from reportlab.lib.utils import isSeq
from reportlab.platypus.flowables import _dedenter
def _getFragLines(frags):
lines = []
cline = []
W = frags[:]
while W != []:
w = W[0]
t = w.text
del W[0]
i = t.find('\n')
if i>=0:
tleft = t[i+1:]
cline.append(w.clone(text=t[:i]))
lines.append(cline)
cline = []
if tleft!='':
W.insert(0,w.clone(text=tleft))
else:
cline.append(w)
if cline!=[]:
lines.append(cline)
return lines
def _split_blPara(blPara,start,stop):
f = blPara.clone()
for a in ('lines', 'text'):
if hasattr(f,a): delattr(f,a)
f.lines = blPara.lines[start:stop]
return [f]
# Will be removed shortly.
def _countSpaces(text):
return text.count(' ')
## i = 0
## s = 0
## while 1:
## j = text.find(' ',i)
## if j<0: return s
## s = s + 1
## i = j + 1
def _getFragWord(frags,maxWidth):
    ''' given a fragment list return a tuple
        (size, spaces, [(f00,w00), ..., (f0n,w0n)])
        where each pair f,w represents a style and some string
    '''
W = []
n = 0
s = 0
for f in frags:
text = f.text[:]
W.append((f,text))
cb = getattr(f,'cbDefn',None)
if cb:
_w = getattr(cb,'width',0)
if hasattr(_w,'normalizedValue'):
_w._normalizer = maxWidth
n = n + stringWidth(text, f.fontName, f.fontSize)
#s = s + _countSpaces(text)
s = s + text.count(' ') # much faster for many blanks
#del f.text # we can't do this until we sort out splitting
# of paragraphs
return n, s, W
class XPreformatted(Paragraph):
def __init__(self, text, style, bulletText = None, frags=None, caseSensitive=1, dedent=0):
self.caseSensitive = caseSensitive
cleaner = lambda text, dedent=dedent: '\n'.join(_dedenter(text or '',dedent))
self._setup(text, style, bulletText, frags, cleaner)
def breakLines(self, width):
"""
Returns a broken line structure. There are two cases
A) For the simple case of a single formatting input fragment the output is
A fragment specifier with
- kind = 0
- fontName, fontSize, leading, textColor
- lines= A list of lines
Each line has two items:
1. unused width in points
2. a list of words
B) When there is more than one input formatting fragment the out put is
A fragment specifier with
- kind = 1
- lines = A list of fragments each having fields:
- extraspace (needed for justified)
- fontSize
- words=word list
- each word is itself a fragment with
- various settings
This structure can be used to easily draw paragraphs with the various alignments.
You can supply either a single width or a list of widths; the latter will have its
last item repeated until necessary. A 2-element list is useful when there is a
different first line indent; a longer list could be created to facilitate custom wraps
around irregular objects."""
if not isSeq(width): maxWidths = [width]
else: maxWidths = width
lines = []
lineno = 0
maxWidth = maxWidths[lineno]
style = self.style
fFontSize = float(style.fontSize)
requiredWidth = 0
#for bullets, work out width and ensure we wrap the right amount onto line one
_handleBulletWidth(self.bulletText,style,maxWidths)
self.height = 0
autoLeading = getattr(self,'autoLeading',getattr(style,'autoLeading',''))
calcBounds = autoLeading not in ('','off')
frags = self.frags
nFrags= len(frags)
if nFrags==1:
f = frags[0]
if hasattr(f,'text'):
fontSize = f.fontSize
fontName = f.fontName
ascent, descent = getAscentDescent(fontName,fontSize)
kind = 0
L=f.text.split('\n')
for l in L:
currentWidth = stringWidth(l,fontName,fontSize)
requiredWidth = max(currentWidth,requiredWidth)
extraSpace = maxWidth-currentWidth
lines.append((extraSpace,l.split(' '),currentWidth))
lineno = lineno+1
maxWidth = lineno<len(maxWidths) and maxWidths[lineno] or maxWidths[-1]
blPara = f.clone(kind=kind, lines=lines,ascent=ascent,descent=descent,fontSize=fontSize)
else:
kind = f.kind
lines = f.lines
for L in lines:
if kind==0:
currentWidth = L[2]
else:
currentWidth = L.currentWidth
requiredWidth = max(currentWidth,requiredWidth)
blPara = f.clone(kind=kind, lines=lines)
self.width = max(self.width,requiredWidth)
return blPara
elif nFrags<=0:
return ParaLines(kind=0, fontSize=style.fontSize, fontName=style.fontName,
textColor=style.textColor, ascent=style.fontSize,descent=-0.2*style.fontSize,
lines=[])
else:
for L in _getFragLines(frags):
currentWidth, n, w = _getFragWord(L,maxWidth)
f = w[0][0]
maxSize = f.fontSize
maxAscent, minDescent = getAscentDescent(f.fontName,maxSize)
words = [f.clone()]
words[-1].text = w[0][1]
for i in w[1:]:
f = i[0].clone()
f.text=i[1]
words.append(f)
fontSize = f.fontSize
fontName = f.fontName
if calcBounds:
cbDefn = getattr(f,'cbDefn',None)
if getattr(cbDefn,'width',0):
descent,ascent = imgVRange(imgNormV(cbDefn.height,fontSize),cbDefn.valign,fontSize)
else:
ascent, descent = getAscentDescent(fontName,fontSize)
else:
ascent, descent = getAscentDescent(fontName,fontSize)
maxSize = max(maxSize,fontSize)
maxAscent = max(maxAscent,ascent)
minDescent = min(minDescent,descent)
lineno += 1
maxWidth = lineno<len(maxWidths) and maxWidths[lineno] or maxWidths[-1]
requiredWidth = max(currentWidth,requiredWidth)
extraSpace = maxWidth - currentWidth
lines.append(ParaLines(extraSpace=extraSpace,wordCount=n, words=words, fontSize=maxSize, ascent=maxAscent,descent=minDescent,currentWidth=currentWidth))
self.width = max(self.width,requiredWidth)
return ParaLines(kind=1, lines=lines)
return lines
breakLinesCJK = breakLines #TODO fixme fixme fixme
    # we need this here to get the right splitter
def _get_split_blParaFunc(self):
return _split_blPara
class PythonPreformatted(XPreformatted):
"""Used for syntax-colored Python code, otherwise like XPreformatted.
"""
formats = {
'rest' : ('', ''),
'comment' : ('<font color="green">', '</font>'),
'keyword' : ('<font color="blue"><b>', '</b></font>'),
'parameter' : ('<font color="black">', '</font>'),
'identifier' : ('<font color="red">', '</font>'),
'string' : ('<font color="gray">', '</font>') }
def __init__(self, text, style, bulletText = None, dedent=0, frags=None):
if text:
text = self.fontify(self.escapeHtml(text))
XPreformatted.__init__(self, text, style,bulletText=bulletText,dedent=dedent,frags=frags)
def escapeHtml(self, text):
s = text.replace('&', '&')
s = s.replace('<', '<')
s = s.replace('>', '>')
return s
def fontify(self, code):
"Return a fontified version of some Python code."
if code[0] == '\n':
code = code[1:]
tags = PyFontify.fontify(code)
fontifiedCode = ''
pos = 0
for k, i, j, dummy in tags:
fontifiedCode = fontifiedCode + code[pos:i]
s, e = self.formats[k]
fontifiedCode = fontifiedCode + s + code[i:j] + e
pos = j
fontifiedCode = fontifiedCode + code[pos:]
return fontifiedCode
if __name__=='__main__': #NORUNTESTS
import sys
def dumpXPreformattedLines(P):
print('\n############dumpXPreforemattedLines(%s)' % str(P))
lines = P.blPara.lines
n =len(lines)
outw=sys.stdout.write
for l in range(n):
line = lines[l]
words = line.words
nwords = len(words)
outw('line%d: %d(%d)\n ' % (l,nwords,line.wordCount))
for w in range(nwords):
outw(" %d:'%s'"%(w,words[w].text))
print()
def dumpXPreformattedFrags(P):
print('\n############dumpXPreforemattedFrags(%s)' % str(P))
frags = P.frags
n =len(frags)
for l in range(n):
print("frag%d: '%s'" % (l, frags[l].text))
outw=sys.stdout.write
l = 0
for L in _getFragLines(frags):
n=0
for W in _getFragWords(L,360):
outw("frag%d.%d: size=%d" % (l, n, W[0]))
n = n + 1
for w in W[1:]:
outw(" '%s'" % w[1])
print()
l = l + 1
def try_it(text,style,dedent,aW,aH):
P=XPreformatted(text,style,dedent=dedent)
dumpXPreformattedFrags(P)
w,h = P.wrap(aW, aH)
dumpXPreformattedLines(P)
S = P.split(aW,aH)
dumpXPreformattedLines(P)
for s in S:
s.wrap(aW,aH)
dumpXPreformattedLines(s)
aH = 500
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
styleSheet = getSampleStyleSheet()
B = styleSheet['BodyText']
DTstyle = ParagraphStyle("discussiontext", parent=B)
DTstyle.fontName= 'Helvetica'
for (text,dedent,style, aW, aH, active) in [('''
The <font name=courier color=green>CMYK</font> or subtractive
method follows the way a printer
mixes three pigments (cyan, magenta, and yellow) to form colors.
Because mixing chemicals is more difficult than combining light there
is a fourth parameter for darkness. For example a chemical
combination of the <font name=courier color=green>CMY</font> pigments generally never makes a perfect
black -- instead producing a muddy color -- so, to get black printers
don't use the <font name=courier color=green>CMY</font> pigments but use a direct black ink. Because
<font name=courier color=green>CMYK</font> maps more directly to the way printer hardware works it may
be the case that &| & | colors specified in <font name=courier color=green>CMYK</font> will provide better fidelity
and better control when printed.
''',0,DTstyle, 456.0, 42.8, 0),
('''
This is a non rearranging form of the <b>Paragraph</b> class;
<b><font color=red>XML</font></b> tags are allowed in <i>text</i> and have the same
meanings as for the <b>Paragraph</b> class.
As for <b>Preformatted</b>, if dedent is non zero <font color=red size=+1>dedent</font>
common leading spaces will be removed from the
front of each line.
''',3, DTstyle, 456.0, 42.8, 0),
("""\
<font color=blue>class </font><font color=red>FastXMLParser</font>:
# Nonsense method
def nonsense(self):
self.foo = 'bar'
""",0, styleSheet['Code'], 456.0, 4.8, 1),
]:
if active: try_it(text,style,dedent,aW,aH)
| EduPepperPDTesting/pepper2013-testing | lms/djangoapps/reportlab/platypus/xpreformatted.py | Python | agpl-3.0 | 13,022 |
from django import shortcuts
def render( request, template, ** kwargs ):
kwargs[ 'request' ] = request
return shortcuts.render( request, template, kwargs )
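# Hedged usage sketch (illustration only): a hypothetical view that forwards
# extra context through the wrapper; 'home.html' and `items` are made-up names.
def example_view( request ):
    items = [ 'a', 'b' ]
    return render( request, 'home.html', items = items )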
| shlomimatichin/workflow | workflow/render.py | Python | gpl-3.0 | 159 |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import gto
from pyscf.data import nist
from pyscf.prop.rotational_gtensor import rhf
class KnowValues(unittest.TestCase):
def test_nuc_contribution(self):
mol = gto.M(atom='''H , 0. 0. 0.
F , 0. 0. 0.917
''')
nuc = rhf.nuc(mol)
self.assertAlmostEqual(nuc[0,0], 0.972976229429035, 9)
mol = gto.M(atom='''C , 0. 0. 0.
O , 0. 0. 1.1283
''')
nuc = rhf.nuc(mol)
self.assertAlmostEqual(nuc[0,0], 0.503388273805359, 9)
if __name__ == "__main__":
print("Full Tests of RHF rotational g-tensor")
unittest.main()
| gkc1000/pyscf | pyscf/prop/rotational_gtensor/test/test_rhf.py | Python | apache-2.0 | 1,370 |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 12 07:50:10 2017
@author: Mok Jun Neng
"""
#
# Hello World client in Python
# Connects REQ socket to tcp://localhost:5555
# Sends "Hello" to server, expects "World" back
#
import zmq
context = zmq.Context()
a = 1.23
# Socket to talk to server
print("Connecting to hello world server…")
socket = context.socket(zmq.REQ)
socket.connect("tcp://localhost:5555")
# Do 10 requests, waiting each time for a response
for request in range(10):
print("Sending request %s …" % request)
socket.send(b'%s'%(str(a)))
# Get the reply.
message = socket.recv()
    print(type(message))
    print("Received reply %s [ %s ]" % (request, message))
| tgymartin/green-fingers-2d | client.py | Python | gpl-3.0 | 706 |