code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
#
# junitxml: extensions to Python unittest to get output junitxml
# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
#
# Copying permitted under the LGPL-3 licence, included with this library.
"""unittest compatible JUnit XML output."""
import datetime
import re
import time
import unittest
# same format as sys.version_info: "A tuple containing the five components of
# the version number: major, minor, micro, releaselevel, and serial. All
# values except releaselevel are integers; the release level is 'alpha',
# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
# releaselevel of 'dev' for unreleased under-development code.
#
# If the releaselevel is 'alpha' then the major/minor/micro components are not
# established at this point, and setup.py will use a version of next-$(revno).
# If the releaselevel is 'final', then the tarball will be major.minor.micro.
# Otherwise it is major.minor.micro~$(revno).
__version__ = (0, 7, 0, 'alpha', 0)
def test_suite():
    """Return this library's own test suite (``setup.py test`` hook)."""
    from junitxml import tests
    return tests.test_suite()
class LocalTimezone(datetime.tzinfo):
    """tzinfo for the local timezone with a lazily computed, fixed UTC offset.

    The offset is derived once from a hard-coded reference timestamp, so DST
    transitions at other dates are not reflected.
    """

    def __init__(self):
        # Cached local-to-UTC offset; computed on first utcoffset() call.
        self._offset = None
    # It seems that the minimal possible implementation is to just return all
    # None for every function, but then it breaks...

    def utcoffset(self, dt):
        """Return the local UTC offset as a timedelta (computed once)."""
        if self._offset is None:
            # Arbitrary fixed reference instant; doesn't handle DST very well.
            reference = 1260423030
            local = datetime.datetime.fromtimestamp(reference)
            utc = datetime.datetime.utcfromtimestamp(reference)
            self._offset = local - utc
        return self._offset

    def dst(self, dt):
        """Report no daylight-saving adjustment, ever."""
        return datetime.timedelta(0)

    def tzname(self, dt):
        """The local zone is unnamed."""
        return None
def _error_name(eclass):
module = eclass.__module__
if module not in ("__main__", "builtins", "exceptions"):
return ".".join([module, eclass.__name__])
return eclass.__name__
# Characters not permitted in XML CDATA: control chars, surrogates and the
# two non-characters.  Written with backslash escapes so the branch below can
# detect whether the literal was interpreted (Python 3) or not (Python 2 str).
_non_cdata = "[\0-\b\x0B-\x1F\uD800-\uDFFF\uFFFE\uFFFF]+"
if "\\u" in _non_cdata:
    # Python 2: \u escapes are not interpreted in a plain str literal, so
    # decode them and work in unicode, returning UTF-8 bytes.
    _non_cdata = _non_cdata.decode("unicode-escape")
    def _strip_invalid_chars(s, _sub=re.compile(_non_cdata, re.UNICODE).sub):
        # Coerce byte strings to unicode before stripping; fall back to a
        # lossy ascii decode when the input is not valid UTF-8.
        if not isinstance(s, unicode):
            try:
                s = s.decode("utf-8")
            except UnicodeDecodeError:
                s = s.decode("ascii", "replace")
        return _sub("", s).encode("utf-8")
else:
    def _strip_invalid_chars(s, _sub=re.compile(_non_cdata, re.UNICODE).sub):
        # Python 3: the literal's escapes were already interpreted; just
        # strip the invalid characters from the (str) input.
        return _sub("", s)
def _escape_content(s):
    """Escape *s* for use as XML element content.

    Strips characters that are illegal in XML character data, then escapes
    the markup metacharacters ``&`` and ``<`` and the CDATA terminator
    ``]]>`` as entity/character references.

    Bug fixed: the replacement targets had been entity-unescaped at some
    point, turning every replace into a no-op (e.g. ``.replace("&", "&")``),
    so output was not escaped at all.
    """
    return (_strip_invalid_chars(s)
        .replace("&", "&amp;")
        .replace("<", "&lt;")
        .replace("]]>", "]]&gt;"))
def _escape_attr(s):
    """Escape *s* for use inside a double-quoted XML attribute value.

    Performs the same escaping as element content plus double quotes, and
    encodes tab and newline as character references so the values survive
    XML attribute-value normalisation.

    Bug fixed: the replacement strings had been entity-unescaped (the quote
    replacement even left unbalanced quotes, a syntax error); restored the
    intended entity/character references.
    """
    return (_strip_invalid_chars(s)
        .replace("&", "&amp;")
        .replace("<", "&lt;")
        .replace("]]>", "]]&gt;")
        .replace('"', "&quot;")
        .replace("\t", "&#x9;")
        .replace("\n", "&#xA;"))
class JUnitXmlResult(unittest.TestResult):
    """A TestResult which outputs JUnit compatible XML."""

    def __init__(self, stream):
        """Create a JUnitXmlResult.

        :param stream: A stream to write results to. Note that due to the
            nature of JUnit XML output, nothing will be written to the stream
            until stopTestRun() is called.
        """
        self.__super = super(JUnitXmlResult, self)
        self.__super.__init__()
        # GZ 2010-09-03: We have a problem if passed a text stream in Python 3
        #                as really we want to write raw UTF-8 to ensure that
        #                the encoding is not mangled later
        self._stream = stream
        self._results = []        # accumulated <testcase> XML fragments
        self._set_time = None     # pinned clock set via time(); None = real time
        self._test_start = None   # timestamp of the currently running test
        self._run_start = None    # timestamp recorded by startTestRun()
        self._tz_info = None      # lazily created LocalTimezone

    def startTestRun(self):
        """Start a test run."""
        self._run_start = self._now()

    def _get_tzinfo(self):
        # Lazily create and cache the local timezone object.
        if self._tz_info is None:
            self._tz_info = LocalTimezone()
        return self._tz_info

    def _now(self):
        # Return the pinned time when one has been supplied via time(),
        # otherwise the real, timezone-aware local time.
        if self._set_time is not None:
            return self._set_time
        else:
            return datetime.datetime.now(self._get_tzinfo())

    def time(self, a_datetime):
        # Pin the clock to a_datetime.  If the run apparently started later
        # than the pinned time, pull the run start back so the computed run
        # duration cannot go negative because of the pinning alone.
        self._set_time = a_datetime
        if (self._run_start is not None and
            self._run_start > a_datetime):
            self._run_start = a_datetime

    def startTest(self, test):
        self.__super.startTest(test)
        self._test_start = self._now()

    def _duration(self, from_datetime):
        # Seconds elapsed since from_datetime, as a float.
        try:
            delta = self._now() - from_datetime
        except TypeError:
            # Subtracting a naive datetime from an aware one (or vice versa)
            # raises TypeError; fall back to a sentinel negative duration.
            # NOTE(review): n is assigned but never used.
            n = self._now()
            delta = datetime.timedelta(-1)
        seconds = delta.days * 3600*24 + delta.seconds
        return seconds + 0.000001 * delta.microseconds

    def _test_case_string(self, test):
        # Append the opening of a <testcase> element; callers terminate it
        # with either '/>' (no body) or '>...</testcase>'.
        duration = self._duration(self._test_start)
        test_id = test.id()
        # Split on the last dot not inside a parameter
        class_end = test_id.rfind(".", 0, test_id.find("("))
        if class_end == -1:
            classname, name = "", test_id
        else:
            classname, name = test_id[:class_end], test_id[class_end+1:]
        self._results.append('<testcase classname="%s" name="%s" '
            'time="%0.3f"' % (_escape_attr(classname), _escape_attr(name), duration))

    def stopTestRun(self):
        """Stop a test run.

        This allows JUnitXmlResult to output the XML representation of the test
        run.
        """
        duration = self._duration(self._run_start)
        # Unexpected successes are emitted as <failure> elements (see
        # addUnexpectedSuccess), so they are included in the failure count.
        self._stream.write('<testsuite errors="%d" failures="%d" name="" '
            'tests="%d" time="%0.3f">\n' % (len(self.errors),
            len(self.failures) + len(getattr(self, "unexpectedSuccesses", ())),
            self.testsRun, duration))
        self._stream.write(''.join(self._results))
        self._stream.write('</testsuite>\n')

    def addError(self, test, error):
        self.__super.addError(test, error)
        self._test_case_string(test)
        self._results.append('>\n')
        self._results.append('<error type="%s">%s</error>\n</testcase>\n' % (
            _escape_attr(_error_name(error[0])),
            _escape_content(self._exc_info_to_string(error, test))))

    def addFailure(self, test, error):
        self.__super.addFailure(test, error)
        self._test_case_string(test)
        self._results.append('>\n')
        self._results.append('<failure type="%s">%s</failure>\n</testcase>\n' %
            (_escape_attr(_error_name(error[0])),
            _escape_content(self._exc_info_to_string(error, test))))

    def addSuccess(self, test):
        self.__super.addSuccess(test)
        self._test_case_string(test)
        self._results.append('/>\n')

    def addSkip(self, test, reason):
        try:
            self.__super.addSkip(test, reason)
        except AttributeError:
            # Python < 2.7|3.1
            pass
        self._test_case_string(test)
        self._results.append('>\n')
        self._results.append('<skip>%s</skip>\n</testcase>\n'% _escape_attr(reason))

    def addUnexpectedSuccess(self, test):
        try:
            self.__super.addUnexpectedSuccess(test)
        except AttributeError:
            # Python < 2.7|3.1
            pass
        self._test_case_string(test)
        self._results.append('>\n')
        # JUnit has no native "unexpected success" concept; report it as a
        # failure with a descriptive type.
        self._results.append('<failure type="unittest.case._UnexpectedSuccess"/>\n</testcase>\n')

    def addExpectedFailure(self, test, error):
        try:
            self.__super.addExpectedFailure(test, error)
        except AttributeError:
            # Python < 2.7|3.1
            pass
        self._test_case_string(test)
        # Expected failures count as passing test cases in the XML output.
        self._results.append('/>\n')
| kraziegent/mysql-5.6 | xtrabackup/test/python/junitxml/__init__.py | Python | gpl-2.0 | 7,719 |
#!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies libraries (in identical-name) are properly handled by Xcode.
The names for all libraries participating in this build are:
libtestlib.a - identical-name/testlib
libtestlib.a - identical-name/proxy/testlib
libproxy.a - identical-name/proxy
The first two libs produce a hash collision in Xcode when Gyp is executed,
because they have the same name and would be copied to the same directory with
Xcode default settings.
For this scenario to work one needs to change the Xcode variables SYMROOT and
CONFIGURATION_BUILD_DIR. Setting these to per-lib-unique directories, avoids
copying the libs into the same directory.
The test consists of two steps. The first one verifies that by setting both
vars, there is no hash collision anymore during Gyp execution and that the libs
can actually be built. The second one verifies that there is still a hash
collision if the vars are not set and thus the current behavior is preserved.
"""
import TestGyp
import sys
def IgnoreOutput(string, expected_string):
  """Match callback that accepts any output, regardless of expectation."""
  return True
# The scenario only applies to the Xcode generator, so run on macOS only.
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['xcode'])
  # Step 1: with SYMROOT/CONFIGURATION_BUILD_DIR set per library, gyp
  # generation succeeds (no hash collision) and the targets build.
  test.run_gyp('test.gyp', chdir='identical-name')
  test.build('test.gyp', test.ALL, chdir='identical-name')
  # Step 2: without those overrides, generation is expected to fail, so no
  # Xcode project file must have been produced.
  test.run_gyp('test-should-fail.gyp', chdir='identical-name')
  test.built_file_must_not_exist('test-should-fail.xcodeproj')
  test.pass_test()
| Jet-Streaming/gyp | test/mac/gyptest-identical-name.py | Python | bsd-3-clause | 1,592 |
# flake8: noqa
from .base import Settings, Configuration
from .decorators import pristinemethod
__version__ = '0.8'
__all__ = ['Configuration', 'pristinemethod', 'Settings']
def _setup():
    """Install the configurations import hook and initialise Django."""
    from . import importer
    importer.install()
    # django.setup() only exists on django >= 1.7; on older versions the
    # attribute access raises AttributeError and no setup call is needed.
    import django
    try:
        django.setup()
    except AttributeError:
        pass
def load_ipython_extension(ipython):
    """IPython extension entry point.

    ``ipython`` is the currently active ``InteractiveShell`` instance; it is
    accepted per the extension protocol but unused — loading the extension
    only needs to run the configurations setup.
    """
    _setup()
def setup(app=None):
    """Sphinx extension entry point.

    Activated by adding ``'configurations'`` to the ``extensions`` list in a
    project's ``conf.py``. ``app`` is the Sphinx application object, unused
    here — the hook only runs the configurations setup.
    """
    _setup()
| blindroot/django-configurations | configurations/__init__.py | Python | bsd-3-clause | 818 |
from setuptools import setup

# Distribution metadata for the staticbs package; a single top-level package
# with the numeric stack pulled in via install_requires.
setup(name='staticbs',
      version='0.11',
      description='A simple 3D electro-magnetostatic biot-savart solving simulator',
      # Trove classifiers describing maturity, licence and intended audience.
      classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Physics',
        'Topic :: Scientific/Engineering :: Visualization',
      ],
      url='https://github.com/grungy/staticbs',
      author='Josh Marks and Andrea Waite',
      author_email='jmarks@udel.edu',
      license='MIT',
      packages=['staticbs'],
      install_requires=['numpy', 'scipy'],
      # Install unpacked on disk rather than as a zipped egg.
      zip_safe=False)
| grungy/staticbs | setup.py | Python | mit | 751 |
from django.conf.urls import patterns, url
# Route every path under this URL prefix to the protected-thumbnail view, so
# access control runs before a thumbnail file is served.
urlpatterns = patterns('leonardo.module.media.server.views',
    url(r'^(?P<path>.*)$', 'serve_protected_thumbnail',),
)
| django-leonardo/django-leonardo | leonardo/module/media/server/thumbnails_server_urls.py | Python | bsd-3-clause | 208 |
"""
Contains content applicability management classes
"""
from gettext import gettext as _
from logging import getLogger
from celery import task
from pulp.plugins.conduits.profiler import ProfilerConduit
from pulp.plugins.config import PluginCallConfiguration
from pulp.plugins.loader import api as plugin_api, exceptions as plugin_exceptions
from pulp.plugins.profiler import Profiler
from pulp.server.async.tasks import Task
from pulp.server.db.model.consumer import Bind, RepoProfileApplicability, UnitProfile
from pulp.server.db.model.criteria import Criteria
from pulp.server.db.model.repository import Repo
from pulp.server.managers import factory as managers
from pulp.server.managers.consumer.query import ConsumerQueryManager
_logger = getLogger(__name__)
class ApplicabilityRegenerationManager(object):
    """Regenerates and stores RepoProfileApplicability data for consumer
    unit profiles against the repositories the consumers are bound to."""

    @staticmethod
    def regenerate_applicability_for_consumers(consumer_criteria):
        """
        Regenerate and save applicability data for given updated consumers.

        :param consumer_criteria: The consumer selection criteria
        :type consumer_criteria: dict
        """
        consumer_criteria = Criteria.from_dict(consumer_criteria)
        consumer_query_manager = managers.consumer_query_manager()
        bind_manager = managers.consumer_bind_manager()
        consumer_profile_manager = managers.consumer_profile_manager()

        # Process consumer_criteria and get all the consumer ids satisfied by the criteria
        consumer_criteria.fields = ['id']
        consumer_ids = [c['id'] for c in consumer_query_manager.find_by_criteria(consumer_criteria)]

        # Following logic of checking existing applicability and getting required data
        # to generate applicability is a bit more complicated than what it could be 'by design'.
        # It is to optimize the number of db queries and improving applicability generation
        # performance. Please consider the implications for applicability generation time
        # when making any modifications to this code.

        # Get all unit profiles associated with given consumers
        unit_profile_criteria = Criteria(
            filters={'consumer_id': {'$in': consumer_ids}},
            fields=['consumer_id', 'profile_hash', 'content_type', 'id'])
        all_unit_profiles = consumer_profile_manager.find_by_criteria(unit_profile_criteria)

        # Create a consumer-profile map with consumer id as the key and list of tuples
        # with profile details as the value
        consumer_unit_profiles_map = {}
        # Also create a map of profile_id keyed by profile_hash for profile lookup.
        profile_hash_profile_id_map = {}
        for unit_profile in all_unit_profiles:
            profile_hash = unit_profile['profile_hash']
            content_type = unit_profile['content_type']
            consumer_id = unit_profile['consumer_id']
            profile_id = unit_profile['id']

            profile_tuple = (profile_hash, content_type)
            # Add this tuple to the list of profile tuples for a consumer
            consumer_unit_profiles_map.setdefault(consumer_id, []).append(profile_tuple)

            # We need just one profile_id per profile_hash to be used in regenerate_applicability
            # method to get the actual profile corresponding to given profile_hash.
            if profile_hash not in profile_hash_profile_id_map:
                profile_hash_profile_id_map[profile_hash] = profile_id

        # Get all repos bound to given consumers
        bind_criteria = Criteria(filters={'consumer_id': {'$in': consumer_ids}},
                                 fields=['repo_id', 'consumer_id'])
        all_repo_bindings = bind_manager.find_by_criteria(bind_criteria)

        # Create a repo-consumer map with repo_id as the key and consumer_id list as the value
        repo_consumers_map = {}
        for binding in all_repo_bindings:
            repo_consumers_map.setdefault(binding['repo_id'], []).append(binding['consumer_id'])

        # Create a set of (repo_id, (profile_hash, content_type))
        repo_profile_hashes = set()
        for repo_id, consumer_id_list in repo_consumers_map.items():
            for consumer_id in consumer_id_list:
                if consumer_id in consumer_unit_profiles_map:
                    for unit_profile_tuple in consumer_unit_profiles_map[consumer_id]:
                        repo_profile_hashes.add((repo_id, unit_profile_tuple))

        # Iterate through each tuple in repo_profile_hashes set and regenerate applicability,
        # if it doesn't exist. These are all guaranteed to be unique tuples because of the logic
        # used to create maps and sets above, eliminating multiple unnecessary queries
        # to check for existing applicability for same profiles.
        manager = managers.applicability_regeneration_manager()
        for repo_id, (profile_hash, content_type) in repo_profile_hashes:
            # Check if applicability for given profile_hash and repo_id already exists
            if ApplicabilityRegenerationManager._is_existing_applicability(repo_id, profile_hash):
                continue
            # If applicability does not exist, generate applicability data for given profile
            # and repo id.
            profile_id = profile_hash_profile_id_map[profile_hash]
            manager.regenerate_applicability(profile_hash, content_type, profile_id, repo_id)

    @staticmethod
    def regenerate_applicability_for_repos(repo_criteria):
        """
        Regenerate and save applicability data affected by given updated repositories.

        :param repo_criteria: The repo selection criteria
        :type repo_criteria: dict
        """
        repo_criteria = Criteria.from_dict(repo_criteria)
        repo_query_manager = managers.repo_query_manager()

        # Process repo criteria
        repo_criteria.fields = ['id']
        repo_ids = [r['id'] for r in repo_query_manager.find_by_criteria(repo_criteria)]

        for repo_id in repo_ids:
            # Find all existing applicabilities for given repo_id. Setting batch size of 5 ensures
            # the MongoDB cursor does not time out. See https://pulp.plan.io/issues/998#note-6 for
            # more details.
            existing_applicabilities = RepoProfileApplicability.get_collection().find(
                {'repo_id': repo_id}).batch_size(5)
            for existing_applicability in existing_applicabilities:
                # Convert cursor to RepoProfileApplicability object
                existing_applicability = RepoProfileApplicability(**dict(existing_applicability))
                profile_hash = existing_applicability['profile_hash']
                unit_profile = UnitProfile.get_collection().find_one({'profile_hash': profile_hash},
                                                                     fields=['id', 'content_type'])
                if unit_profile is None:
                    # Unit profiles change whenever packages are installed or removed on consumers,
                    # and it is possible that existing_applicability references a UnitProfile
                    # that no longer exists. This is harmless, as Pulp has a monthly cleanup task
                    # that will identify these dangling references and remove them.
                    continue
                # Regenerate applicability data for given unit_profile and repo id
                ApplicabilityRegenerationManager.regenerate_applicability(
                    profile_hash, unit_profile['content_type'], unit_profile['id'], repo_id,
                    existing_applicability)

    @staticmethod
    def regenerate_applicability(profile_hash, content_type, profile_id,
                                 bound_repo_id, existing_applicability=None):
        """
        Regenerate and save applicability data for given profile and bound repo id.
        If existing_applicability is not None, replace it with the new applicability data.

        :param profile_hash: hash of the unit profile
        :type profile_hash: basestring
        :param content_type: profile (unit) type ID
        :type content_type: str
        :param profile_id: unique id of the unit profile
        :type profile_id: str
        :param bound_repo_id: repo id to be used to calculate applicability
                              against the given unit profile
        :type bound_repo_id: str
        :param existing_applicability: existing RepoProfileApplicability object to be replaced
        :type existing_applicability: pulp.server.db.model.consumer.RepoProfileApplicability
        """
        profiler_conduit = ProfilerConduit()
        # Get the profiler for content_type of given unit_profile
        profiler, profiler_cfg = ApplicabilityRegenerationManager._profiler(content_type)

        # Check if the profiler supports applicability, else return
        if profiler.calculate_applicable_units == Profiler.calculate_applicable_units:
            # If base class calculate_applicable_units method is called,
            # skip applicability regeneration
            return

        # Find out which content types have unit counts greater than zero in the bound repo
        repo_content_types = ApplicabilityRegenerationManager._get_existing_repo_content_types(
            bound_repo_id)
        # Get the intersection of existing types in the repo and the types that the profiler
        # handles. If the intersection is not empty, regenerate applicability
        if (set(repo_content_types) & set(profiler.metadata()['types'])):
            # Get the actual profile for existing_applicability or lookup using profile_id
            if existing_applicability:
                profile = existing_applicability.profile
            else:
                unit_profile = UnitProfile.get_collection().find_one({'id': profile_id},
                                                                     fields=['profile'])
                profile = unit_profile['profile']
            call_config = PluginCallConfiguration(plugin_config=profiler_cfg,
                                                  repo_plugin_config=None)
            try:
                applicability = profiler.calculate_applicable_units(profile,
                                                                    bound_repo_id,
                                                                    call_config,
                                                                    profiler_conduit)
            except NotImplementedError:
                msg = "Profiler for content type [%s] does not support applicability" % content_type
                _logger.debug(msg)
                return

            if existing_applicability:
                # Update existing applicability object
                existing_applicability.applicability = applicability
                existing_applicability.save()
            else:
                # Create a new RepoProfileApplicability object and save it in the db
                # (this branch only runs when unit_profile was looked up above).
                RepoProfileApplicability.objects.create(profile_hash,
                                                        bound_repo_id,
                                                        unit_profile['profile'],
                                                        applicability)

    @staticmethod
    def _get_existing_repo_content_types(repo_id):
        """
        For the given repo_id, return a list of content_type_ids that have content units counts
        greater than 0.

        :param repo_id: The repo_id for the repository that we wish to know the unit types contained
                        therein
        :type repo_id: basestring
        :return: A list of content type ids that have unit counts greater than 0
        :rtype: list
        """
        repo_content_types_with_non_zero_unit_count = []
        repo = managers.repo_query_manager().find_by_id(repo_id)
        if repo:
            for content_type, count in repo['content_unit_counts'].items():
                if count > 0:
                    repo_content_types_with_non_zero_unit_count.append(content_type)
        return repo_content_types_with_non_zero_unit_count

    @staticmethod
    def _is_existing_applicability(repo_id, profile_hash):
        """
        Check if applicability for given repo and profle hash is already calculated.

        :param repo_id: repo id
        :type repo_id: basestring
        :param profile_hash: unit profile hash
        :type profile_hash: basestring
        :return: true if applicability exists, false otherwise
        :type: boolean
        """
        query_params = {'repo_id': repo_id, 'profile_hash': profile_hash}
        # Project only _id: existence is all we need to know.
        if RepoProfileApplicability.get_collection().find_one(query_params, fields=['_id']):
            return True
        return False

    @staticmethod
    def _profiler(type_id):
        """
        Find the profiler.
        Returns the Profiler base class when not matched.

        :param type_id: The content type ID.
        :type type_id: str
        :return: (profiler, cfg)
        :rtype: tuple
        """
        try:
            plugin, cfg = plugin_api.get_profiler_by_type(type_id)
        except plugin_exceptions.PluginNotFound:
            # Fall back to the no-op base Profiler with an empty config.
            plugin = Profiler()
            cfg = {}
        return plugin, cfg
# Celery task wrappers around the static methods above, so the regeneration
# work can be dispatched asynchronously by Pulp's tasking system.
regenerate_applicability_for_consumers = task(
    ApplicabilityRegenerationManager.regenerate_applicability_for_consumers, base=Task,
    ignore_result=True)
regenerate_applicability_for_repos = task(
    ApplicabilityRegenerationManager.regenerate_applicability_for_repos, base=Task,
    ignore_result=True)
class DoesNotExist(Exception):
    """Raised by a manager's get() when no database object matches the
    given query parameters."""
class MultipleObjectsReturned(Exception):
    """Raised by a manager's get() when the query matches more than one
    database object."""
class RepoProfileApplicabilityManager(object):
    """
    This class is useful for querying for RepoProfileApplicability objects in the database.
    """
    def create(self, profile_hash, repo_id, profile, applicability):
        """
        Create and return a RepoProfileApplicability object.

        :param profile_hash: The hash of the profile that this object contains applicability data
                             for
        :type profile_hash: basestring
        :param repo_id: The repo ID that this applicability data is for
        :type repo_id: basestring
        :param profile: The entire profile that resulted in the profile_hash
        :type profile: object
        :param applicability: A dictionary structure mapping unit type IDs to lists of applicable
                              Unit IDs.
        :type applicability: dict
        :return: A new RepoProfileApplicability object
        :rtype: pulp.server.db.model.consumer.RepoProfileApplicability
        """
        applicability = RepoProfileApplicability(
            profile_hash=profile_hash, repo_id=repo_id, profile=profile,
            applicability=applicability)
        applicability.save()
        return applicability

    def filter(self, query_params):
        """
        Get a list of RepoProfileApplicability objects with the given MongoDB query dict.

        :param query_params: A MongoDB query dictionary that selects RepoProfileApplicability
                             documents
        :type query_params: dict
        :return: A list of RepoProfileApplicability objects that match the given query
        :rtype: list
        """
        collection = RepoProfileApplicability.get_collection()
        mongo_applicabilities = collection.find(query_params)
        # Wrap each raw document in the model class.
        applicabilities = [RepoProfileApplicability(**dict(applicability))
                           for applicability in mongo_applicabilities]
        return applicabilities

    def get(self, query_params):
        """
        Get a single RepoProfileApplicability object with the given MongoDB query dict. This
        will raise a DoesNotExist if no such object exists. It will also raise
        MultipleObjectsReturned if the query_dict was not specific enough to match just one
        RepoProfileApplicability object.

        :param query_params: A MongoDB query dictionary that selects a single
                             RepoProfileApplicability document
        :type query_params: dict
        :return: A RepoProfileApplicability object that matches the given query
        :rtype: pulp.server.db.model.consumer.RepoProfileApplicability
        """
        applicability = self.filter(query_params)
        if not applicability:
            raise DoesNotExist(_('The RepoProfileApplicability object does not exist.'))
        if len(applicability) > 1:
            error_message = _('The given query matched %(num)s documents.')
            error_message = error_message % {'num': len(applicability)}
            raise MultipleObjectsReturned(error_message)
        return applicability[0]

    @staticmethod
    def remove_orphans():
        """
        The RepoProfileApplicability objects can become orphaned over time, as repositories are
        deleted, or as consumer profiles change. This method searches for RepoProfileApplicability
        objects that reference either repositories or profile hashes that no longer exist in Pulp.
        """
        # Find all of the repo_ids that are referenced by RepoProfileApplicability objects
        rpa_collection = RepoProfileApplicability.get_collection()
        rpa_repo_ids = rpa_collection.distinct('repo_id')

        # Find all of the repo_ids that exist in Pulp
        repo_ids = Repo.get_collection().distinct('id')

        # Find rpa_repo_ids that aren't part of repo_ids
        missing_repo_ids = list(set(rpa_repo_ids) - set(repo_ids))

        # Remove all RepoProfileApplicability objects that reference these repo_ids
        if missing_repo_ids:
            rpa_collection.remove({'repo_id': {'$in': missing_repo_ids}})

        # Next, we need to find profile_hashes that don't exist in the UnitProfile collection
        rpa_profile_hashes = rpa_collection.distinct('profile_hash')

        # Find the profile hashes that exist in current UnitProfiles
        profile_hashes = UnitProfile.get_collection().distinct('profile_hash')

        # Find profile hashes that we have RepoProfileApplicability objects for, but no real
        # UnitProfiles
        missing_profile_hashes = list(set(rpa_profile_hashes) - set(profile_hashes))

        # Remove all RepoProfileApplicability objects that reference these profile hashes
        if missing_profile_hashes:
            rpa_collection.remove({'profile_hash': {'$in': missing_profile_hashes}})
# Instantiate one of the managers on the object it manages for convenience:
# RepoProfileApplicability.objects then behaves like a Django-style manager.
RepoProfileApplicability.objects = RepoProfileApplicabilityManager()
def retrieve_consumer_applicability(consumer_criteria, content_types=None):
    """
    Query content applicability for consumers matched by a given consumer_criteria, optionally
    limiting by content type.

    This method returns a list of dictionaries that each have two
    keys: 'consumers', and 'applicability'. 'consumers' will index a list of consumer_ids,
    for consumers that have the same repository bindings and profiles. 'applicability' will
    index a dictionary that will have keys for each content type that is applicable, and the
    content type ids will index the applicability data for those content types. For example,

    [{'consumers': ['consumer_1', 'consumer_2'],
      'applicability': {'content_type_1': ['unit_1', 'unit_3']}},
     {'consumers': ['consumer_2', 'consumer_3'],
      'applicability': {'content_type_1': ['unit_1', 'unit_2']}}]

    :param consumer_criteria: A consumer selection criteria dict identifying the consumers
                              that the applicability data should be retrieved against
                              (mutated here: its 'fields' entry is overwritten)
    :type consumer_criteria: dict
    :param content_types: An optional list of content types that the caller wishes to limit the
                          results to. Defaults to None, which will return data for all types
    :type content_types: list
    :return: applicability data matching the consumer criteria query
    :rtype: list
    """
    # We only need the consumer ids
    consumer_criteria['fields'] = ['id']
    consumer_ids = [c['id'] for c in ConsumerQueryManager.find_by_criteria(consumer_criteria)]
    consumer_map = dict([(c, {'profiles': [], 'repo_ids': []}) for c in consumer_ids])

    # Fill out the mapping of consumer_ids to profiles, and store the list of profile_hashes
    profile_hashes = _add_profiles_to_consumer_map_and_get_hashes(consumer_ids, consumer_map)

    # Now add in repo_ids that the consumers are bound to
    _add_repo_ids_to_consumer_map(consumer_ids, consumer_map)
    # We don't need the list of consumer_ids anymore, so let's free a little RAM
    del consumer_ids

    # Now lets get all RepoProfileApplicability objects that have the profile hashes for our
    # consumers
    applicability_map = _get_applicability_map(profile_hashes, content_types)
    # We don't need the profile_hashes anymore, so let's free some RAM
    del profile_hashes

    # Now we need to add consumers who match the applicability data to the applicability_map
    _add_consumers_to_applicability_map(consumer_map, applicability_map)
    # We don't need the consumer_map anymore, so let's free it up
    del consumer_map

    # Collate all the entries for the same sets of consumers together
    consumer_applicability_map = _get_consumer_applicability_map(applicability_map)
    # Free the applicability_map, we don't need it anymore
    del applicability_map

    # Form the data into the expected output format and return
    return _format_report(consumer_applicability_map)
def _add_consumers_to_applicability_map(consumer_map, applicability_map):
"""
For all consumers in the consumer_map, look for their profiles and repos in the
applicability_map, and if found, add the consumer_ids to the applicability_map.
:param consumer_map: A dictionary mapping consumer_ids to dictionaries with keys
'profiles' and 'repo_ids'. 'profiles' indexes a list of profiles
for each consumer_id, and 'repo_ids' indexes a list of repo_ids
that the consumer is bound to.
:type consumer_map: dict
:param applicability_map: The mapping of (profile_hash, repo_id) to applicability_data and
consumer_ids the data applies to. This method appends
consumer_ids to the appropriate lists of consumer_ids
:type applicability_map: dict
"""
for consumer_id, repo_profile_data in consumer_map.items():
for profile in repo_profile_data['profiles']:
for repo_id in repo_profile_data['repo_ids']:
repo_profile = (profile['profile_hash'], repo_id)
# Only add the consumer to the applicability map if there is applicability_data
# for this combination of repository and profile
if repo_profile in applicability_map:
applicability_map[repo_profile]['consumers'].append(consumer_id)
def _add_profiles_to_consumer_map_and_get_hashes(consumer_ids, consumer_map):
    """
    Query for all the profiles associated with the given list of consumer_ids, add those
    profiles to the consumer_map, and then return a list of all profile_hashes.

    :param consumer_ids: A list of consumer_ids that we want to map the profiles to
    :type consumer_ids: list
    :param consumer_map: A dictionary mapping consumer_ids to a dictionary with key 'profiles',
                         which indexes a list that this method will append the found profiles
                         to.
    :type consumer_map: dict
    :return: A list of the profile_hashes that were associated with the given
             consumers
    :rtype: list
    """
    # Single DB query for all consumers; only the two fields we use are
    # projected.
    profiles = UnitProfile.get_collection().find(
        {'consumer_id': {'$in': consumer_ids}},
        fields=['consumer_id', 'profile_hash'])
    profile_hashes = set()
    for p in profiles:
        consumer_map[p['consumer_id']]['profiles'].append(p)
        profile_hashes.add(p['profile_hash'])
    # Let's return a list of all the unique profile_hashes for the query we will do a
    # bit later for applicability data
    return list(profile_hashes)
def _add_repo_ids_to_consumer_map(consumer_ids, consumer_map):
    """
    Query for all bindings for the given list of consumer_ids, and for each one add the bound
    repo_ids to the consumer_map's entry for the consumer.

    :param consumer_ids: The list of consumer_ids. We could pull this from the consumer_map,
                         but since we already have this list it's probably more performant to
                         use it as is.
    :type consumer_ids: list
    :param consumer_map: A dictionary mapping consumer_ids to a dictionary with key 'repo_ids',
                         which indexes a list that this method will append the bound repo_ids
                         to.
    :type consumer_map: dict
    """
    # Single DB query for all consumers' bindings, projecting only what we use.
    bindings = Bind.get_collection().find(
        {'consumer_id': {'$in': consumer_ids}},
        fields=['consumer_id', 'repo_id'])
    for b in bindings:
        consumer_map[b['consumer_id']]['repo_ids'].append(b['repo_id'])
def _format_report(consumer_applicability_map):
"""
Turn the consumer_applicability_map into the expected response format for this API call.
:param consumer_applicability_map: A mapping of frozensets of consumers to their
applicability data
:type consumer_applicability_map: dict
:return: A list of dictionaries that have two keys, consumers
and applicability. consumers indexes a list of
consumer_ids, and applicability indexes the
applicability data for those consumer_ids.
:rtype: list
"""
report = []
for consumers, applicability in consumer_applicability_map.iteritems():
# If there are no consumers for this applicability data, there is no need to include
# it in the report
if consumers:
applicability_data = {'consumers': list(consumers),
'applicability': applicability}
report.append(applicability_data)
return report
def _get_applicability_map(profile_hashes, content_types):
    """
    Build an "applicability_map", which is a dictionary that maps tuples of
    (profile_hash, repo_id) to a dictionary of applicability data and consumer_ids. The
    consumer_ids are just initialized to an empty list, so that a later method can add
    consumers to it. For example, it might look like:

    {('profile_hash_1', 'repo_1'): {'applicability': {<applicability_data>}, 'consumers': []}}

    :param profile_hashes: A list of profile hashes that the applicabilities should be queried
                           with. The applicability map is initialized with all applicability
                           data for all the given profile_hashes.
    :type profile_hashes: list
    :param content_types: If not None, content_types is a list of content_types to
                          be included in the applicability data within the
                          applicability_map
    :type content_types: list or None
    :return: The applicability map
    :rtype: dict
    """
    applicabilities = RepoProfileApplicability.get_collection().find(
        {'profile_hash': {'$in': profile_hashes}},
        fields=['profile_hash', 'repo_id', 'applicability'])
    return_value = {}
    for a in applicabilities:
        if content_types is not None:
            # The caller has requested us to filter by content_type, so we need to look through
            # the applicability data and filter out the unwanted content types. Some
            # applicabilities may end up being empty if they don't have any data for the
            # requested types, so we'll build a list of those to remove.
            # Iterate over a copy of the keys: deleting entries while iterating
            # the live key view raises RuntimeError on Python 3.
            for key in list(a['applicability'].keys()):
                if key not in content_types:
                    del a['applicability'][key]
            # If a doesn't have anything worth reporting, move on to the next applicability
            if not a['applicability']:
                continue
        return_value[(a['profile_hash'], a['repo_id'])] = {'applicability': a['applicability'],
                                                           'consumers': []}
    return return_value
def _get_consumer_applicability_map(applicability_map):
"""
Massage the applicability_map into a form that will help us to collate applicability
groups that contain the same data together.
:param applicability_map: The mapping of (profile_hash, repo_id) to applicability_data and
consumer_ids it applies to. This method appends consumer_ids to
the appropriate lists of consumer_ids
:type applicability_map: dict
:return: The consumer_applicability_map, which maps frozensets of
consumer_ids to their collective applicability data.
:rtype: dict
"""
consumer_applicability_map = {}
for repo_profile, data in applicability_map.iteritems():
# This will be the key for our map, a set of the consumers that share data
consumers = frozenset(data['consumers'])
if consumers in consumer_applicability_map:
for content_type, applicability in data['applicability'].iteritems():
if content_type in consumer_applicability_map[consumers]:
# There is already applicability data for this consumer set and
# content type. We will convert the existing data and the new data to
# sets, generate the union of those sets, and turn it back into a list
# so that we can report unique units.
consumer_applicability_map[consumers][content_type] = list(
set(consumer_applicability_map[consumers][content_type]) |
set(applicability))
else:
# This consumer set does not already have applicability data for this type, so
# let's set applicability as the data for it
consumer_applicability_map[consumers][content_type] = applicability
else:
# This consumer set is not already part of the consumer_applicability_map, so we can
# set all the applicability data we have to this consumer set
consumer_applicability_map[consumers] = data['applicability']
return consumer_applicability_map
| credativ/pulp | server/pulp/server/managers/consumer/applicability.py | Python | gpl-2.0 | 31,340 |
#
# Copyright 2008-2017 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
# pylint: disable=no-member
from __future__ import absolute_import
import collections
import os
import xml.etree.ElementTree as ET
from vdsm.common import conv
from vdsm.common import errors
from vdsm.common import exception
from vdsm.config import config
from vdsm import constants
from vdsm import cpuarch
from vdsm import utils
from vdsm.virt import vmtune
from vdsm.virt import vmxml
from . import core
from . import hwclass
from . import lease
# Default disk bus per VM architecture, used when registering a disk that
# libvirt reports with a 'drive'-type address but vdsm did not previously
# know about (see Drive.update_device_info).
DEFAULT_INTERFACE_FOR_ARCH = {
    cpuarch.X86_64: 'ide',
    cpuarch.PPC64: 'scsi',
    cpuarch.PPC64LE: 'scsi'
}
class DISK_TYPE:
    # Values mirror the libvirt <disk type='...'> attribute: the kind of
    # backing a disk has (block device, network volume, or plain file).
    BLOCK = "block"
    NETWORK = "network"
    FILE = "file"
# For each disk type, the attribute of the <source> element that carries the
# device path (e.g. <source file=...> for files, <source dev=...> for block).
SOURCE_ATTR = {
    DISK_TYPE.FILE: 'file',
    DISK_TYPE.NETWORK: 'name',
    DISK_TYPE.BLOCK: 'dev'
}
class DRIVE_SHARED_TYPE:
    # Sharing policy of a drive. These (lowercased) strings are also the
    # accepted values of the 'shared' field in a drive's configuration;
    # see Drive._setExtSharedState for the mapping logic.
    NONE = "none"
    EXCLUSIVE = "exclusive"
    SHARED = "shared"
    TRANSIENT = "transient"
    @classmethod
    def getAllValues(cls):
        # TODO: use introspection
        return (cls.NONE, cls.EXCLUSIVE, cls.SHARED, cls.TRANSIENT)
class VolumeNotFound(errors.Base):
    # Raised when a volume UUID is not present in a drive's volume chain
    # (see Drive.volume_target and chain_index).
    msg = ("Cannot find volume {self.vol_id} in drive {self.drive_name}'s "
           "volume chain")
    def __init__(self, drive_name, vol_id):
        # drive_name and vol_id are interpolated into msg by errors.Base.
        self.drive_name = drive_name
        self.vol_id = vol_id
class InvalidBackingStoreIndex(errors.Base):
    # Raised while parsing a libvirt <backingStore> chain when the 'index'
    # attribute is present but not an integer (see Drive.parse_volume_chain).
    msg = ("Backing store for path {self.path} "
           "contains invalid index {self.index!r}")
    def __init__(self, path, index):
        # path and index are interpolated into msg by errors.Base.
        self.path = path
        self.index = index
# One link of a drive's backing chain as parsed from libvirt XML:
# the volume UUID, its path, its allocation info (currently always None,
# see Drive.parse_volume_chain) and its libvirt backing-store index
# (None for the active/top layer).
VolumeChainEntry = collections.namedtuple(
    'VolumeChainEntry',
    ['uuid', 'path', 'allocation', 'index'])
class Drive(core.Base):
    """
    A VM disk/cdrom/floppy device, backed by a file, a block device or a
    network volume. Generates the libvirt domain XML for the device and
    tracks its volume chain, sharing policy and thin-provisioning limits.
    """
    __slots__ = ('iface', '_path', 'readonly', 'bootOrder', 'domainID',
                 'poolID', 'imageID', 'UUID', 'volumeID', 'format',
                 'propagateErrors', 'address', 'apparentsize', 'volumeInfo',
                 'index', 'name', 'optional', 'shared', 'truesize',
                 'volumeChain', 'baseVolumeID', 'serial', 'reqsize', 'cache',
                 '_blockDev', 'extSharedState', 'drv', 'sgio', 'GUID',
                 'diskReplicate', '_diskType', 'hosts', 'protocol', 'auth',
                 'discard', 'vm_custom')
    VOLWM_CHUNK_SIZE = (config.getint('irs', 'volume_utilization_chunk_mb') *
                        constants.MEGAB)
    VOLWM_FREE_PCT = 100 - config.getint('irs', 'volume_utilization_percent')
    VOLWM_CHUNK_REPLICATE_MULT = 2  # Chunk multiplier during replication
    # Estimate of the additional space needed for qcow format internal data.
    VOLWM_COW_OVERHEAD = 1.1

    @classmethod
    def update_device_info(cls, vm, device_conf):
        # FIXME! We need to gather as much info as possible from the libvirt.
        # In the future we can return this real data to management instead of
        # vm's conf
        for x in vm.domain.get_device_elements('disk'):
            alias, devPath, name = _get_drive_identification(x)
            readonly = vmxml.find_first(x, 'readonly', None) is not None
            bootOrder = vmxml.find_attr(x, 'boot', 'order')

            devType = vmxml.attr(x, 'device')
            if devType == 'disk':
                # raw/qcow2
                drv = vmxml.find_attr(x, 'driver', 'type')
            else:
                drv = 'raw'
            # Get disk address
            address = vmxml.device_address(x)

            # Keep data as dict for easier debugging
            deviceDict = {'path': devPath, 'name': name,
                          'readonly': readonly, 'bootOrder': bootOrder,
                          'address': address, 'type': devType}

            # display indexed pairs of ordered values from 2 dicts
            # such as {key_1: (valueA_1, valueB_1), ...}
            def mergeDicts(deviceDef, dev):
                # Iterate the dict directly (not the Python-2-only
                # iterkeys()) so this works on Python 2 and 3 alike.
                return dict((k, (deviceDef[k], getattr(dev, k, None)))
                            for k in deviceDef)

            vm.log.debug('Looking for drive with attributes %s', deviceDict)
            for d in device_conf:
                # When we analyze a disk device that was already discovered in
                # the past (generally as soon as the VM is created) we should
                # verify that the cached path is the one used in libvirt.
                # We already hit few times the problem that after a live
                # migration the paths were not in sync anymore (BZ#1059482).
                if (hasattr(d, 'alias') and d.alias == alias and
                        d.path != devPath):
                    vm.log.warning('updating drive %s path from %s to %s',
                                   d.alias, d.path, devPath)
                    d.path = devPath
                if d.path == devPath:
                    d.name = name
                    d.type = devType
                    d.drv = drv
                    d.alias = alias
                    d.address = address
                    d.readonly = readonly
                    if bootOrder:
                        d.bootOrder = bootOrder
                    vm.log.debug('Matched %s', mergeDicts(deviceDict, d))
            # Update vm's conf with address for known disk devices
            knownDev = False
            for dev in vm.conf['devices']:
                # See comment in previous loop. This part is used to update
                # the vm configuration as well.
                if ('alias' in dev and dev['alias'] == alias and
                        dev['path'] != devPath):
                    vm.log.warning('updating drive %s config path from %s '
                                   'to %s', dev['alias'], dev['path'],
                                   devPath)
                    dev['path'] = devPath
                if (dev['type'] == hwclass.DISK and
                        dev['path'] == devPath):
                    dev['name'] = name
                    dev['address'] = address
                    dev['alias'] = alias
                    dev['readonly'] = str(readonly)
                    if bootOrder:
                        dev['bootOrder'] = bootOrder
                    vm.log.debug('Matched %s', mergeDicts(deviceDict, dev))
                    knownDev = True
            # Add unknown disk device to vm's conf
            if not knownDev:
                archIface = DEFAULT_INTERFACE_FOR_ARCH[vm.arch]
                iface = archIface if address['type'] == 'drive' else 'pci'
                diskDev = {'type': hwclass.DISK, 'device': devType,
                           'iface': iface, 'path': devPath, 'name': name,
                           'address': address, 'alias': alias,
                           'readonly': str(readonly)}
                if bootOrder:
                    diskDev['bootOrder'] = bootOrder
                vm.log.warn('Found unknown drive: %s', diskDev)
                vm.conf['devices'].append(diskDev)

    def __init__(self, log, **kwargs):
        if not kwargs.get('serial'):
            # No explicit serial requested: fall back to the image UUID.
            # NOTE: the original code was kwargs.get('imageID'[-20:]), which
            # slices the *literal* 'imageID' (a no-op since the literal is
            # only 7 characters long) and therefore always used the full
            # imageID. The no-op slice is removed here; behavior is kept
            # unchanged because the serial is guest-visible.
            self.serial = kwargs.get('imageID') or ''
        self._path = None
        super(Drive, self).__init__(log, **kwargs)
        if not hasattr(self, 'vm_custom'):
            self.vm_custom = {}
        self.device = getattr(self, 'device', 'disk')
        # Keep sizes as int
        self.reqsize = int(kwargs.get('reqsize', '0'))  # Backward compatible
        self.truesize = int(kwargs.get('truesize', '0'))
        self.apparentsize = int(kwargs.get('apparentsize', '0'))
        self.name = makeName(self.iface, self.index)
        self.cache = config.get('vars', 'qemu_drive_cache')
        self.discard = kwargs.get('discard', False)
        self._blockDev = None  # Lazy initialized
        self._customize()
        self._setExtSharedState()

    def _setExtSharedState(self):
        # We cannot use tobool here as shared can take several values
        # (e.g. none, exclusive) that would be all mapped to False.
        shared = str(getattr(self, "shared", "false")).lower()

        # Backward compatibility with the old values (true, false)
        if shared == 'true':
            self.extSharedState = DRIVE_SHARED_TYPE.SHARED
        elif shared == 'false':
            if config.getboolean('irs', 'use_volume_leases'):
                self.extSharedState = DRIVE_SHARED_TYPE.EXCLUSIVE
            else:
                self.extSharedState = DRIVE_SHARED_TYPE.NONE
        elif shared in DRIVE_SHARED_TYPE.getAllValues():
            self.extSharedState = shared
        else:
            raise ValueError("Unknown shared value %s" % shared)

    @property
    def hasVolumeLeases(self):
        if self.extSharedState != DRIVE_SHARED_TYPE.EXCLUSIVE:
            return False

        for volInfo in getattr(self, "volumeChain", []):
            if "leasePath" in volInfo and "leaseOffset" in volInfo:
                return True

        return False

    def __getitem__(self, key):
        try:
            value = getattr(self, str(key))
        except AttributeError:
            raise KeyError(key)
        else:
            return value

    def __contains__(self, attr):
        return hasattr(self, attr)

    def isDiskReplicationInProgress(self):
        return hasattr(self, "diskReplicate")

    @property
    def volExtensionChunk(self):
        """
        Returns the volume extension chunk size in bytes.

        This size is used for the thin provisioning on block devices. The value
        is based on the vdsm configuration but can also dynamically change
        according to the VM needs (e.g. increase during a live storage
        migration).
        """
        if self.isDiskReplicationInProgress():
            return self.VOLWM_CHUNK_SIZE * self.VOLWM_CHUNK_REPLICATE_MULT
        return self.VOLWM_CHUNK_SIZE

    @property
    def watermarkLimit(self):
        """
        Returns the watermark limit in bytes.

        When the LV usage reaches this limit an extension is in order (thin
        provisioning on block devices).
        """
        # Floor division keeps the result an int on Python 3 as well,
        # matching the Python 2 behavior of '/' on ints.
        return self.VOLWM_FREE_PCT * self.volExtensionChunk // 100

    def getNextVolumeSize(self, curSize, capacity):
        """
        Returns the next volume size in bytes. This value is based on the
        volExtensionChunk property and it's the size that should be requested
        for the next LV extension.  curSize is the current size of the volume
        to be extended.  For the leaf volume curSize == self.apparentsize.
        For internal volumes it is discovered by calling irs.getVolumeSize().
        capacity is the maximum size of the volume. It can be discovered using
        libvirt.virDomain.blockInfo() or qemuimg.info().
        """
        nextSize = utils.round(curSize + self.volExtensionChunk,
                               constants.MEGAB)
        return min(nextSize, self.getMaxVolumeSize(capacity))

    def getMaxVolumeSize(self, capacity):
        """
        Returns the maximum volume size in bytes. This value is larger than
        drive capacity since we must allocate extra space for cow internal
        data. The actual lv size may be larger due to rounding to next lvm
        extent.
        """
        return utils.round(capacity * self.VOLWM_COW_OVERHEAD,
                           constants.MEGAB)

    @property
    def chunked(self):
        """
        Return True if drive is using chunks and may require extending.

        If a drive is chunked, current drive write watermark and
        Drive.volExtensionChunk is used to detect if a drive should be
        extended, and getNextVolumeSize to find the new size.
        """
        return self.blockDev and self.format == "cow"

    @property
    def replicaChunked(self):
        """
        Return True if drive is replicating to chunked storage and the replica
        volume may require extending. See Drive.chunked for more info.
        """
        replica = getattr(self, "diskReplicate", {})
        return (replica.get("diskType") == DISK_TYPE.BLOCK and
                replica.get("format") == "cow")

    @property
    def networkDev(self):
        return getattr(self, '_diskType', None) == DISK_TYPE.NETWORK

    @property
    def blockDev(self):
        if self._blockDev is None:
            if self.networkDev or self.device in ("cdrom", "floppy"):
                self._blockDev = False
            else:
                try:
                    self._blockDev = utils.isBlockDevice(self.path)
                except Exception:
                    self.log.debug("Unable to determine if the path '%s' is a "
                                   "block device", self.path, exc_info=True)
        return self._blockDev

    @property
    def path(self):
        return self._path

    @path.setter
    def path(self, path):
        if self._path is not None and self._path != path:
            self.log.debug("Drive %s moved from %r to %r",
                           self.name, self._path, path)
            # After live storage migration domain type may have changed
            # invalidating cached blockDev.
            self._blockDev = None
        self._path = path

    @property
    def diskType(self):
        if self.blockDev:
            return DISK_TYPE.BLOCK
        elif self.networkDev:
            return DISK_TYPE.NETWORK
        else:
            return DISK_TYPE.FILE

    @diskType.setter
    def diskType(self, value):
        self._diskType = value

    @property
    def transientDisk(self):
        # Using getattr to handle legacy and removable drives.
        return getattr(self, 'shared', None) == DRIVE_SHARED_TYPE.TRANSIENT

    def _customize(self):
        if self.transientDisk:
            # Force the cache to be writethrough, which is qemu's default.
            # This is done to ensure that we don't ever use cache=none for
            # transient disks, since we create them in /var/run/vdsm which
            # may end up on tmpfs and don't support O_DIRECT, and qemu uses
            # O_DIRECT when cache=none and hence hotplug might fail with
            # error that one can take eternity to debug the reason behind it!
            self.cache = "writethrough"
        elif self.iface == 'virtio':
            try:
                self.cache = self.vm_custom['viodiskcache']
            except KeyError:
                pass  # Ignore if custom disk cache is missing

    def getLeasesXML(self):
        """
        Get lease device elements for drive leases.

        See `.lease.Device.getXML` for more info.

        :returns: generator of `..vmxml.Element` instances
        """
        if not self.hasVolumeLeases:
            return  # empty items generator

        # NOTE: at the moment we are generating the lease only for the leaf,
        # when libvirt will support shared leases this will loop over all the
        # volumes
        for volInfo in self.volumeChain[-1:]:
            device = lease.Device(self.log,
                                  lease_id=volInfo['volumeID'],
                                  sd_id=volInfo['domainID'],
                                  path=volInfo['leasePath'],
                                  offset=volInfo['leaseOffset'])
            yield device.getXML()

    def getXML(self):
        """
        Create domxml for disk/cdrom/floppy.

        <disk type='file' device='disk' snapshot='no'>
          <driver name='qemu' type='qcow2' cache='none'/>
          <source file='/path/to/image'/>
          <target dev='hda' bus='ide'/>
          <serial>54-a672-23e5b495a9ea</serial>
        </disk>
        """
        self._validate()
        diskelem = self.createXmlElem('disk', self.diskType,
                                      ['device', 'address', 'sgio'])
        diskelem.setAttrs(snapshot='no')

        diskelem.appendChild(_getSourceXML(self))

        if self.diskType == DISK_TYPE.NETWORK and hasattr(self, 'auth'):
            diskelem.appendChild(self._getAuthXML())

        diskelem.appendChild(self._getTargetXML())

        if self.extSharedState == DRIVE_SHARED_TYPE.SHARED:
            diskelem.appendChildWithArgs('shareable')

        if hasattr(self, 'readonly') and conv.tobool(self.readonly):
            diskelem.appendChildWithArgs('readonly')
        elif self.device == 'floppy' and not hasattr(self, 'readonly'):
            # floppies are used only internally for sysprep, so
            # they are readonly unless explicitely stated otherwise
            diskelem.appendChildWithArgs('readonly')

        if getattr(self, 'serial', False) and self.device != 'lun':
            diskelem.appendChildWithArgs('serial', text=self.serial)

        if hasattr(self, 'bootOrder'):
            diskelem.appendChildWithArgs('boot', order=self.bootOrder)

        if self.device == 'disk' or self.device == 'lun':
            diskelem.appendChild(_getDriverXML(self))

        if self.iotune:
            diskelem.appendChild(self._getIotuneXML())

        return diskelem

    def getReplicaXML(self):
        disk = vmxml.Element(
            "disk",
            device=self.diskReplicate["device"],
            snapshot="no",
            type=self.diskReplicate["diskType"],
        )
        disk.appendChild(_getSourceXML(self.diskReplicate))
        disk.appendChild(_getDriverXML(self.diskReplicate))
        return disk

    def _getAuthXML(self):
        auth = vmxml.Element("auth", username=self.auth["username"])
        auth.appendChildWithArgs("secret",
                                 type=self.auth["type"],
                                 uuid=self.auth["uuid"])
        return auth

    def _getTargetXML(self):
        target = vmxml.Element('target', dev=self.name)
        if self.iface:
            target.setAttrs(bus=self.iface)
        return target

    def _getIotuneXML(self):
        iotune = vmxml.Element('iotune')
        for key, value in sorted(self.iotune.items()):
            iotune.appendChildWithArgs(key, text=str(value))
        return iotune

    def _validate(self):
        if self.diskType == DISK_TYPE.NETWORK:
            if not getattr(self, 'hosts', None):
                raise ValueError("Network disk without hosts")
            if not getattr(self, 'protocol', None):
                raise ValueError("Network disk without protocol")

        if self.device != 'lun' and hasattr(self, 'sgio'):
            raise ValueError("sgio attribute can be set only for LUN devices")

        if self.device == 'lun' and self.format == 'cow':
            raise ValueError("cow format is not supported for LUN devices")

    @property
    def _xpath(self):
        """
        Returns xpath to the device in libvirt dom xml
        The path is relative to the root element
        """
        source_key = {
            DISK_TYPE.FILE: 'file',
            DISK_TYPE.BLOCK: 'dev',
            DISK_TYPE.NETWORK: 'name',
        }
        return ("./devices/disk/source[@%s='%s']" %
                (source_key[self.diskType], self.path))

    def is_attached_to(self, xml_string):
        dom = ET.fromstring(xml_string)
        return bool(dom.findall(self._xpath))

    def __repr__(self):
        return ("<Drive name={self.name}, type={self.diskType}, "
                "path={self.path} "
                "at {addr:#x}>").format(self=self, addr=id(self))

    @property
    def iotune(self):
        return self.specParams.get('ioTune', {}).copy()

    @iotune.setter
    def iotune(self, value):
        iotune = value.copy()
        vmtune.validate_io_tune_params(iotune)
        self.specParams['ioTune'] = iotune

    def volume_target(self, vol_id, actual_chain):
        """
        Retrieves volume's device target
        from drive's volume chain using its ID.

        That device target is used in block-commit api of libvirt.

        Arguments:
            vol_id (str): Volume's UUID
            actual_chain (VolumeChainEntry[]): Current volume chain
                as parsed from libvirt xml,
                see parse_volume_chain. We expect it to be
                ordered from base to top.

        Returns:
            str: Volume device target - None for top volume,
                "vda[1]" for the next volume after top and so on.

        Raises:
            VolumeNotFound exception when volume is not in chain.
        """
        for v in self.volumeChain:
            if v['volumeID'] == vol_id:
                index = chain_index(actual_chain, vol_id, self.name)

                # libvirt device target format is name[index] where name is
                # target device name inside a vm and index is a number,
                # pointing to a snapshot layer.
                # Unfortunately, top layer do not have index value and libvirt
                # doesn't support referencing top layer as name[0] therefore,
                # we have to check for index absence and return just name for
                # the top layer. We have an RFE for that problem,
                # https://bugzilla.redhat.com/1451398 and when it will be
                # implemented, we need to remove special handling of
                # the active layer.
                if index is None:
                    # As right now libvirt is not able to correctly parse
                    # 'name' as a reference to the active layer we need to
                    # return None, so libvirt will use active layer as a
                    # default value for None. We have bug filed for that issue:
                    # https://bugzilla.redhat.com/1451394 and we need to return
                    # self.name instead of None when it is fixed.
                    return None
                else:
                    return "%s[%d]" % (self.name, index)

        raise VolumeNotFound(drive_name=self.name, vol_id=vol_id)

    def volume_id(self, vol_path):
        """
        Retrieves volume id from drive's volume chain using its path.

        libvirt path and Drive.path may be different symlinks
        to the same file or block device:

        - /run/vdsm/storage/sd_id/img_id/vol_id
        - /rhev/data-center/pool_id/sd_id/images/img_id/vol_id
        """
        for vol in self.volumeChain:
            if self.diskType == DISK_TYPE.NETWORK:
                if vol['path'] == vol_path:
                    return vol['volumeID']
            else:
                if os.path.realpath(vol['path']) == os.path.realpath(vol_path):
                    return vol['volumeID']
        raise LookupError("Unable to find VolumeID for path '%s'", vol_path)

    def parse_volume_chain(self, disk_xml):
        """
        Parses libvirt xml and extracts volume chain from it.

        Arguments:
             disk_xml (ElementTree): libvirt xml to parse

        Returns:
            list: VolumeChainEntry[] - List of chain entries where
            each entry contains volume UUID, volume path
            and volume index. For the 'top' volume index
            is None, as 'top' volume have no indices at
            all.

            VolumeChainEntry is reversed in relation to
            libvirt xml: xml is ordered from top to base
            volume, while volume chain is ordered from
            base to the top.

        Raises:
            InvalidBackingStoreIndex exception when index value is not int.
        """
        volChain = []
        index = None
        source_attr = SOURCE_ATTR[self.diskType]
        while True:
            path = vmxml.find_attr(disk_xml, 'source', source_attr)
            if not path:
                break

            if index is not None:
                try:
                    index = int(index)
                except ValueError:
                    raise InvalidBackingStoreIndex(path, index)

            # TODO: Allocation information is not available in the XML. Switch
            # to the new interface once it becomes available in libvirt.
            alloc = None
            backingstore = next(vmxml.children(disk_xml, 'backingStore'), None)
            if backingstore is None:
                self.log.warning("<backingStore/> missing from backing "
                                 "chain for drive %s", self.name)
                break

            entry = VolumeChainEntry(self.volume_id(path), path, alloc, index)
            volChain.insert(0, entry)

            disk_xml = backingstore
            index = vmxml.attr(backingstore, 'index')
        return volChain or None

    def get_snapshot_xml(self, snap_info):
        """Libvirt snapshot XML"""
        if 'diskType' in snap_info:
            if self.diskType != snap_info['diskType']:
                raise exception.UnsupportedOperation(
                    "Unexpected diskType",
                    drive_disk_type=self.diskType,
                    snapshot_disk_type=snap_info["diskType"])

        if self.diskType == DISK_TYPE.NETWORK:
            if self.protocol != snap_info['protocol']:
                raise exception.UnsupportedOperation(
                    "Unexpected protocol",
                    drive_protocol=self.protocol,
                    snapshot_protocol=snap_info["protocol"])

        disk = vmxml.Element('disk', name=self.name, snapshot='external',
                             type=self.diskType)

        drive_info = snap_info.copy()
        drive_info["diskType"] = self.diskType
        snap_elem = _getSourceXML(drive_info)

        # Type attribute is needed but not documented:
        # https://bugzilla.redhat.com/1452103
        snap_elem.setAttrs(type=self.diskType)

        disk.appendChild(snap_elem)
        return disk
def chain_index(actual_chain, vol_id, drive_name):
    """
    Return the libvirt backing-store index of volume vol_id in the chain.

    Arguments:
        actual_chain (VolumeChainEntry[]): current volume chain
        vol_id (str): UUID of the volume to look up
        drive_name (str): drive name, used only for error reporting

    Returns:
        The index recorded for the matching entry (None for the active
        layer, which libvirt leaves unindexed).

    Raises:
        VolumeNotFound: when no entry in the chain matches vol_id.
    """
    matching = (entry.index for entry in actual_chain if entry.uuid == vol_id)
    for index in matching:
        return index
    raise VolumeNotFound(drive_name=drive_name, vol_id=vol_id)
def _getSourceXML(drive):
    """
    Build the <source> element for a disk from a drive definition
    (a Drive object or a plain dict; both support item access).

    :raises RuntimeError: if drive["diskType"] is not one of
        DISK_TYPE.BLOCK / DISK_TYPE.NETWORK / DISK_TYPE.FILE.
    """
    source = vmxml.Element('source')
    if drive["diskType"] == DISK_TYPE.BLOCK:
        source.setAttrs(dev=drive["path"])
    elif drive["diskType"] == DISK_TYPE.NETWORK:
        source.setAttrs(protocol=drive["protocol"], name=drive["path"])
        for host in drive["hosts"]:
            source.appendChildWithArgs('host', **host)
    elif drive["diskType"] == DISK_TYPE.FILE:
        source.setAttrs(file=drive["path"])
        if drive["device"] == 'cdrom' or drive["device"] == 'floppy':
            # Removable media may legitimately be missing at startup.
            source.setAttrs(startupPolicy='optional')
    else:
        # Use %-formatting: the original passed the value as a second
        # argument to RuntimeError, so the message was never interpolated.
        raise RuntimeError("Unsupported diskType %r" % drive["diskType"])
    return source
def _getDriverXML(drive):
    """
    Build the <driver> element for a disk from a drive definition
    (a Drive object or a plain dict; both support item access).
    """
    element = vmxml.Element('driver')
    # Block-backed disks use native AIO; file/network backed use threads.
    io_mode = 'native' if drive['diskType'] == DISK_TYPE.BLOCK else 'threads'
    attrs = {'name': 'qemu', 'io': io_mode}

    disk_format = drive['format']
    if disk_format == 'cow':
        attrs['type'] = 'qcow2'
    elif disk_format:
        attrs['type'] = 'raw'

    if 'discard' in drive and drive['discard']:
        attrs['discard'] = 'unmap'

    try:
        attrs['iothread'] = str(drive['specParams']['pinToIoThread'])
    except KeyError:
        # No iothread pinning requested.
        pass

    attrs['cache'] = drive['cache']

    propagate = drive['propagateErrors']
    if propagate == 'on' or conv.tobool(propagate):
        attrs['error_policy'] = 'enospace'
    else:
        attrs['error_policy'] = 'stop'

    element.setAttrs(**attrs)
    return element
def _get_drive_identification(dom):
    """
    Extract (alias, device path, target name) from a <disk> XML element.

    The device path comes from whichever of the source element's 'file',
    'dev' or 'name' attributes is set first; '' when there is no source.
    """
    devPath = ''
    source = vmxml.find_first(dom, 'source', None)
    if source is not None:
        for attr_name in ('file', 'dev', 'name'):
            devPath = vmxml.attr(source, attr_name)
            if devPath:
                break
    name = vmxml.find_attr(dom, 'target', 'dev')
    alias = core.find_device_alias(dom)
    return alias, devPath, name
def makeName(interface, index):
    """
    Build a guest device name (e.g. 'vda', 'sdb') from a bus interface and
    a numeric device index. Unknown interfaces fall back to the 'hd' prefix.
    """
    devname = {'ide': 'hd', 'scsi': 'sd', 'virtio': 'vd', 'fdc': 'fd',
               'sata': 'sd'}
    devindex = ''

    i = int(index)
    while i > 0:
        devindex = chr(ord('a') + (i % 26)) + devindex
        # Floor division: plain '/' returns a float on Python 3, which
        # makes the loop never terminate correctly and breaks chr().
        i //= 26

    return devname.get(interface, 'hd') + (devindex or 'a')
| EdDev/vdsm | lib/vdsm/virt/vmdevices/storage.py | Python | gpl-2.0 | 29,125 |
#!/usr/bin/env python
from setuptools import setup, find_packages
# Packaging definition for the nose-unittest plugin.
setup(
    name='nose-unittest',
    version='0.1.1',
    author='DISQUS',
    author_email='opensource@disqus.com',
    url='http://github.com/disqus/nose-unittest',
    # Sources live under src/ ("src layout").
    package_dir={'': 'src'},
    packages=find_packages('src'),
    zip_safe=False,
    install_requires=[
        'nose>=0.9',
    ],
    # Register the plugin with nose's 0.10+ plugin entry-point group.
    entry_points={
        'nose.plugins.0.10': [
            'nose_unittest = nose_unittest.plugin:UnitTestPlugin'
        ]
    },
    license='Apache License 2.0',
    include_package_data=True,
)
| disqus/nose-unittest | setup.py | Python | apache-2.0 | 569 |
from __future__ import absolute_import, print_function, division
import argparse
import sys
class GPUCommand:
    """
    CLI command that prints the installed CUDA version and one summary line
    per NVIDIA GPU found on this machine.
    """
    def __init__(self, logger):
        self.logger = logger
        self.client = None
        self.registered = False
        self.active = True

    def main(self, args):
        """
        Print CUDA version and per-GPU information.

        :param args: command line arguments (currently unused)

        Exits with status 2 when CUDA is not installed or unusable.
        """
        import aetros.cuda_gpu

        # NOTE: the original built an unused argparse parser here that also
        # dereferenced aetros.const without importing it; removed.
        try:
            print("CUDA version: " + str(aetros.cuda_gpu.get_version()))
        except aetros.cuda_gpu.CudaNotImplementedException:
            # Original message was a garbled double negative and had no
            # trailing newline.
            sys.stderr.write("It seems you don't have NVIDIA CUDA installed "
                             "properly.\n")
            sys.exit(2)

        for gpu in aetros.cuda_gpu.get_ordered_devices():
            properties = aetros.cuda_gpu.get_device_properties(gpu['device'],
                                                               all=True)
            free, total = aetros.cuda_gpu.get_memory(gpu['device'])
            print("%s GPU id=%s %s (memory %.2fGB, free %.2fGB)" % (
                gpu['fullId'], str(gpu['id']), properties['name'],
                total / 1024 / 1024 / 1024, free / 1024 / 1024 / 1024))
| aetros/aetros-cli | aetros/commands/GPUCommand.py | Python | mit | 1,123 |
"""
Created on 26.05.2017
:author: Humbert Moreaux
Tuleap REST API Client for Python
Copyright (c) Humbert Moreaux, All rights reserved.
This Python module is free software; you can redistribute it and/or modify it under the terms of the
GNU Lesser General Public License as published by the Free Software Foundation; either version 3.0
of the License, or (at your option) any later version.
This Python module is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along with this library. If
not, see <http://www.gnu.org/licenses/>.
"""
import json
class Git(object):
    """
    Handles "/git" methods of the Tuleap REST API.

    Fields type information:
    :type _connection: Tuleap.RestClient.Connection.Connection
    :type _data: dict | list[dict]
    """
    def __init__(self, connection):
        """
        Constructor

        :param connection: connection object (must already be logged in)
        :type connection: Tuleap.RestClient.Connection.Connection
        """
        self._data = None
        self._connection = connection

    def get_data(self):
        """
        Get data received in the last response message.

        :return: Response data
        :rtype: dict | list[dict]

        :note: One of the request method should be successfully executed before this method is
               called!
        """
        return self._data

    def request_repository(self, repository_id):
        """
        Request repository data from the server using the "/git" method of the Tuleap REST API.

        :param int repository_id: Git Repository ID

        :return: success: Success or failure
        :rtype: bool
        """
        # A request is only possible on an authenticated connection.
        if not self._connection.is_logged_in():
            return False
        success = self._connection.call_get_method(
            "/git/{:}".format(repository_id))
        if success:
            response = self._connection.get_last_response_message()
            self._data = json.loads(response.text)
        return success
| djurodrljaca/tuleap-rest-api-client | Tuleap/RestClient/Git.py | Python | lgpl-3.0 | 2,316 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from ....unittest import TestCase
import datetime
from oauthlib import common
from oauthlib.oauth2.rfc6749 import utils
from oauthlib.oauth2 import Client
from oauthlib.oauth2 import InsecureTransportError
from oauthlib.oauth2.rfc6749.clients import AUTH_HEADER, URI_QUERY, BODY
class ClientTest(TestCase):
    """Tests for the token-placement and request-preparation helpers of the
    base ``oauthlib.oauth2.Client``."""

    client_id = "someclientid"
    uri = "https://example.com/path?query=world"
    body = "not=empty"
    headers = {}
    access_token = "token"
    mac_key = "secret"
    bearer_query = uri + "&access_token=" + access_token
    bearer_header = {
        "Authorization": "Bearer " + access_token
    }
    bearer_body = body + "&access_token=" + access_token
    mac_00_header = {
        "Authorization": 'MAC id="' + access_token + '", nonce="0:abc123",' +
                         ' bodyhash="Yqyso8r3hR5Nm1ZFv+6AvNHrxjE=",' +
                         ' mac="0X6aACoBY0G6xgGZVJ1IeE8dF9k="'
    }
    mac_01_header = {
        "Authorization": 'MAC id="' + access_token + '", ts="123456789",' +
                         ' nonce="abc123", mac="Xuk+9oqaaKyhitkgh1CD0xrI6+s="'
    }

    def test_add_bearer_token(self):
        """Test a number of bearer token placements"""
        # Invalid token type
        client = Client(self.client_id, token_type="invalid")
        self.assertRaises(ValueError, client.add_token, self.uri)

        # Case-insensitive token type
        client = Client(self.client_id, access_token=self.access_token, token_type="bEAreR")
        uri, headers, body = client.add_token(self.uri, body=self.body,
                headers=self.headers)
        self.assertURLEqual(uri, self.uri)
        self.assertFormBodyEqual(body, self.body)
        self.assertEqual(headers, self.bearer_header)

        # Missing access token
        client = Client(self.client_id)
        self.assertRaises(ValueError, client.add_token, self.uri)

        # The default token placement, bearer in auth header
        client = Client(self.client_id, access_token=self.access_token)
        uri, headers, body = client.add_token(self.uri, body=self.body,
                headers=self.headers)
        self.assertURLEqual(uri, self.uri)
        self.assertFormBodyEqual(body, self.body)
        self.assertEqual(headers, self.bearer_header)

        # Setting default placements of tokens
        client = Client(self.client_id, access_token=self.access_token,
                default_token_placement=AUTH_HEADER)
        uri, headers, body = client.add_token(self.uri, body=self.body,
                headers=self.headers)
        self.assertURLEqual(uri, self.uri)
        self.assertFormBodyEqual(body, self.body)
        self.assertEqual(headers, self.bearer_header)

        client = Client(self.client_id, access_token=self.access_token,
                default_token_placement=URI_QUERY)
        uri, headers, body = client.add_token(self.uri, body=self.body,
                headers=self.headers)
        self.assertURLEqual(uri, self.bearer_query)
        self.assertFormBodyEqual(body, self.body)
        self.assertEqual(headers, self.headers)

        client = Client(self.client_id, access_token=self.access_token,
                default_token_placement=BODY)
        uri, headers, body = client.add_token(self.uri, body=self.body,
                headers=self.headers)
        self.assertURLEqual(uri, self.uri)
        self.assertFormBodyEqual(body, self.bearer_body)
        self.assertEqual(headers, self.headers)

        # Asking for specific placement in the add_token method
        client = Client(self.client_id, access_token=self.access_token)
        uri, headers, body = client.add_token(self.uri, body=self.body,
                headers=self.headers, token_placement=AUTH_HEADER)
        self.assertURLEqual(uri, self.uri)
        self.assertFormBodyEqual(body, self.body)
        self.assertEqual(headers, self.bearer_header)

        client = Client(self.client_id, access_token=self.access_token)
        uri, headers, body = client.add_token(self.uri, body=self.body,
                headers=self.headers, token_placement=URI_QUERY)
        self.assertURLEqual(uri, self.bearer_query)
        self.assertFormBodyEqual(body, self.body)
        self.assertEqual(headers, self.headers)

        client = Client(self.client_id, access_token=self.access_token)
        uri, headers, body = client.add_token(self.uri, body=self.body,
                headers=self.headers, token_placement=BODY)
        self.assertURLEqual(uri, self.uri)
        self.assertFormBodyEqual(body, self.bearer_body)
        self.assertEqual(headers, self.headers)

        # Invalid token placement
        client = Client(self.client_id, access_token=self.access_token)
        self.assertRaises(ValueError, client.add_token, self.uri, body=self.body,
                headers=self.headers, token_placement="invalid")

        client = Client(self.client_id, access_token=self.access_token,
                default_token_placement="invalid")
        self.assertRaises(ValueError, client.add_token, self.uri, body=self.body,
                headers=self.headers)

    def test_add_mac_token(self):
        """MAC tokens (draft 00 and draft 01) are added as Authorization headers."""
        # Missing access token
        client = Client(self.client_id, token_type="MAC")
        self.assertRaises(ValueError, client.add_token, self.uri)

        # Invalid hash algorithm
        client = Client(self.client_id, token_type="MAC",
                access_token=self.access_token, mac_key=self.mac_key,
                mac_algorithm="hmac-sha-2")
        self.assertRaises(ValueError, client.add_token, self.uri)

        orig_generate_timestamp = common.generate_timestamp
        orig_generate_nonce = common.generate_nonce
        orig_generate_age = utils.generate_age
        # BUG FIX: the attribute names passed to setattr were previously
        # misspelled ("generage_..."), so the monkeypatched functions were
        # never restored and leaked into subsequently run tests.
        self.addCleanup(setattr, common, 'generate_timestamp', orig_generate_timestamp)
        self.addCleanup(setattr, common, 'generate_nonce', orig_generate_nonce)
        self.addCleanup(setattr, utils, 'generate_age', orig_generate_age)
        common.generate_timestamp = lambda: '123456789'
        common.generate_nonce = lambda: 'abc123'
        utils.generate_age = lambda *args: 0

        # Add the Authorization header (draft 00)
        client = Client(self.client_id, token_type="MAC",
                access_token=self.access_token, mac_key=self.mac_key,
                mac_algorithm="hmac-sha-1")
        uri, headers, body = client.add_token(self.uri, body=self.body,
                headers=self.headers, issue_time=datetime.datetime.now())
        self.assertEqual(uri, self.uri)
        self.assertEqual(body, self.body)
        self.assertEqual(headers, self.mac_00_header)

        # Add the Authorization header (draft 01)
        client = Client(self.client_id, token_type="MAC",
                access_token=self.access_token, mac_key=self.mac_key,
                mac_algorithm="hmac-sha-1")
        uri, headers, body = client.add_token(self.uri, body=self.body,
                headers=self.headers, draft=1)
        self.assertEqual(uri, self.uri)
        self.assertEqual(body, self.body)
        self.assertEqual(headers, self.mac_01_header)

    def test_revocation_request(self):
        """Token revocation requests carry the token, hint and optional JSONP
        callback."""
        client = Client(self.client_id)
        url = 'https://example.com/revoke'
        token = 'foobar'

        # Valid request
        u, h, b = client.prepare_token_revocation_request(url, token)
        self.assertEqual(u, url)
        self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(b, 'token=%s&token_type_hint=access_token' % token)

        # Non-HTTPS revocation endpoint
        self.assertRaises(InsecureTransportError,
                          client.prepare_token_revocation_request,
                          'http://example.com/revoke', token)

        u, h, b = client.prepare_token_revocation_request(
            url, token, token_type_hint='refresh_token')
        self.assertEqual(u, url)
        self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(b, 'token=%s&token_type_hint=refresh_token' % token)

        # JSONP
        u, h, b = client.prepare_token_revocation_request(
            url, token, callback='hello.world')
        self.assertURLEqual(u, url + '?callback=hello.world&token=%s&token_type_hint=access_token' % token)
        self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertEqual(b, '')

    def test_prepare_authorization_request(self):
        """The base class rejects insecure URLs and defers grant-specific
        preparation to subclasses."""
        redirect_url = 'https://example.com/callback/'
        scopes = 'read'
        auth_url = 'https://example.com/authorize/'
        state = 'fake_state'

        client = Client(self.client_id, redirect_url=redirect_url, scope=scopes, state=state)

        # Non-HTTPS
        self.assertRaises(InsecureTransportError,
                client.prepare_authorization_request, 'http://example.com/authorize/')

        # NotImplementedError
        self.assertRaises(NotImplementedError, client.prepare_authorization_request, auth_url)

    def test_prepare_refresh_token_request(self):
        """Refresh-token grant requests include the token and optional scope."""
        client = Client(self.client_id)
        url = 'https://example.com/revoke'
        token = 'foobar'
        scope = 'extra_scope'

        u, h, b = client.prepare_refresh_token_request(url, token)
        self.assertEqual(u, url)
        self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertFormBodyEqual(b, 'grant_type=refresh_token&refresh_token=%s' % token)

        # Non-HTTPS revocation endpoint
        self.assertRaises(InsecureTransportError,
                          client.prepare_refresh_token_request,
                          'http://example.com/revoke', token)

        # provide extra scope
        u, h, b = client.prepare_refresh_token_request(url, token, scope=scope)
        self.assertEqual(u, url)
        self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertFormBodyEqual(b, 'grant_type=refresh_token&scope=%s&refresh_token=%s' % (scope, token))

        # provide scope while init
        client = Client(self.client_id, scope=scope)
        u, h, b = client.prepare_refresh_token_request(url, token, scope=scope)
        self.assertEqual(u, url)
        self.assertEqual(h, {'Content-Type': 'application/x-www-form-urlencoded'})
        self.assertFormBodyEqual(b, 'grant_type=refresh_token&scope=%s&refresh_token=%s' % (scope, token))
| metatoaster/oauthlib | tests/oauth2/rfc6749/clients/test_base.py | Python | bsd-3-clause | 10,495 |
r'''Parse strings using a specification based on the Python format() syntax.
``parse()`` is the opposite of ``format()``
The module is set up to only export ``parse()``, ``search()`` and
``findall()`` when ``import *`` is used:
>>> from parse import *
From there it's a simple thing to parse a string:
>>> parse("It's {}, I love it!", "It's spam, I love it!")
<Result ('spam',) {}>
>>> _[0]
'spam'
Or to search a string for some pattern:
>>> search('Age: {:d}\n', 'Name: Rufus\nAge: 42\nColor: red\n')
<Result (42,) {}>
Or find all the occurrences of some pattern in a string:
>>> ''.join(r.fixed[0] for r in findall(">{}<", "<p>the <b>bold</b> text</p>"))
'the bold text'
If you're going to use the same pattern to match lots of strings you can
compile it once:
>>> from parse import compile
>>> p = compile("It's {}, I love it!")
>>> print(p)
<Parser "It's {}, I love it!">
>>> p.parse("It's spam, I love it!")
<Result ('spam',) {}>
("compile" is not exported for ``import *`` usage as it would override the
built-in ``compile()`` function)
Format Syntax
-------------
A basic version of the `Format String Syntax`_ is supported with anonymous
(fixed-position), named and formatted fields::
{[field name]:[format spec]}
Field names must be valid Python identifiers, including dotted names;
element indexes imply dictionaries (see below for example).
Numbered fields are also not supported: the result of parsing will include
the parsed fields in the order they are parsed.
The conversion of fields to types other than strings is done based on the
type in the format specification, which mirrors the ``format()`` behaviour.
There are no "!" field conversions like ``format()`` has.
Some simple parse() format string examples:
>>> parse("Bring me a {}", "Bring me a shrubbery")
<Result ('shrubbery',) {}>
>>> r = parse("The {} who say {}", "The knights who say Ni!")
>>> print(r)
<Result ('knights', 'Ni!') {}>
>>> print(r.fixed)
('knights', 'Ni!')
>>> r = parse("Bring out the holy {item}", "Bring out the holy hand grenade")
>>> print(r)
<Result () {'item': 'hand grenade'}>
>>> print(r.named)
{'item': 'hand grenade'}
>>> print(r['item'])
hand grenade
Dotted names and indexes are possible though the application must make
additional sense of the result:
>>> r = parse("Mmm, {food.type}, I love it!", "Mmm, spam, I love it!")
>>> print(r)
<Result () {'food.type': 'spam'}>
>>> print(r.named)
{'food.type': 'spam'}
>>> print(r['food.type'])
spam
>>> r = parse("My quest is {quest[name]}", "My quest is to seek the holy grail!")
>>> print(r)
<Result () {'quest': {'name': 'to seek the holy grail!'}}>
>>> print(r['quest'])
{'name': 'to seek the holy grail!'}
>>> print(r['quest']['name'])
to seek the holy grail!
Format Specification
--------------------
Most often a straight format-less ``{}`` will suffice where a more complex
format specification might have been used.
Most of `format()`'s `Format Specification Mini-Language`_ is supported:
[[fill]align][0][width][.precision][type]
The differences between `parse()` and `format()` are:
- The align operators will cause spaces (or specified fill character) to be
stripped from the parsed value. The width is not enforced; it just indicates
there may be whitespace or "0"s to strip.
- Numeric parsing will automatically handle a "0b", "0o" or "0x" prefix.
That is, the "#" format character is handled automatically by d, b, o
  and x formats. For "d" any prefix will be accepted, but for the others the
  correct prefix must be present, if at all.
- Numeric sign is handled automatically.
- The thousands separator is handled automatically if the "n" type is used.
- The types supported are a slightly different mix to the format() types. Some
format() types come directly over: "d", "n", "%", "f", "e", "b", "o" and "x".
In addition some regular expression character group types "D", "w", "W", "s"
and "S" are also available.
- The "e" and "g" types are case-insensitive so there is no need for
  the "E" or "G" types.
===== =========================================== ========
Type Characters Matched Output
===== =========================================== ========
w Letters and underscore str
W Non-letter and underscore str
s Whitespace str
S Non-whitespace str
d Digits (effectively integer numbers) int
D Non-digit str
n Numbers with thousands separators (, or .) int
% Percentage (converted to value/100.0) float
f Fixed-point numbers float
e Floating-point numbers with exponent float
e.g. 1.1e-10, NAN (all case insensitive)
g General number format (either d, f or e) float
b Binary numbers int
o Octal numbers int
x Hexadecimal numbers (lower and upper case) int
ti ISO 8601 format date/time datetime
e.g. 1972-01-20T10:21:36Z ("T" and "Z"
optional)
te RFC2822 e-mail format date/time datetime
e.g. Mon, 20 Jan 1972 10:21:36 +1000
tg Global (day/month) format date/time datetime
e.g. 20/1/1972 10:21:36 AM +1:00
ta US (month/day) format date/time datetime
e.g. 1/20/1972 10:21:36 PM +10:30
tc ctime() format date/time datetime
e.g. Sun Sep 16 01:03:52 1973
th HTTP log format date/time datetime
e.g. 21/Nov/2011:00:07:11 +0000
ts Linux system log format date/time datetime
e.g. Nov 9 03:37:44
tt Time time
e.g. 10:21:36 PM -5:30
===== =========================================== ========
Some examples of typed parsing with ``None`` returned if the typing
does not match:
>>> parse('Our {:d} {:w} are...', 'Our 3 weapons are...')
<Result (3, 'weapons') {}>
>>> parse('Our {:d} {:w} are...', 'Our three weapons are...')
>>> parse('Meet at {:tg}', 'Meet at 1/2/2011 11:00 PM')
<Result (datetime.datetime(2011, 2, 1, 23, 0),) {}>
And messing about with alignment:
>>> parse('with {:>} herring', 'with a herring')
<Result ('a',) {}>
>>> parse('spam {:^} spam', 'spam lovely spam')
<Result ('lovely',) {}>
Note that the "center" alignment does not test to make sure the value is
centered - it just strips leading and trailing whitespace.
Some notes for the date and time types:
- the presence of the time part is optional (including ISO 8601, starting
at the "T"). A full datetime object will always be returned; the time
will be set to 00:00:00. You may also specify a time without seconds.
- when a seconds amount is present in the input fractions will be parsed
to give microseconds.
- except in ISO 8601 the day and month digits may be 0-padded.
- the date separator for the tg and ta formats may be "-" or "/".
- named months (abbreviations or full names) may be used in the ta and tg
formats in place of numeric months.
- as per RFC 2822 the e-mail format may omit the day (and comma), and the
seconds but nothing else.
- hours greater than 12 will be happily accepted.
- the AM/PM are optional, and if PM is found then 12 hours will be added
to the datetime object's hours amount - even if the hour is greater
than 12 (for consistency.)
- in ISO 8601 the "Z" (UTC) timezone part may be a numeric offset
- timezones are specified as "+HH:MM" or "-HH:MM". The hour may be one or two
digits (0-padded is OK.) Also, the ":" is optional.
- the timezone is optional in all except the e-mail format (it defaults to
UTC.)
- named timezones are not handled yet.
Note: attempting to match too many datetime fields in a single parse() will
currently result in a resource allocation issue. A TooManyFields exception
will be raised in this instance. The current limit is about 15. It is hoped
that this limit will be removed one day.
.. _`Format String Syntax`:
http://docs.python.org/library/string.html#format-string-syntax
.. _`Format Specification Mini-Language`:
http://docs.python.org/library/string.html#format-specification-mini-language
Result and Match Objects
------------------------
The result of a ``parse()`` and ``search()`` operation is either ``None`` (no match), a
``Result`` instance or a ``Match`` instance if ``evaluate_result`` is False.
The ``Result`` instance has three attributes:
fixed
A tuple of the fixed-position, anonymous fields extracted from the input.
named
A dictionary of the named fields extracted from the input.
spans
A dictionary mapping the names and fixed position indices matched to a
2-tuple slice range of where the match occurred in the input.
The span does not include any stripped padding (alignment or width).
The ``Match`` instance has one method:
evaluate_result()
Generates and returns a ``Result`` instance for this ``Match`` object.
Custom Type Conversions
-----------------------
If you wish to have matched fields automatically converted to your own type you
may pass in a dictionary of type conversion information to ``parse()`` and
``compile()``.
The converter will be passed the field string matched. Whatever it returns
will be substituted in the ``Result`` instance for that field.
Your custom type conversions may override the builtin types if you supply one
with the same identifier.
>>> def shouty(string):
... return string.upper()
...
>>> parse('{:shouty} world', 'hello world', dict(shouty=shouty))
<Result ('HELLO',) {}>
If the type converter has the optional ``pattern`` attribute, it is used as
regular expression for better pattern matching (instead of the default one).
>>> def parse_number(text):
... return int(text)
>>> parse_number.pattern = r'\d+'
>>> parse('Answer: {number:Number}', 'Answer: 42', dict(Number=parse_number))
<Result () {'number': 42}>
>>> _ = parse('Answer: {:Number}', 'Answer: Alice', dict(Number=parse_number))
>>> assert _ is None, "MISMATCH"
You can also use the ``with_pattern(pattern)`` decorator to add this
information to a type converter function:
>>> from parse import with_pattern
>>> @with_pattern(r'\d+')
... def parse_number(text):
... return int(text)
>>> parse('Answer: {number:Number}', 'Answer: 42', dict(Number=parse_number))
<Result () {'number': 42}>
A more complete example of a custom type might be:
>>> yesno_mapping = {
... "yes": True, "no": False,
... "on": True, "off": False,
... "true": True, "false": False,
... }
>>> @with_pattern(r"|".join(yesno_mapping))
... def parse_yesno(text):
... return yesno_mapping[text.lower()]
----
**Version history (in brief)**:
- 1.8.0 support manual control over result evaluation (thanks Timo Furrer)
- 1.7.0 parse dict fields (thanks Mark Visser) and adapted to allow
more than 100 re groups in Python 3.5+ (thanks David King)
- 1.6.6 parse Linux system log dates (thanks Alex Cowan)
- 1.6.5 handle precision in float format (thanks Levi Kilcher)
- 1.6.4 handle pipe "|" characters in parse string (thanks Martijn Pieters)
- 1.6.3 handle repeated instances of named fields, fix bug in PM time
overflow
- 1.6.2 fix logging to use local, not root logger (thanks Necku)
- 1.6.1 be more flexible regarding matched ISO datetimes and timezones in
general, fix bug in timezones without ":" and improve docs
- 1.6.0 add support for optional ``pattern`` attribute in user-defined types
(thanks Jens Engel)
- 1.5.3 fix handling of question marks
- 1.5.2 fix type conversion error with dotted names (thanks Sebastian Thiel)
- 1.5.1 implement handling of named datetime fields
- 1.5 add handling of dotted field names (thanks Sebastian Thiel)
- 1.4.1 fix parsing of "0" in int conversion (thanks James Rowe)
- 1.4 add __getitem__ convenience access on Result.
- 1.3.3 fix Python 2.5 setup.py issue.
- 1.3.2 fix Python 3.2 setup.py issue.
- 1.3.1 fix a couple of Python 3.2 compatibility issues.
- 1.3 added search() and findall(); removed compile() from ``import *``
export as it overwrites builtin.
- 1.2 added ability for custom and override type conversions to be
provided; some cleanup
- 1.1.9 to keep things simpler number sign is handled automatically;
significant robustification in the face of edge-case input.
- 1.1.8 allow "d" fields to have number base "0x" etc. prefixes;
fix up some field type interactions after stress-testing the parser;
implement "%" type.
- 1.1.7 Python 3 compatibility tweaks (2.5 to 2.7 and 3.2 are supported).
- 1.1.6 add "e" and "g" field types; removed redundant "h" and "X";
removed need for explicit "#".
- 1.1.5 accept textual dates in more places; Result now holds match span
positions.
- 1.1.4 fixes to some int type conversion; implemented "=" alignment; added
date/time parsing with a variety of formats handled.
- 1.1.3 type conversion is automatic based on specified field types. Also added
"f" and "n" types.
- 1.1.2 refactored, added compile() and limited ``from parse import *``
- 1.1.1 documentation improvements
- 1.1.0 implemented more of the `Format Specification Mini-Language`_
and removed the restriction on mixing fixed-position and named fields
- 1.0.0 initial release
This code is copyright 2012-2017 Richard Jones <richard@python.org>
See the end of the source file for the license of use.
'''
__version__ = '1.8.0'
# yes, I now have two problems
import re
import sys
from datetime import datetime, time, tzinfo, timedelta
from functools import partial
import logging
__all__ = 'parse search findall with_pattern'.split()
log = logging.getLogger(__name__)
def with_pattern(pattern):
    r"""Attach a regular expression pattern matcher to a custom type converter
    function.

    This annotates the type converter with the :attr:`pattern` attribute.

    EXAMPLE:
        >>> import parse
        >>> @parse.with_pattern(r"\d+")
        ... def parse_number(text):
        ...     return int(text)

    is equivalent to:

        >>> def parse_number(text):
        ...     return int(text)
        >>> parse_number.pattern = r"\d+"

    :param pattern: regular expression pattern (as text)
    :return: wrapped function
    """
    def attach(converter):
        converter.pattern = pattern
        return converter
    return attach
def int_convert(base):
    '''Return a converter turning a matched string into an integer.

    The string may start with a sign ("+" or "-").

    It may be of a base other than 10: a "0b", "0o" or "0x" prefix
    overrides the default base passed in.

    It may also have other non-numeric characters that we can ignore
    (e.g. thousands separators).

    :param int base: default number base when no prefix is present
    :return: callable ``f(string, match)`` -> int
    '''
    CHARS = '0123456789abcdefghijklmnopqrstuvwxyz'

    def f(string, match, base=base):
        # Detect and skip an optional leading sign FIRST, so that a base
        # prefix is still recognised on signed values such as "-0x1f"
        # (previously the prefix test looked at string[0] and therefore
        # never fired for signed input).
        if string[0] == '-':
            sign = -1
            start = 1
        elif string[0] == '+':
            sign = 1
            start = 1
        else:
            sign = 1
            start = 0
        if string[start] == '0' and len(string) - start > 1:
            if string[start + 1] in 'bB':
                base = 2
            elif string[start + 1] in 'oO':
                base = 8
            elif string[start + 1] in 'xX':
                base = 16
            else:
                # just go with the base specified
                pass
        # Strip everything that is not a digit of the chosen base (this
        # also drops the sign and the "0x"-style prefix letter).
        chars = CHARS[:base]
        string = re.sub('[^%s]' % chars, '', string.lower())
        return sign * int(string, base)
    return f
def percentage(string, match):
    # "42.5%" -> 0.425: drop the trailing "%" and scale down by 100.
    value = float(string[:-1])
    return value / 100.
class FixedTzOffset(tzinfo):
    """Fixed offset in minutes east from UTC.
    """
    ZERO = timedelta(0)

    def __init__(self, offset, name):
        """
        :param int offset: offset east of UTC, in minutes
        :param str name: name reported by :meth:`tzname`
        """
        self._offset = timedelta(minutes=offset)
        self._name = name

    def __repr__(self):
        return '<%s %s %s>' % (self.__class__.__name__, self._name,
            self._offset)

    def utcoffset(self, dt):
        return self._offset

    def tzname(self, dt):
        return self._name

    def dst(self, dt):
        # This class models a constant offset, so DST is always zero.
        return self.ZERO

    def __eq__(self, other):
        # BUG FIX: comparing against an unrelated type (e.g. None or a
        # different tzinfo subclass) used to raise AttributeError; defer
        # to the other operand instead.
        if not isinstance(other, FixedTzOffset):
            return NotImplemented
        return self._name == other._name and self._offset == other._offset
# Map of English month names and abbreviations to month numbers; used by
# date_convert() when a date field contains a textual month.
MONTHS_MAP = dict(
    Jan=1, January=1,
    Feb=2, February=2,
    Mar=3, March=3,
    Apr=4, April=4,
    May=5,
    Jun=6, June=6,
    Jul=7, July=7,
    Aug=8, August=8,
    Sep=9, September=9,
    Oct=10, October=10,
    Nov=11, November=11,
    Dec=12, December=12
)

# Regular-expression fragments shared by the date/time type patterns.
DAYS_PAT = '(Mon|Tue|Wed|Thu|Fri|Sat|Sun)'
MONTHS_PAT = '(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)'
ALL_MONTHS_PAT = '(%s)' % '|'.join(MONTHS_MAP)
TIME_PAT = r'(\d{1,2}:\d{1,2}(:\d{1,2}(\.\d+)?)?)'
AM_PAT = r'(\s+[AP]M)'
TZ_PAT = r'(\s+[-+]\d\d?:?\d\d)'
def date_convert(string, match, ymd=None, mdy=None, dmy=None,
        d_m_y=None, hms=None, am=None, tz=None, mm=None, dd=None):
    '''Convert the incoming string containing some date / time info into a
    datetime instance.

    Apart from "string" and "match", every parameter is an index into
    match.groups() identifying where (if anywhere) each piece of the
    date/time was captured:

    - ymd / mdy / dmy: one group holding the whole date in
      year-month-day / month-day-year / day-month-year order
    - d_m_y: a (day, month, year) tuple of three separate group indexes
    - mm, dd: separate month and day groups (the year defaults to the
      current year)
    - hms: the "HH:MM[:SS[.fff]]" time group
    - am: the optional "AM"/"PM" group
    - tz: the optional timezone group ("Z" or "+HH:MM"-style)
    '''
    groups = match.groups()
    time_only = False
    if mm and dd:
        # month/day only: assume the current year
        y=datetime.today().year
        m=groups[mm]
        d=groups[dd]
    elif ymd is not None:
        y, m, d = re.split('[-/\s]', groups[ymd])
    elif mdy is not None:
        m, d, y = re.split('[-/\s]', groups[mdy])
    elif dmy is not None:
        d, m, y = re.split('[-/\s]', groups[dmy])
    elif d_m_y is not None:
        d, m, y = d_m_y
        d = groups[d]
        m = groups[m]
        y = groups[y]
    else:
        # no date captured at all: produce a datetime.time (see below)
        time_only = True
    H = M = S = u = 0
    if hms is not None and groups[hms]:
        t = groups[hms].split(':')
        if len(t) == 2:
            H, M = t
        else:
            H, M, S = t
            if '.' in S:
                # fractional seconds become microseconds
                S, u = S.split('.')
                u = int(float('.' + u) * 1000000)
            S = int(S)
        H = int(H)
        M = int(M)
    day_incr = False
    if am is not None:
        am = groups[am]
        if am and am.strip() == 'PM':
            # PM adds 12 hours even when the hour is already > 12 (see the
            # module docstring); overflow past midnight rolls the date on
            H += 12
            if H > 23:
                day_incr = True
                H -= 24
    if tz is not None:
        tz = groups[tz]
    if tz == 'Z':
        tz = FixedTzOffset(0, 'UTC')
    elif tz:
        tz = tz.strip()
        if tz.isupper():
            # TODO use the awesome python TZ module?
            pass
        else:
            # numeric offset: "+H:MM", "+HH:MM", "+HMM" or "+HHMM"
            sign = tz[0]
            if ':' in tz:
                tzh, tzm = tz[1:].split(':')
            elif len(tz) == 4:  # 'snnn'
                tzh, tzm = tz[1], tz[2:4]
            else:
                tzh, tzm = tz[1:3], tz[3:5]
            offset = int(tzm) + int(tzh) * 60
            if sign == '-':
                offset = -offset
            tz = FixedTzOffset(offset, tz)
    if time_only:
        d = time(H, M, S, u, tzinfo=tz)
    else:
        y = int(y)
        if m.isdigit():
            m = int(m)
        else:
            # named month, e.g. "Jan" or "January"
            m = MONTHS_MAP[m]
        d = int(d)
        d = datetime(y, m, d, H, M, S, u, tzinfo=tz)
        if day_incr:
            # PM overflow past midnight (see above)
            d = d + timedelta(days=1)
    return d
class TooManyFields(ValueError):
    """Raised when a format string needs more regex groups than ``re`` supports."""
    pass
class RepeatedNameError(ValueError):
    """Raised when a named field is reused with a different type specification."""
    pass
# note: {} are handled separately
# note: I don't use r'' here because Sublime Text 2 syntax highlight has a fit
# Metacharacters that must be escaped when literal format-string text is
# embedded in the generated regular expression (see Parser._regex_replace).
REGEX_SAFETY = re.compile('([?\\\\.[\]()*+\^$!\|])')

# allowed field types
ALLOWED_TYPES = set(list('nbox%fegwWdDsS') +
    ['t' + c for c in 'ieahgcts'])
def extract_format(format, extra_types):
    '''Pull apart the format [[fill]align][0][width][.precision][type]

    Returns locals(), so the caller receives a dict containing (at least)
    "fill", "align", "zero", "width" and "type" — plus "precision" when a
    precision was present.  NOTE(review): because locals() is returned,
    renaming any local variable here would silently change the result keys.
    '''
    fill = align = None
    if format[0] in '<>=^':
        align = format[0]
        format = format[1:]
    elif len(format) > 1 and format[1] in '<>=^':
        fill = format[0]
        align = format[1]
        format = format[2:]

    # a leading "0" requests zero padding
    zero = False
    if format and format[0] == '0':
        zero = True
        format = format[1:]

    # consume the decimal width digits
    width = ''
    while format:
        if not format[0].isdigit():
            break
        width += format[0]
        format = format[1:]

    if format.startswith('.'):
        # Precision isn't needed but we need to capture it so that
        # the ValueError isn't raised.
        format = format[1:]  # drop the '.'
        precision = ''
        while format:
            if not format[0].isdigit():
                break
            precision += format[0]
            format = format[1:]

    # the rest is the type, if present
    type = format
    if type and type not in ALLOWED_TYPES and type not in extra_types:
        raise ValueError('type %r not recognised' % type)

    return locals()
# Splits a format string into literal text, "{{"/"}}" escapes and
# "{name.attr[key]:spec}" replacement fields.
PARSE_RE = re.compile(r"""({{|}}|{\w*(?:(?:\.\w+)|(?:\[[^\]]+\]))*(?::[^}]+)?})""")
class Parser(object):
'''Encapsulate a format string that may be used to parse other strings.
'''
def __init__(self, format, extra_types={}):
    '''Build a parser for "format".

    :param format: the format string this parser will match against
    :param extra_types: mapping of custom type name -> converter callable
        (see the module docstring).  NOTE(review): the mutable default is
        shared between instances; it appears to be only read, never
        modified — confirm before relying on that.
    '''
    # a mapping of a name as in {hello.world} to a regex-group compatible
    # name, like hello__world Its used to prevent the transformation of
    # name-to-group and group to name to fail subtly, such as in:
    # hello_.world-> hello___world->hello._world
    self._group_to_name_map = {}
    # also store the original field name to group name mapping to allow
    # multiple instances of a name in the format string
    self._name_to_group_map = {}
    # and to sanity check the repeated instances store away the first
    # field type specification for the named field
    self._name_types = {}
    self._format = format
    self._extra_types = extra_types
    self._fixed_fields = []      # group indexes of anonymous {} fields
    self._named_fields = []      # group names of named {name} fields
    self._group_index = 0        # next free regex group index
    self._type_conversions = {}  # group -> converter(string, match)
    # generating the expression also populates the bookkeeping above
    self._expression = self._generate_expression()
    # the compiled regexes are built lazily (see _search_re / _match_re)
    self.__search_re = None
    self.__match_re = None
    log.debug('format %r -> %r' % (format, self._expression))
def __repr__(self):
if len(self._format) > 20:
return '<%s %r>' % (self.__class__.__name__,
self._format[:17] + '...')
return '<%s %r>' % (self.__class__.__name__, self._format)
@property
def _search_re(self):
    # Lazily-compiled regex used by search()/findall(); unlike _match_re
    # it is not anchored to the whole string.
    if self.__search_re is None:
        try:
            self.__search_re = re.compile(self._expression,
                re.IGNORECASE | re.DOTALL)
        except AssertionError:
            # access error through sys to keep py3k and backward compat
            e = str(sys.exc_info()[1])
            if e.endswith('this version only supports 100 named groups'):
                raise TooManyFields('sorry, you are attempting to parse '
                    'too many complex fields')
            # NOTE(review): any other AssertionError is swallowed here and
            # None is returned — confirm this is intentional.
    return self.__search_re
@property
def _match_re(self):
    # Lazily-compiled regex used by parse(); anchored with "^...$" so the
    # format must match the entire input string.
    if self.__match_re is None:
        expression = '^%s$' % self._expression
        try:
            self.__match_re = re.compile(expression,
                re.IGNORECASE | re.DOTALL)
        except AssertionError:
            # access error through sys to keep py3k and backward compat
            e = str(sys.exc_info()[1])
            if e.endswith('this version only supports 100 named groups'):
                raise TooManyFields('sorry, you are attempting to parse '
                    'too many complex fields')
        except re.error:
            raise NotImplementedError("Group names (e.g. (?P<name>) can "
                "cause failure, as they are not escaped properly: '%s'" %
                expression)
    return self.__match_re
def parse(self, string, evaluate_result=True):
    '''Match my format to the string exactly.

    Return a Result or Match instance or None if there's no match.
    '''
    matched = self._match_re.match(string)
    if matched is None:
        return None
    # Either evaluate now, or hand back a lazy Match wrapper.
    if not evaluate_result:
        return Match(self, matched)
    return self.evaluate_result(matched)
def search(self, string, pos=0, endpos=None, evaluate_result=True):
    '''Search the string for my format.

    Optionally start the search at "pos" character index and limit the
    search to a maximum index of endpos - equivalent to
    search(string[:endpos]).

    If the ``evaluate_result`` argument is set to ``False`` a
    Match instance is returned instead of the actual Result instance.

    Return either a Result instance or None if there's no match.
    '''
    if endpos is None:
        endpos = len(string)
    found = self._search_re.search(string, pos, endpos)
    if found is None:
        return None
    # Either evaluate now, or hand back a lazy Match wrapper.
    if not evaluate_result:
        return Match(self, found)
    return self.evaluate_result(found)
def findall(self, string, pos=0, endpos=None, extra_types={}, evaluate_result=True):
    '''Search "string" for all occurrences of "format".

    Optionally start the search at "pos" character index and limit the
    search to a maximum index of endpos - equivalent to
    search(string[:endpos]).

    Returns an iterator that holds Result or Match instances for each
    format match found.
    '''
    stop = len(string) if endpos is None else endpos
    return ResultIterator(self, string, pos, stop,
        evaluate_result=evaluate_result)
def _expand_named_fields(self, named_fields):
result = {}
for field, value in named_fields.items():
# split 'aaa[bbb][ccc]...' into 'aaa' and '[bbb][ccc]...'
basename, subkeys = re.match(r'([^\[]+)(.*)', field).groups()
# create nested dictionaries {'aaa': {'bbb': {'ccc': ...}}}
d = result
k = basename
if subkeys:
for subkey in re.findall(r'\[[^\]]+\]', subkeys):
d = d.setdefault(k,{})
k = subkey[1:-1]
# assign the value to the last key
d[k] = value
return result
def evaluate_result(self, m):
    '''Generate a Result instance for the given regex match object'''
    # ok, figure the fixed fields we've pulled out and type convert them
    fixed_fields = list(m.groups())
    for n in self._fixed_fields:
        if n in self._type_conversions:
            fixed_fields[n] = self._type_conversions[n](fixed_fields[n], m)
    # keep only the groups that belong to anonymous {} fields, in order
    fixed_fields = tuple(fixed_fields[n] for n in self._fixed_fields)

    # grab the named fields, converting where requested
    groupdict = m.groupdict()
    named_fields = {}
    name_map = {}
    for k in self._named_fields:
        # map the regex-safe group name back to the original field name
        korig = self._group_to_name_map[k]
        name_map[korig] = k
        if k in self._type_conversions:
            value = self._type_conversions[k](groupdict[k], m)
        else:
            value = groupdict[k]
        named_fields[korig] = value

    # now figure the match spans
    spans = dict((n, m.span(name_map[n])) for n in named_fields)
    spans.update((i, m.span(n + 1))
        for i, n in enumerate(self._fixed_fields))

    # and that's our result
    return Result(fixed_fields, self._expand_named_fields(named_fields), spans)
def _regex_replace(self, match):
return '\\' + match.group(1)
def _generate_expression(self):
    # Translate my _format attribute into the regex text we match with.
    pieces = []
    for token in PARSE_RE.split(self._format):
        if not token:
            continue
        if token == '{{':
            pieces.append(r'\{')
        elif token == '}}':
            pieces.append(r'\}')
        elif token.startswith('{'):
            # a braces-delimited replacement field
            pieces.append(self._handle_field(token))
        else:
            # literal text: escape any regex metacharacters in it
            pieces.append(REGEX_SAFETY.sub(self._regex_replace, token))
    return ''.join(pieces)
def _to_group_name(self, field):
# return a version of field which can be used as capture group, even
# though it might contain '.'
group = field.replace('.', '_').replace('[', '_').replace(']', '_')
# make sure we don't collide ("a.b" colliding with "a_b")
n = 1
while group in self._group_to_name_map:
n += 1
if '.' in field:
group = field.replace('.', '_' * n)
elif '_' in field:
group = field.replace('_', '_' * n)
else:
raise KeyError('duplicated group name %r' % (field, ))
# save off the mapping
self._group_to_name_map[group] = field
self._name_to_group_map[field] = group
return group
    def _handle_field(self, field):
        """Translate one braces-delimited replacement field into regex text.

        Returns the regex fragment that matches the field.  Side effects:
        records the field in self._fixed_fields / self._named_fields,
        registers any type-conversion callable in self._type_conversions,
        and advances self._group_index by the number of regex groups the
        emitted pattern introduces.
        """
        # first: lose the braces
        field = field[1:-1]
        # now figure whether this is an anonymous or named field, and whether
        # there's any format specification
        format = ''
        if field and field[0].isalpha():
            if ':' in field:
                name, format = field.split(':')
            else:
                name = field
            if name in self._name_to_group_map:
                if self._name_types[name] != format:
                    raise RepeatedNameError('field type %r for field "%s" '
                        'does not match previous seen type %r' % (format,
                        name, self._name_types[name]))
                group = self._name_to_group_map[name]
                # match previously-seen value
                return '(?P=%s)' % group
            else:
                group = self._to_group_name(name)
                self._name_types[name] = format
                self._named_fields.append(group)
                # this will become a group, which must not contain dots
                wrap = '(?P<%s>%%s)' % group
        else:
            # anonymous field: identified by its group index
            self._fixed_fields.append(self._group_index)
            wrap = '(%s)'
            if ':' in field:
                format = field[1:]
            group = self._group_index
        # simplest case: no type specifier ({} or {name})
        if not format:
            self._group_index += 1
            return wrap % '.+?'
        # decode the format specification
        format = extract_format(format, self._extra_types)
        # figure type conversions, if any
        type = format['type']
        # NOTE(review): 'x' (hex) is absent from this set, so hex fields skip
        # the numeric sign/fill handling below - confirm this is intended
        is_numeric = type and type in 'n%fegdobh'
        if type in self._extra_types:
            type_converter = self._extra_types[type]
            s = getattr(type_converter, 'pattern', r'.+?')
            def f(string, m):
                return type_converter(string)
            self._type_conversions[group] = f
        elif type == 'n':
            s = '\d{1,3}([,.]\d{3})*'
            self._group_index += 1
            self._type_conversions[group] = int_convert(10)
        elif type == 'b':
            s = '(0[bB])?[01]+'
            self._type_conversions[group] = int_convert(2)
            self._group_index += 1
        elif type == 'o':
            s = '(0[oO])?[0-7]+'
            self._type_conversions[group] = int_convert(8)
            self._group_index += 1
        elif type == 'x':
            s = '(0[xX])?[0-9a-fA-F]+'
            self._type_conversions[group] = int_convert(16)
            self._group_index += 1
        elif type == '%':
            s = r'\d+(\.\d+)?%'
            self._group_index += 1
            self._type_conversions[group] = percentage
        elif type == 'f':
            s = r'\d+\.\d+'
            self._type_conversions[group] = lambda s, m: float(s)
        elif type == 'e':
            s = r'\d+\.\d+[eE][-+]?\d+|nan|NAN|[-+]?inf|[-+]?INF'
            self._type_conversions[group] = lambda s, m: float(s)
        elif type == 'g':
            s = r'\d+(\.\d+)?([eE][-+]?\d+)?|nan|NAN|[-+]?inf|[-+]?INF'
            self._group_index += 2
            self._type_conversions[group] = lambda s, m: float(s)
        elif type == 'd':
            s = r'\d+|0[xX][0-9a-fA-F]+|[0-9a-fA-F]+|0[bB][01]+|0[oO][0-7]+'
            self._type_conversions[group] = int_convert(10)
        # the t* branches below pass absolute group numbers to date_convert,
        # so each one advances _group_index by the groups its pattern adds
        elif type == 'ti':
            s = r'(\d{4}-\d\d-\d\d)((\s+|T)%s)?(Z|\s*[-+]\d\d:?\d\d)?' % \
                TIME_PAT
            n = self._group_index
            self._type_conversions[group] = partial(date_convert, ymd=n + 1,
                hms=n + 4, tz=n + 7)
            self._group_index += 7
        elif type == 'tg':
            s = r'(\d{1,2}[-/](\d{1,2}|%s)[-/]\d{4})(\s+%s)?%s?%s?' % (
                ALL_MONTHS_PAT, TIME_PAT, AM_PAT, TZ_PAT)
            n = self._group_index
            self._type_conversions[group] = partial(date_convert, dmy=n + 1,
                hms=n + 5, am=n + 8, tz=n + 9)
            self._group_index += 9
        elif type == 'ta':
            s = r'((\d{1,2}|%s)[-/]\d{1,2}[-/]\d{4})(\s+%s)?%s?%s?' % (
                ALL_MONTHS_PAT, TIME_PAT, AM_PAT, TZ_PAT)
            n = self._group_index
            self._type_conversions[group] = partial(date_convert, mdy=n + 1,
                hms=n + 5, am=n + 8, tz=n + 9)
            self._group_index += 9
        elif type == 'te':
            # this will allow microseconds through if they're present, but meh
            s = r'(%s,\s+)?(\d{1,2}\s+%s\s+\d{4})\s+%s%s' % (DAYS_PAT,
                MONTHS_PAT, TIME_PAT, TZ_PAT)
            n = self._group_index
            self._type_conversions[group] = partial(date_convert, dmy=n + 3,
                hms=n + 5, tz=n + 8)
            self._group_index += 8
        elif type == 'th':
            # slight flexibility here from the stock Apache format
            s = r'(\d{1,2}[-/]%s[-/]\d{4}):%s%s' % (MONTHS_PAT, TIME_PAT,
                TZ_PAT)
            n = self._group_index
            self._type_conversions[group] = partial(date_convert, dmy=n + 1,
                hms=n + 3, tz=n + 6)
            self._group_index += 6
        elif type == 'tc':
            s = r'(%s)\s+%s\s+(\d{1,2})\s+%s\s+(\d{4})' % (
                DAYS_PAT, MONTHS_PAT, TIME_PAT)
            n = self._group_index
            self._type_conversions[group] = partial(date_convert,
                d_m_y=(n + 4, n + 3, n + 8), hms=n + 5)
            self._group_index += 8
        elif type == 'tt':
            s = r'%s?%s?%s?' % (TIME_PAT, AM_PAT, TZ_PAT)
            n = self._group_index
            self._type_conversions[group] = partial(date_convert, hms=n + 1,
                am=n + 4, tz=n + 5)
            self._group_index += 5
        elif type == 'ts':
            s = r'%s(\s+)(\d+)(\s+)(\d{1,2}:\d{1,2}:\d{1,2})?' % (MONTHS_PAT)
            n = self._group_index
            self._type_conversions[group] = partial(date_convert, mm=n+1, dd=n+3,
                hms=n + 5)
            self._group_index += 5
        elif type:
            # single-character class spec such as 'w' -> \w+
            s = r'\%s+' % type
        else:
            s = '.+?'
        align = format['align']
        fill = format['fill']
        # handle some numeric-specific things like fill and sign
        if is_numeric:
            # prefix with something (align "=" trumps zero)
            if align == '=':
                # special case - align "=" acts like the zero above but with
                # configurable fill defaulting to "0"
                if not fill:
                    fill = '0'
                s = '%s*' % fill + s
            elif format['zero']:
                s = '0*' + s
            # allow numbers to be prefixed with a sign
            s = r'[-+ ]?' + s
            if not fill:
                fill = ' '
        # Place into a group now - this captures the value we want to keep.
        # Everything else from now is just padding to be stripped off
        if wrap:
            s = wrap % s
            self._group_index += 1
        if format['width']:
            # all we really care about is that if the format originally
            # specified a width then there will probably be padding - without
            # an explicit alignment that'll mean right alignment with spaces
            # padding
            if not align:
                align = '>'
        if fill in '.\+?*[](){}^$':
            fill = '\\' + fill
        # align "=" has been handled
        if align == '<':
            s = '%s%s*' % (s, fill)
        elif align == '>':
            s = '%s*%s' % (fill, s)
        elif align == '^':
            s = '%s*%s%s*' % (fill, s, fill)
        return s
class Result(object):
    """The result of a parse() or search().

    Positional (fixed) captures are retrieved with result[index]; named
    captures with result['name'].  ``spans`` maps the same keys to the
    (start, end) positions of each capture in the input string.
    """
    def __init__(self, fixed, named, spans):
        self.fixed = fixed
        self.named = named
        self.spans = spans

    def __getitem__(self, item):
        # integers index the fixed tuple, everything else the named dict
        source = self.fixed if isinstance(item, int) else self.named
        return source[item]

    def __repr__(self):
        return '<%s %r %r>' % (type(self).__name__, self.fixed, self.named)
class Match(object):
    """Deferred result of a parse() or search().

    Produced when results are not evaluated immediately; keeps the raw
    regex match object and the owning parser so a Result can be built
    later via Parser.evaluate_result.
    """
    def __init__(self, parser, match):
        self.parser, self.match = parser, match

    def evaluate_result(self):
        """Build and return the Result for this match."""
        return self.parser.evaluate_result(self.match)
class ResultIterator(object):
    """Iterator produced by findall().

    Yields one Result per match (or one Match per match when
    ``evaluate_result`` is False), scanning forward from ``pos`` up to
    ``endpos``.
    """
    def __init__(self, parser, string, pos, endpos, evaluate_result=True):
        self.parser = parser
        self.string = string
        self.pos = pos
        self.endpos = endpos
        self.evaluate_result = evaluate_result

    def __iter__(self):
        return self

    def __next__(self):
        found = self.parser._search_re.search(self.string, self.pos, self.endpos)
        if found is None:
            raise StopIteration()
        # resume scanning after this match
        self.pos = found.end()
        if not self.evaluate_result:
            return Match(self.parser, found)
        return self.parser.evaluate_result(found)

    # pre-py3k compat
    next = __next__
def parse(format, string, extra_types=None, evaluate_result=True):
    '''Using "format" attempt to pull values from "string".

    The format must match the string contents exactly. If the value
    you're looking for is instead just a part of the string use
    search().

    If ``evaluate_result`` is True the return value will be a Result instance with two attributes:

     .fixed - tuple of fixed-position values from the string
     .named - dict of named values from the string

    If ``evaluate_result`` is False the return value will be a Match instance with one method:

     .evaluate_result() - This will return a Result instance like you would get
       with ``evaluate_result`` set to True

    If the format is invalid a ValueError will be raised.

    See the module documentation for the use of "extra_types".

    In the case there is no match parse() will return None.
    '''
    # The default was previously a shared mutable {} (the classic
    # mutable-default-argument pitfall); None keeps behaviour identical.
    extra_types = {} if extra_types is None else extra_types
    return Parser(format, extra_types=extra_types).parse(string, evaluate_result=evaluate_result)
def search(format, string, pos=0, endpos=None, extra_types=None, evaluate_result=True):
    '''Search "string" for the first occurrence of "format".

    The format may occur anywhere within the string. If
    instead you wish for the format to exactly match the string
    use parse().

    Optionally start the search at "pos" character index and limit the search
    to a maximum index of endpos - equivalent to search(string[:endpos]).

    If ``evaluate_result`` is True the return value will be a Result instance with two attributes:

     .fixed - tuple of fixed-position values from the string
     .named - dict of named values from the string

    If ``evaluate_result`` is False the return value will be a Match instance with one method:

     .evaluate_result() - This will return a Result instance like you would get
       with ``evaluate_result`` set to True

    If the format is invalid a ValueError will be raised.

    See the module documentation for the use of "extra_types".

    In the case there is no match search() will return None.
    '''
    # None default avoids sharing one mutable dict across calls.
    extra_types = {} if extra_types is None else extra_types
    return Parser(format, extra_types=extra_types).search(string, pos, endpos, evaluate_result=evaluate_result)
def findall(format, string, pos=0, endpos=None, extra_types=None, evaluate_result=True):
    '''Search "string" for all occurrences of "format".

    You will be returned an iterator that holds Result instances
    for each format match found.

    Optionally start the search at "pos" character index and limit the search
    to a maximum index of endpos - equivalent to search(string[:endpos]).

    If ``evaluate_result`` is True each returned Result instance has two attributes:

     .fixed - tuple of fixed-position values from the string
     .named - dict of named values from the string

    If ``evaluate_result`` is False each returned value is a Match instance with one method:

     .evaluate_result() - This will return a Result instance like you would get
       with ``evaluate_result`` set to True

    If the format is invalid a ValueError will be raised.

    See the module documentation for the use of "extra_types".
    '''
    # None default avoids sharing one mutable dict across calls.
    extra_types = {} if extra_types is None else extra_types
    return Parser(format, extra_types=extra_types).findall(string, pos, endpos, evaluate_result=evaluate_result)
def compile(format, extra_types=None):
    '''Create a Parser instance to parse "format".

    The resultant Parser has a method .parse(string) which
    behaves in the same manner as parse(format, string).

    Use this function if you intend to parse many strings
    with the same format.

    See the module documentation for the use of "extra_types".

    Returns a Parser instance.
    '''
    # None default avoids sharing one mutable dict across calls.
    extra_types = {} if extra_types is None else extra_types
    return Parser(format, extra_types=extra_types)
# Copyright (c) 2012-2013 Richard Jones <richard@python.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# vim: set filetype=python ts=4 sw=4 et si tw=75
| nateprewitt/pipenv | pipenv/vendor/parse.py | Python | mit | 43,969 |
# -*- coding: utf-8 -*-
# borrowed from https://github.com/kevinhendricks/KindleUnpack and modified
from __future__ import unicode_literals, division, absolute_import, print_function
import struct
import string
import re
from PIL import Image
from io import BytesIO
# note: struct pack, unpack, unpack_from all require bytestring format
# data all the way up to at least python 2.7.5, python 3 okay with bytestring
from .unipath import pathof
# important pdb header offsets
# (values below are byte offsets into the PDB/MOBI headers, read and written
# with getint()/writeint(); the exth_* values further down are EXTH record
# type ids, not offsets)
unique_id_seed = 68
number_of_pdb_records = 76
# important palmdoc header offsets
book_length = 4
book_record_count = 8
first_pdb_record = 78
# important rec0 offsets
length_of_book = 4
mobi_header_base = 16
mobi_header_length = 20
mobi_type = 24
mobi_version = 36
first_non_text = 80
title_offset = 84
first_resc_record = 108
first_content_index = 192
last_content_index = 194
kf8_fdst_index = 192 # for KF8 mobi headers
fcis_index = 200
flis_index = 208
srcs_index = 224
srcs_count = 228
primary_index = 244
datp_index = 256
huffoff = 112
hufftbloff = 120
# rupor
# EXTH metadata record type ids (matched against the id field of each EXTH
# record by read_exth/write_exth/del_exth)
exth_asin = 113
exth_cover_offset = 201
exth_thumb_offset = 202
exth_thumbnail_uri = 129
exth_cdetype = 501
exth_cdecontentkey = 504
def to_base(num, base=32, min_num_digits=None):
    """Render integer *num* in *base* using digits 0-9A-Z.

    If *min_num_digits* is given, the result is zero-padded on the left to
    at least that many digits (the '-' sign, if any, is extra).
    """
    alphabet = string.digits + string.ascii_uppercase
    if num == 0:
        return '0' if min_num_digits is None else '0' * min_num_digits
    negative = num < 0
    value = -num if negative else num
    out = []
    while value:
        value, rem = divmod(value, base)
        out.append(alphabet[rem])
    if min_num_digits is not None and len(out) < min_num_digits:
        out.extend('0' * (min_num_digits - len(out)))
    if negative:
        out.append('-')
    out.reverse()
    return ''.join(out)
def getint(datain, ofs, sz=b'L'):
    """Read one big-endian unsigned value (struct code *sz*) at offset *ofs*."""
    return struct.unpack_from(b'>' + sz, datain, ofs)[0]
def writeint(datain, ofs, n, size=b'L'):
    """Return a copy of *datain* with big-endian *n* written at offset *ofs*.

    *size* selects the width: b'L' writes 4 bytes, anything else writes a
    2-byte (b'>H') value.  The parameter was renamed from ``len``, which
    shadowed the builtin; all in-file callers pass it positionally.
    """
    if size == b'L':
        return datain[:ofs] + struct.pack(b'>L', n) + datain[ofs + 4:]
    return datain[:ofs] + struct.pack(b'>H', n) + datain[ofs + 2:]
def getsecaddr(datain, secno):
    """Return the (start, end) byte offsets of PDB section *secno*.

    The last section ends at the end of the file; every other section ends
    where the following one starts.  Raises AssertionError when *secno* is
    out of range.
    """
    nsec = getint(datain, number_of_pdb_records, b'H')
    # Bug fix: the original check `secno >= 0 & secno < nsec` parsed as
    # `secno >= (0 & secno) < nsec` because bitwise & binds tighter than
    # comparisons, so the upper bound was never actually validated.
    assert 0 <= secno < nsec, 'secno %d out of range (nsec=%d)' % (secno, nsec)
    secstart = getint(datain, first_pdb_record + secno * 8)
    if secno == nsec - 1:
        secend = len(datain)
    else:
        secend = getint(datain, first_pdb_record + (secno + 1) * 8)
    return secstart, secend
def readsection(datain, secno):
    """Return the raw bytes of PDB section *secno*."""
    begin, finish = getsecaddr(datain, secno)
    return datain[begin:finish]
def writesection(datain, secno, secdata): # overwrite, accounting for different length
    """Replace the contents of PDB section *secno* with *secdata*.

    Rebuilds the whole container: index entries after *secno* are shifted
    by the size difference and the unique-id seed is refreshed.  Returns
    the new container as bytes.
    """
    # dataout = deletesectionrange(datain,secno, secno)
    # return insertsection(dataout, secno, secdata)
    datalst = []
    nsec = getint(datain, number_of_pdb_records, b'H')
    zerosecstart, zerosecend = getsecaddr(datain, 0)
    secstart, secend = getsecaddr(datain, secno)
    # growth (or shrinkage) of the replaced section in bytes
    dif = len(secdata) - (secend - secstart)
    datalst.append(datain[:unique_id_seed])
    datalst.append(struct.pack(b'>L', 2 * nsec + 1))  # fresh unique-id seed
    datalst.append(datain[unique_id_seed + 4:number_of_pdb_records])
    datalst.append(struct.pack(b'>H', nsec))  # section count unchanged
    newstart = zerosecstart
    # entries before secno are copied untouched
    for i in range(0, secno):
        ofs, flgval = struct.unpack_from(b'>2L', datain, first_pdb_record + i * 8)
        datalst.append(struct.pack(b'>L', ofs) + struct.pack(b'>L', flgval))
    # entry for secno keeps its start; its attributes/id long becomes 2*secno
    datalst.append(struct.pack(b'>L', secstart) + struct.pack(b'>L', (2 * secno)))
    # entries after secno shift by the length difference
    for i in range(secno + 1, nsec):
        ofs, flgval = struct.unpack_from(b'>2L', datain, first_pdb_record + i * 8)
        ofs = ofs + dif
        datalst.append(struct.pack(b'>L', ofs) + struct.pack(b'>L', flgval))
    # preserve any padding between the index and the first section
    lpad = newstart - (first_pdb_record + 8 * nsec)
    if lpad > 0:
        datalst.append(b'\0' * lpad)
    datalst.append(datain[zerosecstart:secstart])
    datalst.append(secdata)
    datalst.append(datain[secend:])
    dataout = b''.join(datalst)
    return dataout
def nullsection(datain, secno): # make it zero-length without deleting it
    """Shrink PDB section *secno* to zero bytes while keeping its index entry.

    Offsets of later sections are pulled back by the removed length; the
    section count and ids are unchanged.  Returns the new container as bytes.
    """
    datalst = []
    nsec = getint(datain, number_of_pdb_records, b'H')
    secstart, secend = getsecaddr(datain, secno)
    zerosecstart, zerosecend = getsecaddr(datain, 0)
    dif = secend - secstart  # number of data bytes being dropped
    datalst.append(datain[:first_pdb_record])
    # entries up to and including secno keep their offsets
    for i in range(0, secno + 1):
        ofs, flgval = struct.unpack_from(b'>2L', datain, first_pdb_record + i * 8)
        datalst.append(struct.pack(b'>L', ofs) + struct.pack(b'>L', flgval))
    # entries after secno move back by the removed length
    for i in range(secno + 1, nsec):
        ofs, flgval = struct.unpack_from(b'>2L', datain, first_pdb_record + i * 8)
        ofs = ofs - dif
        datalst.append(struct.pack(b'>L', ofs) + struct.pack(b'>L', flgval))
    # preserve any padding between the index and the first section
    lpad = zerosecstart - (first_pdb_record + 8 * nsec)
    if lpad > 0:
        datalst.append(b'\0' * lpad)
    datalst.append(datain[zerosecstart: secstart])
    datalst.append(datain[secend:])
    dataout = b''.join(datalst)
    return dataout
def deletesectionrange(datain, firstsec, lastsec): # delete a range of sections
    """Remove PDB sections firstsec..lastsec (inclusive) from the container.

    The section count, unique-id seed and index are rewritten; surviving
    sections after the range get renumbered ids and offsets that account
    for both the removed data and the smaller index.
    """
    datalst = []
    firstsecstart, firstsecend = getsecaddr(datain, firstsec)
    lastsecstart, lastsecend = getsecaddr(datain, lastsec)
    zerosecstart, zerosecend = getsecaddr(datain, 0)
    # total shrinkage: removed section data plus the removed index entries
    dif = lastsecend - firstsecstart + 8 * (lastsec - firstsec + 1)
    nsec = getint(datain, number_of_pdb_records, b'H')
    datalst.append(datain[:unique_id_seed])
    datalst.append(struct.pack(b'>L', 2 * (nsec - (lastsec - firstsec + 1)) + 1))
    datalst.append(datain[unique_id_seed + 4:number_of_pdb_records])
    datalst.append(struct.pack(b'>H', nsec - (lastsec - firstsec + 1)))
    newstart = zerosecstart - 8 * (lastsec - firstsec + 1)
    # entries before the range: data stays put but the index itself shrinks
    for i in range(0, firstsec):
        ofs, flgval = struct.unpack_from(b'>2L', datain, first_pdb_record + i * 8)
        ofs = ofs - 8 * (lastsec - firstsec + 1)
        datalst.append(struct.pack(b'>L', ofs) + struct.pack(b'>L', flgval))
    # entries after the range: shift by the full difference, renumber ids
    for i in range(lastsec + 1, nsec):
        ofs, flgval = struct.unpack_from(b'>2L', datain, first_pdb_record + i * 8)
        ofs = ofs - dif
        flgval = 2 * (i - (lastsec - firstsec + 1))
        datalst.append(struct.pack(b'>L', ofs) + struct.pack(b'>L', flgval))
    lpad = newstart - (first_pdb_record + 8 * (nsec - (lastsec - firstsec + 1)))
    if lpad > 0:
        datalst.append(b'\0' * lpad)
    datalst.append(datain[zerosecstart:firstsecstart])
    datalst.append(datain[lastsecend:])
    dataout = b''.join(datalst)
    return dataout
def insertsection(datain, secno, secdata): # insert a new section
    """Insert *secdata* as a new PDB section at index *secno*.

    Existing sections from *secno* on move one slot down: their offsets grow
    by the new index entry (8 bytes) plus the inserted data length, and
    their ids are renumbered.  Returns the new container as bytes.
    """
    datalst = []
    nsec = getint(datain, number_of_pdb_records, b'H')
    # print("inserting secno" , secno, "into" ,nsec, "sections")
    secstart, secend = getsecaddr(datain, secno)
    zerosecstart, zerosecend = getsecaddr(datain, 0)
    dif = len(secdata)  # size of the inserted payload
    datalst.append(datain[:unique_id_seed])
    datalst.append(struct.pack(b'>L', 2 * (nsec + 1) + 1))
    datalst.append(datain[unique_id_seed + 4:number_of_pdb_records])
    datalst.append(struct.pack(b'>H', nsec + 1))
    newstart = zerosecstart + 8
    # entries before the insertion point: shifted only by the index growth
    for i in range(0, secno):
        ofs, flgval = struct.unpack_from(b'>2L', datain, first_pdb_record + i * 8)
        ofs += 8
        datalst.append(struct.pack(b'>L', ofs) + struct.pack(b'>L', flgval))
    # the new entry takes the old secno start plus the index growth
    datalst.append(struct.pack(b'>L', secstart + 8) + struct.pack(b'>L', (2 * secno)))
    # entries from secno on: shifted by index growth plus the inserted data
    for i in range(secno, nsec):
        ofs, flgval = struct.unpack_from(b'>2L', datain, first_pdb_record + i * 8)
        ofs = ofs + dif + 8
        flgval = 2 * (i + 1)
        datalst.append(struct.pack(b'>L', ofs) + struct.pack(b'>L', flgval))
    lpad = newstart - (first_pdb_record + 8 * (nsec + 1))
    if lpad > 0:
        datalst.append(b'\0' * lpad)
    datalst.append(datain[zerosecstart:secstart])
    datalst.append(secdata)
    datalst.append(datain[secstart:])
    dataout = b''.join(datalst)
    return dataout
def insertsectionrange(sectionsource, firstsec, lastsec, sectiontarget, targetsec): # insert a range of sections
    """Copy sections firstsec..lastsec of *sectionsource* into *sectiontarget*.

    The copied sections are inserted starting at index *targetsec*; every
    target index entry is rewritten to account for the larger index and the
    inserted data, and ids are renumbered.  Returns the new container.
    """
    datalst = []
    nsec = getint(sectiontarget, number_of_pdb_records, b'H')
    zerosecstart, zerosecend = getsecaddr(sectiontarget, 0)
    insstart, nul = getsecaddr(sectiontarget, targetsec)
    nins = lastsec - firstsec + 1  # number of sections being inserted
    srcstart, nul = getsecaddr(sectionsource, firstsec)
    nul, srcend = getsecaddr(sectionsource, lastsec)
    newstart = zerosecstart + 8 * nins
    datalst.append(sectiontarget[:unique_id_seed])
    datalst.append(struct.pack(b'>L', 2 * (nsec + nins) + 1))
    datalst.append(sectiontarget[unique_id_seed + 4:number_of_pdb_records])
    datalst.append(struct.pack(b'>H', nsec + nins))
    # existing entries before the insertion point: index growth only
    for i in range(0, targetsec):
        ofs, flgval = struct.unpack_from(b'>2L', sectiontarget, first_pdb_record + i * 8)
        ofsnew = ofs + 8 * nins
        flgvalnew = flgval
        datalst.append(struct.pack(b'>L', ofsnew) + struct.pack(b'>L', flgvalnew))
        # print(ofsnew, flgvalnew, ofs, flgval)
    # new entries for the copied sections, preserving their relative layout
    srcstart0, nul = getsecaddr(sectionsource, firstsec)
    for i in range(nins):
        isrcstart, nul = getsecaddr(sectionsource, firstsec + i)
        ofsnew = insstart + (isrcstart - srcstart0) + 8 * nins
        flgvalnew = 2 * (targetsec + i)
        datalst.append(struct.pack(b'>L', ofsnew) + struct.pack(b'>L', flgvalnew))
        # print(ofsnew, flgvalnew)
    dif = srcend - srcstart  # total size of the copied data
    # entries at and after the insertion point: shift by data + index growth
    for i in range(targetsec, nsec):
        ofs, flgval = struct.unpack_from(b'>2L', sectiontarget, first_pdb_record + i * 8)
        ofsnew = ofs + dif + 8 * nins
        flgvalnew = 2 * (i + nins)
        datalst.append(struct.pack(b'>L', ofsnew) + struct.pack(b'>L', flgvalnew))
        # print(ofsnew, flgvalnew, ofs, flgval)
    lpad = newstart - (first_pdb_record + 8 * (nsec + nins))
    if lpad > 0:
        datalst.append(b'\0' * lpad)
    datalst.append(sectiontarget[zerosecstart:insstart])
    datalst.append(sectionsource[srcstart:srcend])
    datalst.append(sectiontarget[insstart:])
    dataout = b''.join(datalst)
    return dataout
def get_exth_params(rec0):
    """Return (exth_base, exth_length, exth_record_count) for record 0."""
    base = mobi_header_base + getint(rec0, mobi_header_length)
    length = getint(rec0, base + 4)
    count = getint(rec0, base + 8)
    return base, length, count
def add_exth(rec0, exth_num, exth_bytes):
    """Insert a new EXTH record (id *exth_num*, payload *exth_bytes*) at the
    front of the EXTH list and keep the title offset in sync."""
    ebase, elen, enum = get_exth_params(rec0)
    recsize = 8 + len(exth_bytes)
    parts = [
        rec0[0:ebase + 4],
        struct.pack(b'>L', elen + recsize),   # grown EXTH length
        struct.pack(b'>L', enum + 1),         # one more record
        struct.pack(b'>L', exth_num),
        struct.pack(b'>L', recsize),
        exth_bytes,
        rec0[ebase + 12:],
    ]
    updated = b''.join(parts)
    # the stored title offset moves by the number of bytes we inserted
    return writeint(updated, title_offset, getint(updated, title_offset) + recsize)
def read_exth(rec0, exth_num):
    """Return the payloads of every EXTH record whose id equals *exth_num*.

    There may be multiple matching records, so a list is returned (empty
    when no record matches).
    """
    values = []
    ebase, _elen, enum = get_exth_params(rec0)
    pos = ebase + 12  # first EXTH record starts after the 12-byte header
    for _ in range(enum):
        size = getint(rec0, pos + 4)
        if getint(rec0, pos) == exth_num:
            values.append(rec0[pos + 8:pos + size])
        pos += size
    return values
def write_exth(rec0, exth_num, exth_bytes):
    """Replace the payload of the first EXTH record with id *exth_num*.

    The EXTH length header and the stored title offset are adjusted by the
    size difference.  If no record with that id exists, *rec0* is returned
    unchanged.
    """
    ebase, elen, enum = get_exth_params(rec0)
    ebase_idx = ebase + 12  # byte offset of the EXTH record being examined
    enum_idx = enum         # records left to scan
    while enum_idx > 0:
        exth_id = getint(rec0, ebase_idx)
        if exth_id == exth_num:
            # size delta between the new record and the existing one
            dif = len(exth_bytes) + 8 - getint(rec0, ebase_idx + 4)
            newrec0 = rec0
            if dif != 0:
                # keep the stored title offset in sync (same as add_exth)
                newrec0 = writeint(newrec0, title_offset, getint(newrec0, title_offset) + dif)
            # NOTE(review): the prefix comes from newrec0 (updated title
            # offset) while the later slices come from rec0; those regions
            # don't overlap, so the result is consistent.
            return newrec0[:ebase + 4] + struct.pack(b'>L', elen + len(exth_bytes) + 8 - getint(rec0, ebase_idx + 4)) + \
                struct.pack(b'>L', enum) + rec0[ebase + 12:ebase_idx + 4] + \
                struct.pack(b'>L', len(exth_bytes) + 8) + exth_bytes + \
                rec0[ebase_idx + getint(rec0, ebase_idx + 4):]
        enum_idx = enum_idx - 1
        ebase_idx = ebase_idx + getint(rec0, ebase_idx + 4)
    return rec0
def del_exth(rec0, exth_num):
    """Delete the first EXTH record with id *exth_num*.

    The EXTH length, record count and stored title offset are adjusted.
    If no record with that id exists, *rec0* is returned unchanged.
    """
    ebase, elen, enum = get_exth_params(rec0)
    pos = ebase + 12  # offset of the EXTH record being examined
    for _ in range(enum):
        rec_id = getint(rec0, pos)
        rec_size = getint(rec0, pos + 4)
        if rec_id == exth_num:
            # pull the title offset back by the removed bytes, then cut the
            # record out and patch the EXTH length / count header
            out = writeint(rec0, title_offset, getint(rec0, title_offset) - rec_size)
            out = out[:pos] + out[pos + rec_size:]
            return out[0:ebase + 4] + struct.pack(b'>L', elen - rec_size) + struct.pack(b'>L', enum - 1) + out[ebase + 12:]
        pos += rec_size
    return rec0
class mobi_split:
def __init__(self, infile, document_id, remove_personal_label, format):
if format == 'mobi':
datain = b''
with open(pathof(infile), 'rb') as f:
datain = f.read()
datain_rec0 = readsection(datain, 0)
ver = getint(datain_rec0, mobi_version)
self.combo = (ver != 8)
if not self.combo:
return
exth121 = read_exth(datain_rec0, 121)
if len(exth121) == 0:
self.combo = False
return
else:
# only pay attention to first exth121
# (there should only be one)
datain_kf8, = struct.unpack_from(b'>L', exth121[0], 0)
if datain_kf8 == 0xffffffff:
self.combo = False
return
datain_kfrec0 = readsection(datain, datain_kf8)
self.result_file = bytearray(datain)
# check if there are SRCS records and reduce them
srcs = getint(datain_rec0, srcs_index)
num_srcs = getint(datain_rec0, srcs_count)
if srcs != 0xffffffff and num_srcs > 0:
for i in range(srcs, srcs + num_srcs):
self.result_file = nullsection(self.result_file, i)
datain_rec0 = writeint(datain_rec0, srcs_index, 0xffffffff)
datain_rec0 = writeint(datain_rec0, srcs_count, 0)
if remove_personal_label:
datain_rec0 = add_exth(datain_rec0, exth_cdetype, b"EBOK");
exth = read_exth(datain_rec0, exth_asin)
if len(exth) == 0:
datain_rec0 = add_exth(datain_rec0, exth_asin, bytes(to_base(document_id.int, base=32, min_num_digits=10), 'ascii'))
# exth = read_exth(datain_rec0, exth_cdecontentkey)
# if len(exth) == 0:
# datain_rec0 = add_exth(datain_rec0, exth_cdecontentkey, bytes(to_base(document_id.int, base=32, min_num_digits=10), 'ascii'))
self.result_file = writesection(self.result_file, 0, datain_rec0)
firstimage = getint(datain_rec0, first_resc_record)
# Only keep the correct EXTH 116 StartOffset, KG 2.5 carries over the one from the mobi7 part, which then points at garbage in the mobi8 part, and confuses FW 3.4
kf8starts = read_exth(datain_kfrec0, 116)
# If we have multiple StartOffset, keep only the last one
kf8start_count = len(kf8starts)
while kf8start_count > 1:
kf8start_count -= 1
datain_kfrec0 = del_exth(datain_kfrec0, 116)
exth_cover = read_exth(datain_kfrec0, exth_cover_offset)
if len(exth_cover) > 0:
cover_index, = struct.unpack_from('>L', exth_cover[0], 0)
cover_index += firstimage
else:
cover_index = 0xffffffff
exth_thumb = read_exth(datain_kfrec0, exth_thumb_offset)
if len(exth_thumb) > 0:
thumb_index, = struct.unpack_from('>L', exth_thumb[0], 0)
thumb_index += firstimage
else:
thumb_index = 0xffffffff
if cover_index != 0xffffffff:
if thumb_index != 0xffffffff:
# make sure embedded thumbnail has the right size
cover_image = readsection(datain, cover_index)
thumb = BytesIO()
im = Image.open(BytesIO(cover_image))
im.thumbnail((330, 470), Image.ANTIALIAS)
im.save(thumb, format=im.format)
self.result_file = writesection(self.result_file, thumb_index, thumb.getvalue())
else:
# if nothing works - fall back to the old trick, set thumbnail to the cover image
datain_kfrec0 = add_exth(datain_kfrec0, exth_thumb_offset, exth_cover[0])
thumb_index = cover_index
exth = read_exth(datain_kfrec0, exth_thumbnail_uri)
if len(exth) > 0:
datain_kfrec0 = del_exth(datain_kfrec0, exth_thumbnail_uri)
datain_kfrec0 = add_exth(datain_kfrec0, exth_thumbnail_uri, bytes('kindle:embed:%s' % (to_base(thumb_index - firstimage, base=32, min_num_digits=4)), 'ascii'))
if remove_personal_label:
datain_kfrec0 = add_exth(datain_kfrec0, exth_cdetype, b"EBOK");
# exth = read_exth(datain_kfrec0, exth_asin)
# if len(exth) == 0:
# datain_kfrec0 = add_exth(datain_kfrec0, exth_asin, bytes(to_base(document_id.int, base=32, min_num_digits=10), 'ascii'))
exth = read_exth(datain_kfrec0, exth_cdecontentkey)
if len(exth) == 0:
datain_kfrec0 = add_exth(datain_kfrec0, exth_cdecontentkey, bytes(to_base(document_id.int, base=32, min_num_digits=10), 'ascii'))
self.result_file = writesection(self.result_file, datain_kf8, datain_kfrec0)
elif format == 'azw3':
datain = b''
with open(pathof(infile), 'rb') as f:
datain = f.read()
datain_rec0 = readsection(datain, 0)
ver = getint(datain_rec0, mobi_version)
self.combo = (ver != 8)
if not self.combo:
return
exth121 = read_exth(datain_rec0, 121)
if len(exth121) == 0:
self.combo = False
return
else:
# only pay attention to first exth121
# (there should only be one)
datain_kf8, = struct.unpack_from(b'>L', exth121[0], 0)
if datain_kf8 == 0xffffffff:
self.combo = False
return
datain_kfrec0 = readsection(datain, datain_kf8)
# create the standalone mobi7
num_sec = getint(datain, number_of_pdb_records, b'H')
# remove BOUNDARY up to but not including ELF record
self.result_file7 = deletesectionrange(datain, datain_kf8 - 1, num_sec - 2)
# check if there are SRCS records and delete them
srcs = getint(datain_rec0, srcs_index)
num_srcs = getint(datain_rec0, srcs_count)
if srcs != 0xffffffff and num_srcs > 0:
self.result_file7 = deletesectionrange(self.result_file7, srcs, srcs + num_srcs - 1)
datain_rec0 = writeint(datain_rec0, srcs_index, 0xffffffff)
datain_rec0 = writeint(datain_rec0, srcs_count, 0)
# reset the EXTH 121 KF8 Boundary meta data to 0xffffffff
datain_rec0 = write_exth(datain_rec0, 121, struct.pack(b'>L', 0xffffffff))
# datain_rec0 = del_exth(datain_rec0,121)
# datain_rec0 = del_exth(datain_rec0,534)
# don't remove the EXTH 125 KF8 Count of Resources, seems to be present in mobi6 files as well
# set the EXTH 129 KF8 Masthead / Cover Image string to the null string
datain_rec0 = write_exth(datain_rec0, 129, b'')
# don't remove the EXTH 131 KF8 Unidentified Count, seems to be present in mobi6 files as well
# need to reset flags stored in 0x80-0x83
# old mobi with exth: 0x50, mobi7 part with exth: 0x1850, mobi8 part with exth: 0x1050
# Bit Flags
# 0x1000 = Bit 12 indicates if embedded fonts are used or not
# 0x0800 = means this Header points to *shared* images/resource/fonts ??
# 0x0080 = unknown new flag, why is this now being set by Kindlegen 2.8?
# 0x0040 = exth exists
# 0x0010 = Not sure but this is always set so far
fval, = struct.unpack_from(b'>L', datain_rec0, 0x80)
# need to remove flag 0x0800 for KindlePreviewer 2.8 and unset Bit 12 for embedded fonts
fval = fval & 0x07FF
datain_rec0 = datain_rec0[:0x80] + struct.pack(b'>L', fval) + datain_rec0[0x84:]
self.result_file7 = writesection(self.result_file7, 0, datain_rec0)
firstimage = getint(datain_rec0, first_resc_record)
lastimage = getint(datain_rec0, last_content_index, b'H')
# print("Old First Image, last Image", firstimage,lastimage)
if lastimage == 0xffff:
# find the lowest of the next sections and copy up to that.
ofs_list = [(fcis_index, b'L'), (flis_index, b'L'), (datp_index, b'L'), (hufftbloff, b'L')]
for ofs, sz in ofs_list:
n = getint(datain_rec0, ofs, sz)
# print("n",n)
if n > 0 and n < lastimage:
lastimage = n - 1
# print("First Image, last Image", firstimage,lastimage)
# Try to null out FONT and RES, but leave the (empty) PDB record so image refs remain valid
for i in range(firstimage, lastimage):
imgsec = readsection(self.result_file7, i)
if imgsec[0:4] in [b'RESC', b'FONT']:
self.result_file7 = nullsection(self.result_file7, i)
# mobi7 finished
# create standalone mobi8
self.result_file8 = deletesectionrange(datain, 0, datain_kf8 - 1)
target = getint(datain_kfrec0, first_resc_record)
self.result_file8 = insertsectionrange(datain, firstimage, lastimage, self.result_file8, target)
datain_kfrec0 = readsection(self.result_file8, 0)
# Only keep the correct EXTH 116 StartOffset, KG 2.5 carries over the one from the mobi7 part, which then points at garbage in the mobi8 part, and confuses FW 3.4
kf8starts = read_exth(datain_kfrec0, 116)
# If we have multiple StartOffset, keep only the last one
kf8start_count = len(kf8starts)
while kf8start_count > 1:
kf8start_count -= 1
datain_kfrec0 = del_exth(datain_kfrec0, 116)
# update the EXTH 125 KF8 Count of Images/Fonts/Resources
datain_kfrec0 = write_exth(datain_kfrec0, 125, struct.pack(b'>L', lastimage - firstimage + 1))
# need to reset flags stored in 0x80-0x83
# old mobi with exth: 0x50, mobi7 part with exth: 0x1850, mobi8 part with exth: 0x1050
# standalone mobi8 with exth: 0x0050
# Bit Flags
# 0x1000 = Bit 12 indicates if embedded fonts are used or not
# 0x0800 = means this Header points to *shared* images/resource/fonts ??
# 0x0080 = unknown new flag, why is this now being set by Kindlegen 2.8?
# 0x0040 = exth exists
# 0x0010 = Not sure but this is always set so far
fval, = struct.unpack_from('>L', datain_kfrec0, 0x80)
fval = fval & 0x1FFF
fval |= 0x0800
datain_kfrec0 = datain_kfrec0[:0x80] + struct.pack(b'>L', fval) + datain_kfrec0[0x84:]
# properly update other index pointers that have been shifted by the insertion of images
ofs_list = [(kf8_fdst_index, b'L'), (fcis_index, b'L'), (flis_index, b'L'), (datp_index, b'L'), (hufftbloff, b'L')]
for ofs, sz in ofs_list:
n = getint(datain_kfrec0, ofs, sz)
if n != 0xffffffff:
datain_kfrec0 = writeint(datain_kfrec0, ofs, n + lastimage - firstimage + 1, sz)
exth_cover = read_exth(datain_kfrec0, exth_cover_offset)
if len(exth_cover) > 0:
cover_index, = struct.unpack_from('>L', exth_cover[0], 0)
cover_index += target
else:
cover_index = 0xffffffff
exth_thumb = read_exth(datain_kfrec0, exth_thumb_offset)
if len(exth_thumb) > 0:
thumb_index, = struct.unpack_from('>L', exth_thumb[0], 0)
thumb_index += target
else:
thumb_index = 0xffffffff
if cover_index != 0xffffffff:
if thumb_index != 0xffffffff:
# make sure embedded thumbnail has the right size
cover_image = readsection(self.result_file8, cover_index)
thumb = BytesIO()
im = Image.open(BytesIO(cover_image))
im.thumbnail((330, 470), Image.ANTIALIAS)
im.save(thumb, format=im.format)
self.result_file8 = writesection(self.result_file8, thumb_index, thumb.getvalue())
else:
# if nothing works - fall back to the old trick, set thumbnail to the cover image
datain_kfrec0 = add_exth(datain_kfrec0, exth_thumb_offset, exth_cover[0])
thumb_index = cover_index
exth = read_exth(datain_kfrec0, exth_thumbnail_uri)
if len(exth) > 0:
datain_kfrec0 = del_exth(datain_kfrec0, exth_thumbnail_uri)
datain_kfrec0 = add_exth(datain_kfrec0, exth_thumbnail_uri, bytes('kindle:embed:%s' % (to_base(thumb_index - target, base=32, min_num_digits=4)), 'ascii'))
if remove_personal_label:
datain_kfrec0 = add_exth(datain_kfrec0, exth_cdetype, b"EBOK");
else:
datain_kfrec0 = add_exth(datain_kfrec0, exth_cdetype, b"PDOC");
# exth = read_exth(datain_kfrec0, exth_asin)
# if len(exth) == 0:
# datain_kfrec0 = add_exth(datain_kfrec0, exth_asin, bytes(to_base(document_id.int, base=32, min_num_digits=10), 'ascii'))
exth = read_exth(datain_kfrec0, exth_cdecontentkey)
if len(exth) == 0:
datain_kfrec0 = add_exth(datain_kfrec0, exth_cdecontentkey, bytes(to_base(document_id.int, base=32, min_num_digits=10), 'ascii'))
self.result_file8 = writesection(self.result_file8, 0, datain_kfrec0)
# mobi8 finished
    def getResult(self):
        """Return ``self.result_file`` (the full processed file data)."""
        return self.result_file
    def getResult8(self):
        """Return ``self.result_file8`` (the KF8 part of the result)."""
        return self.result_file8
    def getResult7(self):
        """Return ``self.result_file7`` (the MOBI7 part of the result)."""
        return self.result_file7
class mobi_read:
    """Read-only inspection of a MOBI (optionally MOBI7/KF8 combo) file.

    Extracts the ACR (sanitized palm database name), ASIN, cdetype,
    cdecontentkey, an optional PAGE map record and a thumbnail image
    fitted to ``width`` x ``height``.
    """
    def __init__(self, infile, width=330, height=470, stretch=False):
        self.asin = ''
        self.cdetype = 'PDOC'
        self.cdecontentkey = ''
        self.acr = ''
        self.thumbnail = None
        self.pagedata = b''
        datain = b''
        with open(pathof(infile), 'rb') as f:
            # first 32 bytes hold the NUL-padded database name; keep only
            # filesystem-safe characters for the ACR
            self.acr = re.sub('[^-A-Za-z0-9 ]+', '_', f.read(32).replace(b'\x00', b'').decode('latin-1'))
            f.seek(0)
            datain = f.read()
        datain_rec0 = readsection(datain, 0)
        # EXTH 121 appears to hold the KF8 boundary record index of a combo file
        exth121 = read_exth(datain_rec0, 121)
        self.combo = True
        if len(exth121) == 0:
            self.combo = False
        else:
            # only pay attention to first exth121
            # (there should only be one)
            datain_kf8, = struct.unpack_from(b'>L', exth121[0], 0)
            if datain_kf8 == 0xffffffff:
                self.combo = False
        # Look for PageMap
        srcs = getint(datain_rec0, first_non_text)
        num_srcs = getint(datain, number_of_pdb_records, b'H')
        if srcs != 0xffffffff and num_srcs > 0:
            for i in range(srcs, srcs + num_srcs):
                data = readsection(datain, i)
                if data[0:4] == b"PAGE":
                    self.pagedata = data
        exth = read_exth(datain_rec0, exth_asin)
        if len(exth) > 0:
            self.asin = exth[0].decode("ascii")
        exth = read_exth(datain_rec0, exth_cdetype)
        if len(exth) > 0:
            self.cdetype = exth[0].decode("ascii")
        exth = read_exth(datain_rec0, exth_cdecontentkey)
        if len(exth) > 0:
            self.cdecontentkey = exth[0].decode("ascii")
        firstimage = getint(datain_rec0, first_resc_record)
        # cover/thumb EXTH values are offsets relative to the first image record
        exth_cover = read_exth(datain_rec0, exth_cover_offset)
        if len(exth_cover) > 0:
            cover_index, = struct.unpack_from('>L', exth_cover[0], 0)
            cover_index += firstimage
        else:
            cover_index = 0xffffffff
        exth_thumb = read_exth(datain_rec0, exth_thumb_offset)
        if len(exth_thumb) > 0:
            thumb_index, = struct.unpack_from('>L', exth_thumb[0], 0)
            thumb_index += firstimage
        else:
            thumb_index = 0xffffffff
        if cover_index != 0xffffffff:
            w, h = 0, 0
            if thumb_index != 0xffffffff:
                # prefer the embedded thumbnail when one exists
                image = readsection(datain, thumb_index)
                self.thumbnail = Image.open(BytesIO(image))
                w, h = self.thumbnail.size
            if (w < width and h < height) or stretch:
                # thumbnail missing or too small: derive one from the cover
                image = readsection(datain, cover_index)
                self.thumbnail = Image.open(BytesIO(image))
                if stretch:
                    self.thumbnail = self.thumbnail.resize((width, height), Image.LANCZOS)
                else:
                    self.thumbnail.thumbnail((width, height), Image.LANCZOS)
        if self.combo:
            # always try to use information from KF8 part
            datain_kfrec0 = readsection(datain, datain_kf8)
            exth = read_exth(datain_kfrec0, exth_asin)
            if len(exth) > 0:
                self.asin = exth[0].decode("ascii")
            exth = read_exth(datain_kfrec0, exth_cdetype)
            if len(exth) > 0:
                self.cdetype = exth[0].decode("ascii")
            exth = read_exth(datain_kfrec0, exth_cdecontentkey)
            if len(exth) > 0:
                self.cdecontentkey = exth[0].decode("ascii")
    def getACR(self):
        """Return the sanitized palm database name (ACR)."""
        return self.acr
    def getASIN(self):
        """Return the ASIN found in EXTH, or '' when absent."""
        return self.asin
    def getCdeType(self):
        """Return the cdetype found in EXTH (defaults to 'PDOC')."""
        return self.cdetype
    def getCdeContentKey(self):
        """Return the cdecontentkey found in EXTH, or '' when absent."""
        return self.cdecontentkey
    def getPageData(self):
        """Return the raw PAGE record found, or b'' when none exists."""
        return self.pagedata
    def getThumbnail(self):
        """Return the PIL thumbnail image, or None when no cover exists."""
        return self.thumbnail
| rupor-github/fb2mobi | modules/mobi_split.py | Python | mit | 30,736 |
# Example mathlocal.py: import a single name from the math module.
from math import sin # sin is imported as local
# NOTE: Python 2 print statement; under Python 3 this would be print(sin(0.5)).
print sin(0.5)
| csparkresearch/eyes-online | app/static/scripts/Maths/mathlocal.py | Python | gpl-3.0 | 90 |
"""
@file
@brief Data about timeseries.
"""
from datetime import datetime, timedelta
import numpy
def generate_sells(duration=730, end=None,
                   week_coef=None, month_coef=None,
                   trend=1.1):
    """
    Generates a dummy daily sales series with weekly and monthly
    seasonality and a logistic upward trend.

    @param      duration    number of days before *end* to generate
    @param      end         last date of the series (defaults to now)
    @param      week_coef   weight per weekday (Mon..Sun)
    @param      month_coef  weight per month (Jan..Dec), normalized to sum to 1
    @param      trend       steepness of the logistic trend factor
    @return                 list of ``dict(date=..., value=...)`` observations
    """
    if week_coef is None:
        week_coef = numpy.array([0.1, 0.12, 0.12, 0.15, 0.20, 0., 0.])
        # Saturday takes whatever weight keeps the weekday profile summing to 1.
        week_coef[5] = 1. - week_coef.sum()
    if month_coef is None:
        month_coef = numpy.array([0.8, 1, 1, 1, 1, 1,
                                  0.8, 0.6, 1, 1, 1, 1.5])
        month_coef /= month_coef.sum()
    if end is None:
        end = datetime.now()
    # one noise factor and one trend factor per generated day
    noise = (numpy.random.randn(duration + 1) * 0.1) + 1
    trend_curve = (1 + numpy.exp(- numpy.arange(duration + 1) / duration * trend)) ** (-1)
    one_day = timedelta(1)
    current = end - timedelta(duration)
    observations = []
    for pos in range(duration + 1):
        value = noise[pos] * week_coef[current.weekday()] * month_coef[current.month - 1] * trend_curve[pos]
        observations.append(dict(date=current, value=value))
        current += one_day
    return observations
| sdpython/ensae_teaching_cs | src/ensae_teaching_cs/data/data_ts.py | Python | mit | 1,197 |
# -*- coding: utf-8 -*-
"""
Tests for old bugs
~~~~~~~~~~~~~~~~~~
Unittest that test situations caused by various older bugs.
:copyright: (c) 2009 by the Jinja Team.
:license: BSD.
"""
from jinja2 import Environment, DictLoader, TemplateSyntaxError
env = Environment()
from nose import SkipTest
from nose.tools import assert_raises
def test_keyword_folding():
    """Keyword arguments given to a filter call must be folded correctly."""
    environment = Environment()
    environment.filters['testing'] = lambda value, some: value + some
    template = environment.from_string("{{ 'test'|testing(some='stuff') }}")
    assert template.render() == 'teststuff'
def test_extends_output_bugs():
    """Regression test: top-level output around blocks is only suppressed
    when the ``{% extends %}`` tag actually executed at runtime."""
    env = Environment(loader=DictLoader({
        'parent.html': '(({% block title %}{% endblock %}))'
    }))
    t = env.from_string('{% if expr %}{% extends "parent.html" %}{% endif %}'
                        '[[{% block title %}title{% endblock %}]]'
                        '{% for item in [1, 2, 3] %}({{ item }}){% endfor %}')
    assert t.render(expr=False) == '[[title]](1)(2)(3)'
    assert t.render(expr=True) == '((title))'
def test_urlize_filter_escaping():
    """The urlize filter must HTML-escape markup characters inside URLs."""
    template = env.from_string('{{ "http://www.example.org/<foo"|urlize }}')
    expected = '<a href="http://www.example.org/&lt;foo">http://www.example.org/&lt;foo</a>'
    assert template.render() == expected
def test_loop_call_loop():
    """Regression test: the special ``loop`` variable must refer to the
    innermost loop even when the body is rendered via ``{% call %}``."""
    tmpl = env.from_string('''
    {% macro test() %}
        {{ caller() }}
    {% endmacro %}
    {% for num1 in range(5) %}
    {% call test() %}
        {% for num2 in range(10) %}
            {{ loop.index }}
        {% endfor %}
    {% endcall %}
    {% endfor %}
    ''')
    # NOTE: Python 2 comparison (list-returning map, unicode builtin)
    assert tmpl.render().split() == map(unicode, range(1, 11)) * 5
def test_weird_inline_comment():
    """An inline comment ending a line-statement must raise a syntax error
    rather than silently swallowing the rest of the statement."""
    env = Environment(line_statement_prefix='%')
    assert_raises(TemplateSyntaxError, env.from_string,
                  '% for item in seq {# missing #}\n...% endfor')
def test_old_macro_loop_scoping_bug():
    """A macro defined after a for-loop must not leak into the loop's scope."""
    template = env.from_string('{% for i in (1, 2) %}{{ i }}{% endfor %}'
                               '{% macro i() %}3{% endmacro %}{{ i() }}')
    output = template.render()
    assert output == '123'
def test_partial_conditional_assignments():
    """A skipped conditional ``{% set %}`` must not clobber a passed variable."""
    template = env.from_string('{% if b %}{% set a = 42 %}{% endif %}{{ a }}')
    rendered_without_set = template.render(a=23)
    assert rendered_without_set == '23'
    rendered_with_set = template.render(b=True)
    assert rendered_with_set == '42'
def test_stacked_locals_scoping_bug():
    """Regression test: a ``set`` inside nested line-statement loops must
    update the variable in the enclosing loop scope, and chained
    elif/else line statements must dispatch correctly."""
    env = Environment(line_statement_prefix='#')
    t = env.from_string('''\
# for j in [1, 2]:
#   set x = 1
#   for i in [1, 2]:
#     print x
#     if i % 2 == 0:
#       set x = x + 1
#     endif
#   endfor
# endfor
# if a
#   print 'A'
# elif b
#   print 'B'
# elif c == d
#   print 'C'
# else
#   print 'D'
# endif
''')
    assert t.render(a=0, b=False, c=42, d=42.0) == '1111C'
| yesudeep/cmc | app/jinja2/tests/test_old_bugs.py | Python | mit | 2,707 |
'''
Monkey patch setuptools to write faster console_scripts with this format:
import sys
from mymodule import entry_function
sys.exit(entry_function())
This is better.
(c) 2016, Aaron Christianson
http://github.com/ninjaaron/fast-entry_points
'''
from setuptools.command import easy_install
@classmethod
def get_args(cls, dist, header=None):
    """
    Yield write_script() argument tuples for a distribution's
    console_scripts and gui_scripts entry points.
    """
    # the fast script body: a plain import plus sys.exit, with no
    # pkg_resources scan at interpreter start-up
    template = 'import sys\nfrom {0} import {1}\nsys.exit({1}())'
    if header is None:
        header = cls.get_header()
    # NOTE(review): 'spec' is computed but never used below
    spec = str(dist.as_requirement())
    for type_ in 'console', 'gui':
        group = type_ + '_scripts'
        for name, ep in dist.get_entry_map(group).items():
            cls._ensure_safe_name(name)
            script_text = template.format(
                ep.module_name, ep.attrs[0])
            args = cls._get_script_args(type_, name, header, script_text)
            for res in args:
                yield res
# monkey-patch setuptools so every generated console/gui script uses the
# fast template above
easy_install.ScriptWriter.get_args = get_args
def main():
    """Install fastentrypoints.py into one or more target projects.

    For every destination directory named on the command line (default:
    the current directory) this copies this module next to the project's
    setup.py, makes MANIFEST.in ship it, and makes setup.py import it.
    """
    import os
    import re
    import shutil
    import sys
    dests = sys.argv[1:] or ['.']
    # __file__ may point at the compiled .pyc; always copy the .py source.
    # (raw string: '\.' was previously an invalid escape in a normal string)
    filename = re.sub(r'\.pyc$', '.py', __file__)
    for dst in dests:
        shutil.copy(filename, dst)
        manifest_path = os.path.join(dst, 'MANIFEST.in')
        setup_path = os.path.join(dst, 'setup.py')
        # Insert the include statement to MANIFEST.in if not present
        with open(manifest_path, 'a+') as manifest:
            manifest.seek(0)
            manifest_content = manifest.read()
            if 'include fastentrypoints.py' not in manifest_content:
                manifest.write(('\n' if manifest_content else '')
                               + 'include fastentrypoints.py')
        # Insert the import statement to setup.py if not present
        with open(setup_path, 'a+') as setup:
            setup.seek(0)
            setup_content = setup.read()
            if 'import fastentrypoints' not in setup_content:
                setup.seek(0)
                setup.truncate()
                setup.write('import fastentrypoints\n' + setup_content)
    # preserve original behaviour: report the module name when run
    print(__name__)
| pomarec/core | fastentrypoints.py | Python | gpl-3.0 | 2,205 |
import sys
from java.util import Vector
def addTemplate(core):
	"""Register the Dantooine graul mauler lair template (Jython script)."""
	# java.util.Vector of mobile template names that can spawn from this lair
	mobileTemplates = Vector()
	mobileTemplates.add('graul_mauler')
	mobileTemplates.add('graul_mangler')
	# arguments: lair name, mobile templates, 15 (meaning defined by
	# addLairTemplate -- verify), lair model IFF path
	core.spawnService.addLairTemplate('dantooine_graul_mauler_lair_1', mobileTemplates , 15, 'object/tangible/lair/base/poi_all_lair_rocks_large_evil_fire_red.iff')
return | agry/NGECore2 | scripts/mobiles/lairs/dantooine_graul_mauler_lair_2.py | Python | lgpl-3.0 | 340 |
def get_codeset(encoding):
    """Return the codec parameters for one of the chart data encodings.

    ``encoding`` is ``'simple'``, ``'text'`` or ``'extended'``; any other
    name raises KeyError.  Each codeset maps: ``coding`` (alphabet),
    ``max_value`` (largest encodable value), ``char`` (dataset separator),
    ``dchar`` (value separator), ``none`` (placeholder for missing values)
    and ``value`` (callable encoding one number).
    """
    alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'
    extended_alphabet = alphabet + '-.'
    simple = {
        'coding': alphabet,
        'max_value': 61,
        'char': ',',
        'dchar': '',
        'none': '_',
        'value': lambda x: alphabet[x],
    }
    text = {
        'coding': '',
        'max_value': 100,
        'none': '-1',
        'char': '|',
        'dchar': ',',
        'value': lambda x: '%.1f' % float(x),
    }
    # extended encoding packs a value into two base-64 digits
    extended = {
        'coding': extended_alphabet,
        'max_value': 4095,
        'none': '__',
        'dchar': '',
        'char': ',',
        'value': lambda x: '%s%s' % (
            extended_alphabet[int(float(x) / 64)], extended_alphabet[int(x % 64)]),
    }
    return {'simple': simple, 'text': text, 'extended': extended}[encoding]
class Encoder:
    """Data encoder that handles simple,text, and extended encodings
    Based on javascript encoding algorithm and pygooglecharts"""
    def __init__(self, encoding=None, scale=None, series=''):
        # series: optional series index string appended after the code letter
        self.series = series or ''
        if encoding is None:
            encoding = 'text'
        assert(encoding in ('simple','text','extended')),\
            'Unknown encoding: %s'%encoding
        self.encoding = encoding
        self.scale = scale
        self.codeset = get_codeset(encoding)
    def scalevalue(self, value):
        """Return *value* unchanged; scaling is currently disabled.

        NOTE(review): everything after the first ``return`` is dead code
        kept for a future scaling feature ("one day...").
        """
        return value # one day...
        if self.encoding != 'text' and self.scale and \
            isinstance(value, int) or isinstance(value, float):
            if type(self.scale) == type(()):
                lower,upper = self.scale
            else:
                lower,upper = 0,float(self.scale)
            value = int(round(float(value - lower) * \
                self.codeset['max_value'] / upper))
        return min(value, self.codeset['max_value'])
    def encode(self, *args, **kwargs):
        """Encode wrapper for a dataset with maximum value
        Datasets can be one or two dimensional
        Strings are ignored as ordinal encoding"""
        # normalize scalars and bare strings into (lists of) lists first
        if isinstance(args[0], str):
            return self.encode([args[0]],**kwargs)
        elif isinstance(args[0], int) or isinstance(args[0], float):
            return self.encode([[args[0]]],**kwargs)
        if len(args)>1:
            dataset = args
        else:
            dataset = args[0]
        typemap = list(map(type,dataset))
        code = self.encoding[0]
        if type('') in typemap:
            data = ','.join(map(str,dataset))
        elif type([]) in typemap or type(()) in typemap:
            # two-dimensional input: encode each inner sequence separately
            data = self.codeset['char'].join(map(self.encodedata, dataset))
        elif len(dataset) == 1 and hasattr(dataset[0], '__iter__'):
            data = self.encodedata(dataset[0])
        else:
            try:
                data = self.encodedata(dataset)
            except ValueError:
                # NOTE(review): 'unicode' is Python 2-only; under Python 3
                # this fallback raises NameError instead
                data = self.encodedata(','.join(map(unicode,dataset)))
        if not '.' in data and code == 't':
            code = 'e'
        return '%s%s:%s'%(code,self.series,data)
    def encodedata(self, data):
        """Encode one flat sequence of values using this codeset."""
        sub_data = []
        # NOTE(review): enc_size is unused
        enc_size = len(self.codeset['coding'])
        for value in data:
            if value in (None,'None'):
                sub_data.append(self.codeset['none'])
            elif isinstance(value, str):
                sub_data.append(value)
            elif value >= -1:
                try:
                    sub_data.append(self.codeset['value'](self.scalevalue(value)))
                except:
                    raise ValueError('cannot encode value: %s'%value)
        # values below -1 are silently dropped (by design, apparently)
        return self.codeset['dchar'].join(sub_data)
    def decode(self, astr):
        """Decode an encoded string (e.g. ``'s:Af9'``) back into lists of
        numbers; the inverse of :func:`encode`."""
        e = astr[0]
        dec_data = []
        for data in astr[2:].split(self.codeset['char']):
            sub_data = []
            if e == 't':
                sub_data.extend(map(float, data.split(',')))
            elif e == 'e':
                # extended values span two characters; 'flag' skips the
                # second character of each pair
                flag = 0
                index = self.codeset['coding'].index
                for i in range(len(data)):
                    if not flag:
                        this,next = index(data[i]),index(data[i+1])
                        flag = 1
                        sub_data.append((64 * this) + next)
                    else: flag = 0
            elif e == 's':
                sub_data.extend(map(self.codeset['coding'].index, data))
            dec_data.append(sub_data)
| justquick/google-chartwrapper | gchart/encoding.py | Python | bsd-3-clause | 4,463 |
#!/usr/bin/python
input_path = './src/'
output_path = './www/editor.js'
import re, os, time, sys
class CompileError(Exception):
    """Raised when the JavaScript sources cannot be combined
    (duplicate names, missing or circular dependencies)."""
    def __init__(self, text):
        # Delegate to Exception so str(err) yields the message.
        super(CompileError, self).__init__(text)
class Source:
    """One JavaScript source file plus its '#require <...>' dependencies.

    Attributes: ``path`` (as given), ``name`` (basename, used as the
    dependency key), ``code`` (file contents with the '#require' lines
    stripped) and ``dependencies`` (set of required file names).
    """
    def __init__(self, path):
        self.path = path
        self.name = os.path.basename(path)
        # read the whole file and close the handle promptly
        # (the original leaked the file object)
        with open(path, 'r') as handle:
            self.code = handle.read()
        # pick out the names in every '#require <...>' line
        lines = self.code.split('\n')
        require_re = re.compile('^#require <(.*)>$')
        matches = [require_re.match(line) for line in lines]
        self.dependencies = set([match.group(1) for match in matches if match])
        # remove the '#require' lines from code
        self.code = '\n'.join(line for line in lines if require_re.match(line) is None)
def sources():
    """Return the path of every .js file found under input_path."""
    found = []
    for base, folders, files in os.walk(input_path):
        for filename in files:
            if filename.endswith('.js'):
                found.append(os.path.join(base, filename))
    return found
def compile(sources):
    """Combine the given .js files into one script, ordered so every file
    appears after the files it '#require's.

    Raises CompileError on duplicate names, missing dependencies or
    dependency cycles.
    """
    sources = [Source(path) for path in sources]
    # dependency resolution is by file name, so names must be unique
    for source in sources:
        for other in sources:
            if other != source and other.name == source.name:
                raise CompileError('%s and %s both named %s' %
                    (source.path, other.path, source.name))
    # map source name => Source object
    by_name = dict((source.name, source) for source in sources)
    # every '#require <...>' target must exist
    for source in sources:
        for dependency in source.dependencies:
            if dependency not in by_name:
                raise CompileError('could not find dependency %s' % dependency)
    # repeatedly emit a source that needs nothing still unemitted
    ordered = []
    while sources:
        free_source = next(
            (candidate for candidate in sources
             if not any(other.name in candidate.dependencies for other in sources)),
            None)
        # if no source is free, the remaining ones form a cycle
        if free_source is None:
            raise CompileError('circular dependency between ' +
                ' and '.join(s.name for s in sources))
        ordered.append(free_source)
        sources.remove(free_source)
    banner = '\n'.join('// %s' % s.name for s in ordered)
    return banner + '\n\n' + '\n'.join(s.code for s in ordered)
def build():
    """Compile all sources into output_path; on failure write an alert()
    stub so the error surfaces in the browser. (Python 2 syntax.)"""
    try:
        data = compile(sources())
        data = '(function(){\n\n' + data + '})();\n\n'
        open(output_path, 'w').write(data)
        print 'built %s (%u bytes)' % (output_path, len(data))
    except CompileError, e:
        print 'error: ' + str(e)
        open(output_path, 'w').write('alert("%s")' % str(e))
def stat():
    """Return the modification time of every source file."""
    return [os.stat(path).st_mtime for path in sources()]
def monitor():
    """Poll the source tree twice a second and rebuild on any change."""
    snapshot = stat()
    while True:
        time.sleep(0.5)
        current = stat()
        if snapshot != current:
            snapshot = current
            build()
if __name__ == '__main__':
    # always build once; keep watching unless a one-shot release build
    # was requested ('not in' is the idiomatic membership test)
    build()
    if 'release' not in sys.argv:
        monitor()
| superarts/JekyllMetro | games/rapt/editor/build.py | Python | mit | 2,729 |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target from a .gyp file a few subdirectories
deep when the --generator-output= option is used to put the build
configuration files in a separate directory tree.
"""
import TestGyp
# Android doesn't support --generator-output.
test = TestGyp.TestGyp(formats=['!android'])
# the source tree itself must stay untouched; only the build dir is writable
test.writable(test.workpath('src'), False)
test.writable(test.workpath('src/subdir2/deeper/build'), True)
test.run_gyp('deeper.gyp',
             '-Dset_symroot=1',
             '--generator-output=' + test.workpath('gypfiles'),
             chdir='src/subdir2/deeper')
test.build('deeper.gyp', test.ALL, chdir='gypfiles')
# xcode apparently locates the built binary relative to the original gyp
# directory rather than the generator output dir -- hence the switch below
chdir = 'gypfiles'
if test.format == 'xcode':
  chdir = 'src/subdir2/deeper'
test.run_built_executable('deeper',
                          chdir=chdir,
                          stdout="Hello from deeper.c\n")
test.pass_test()
| devcline/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/generator-output/gyptest-subdir2-deep.py | Python | gpl-3.0 | 1,034 |
#!/usr/bin/env python
#
# https://launchpad.net/wxbanker
# transactionlist.py: Copyright 2007-2010 Mike Rooney <mrooney@ubuntu.com>
#
# This file is part of wxBanker.
#
# wxBanker is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wxBanker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with wxBanker. If not, see <http://www.gnu.org/licenses/>.
class TransactionList(list):
    """A list of transactions comparable element-wise via ==.

    NOTE(review): under Python 2, defining __eq__ does not imply __ne__;
    ``a != b`` still uses identity here -- confirm whether that matters
    to callers.
    """
    def __init__(self, items=None):
        # list does not understand items=None apparently.
        if items is None:
            items = []
        list.__init__(self, items)
    def __eq__(self, other):
        """Equal when same length and every element pair compares equal."""
        if not len(self) == len(other):
            return False
        for leftTrans, rightTrans in zip(self, other):
            if not leftTrans == rightTrans:
                return False
return True | mrooney/wxbanker | wxbanker/bankobjects/transactionlist.py | Python | gpl-3.0 | 1,282 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import sys
import warnings
from collections import defaultdict
from collections import deque
from collections import OrderedDict
import attr
import py
import six
from more_itertools import flatten
import _pytest
from _pytest import nodes
from _pytest._code.code import FormattedExcinfo
from _pytest._code.code import TerminalRepr
from _pytest.compat import _format_args
from _pytest.compat import _PytestWrapper
from _pytest.compat import exc_clear
from _pytest.compat import FuncargnamesCompatAttr
from _pytest.compat import get_real_func
from _pytest.compat import get_real_method
from _pytest.compat import getfslineno
from _pytest.compat import getfuncargnames
from _pytest.compat import getimfunc
from _pytest.compat import getlocation
from _pytest.compat import is_generator
from _pytest.compat import isclass
from _pytest.compat import NOTSET
from _pytest.compat import safe_getattr
from _pytest.deprecated import FIXTURE_FUNCTION_CALL
from _pytest.deprecated import FIXTURE_NAMED_REQUEST
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
@attr.s(frozen=True)
class PseudoFixtureDef(object):
    # Minimal stand-in for a FixtureDef, used when "request" itself is
    # resolved as a fixture; only the attributes the resolution code
    # reads are provided.
    cached_result = attr.ib()
    scope = attr.ib()
def pytest_sessionstart(session):
    """Fill the scope-name -> node-class registry and attach the session's
    FixtureManager (imports done here, apparently to avoid import cycles)."""
    import _pytest.python
    import _pytest.nodes
    scopename2class.update(
        {
            "package": _pytest.python.Package,
            "class": _pytest.python.Class,
            "module": _pytest.python.Module,
            "function": _pytest.nodes.Item,
            "session": _pytest.main.Session,
        }
    )
    session._fixturemanager = FixtureManager(session)
# scope name -> collector node class; populated in pytest_sessionstart.
scopename2class = {}
# For each scope, the FixtureRequest attributes that may legally be
# accessed; each narrower scope extends the tuple of the broader one.
scope2props = dict(session=())
scope2props["package"] = ("fspath",)
scope2props["module"] = ("fspath", "module")
scope2props["class"] = scope2props["module"] + ("cls",)
scope2props["instance"] = scope2props["class"] + ("instance",)
scope2props["function"] = scope2props["instance"] + ("function", "keywords")
def scopeproperty(name=None, doc=None):
    """Build a property that raises AttributeError when read from a
    request whose scope does not provide the wrapped attribute."""
    def decorate(func):
        attribute_name = name or func.__name__
        def getter(self):
            if func.__name__ in scope2props[self.scope]:
                return func(self)
            raise AttributeError(
                "%s not available in %s-scoped context" % (attribute_name, self.scope)
            )
        return property(getter, None, None, func.__doc__)
    return decorate
def get_scope_package(node, fixturedef):
    """Walk up from *node* to the Package collector owning *fixturedef*,
    falling back to the session when no matching package is found."""
    import pytest
    package_node_id = "%s/%s" % (fixturedef.baseid, "__init__.py")
    current = node
    while current and not (
        type(current) is pytest.Package and package_node_id == current.nodeid
    ):
        current = current.parent
    if current is None:
        return node.session
    return current
def get_scope_node(node, scope):
    """Return the closest ancestor of *node* whose class implements *scope*."""
    node_class = scopename2class.get(scope)
    if node_class is None:
        raise ValueError("unknown scope")
    return node.getparent(node_class)
def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
    """Turn direct funcarg parametrization into artificial FixtureDefs so
    that later test execution can rely on a uniform FixtureDef structure."""
    # this function will transform all collected calls to a functions
    # if they use direct funcargs (i.e. direct parametrization)
    # because we want later test execution to be able to rely on
    # an existing FixtureDef structure for all arguments.
    # XXX we can probably avoid this algorithm if we modify CallSpec2
    # to directly care for creating the fixturedefs within its methods.
    if not metafunc._calls[0].funcargs:
        return # this function call does not have direct parametrization
    # collect funcargs of all callspecs into a list of values
    arg2params = {}
    arg2scope = {}
    for callspec in metafunc._calls:
        for argname, argvalue in callspec.funcargs.items():
            assert argname not in callspec.params
            callspec.params[argname] = argvalue
            arg2params_list = arg2params.setdefault(argname, [])
            # each callspec remembers which position in the value list it uses
            callspec.indices[argname] = len(arg2params_list)
            arg2params_list.append(argvalue)
            if argname not in arg2scope:
                scopenum = callspec._arg2scopenum.get(argname, scopenum_function)
                arg2scope[argname] = scopes[scopenum]
        callspec.funcargs.clear()
    # register artificial FixtureDef's so that later at test execution
    # time we can rely on a proper FixtureDef to exist for fixture setup.
    arg2fixturedefs = metafunc._arg2fixturedefs
    for argname, valuelist in arg2params.items():
        # if we have a scope that is higher than function we need
        # to make sure we only ever create an according fixturedef on
        # a per-scope basis. We thus store and cache the fixturedef on the
        # node related to the scope.
        scope = arg2scope[argname]
        node = None
        if scope != "function":
            node = get_scope_node(collector, scope)
            if node is None:
                assert scope == "class" and isinstance(collector, _pytest.python.Module)
                # use module-level collector for class-scope (for now)
                node = collector
        if node and argname in node._name2pseudofixturedef:
            arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
        else:
            fixturedef = FixtureDef(
                fixturemanager,
                "",
                argname,
                get_direct_param_fixture_func,
                arg2scope[argname],
                valuelist,
                False,
                False,
            )
            arg2fixturedefs[argname] = [fixturedef]
            if node is not None:
                node._name2pseudofixturedef[argname] = fixturedef
def getfixturemarker(obj):
    """Return the fixture marker attached to *obj*, or None when it is
    missing or when attribute access itself raises a test-outcome error."""
    try:
        return getattr(obj, "_pytestfixturefunction", None)
    except TEST_OUTCOME:
        # objects such as flask's "request" proxy raise on any attribute
        # access; they are certainly not fixture functions
        return None
def get_parametrized_fixture_keys(item, scopenum):
    """ return list of keys for all parametrized arguments which match
    the specified scope. """
    # each yielded key identifies one parametrized fixture instance within
    # its scope (session/package/module/class get increasingly specific keys)
    assert scopenum < scopenum_function # function
    try:
        cs = item.callspec
    except AttributeError:
        pass
    else:
        # cs.indices.items() is random order of argnames. Need to
        # sort this so that different calls to
        # get_parametrized_fixture_keys will be deterministic.
        for argname, param_index in sorted(cs.indices.items()):
            if cs._arg2scopenum[argname] != scopenum:
                continue
            if scopenum == 0: # session
                key = (argname, param_index)
            elif scopenum == 1: # package
                key = (argname, param_index, item.fspath.dirpath())
            elif scopenum == 2: # module
                key = (argname, param_index, item.fspath)
            elif scopenum == 3: # class
                key = (argname, param_index, item.fspath, item.cls)
            yield key
# algorithm for sorting on a per-parametrized resource setup basis
# it is called for scopenum==0 (session) first and performs sorting
# down to the lower scopes such as to minimize number of "high scope"
# setups and teardowns
def reorder_items(items):
    """Reorder test items so items sharing a parametrized high-scope
    fixture run together, minimizing expensive setups/teardowns."""
    argkeys_cache = {}
    items_by_argkey = {}
    for scopenum in range(0, scopenum_function):
        # per scope: item -> its fixture keys, and key -> queue of items
        argkeys_cache[scopenum] = d = {}
        items_by_argkey[scopenum] = item_d = defaultdict(deque)
        for item in items:
            keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum))
            if keys:
                d[item] = keys
                for key in keys:
                    item_d[key].append(item)
    # OrderedDict used as an ordered set of items
    items = OrderedDict.fromkeys(items)
    return list(reorder_items_atscope(items, argkeys_cache, items_by_argkey, 0))
def fix_cache_order(item, argkeys_cache, items_by_argkey):
    """Re-register *item* at the front of every per-argkey queue after
    reorder_items_atscope pulled it forward in the processing deque."""
    for scopenum in range(0, scopenum_function):
        for key in argkeys_cache[scopenum].get(item, []):
            items_by_argkey[scopenum][key].appendleft(item)
def reorder_items_atscope(items, argkeys_cache, items_by_argkey, scopenum):
    """Group *items* (an OrderedDict used as an ordered set) so that items
    sharing a parametrized fixture key of this scope become adjacent, then
    recurse into the next narrower scope for the key-less groups."""
    if scopenum >= scopenum_function or len(items) < 3:
        return items
    ignore = set()
    items_deque = deque(items)
    items_done = OrderedDict()
    scoped_items_by_argkey = items_by_argkey[scopenum]
    scoped_argkeys_cache = argkeys_cache[scopenum]
    while items_deque:
        no_argkey_group = OrderedDict()
        slicing_argkey = None
        while items_deque:
            item = items_deque.popleft()
            if item in items_done or item in no_argkey_group:
                continue
            # keys of this scope not yet fully processed for this item
            argkeys = OrderedDict.fromkeys(
                k for k in scoped_argkeys_cache.get(item, []) if k not in ignore
            )
            if not argkeys:
                no_argkey_group[item] = None
            else:
                slicing_argkey, _ = argkeys.popitem()
                # we don't have to remove relevant items from later in the deque because they'll just be ignored
                matching_items = [
                    i for i in scoped_items_by_argkey[slicing_argkey] if i in items
                ]
                for i in reversed(matching_items):
                    fix_cache_order(i, argkeys_cache, items_by_argkey)
                    items_deque.appendleft(i)
                break
        if no_argkey_group:
            no_argkey_group = reorder_items_atscope(
                no_argkey_group, argkeys_cache, items_by_argkey, scopenum + 1
            )
            for item in no_argkey_group:
                items_done[item] = None
        ignore.add(slicing_argkey)
    return items_done
def fillfixtures(function):
    """ fill missing funcargs for a test function. """
    try:
        request = function._request
    except AttributeError:
        # XXX this special code path is only expected to execute
        # with the oejskit plugin. It uses classes with funcargs
        # and we thus have to work a bit to allow this.
        fm = function.session._fixturemanager
        fi = fm.getfixtureinfo(function.parent, function.obj, None)
        function._fixtureinfo = fi
        request = function._request = FixtureRequest(function)
        request._fillfixtures()
        # prune out funcargs for jstests
        newfuncargs = {}
        for name in fi.argnames:
            newfuncargs[name] = function.funcargs[name]
        function.funcargs = newfuncargs
    else:
        request._fillfixtures()
def get_direct_param_fixture_func(request):
    """Fixture function used for direct parametrization: the fixture's
    value is simply the parametrized value itself."""
    param_value = request.param
    return param_value
@attr.s(slots=True)
class FuncFixtureInfo(object):
    """Fixture information for one test function, computed at collection
    time; the dependency closure may later be pruned when direct
    parametrization shadows fixtures."""
    # original function argument names
    argnames = attr.ib(type=tuple)
    # argnames that function immediately requires. These include argnames +
    # fixture names specified via usefixtures and via autouse=True in fixture
    # definitions.
    initialnames = attr.ib(type=tuple)
    names_closure = attr.ib() # List[str]
    name2fixturedefs = attr.ib() # List[str, List[FixtureDef]]
    def prune_dependency_tree(self):
        """Recompute names_closure from initialnames and name2fixturedefs
        Can only reduce names_closure, which means that the new closure will
        always be a subset of the old one. The order is preserved.
        This method is needed because direct parametrization may shadow some
        of the fixtures that were included in the originally built dependency
        tree. In this way the dependency tree can get pruned, and the closure
        of argnames may get reduced.
        """
        closure = set()
        working_set = set(self.initialnames)
        while working_set:
            argname = working_set.pop()
            # argname may be smth not included in the original names_closure,
            # in which case we ignore it. This currently happens with pseudo
            # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'.
            # So they introduce the new dependency 'request' which might have
            # been missing in the original tree (closure).
            if argname not in closure and argname in self.names_closure:
                closure.add(argname)
                if argname in self.name2fixturedefs:
                    working_set.update(self.name2fixturedefs[argname][-1].argnames)
        # keep the original relative order of the surviving names
        self.names_closure[:] = sorted(closure, key=self.names_closure.index)
class FixtureRequest(FuncargnamesCompatAttr):
    """ A request for a fixture from a test or fixture function.
    A request object gives access to the requesting test context
    and has an optional ``param`` attribute in case
    the fixture is parametrized indirectly.
    """
    def __init__(self, pyfuncitem):
        self._pyfuncitem = pyfuncitem
        #: fixture for which this request is being performed
        self.fixturename = None
        #: Scope string, one of "function", "class", "module", "session"
        self.scope = "function"
        self._fixture_defs = {} # argname -> FixtureDef
        fixtureinfo = pyfuncitem._fixtureinfo
        self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
        # per-argname index into the fixturedef list; see _getnextfixturedef
        self._arg2index = {}
        self._fixturemanager = pyfuncitem.session._fixturemanager
    @property
    def fixturenames(self):
        """names of all active fixtures in this request"""
        result = list(self._pyfuncitem._fixtureinfo.names_closure)
        # include fixtures resolved dynamically via getfixturevalue
        result.extend(set(self._fixture_defs).difference(result))
        return result
    @property
    def node(self):
        """ underlying collection node (depends on current request scope)"""
        return self._getscopeitem(self.scope)
    def _getnextfixturedef(self, argname):
        """Return the next (not yet consumed) FixtureDef for *argname*,
        or raise FixtureLookupError when none is left."""
        fixturedefs = self._arg2fixturedefs.get(argname, None)
        if fixturedefs is None:
            # we arrive here because of a dynamic call to
            # getfixturevalue(argname) usage which was naturally
            # not known at parsing/collection time
            parentid = self._pyfuncitem.parent.nodeid
            fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)
            self._arg2fixturedefs[argname] = fixturedefs
        # fixturedefs list is immutable so we maintain a decreasing index
        index = self._arg2index.get(argname, 0) - 1
        if fixturedefs is None or (-index > len(fixturedefs)):
            raise FixtureLookupError(argname, self)
        self._arg2index[argname] = index
        return fixturedefs[index]
    @property
    def config(self):
        """ the pytest config object associated with this request. """
        return self._pyfuncitem.config
    @scopeproperty()
    def function(self):
        """ test function object if the request has a per-function scope. """
        return self._pyfuncitem.obj
    @scopeproperty("class")
    def cls(self):
        """ class (can be None) where the test function was collected. """
        clscol = self._pyfuncitem.getparent(_pytest.python.Class)
        if clscol:
            return clscol.obj
    @property
    def instance(self):
        """ instance (can be None) on which test function was collected. """
        # unittest support hack, see _pytest.unittest.TestCaseFunction
        try:
            return self._pyfuncitem._testcase
        except AttributeError:
            function = getattr(self, "function", None)
            return getattr(function, "__self__", None)
    @scopeproperty()
    def module(self):
        """ python module object where the test function was collected. """
        return self._pyfuncitem.getparent(_pytest.python.Module).obj
    @scopeproperty()
    def fspath(self):
        """ the file system path of the test module which collected this test. """
        return self._pyfuncitem.fspath
    @property
    def keywords(self):
        """ keywords/markers dictionary for the underlying node. """
        return self.node.keywords
    @property
    def session(self):
        """ pytest session object. """
        return self._pyfuncitem.session
    def addfinalizer(self, finalizer):
        """ add finalizer/teardown function to be called after the
        last test within the requesting test context finished
        execution. """
        # XXX usually this method is shadowed by fixturedef specific ones
        self._addfinalizer(finalizer, scope=self.scope)
    def _addfinalizer(self, finalizer, scope):
        # register the finalizer on the collection node owning *scope*, so
        # it runs when that scope is torn down
        colitem = self._getscopeitem(scope)
        self._pyfuncitem.session._setupstate.addfinalizer(
            finalizer=finalizer, colitem=colitem
        )
    def applymarker(self, marker):
        """ Apply a marker to a single test function invocation.
        This method is useful if you don't want to have a keyword/marker
        on all function invocations.
        :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
        created by a call to ``pytest.mark.NAME(...)``.
        """
        self.node.add_marker(marker)
    def raiseerror(self, msg):
        """ raise a FixtureLookupError with the given message. """
        raise self._fixturemanager.FixtureLookupError(None, self, msg)
def _fillfixtures(self):
item = self._pyfuncitem
fixturenames = getattr(item, "fixturenames", self.fixturenames)
for argname in fixturenames:
if argname not in item.funcargs:
item.funcargs[argname] = self.getfixturevalue(argname)
def getfixturevalue(self, argname):
    """Dynamically run a named fixture function.

    Declaring fixtures via function argument is recommended where possible.
    But if you can only decide whether to use another fixture at test
    setup time, you may use this function to retrieve it inside a fixture
    or test function body.
    """
    fixturedef = self._get_active_fixturedef(argname)
    # cached_result is a (value, cache_key, exc_info) triple
    value = fixturedef.cached_result[0]
    return value
def getfuncargvalue(self, argname):
    """Deprecated, use getfixturevalue."""
    from _pytest import deprecated

    warnings.warn(deprecated.GETFUNCARGVALUE, stacklevel=2)
    value = self.getfixturevalue(argname)
    return value
def _get_active_fixturedef(self, argname):
    """Return the FixtureDef for *argname*, computing its value on first use.

    Results are memoized in ``self._fixture_defs``.  The name
    ``"request"`` is special-cased: when no real fixture matches, a
    PseudoFixtureDef wrapping this request object itself is returned.
    """
    try:
        return self._fixture_defs[argname]
    except KeyError:
        try:
            fixturedef = self._getnextfixturedef(argname)
        except FixtureLookupError:
            if argname == "request":
                # pseudo cached_result triple: (value, cache_key, exc_info)
                cached_result = (self, [0], None)
                scope = "function"
                return PseudoFixtureDef(cached_result, scope)
            raise
    # remove indent to prevent the python3 exception
    # from leaking into the call
    self._compute_fixture_value(fixturedef)
    self._fixture_defs[argname] = fixturedef
    return fixturedef
def _get_fixturestack(self):
    """Return the chain of FixtureDef objects that led to this request.

    The list is ordered outermost-first (the fixture closest to the test
    function comes last).  A plain function-level request has no
    ``_fixturedef`` attribute, which terminates the walk immediately and
    yields an empty list.
    """
    values = []
    current = self
    # idiomatic ``while True`` instead of ``while 1``; the reverse/return
    # is hoisted out of the loop so the control flow reads top to bottom
    while True:
        fixturedef = getattr(current, "_fixturedef", None)
        if fixturedef is None:
            break
        values.append(fixturedef)
        current = current._parent_request
    values.reverse()
    return values
def _compute_fixture_value(self, fixturedef):
    """
    Creates a SubRequest based on "self" and calls the execute method of the given fixturedef object. This will
    force the FixtureDef object to throw away any previous results and compute a new fixture value, which
    will be stored into the FixtureDef object itself.

    :param FixtureDef fixturedef:
    """
    # prepare a subrequest object before calling fixture function
    # (latter managed by fixturedef)
    argname = fixturedef.argname
    funcitem = self._pyfuncitem
    scope = fixturedef.scope
    try:
        # is this fixture parametrized for the current test invocation?
        param = funcitem.callspec.getparam(argname)
    except (AttributeError, ValueError):
        # not parametrized here: either no callspec or argname is absent
        param = NOTSET
        param_index = 0
        has_params = fixturedef.params is not None
        fixtures_not_supported = getattr(funcitem, "nofuncargs", False)
        if has_params and fixtures_not_supported:
            msg = (
                "{name} does not support fixtures, maybe unittest.TestCase subclass?\n"
                "Node id: {nodeid}\n"
                "Function type: {typename}"
            ).format(
                name=funcitem.name,
                nodeid=funcitem.nodeid,
                typename=type(funcitem).__name__,
            )
            fail(msg, pytrace=False)
        if has_params:
            # A parametrized fixture was requested but no parameter is set
            # up for this test; report the requesting source location.
            # NOTE(review): stack()[3] assumes a fixed call depth through
            # _get_active_fixturedef/getfixturevalue -- fragile, verify
            # when changing that call chain.
            frame = inspect.stack()[3]
            frameinfo = inspect.getframeinfo(frame[0])
            source_path = frameinfo.filename
            source_lineno = frameinfo.lineno
            source_path = py.path.local(source_path)
            if source_path.relto(funcitem.config.rootdir):
                # show the path relative to rootdir when possible
                source_path = source_path.relto(funcitem.config.rootdir)
            msg = (
                "The requested fixture has no parameter defined for test:\n"
                "    {}\n\n"
                "Requested fixture '{}' defined in:\n{}"
                "\n\nRequested here:\n{}:{}".format(
                    funcitem.nodeid,
                    fixturedef.argname,
                    getlocation(fixturedef.func, funcitem.config.rootdir),
                    source_path,
                    source_lineno,
                )
            )
            fail(msg, pytrace=False)
    else:
        param_index = funcitem.callspec.indices[argname]
        # if a parametrize invocation set a scope it will override
        # the static scope defined with the fixture function
        paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
        if paramscopenum is not None:
            scope = scopes[paramscopenum]

    subrequest = SubRequest(self, scope, param, param_index, fixturedef)

    # check if a higher-level scoped fixture accesses a lower level one
    subrequest._check_scope(argname, self.scope, scope)

    # clear sys.exc_info before invoking the fixture (python bug?)
    # if it's not explicitly cleared it will leak into the call
    exc_clear()
    try:
        # call the fixture function
        fixturedef.execute(request=subrequest)
    finally:
        # if fixture function failed it might have registered finalizers
        self.session._setupstate.addfinalizer(
            functools.partial(fixturedef.finish, request=subrequest),
            subrequest.node,
        )
def _check_scope(self, argname, invoking_scope, requested_scope):
    """Fail the current test if a broader-scoped context accesses a
    narrower-scoped fixture.

    :param argname: name of the fixture being requested.
    :param invoking_scope: scope of the requesting fixture/test context.
    :param requested_scope: scope of the fixture being accessed.
    """
    if argname == "request":
        # the request object itself is accessible from any scope
        return
    if scopemismatch(invoking_scope, requested_scope):
        # try to report something helpful
        lines = self._factorytraceback()
        # fix: dropped the redundant doubled parentheses around the
        # %-format argument tuple
        fail(
            "ScopeMismatch: You tried to access the %r scoped "
            "fixture %r with a %r scoped request object, "
            "involved factories\n%s"
            % (requested_scope, argname, invoking_scope, "\n".join(lines)),
            pytrace=False,
        )
def _factorytraceback(self):
    """Build one traceback-style line per fixture factory in the stack."""
    lines = []
    for fixturedef in self._get_fixturestack():
        factory = fixturedef.func
        fs, lineno = getfslineno(factory)
        location = self._pyfuncitem.session.fspath.bestrelpath(fs)
        signature = _format_args(factory)
        lines.append(
            "%s:%d: def %s%s" % (location, lineno, factory.__name__, signature)
        )
    return lines
def _getscopeitem(self, scope):
    """Return the collection node corresponding to *scope*."""
    if scope == "function":
        # this might also be a non-function Item despite its attribute name
        return self._pyfuncitem
    if scope == "package":
        node = get_scope_package(self._pyfuncitem, self._fixturedef)
    else:
        node = get_scope_node(self._pyfuncitem, scope)
        if node is None and scope == "class":
            # fallback to function item itself
            node = self._pyfuncitem
    assert node, 'Could not obtain a node for scope "{}" for function {!r}'.format(
        scope, self._pyfuncitem
    )
    return node
def __repr__(self):
    """Debug representation naming the underlying collection node."""
    return "<FixtureRequest for {!r}>".format(self.node)
class SubRequest(FixtureRequest):
    """A sub request for handling getting a fixture from a
    test function/fixture."""

    def __init__(self, request, scope, param, param_index, fixturedef):
        self._parent_request = request
        self.fixturename = fixturedef.argname
        self.param_index = param_index
        self.scope = scope
        self._fixturedef = fixturedef
        if param is not NOTSET:
            self.param = param
        # share resolution state with the parent request
        for attr_name in (
            "_pyfuncitem",
            "_fixture_defs",
            "_arg2fixturedefs",
            "_arg2index",
            "_fixturemanager",
        ):
            setattr(self, attr_name, getattr(request, attr_name))

    def __repr__(self):
        return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)

    def addfinalizer(self, finalizer):
        # finalizers attach to the fixture definition, not the setup state
        self._fixturedef.addfinalizer(finalizer)
# fixture scopes ordered broadest-first; a scope's list index is its "level"
scopes = ["session", "package", "module", "class", "function"]
scopenum_function = scopes.index("function")
def scopemismatch(currentscope, newscope):
    """Return True if *newscope* is narrower than *currentscope*."""
    new_index = scopes.index(newscope)
    current_index = scopes.index(currentscope)
    return new_index > current_index
def scope2index(scope, descr, where=None):
    """Look up the index of ``scope`` and raise a descriptive value error
    if not defined.
    """
    if scope in scopes:
        return scopes.index(scope)
    fail(
        "{} {}got an unexpected scope value '{}'".format(
            descr, "from {} ".format(where) if where else "", scope
        ),
        pytrace=False,
    )
class FixtureLookupError(LookupError):
    """ could not return a requested Fixture (missing or invalid). """

    def __init__(self, argname, request, msg=None):
        # argname may be None when raised via request.raiseerror(msg)
        self.argname = argname
        self.request = request
        self.fixturestack = request._get_fixturestack()
        self.msg = msg

    def formatrepr(self):
        """Build a FixtureLookupErrorRepr pointing at the requesting source.

        Collects a short source excerpt (up to and including the ``def``
        line) for the test function and each involved fixture factory, and
        composes a "not found"/"recursive dependency" message when no
        explicit msg was given.
        """
        tblines = []
        addline = tblines.append
        stack = [self.request._pyfuncitem.obj]
        stack.extend(map(lambda x: x.func, self.fixturestack))
        msg = self.msg
        if msg is not None:
            # the last fixture raise an error, let's present
            # it at the requesting side
            stack = stack[:-1]
        for function in stack:
            fspath, lineno = getfslineno(function)
            try:
                lines, _ = inspect.getsourcelines(get_real_func(function))
            except (IOError, IndexError, TypeError):
                error_msg = "file %s, line %s: source code not available"
                addline(error_msg % (fspath, lineno + 1))
            else:
                addline("file %s, line %s" % (fspath, lineno + 1))
                for i, line in enumerate(lines):
                    line = line.rstrip()
                    addline("  " + line)
                    if line.lstrip().startswith("def"):
                        break
        if msg is None:
            fm = self.request._fixturemanager
            available = set()
            parentid = self.request._pyfuncitem.parent.nodeid
            for name, fixturedefs in fm._arg2fixturedefs.items():
                faclist = list(fm._matchfactories(fixturedefs, parentid))
                if faclist:
                    available.add(name)
            if self.argname in available:
                msg = " recursive dependency involving fixture '{}' detected".format(
                    self.argname
                )
            else:
                msg = "fixture '{}' not found".format(self.argname)
            msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
            msg += "\n use 'pytest --fixtures [testpath]' for help on them."
        # NOTE(review): fspath/lineno come from the loop above -- if stack
        # were ever empty they would be unbound here; presumably stack always
        # contains at least the test function, verify before refactoring.
        return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
class FixtureLookupErrorRepr(TerminalRepr):
    """Terminal rendering of a fixture lookup failure."""

    def __init__(self, filename, firstlineno, tblines, errorstring, argname):
        self.tblines = tblines
        self.errorstring = errorstring
        self.filename = filename
        self.firstlineno = firstlineno
        self.argname = argname

    def toterminal(self, tw):
        # tw.line("FixtureLookupError: %s" %(self.argname), red=True)
        for raw_line in self.tblines:
            tw.line(raw_line.rstrip())
        message_lines = self.errorstring.split("\n")
        if message_lines:
            head, rest = message_lines[0], message_lines[1:]
            tw.line(
                "{} {}".format(FormattedExcinfo.fail_marker, head.strip()),
                red=True,
            )
            for extra in rest:
                tw.line(
                    "{} {}".format(FormattedExcinfo.flow_marker, extra.strip()),
                    red=True,
                )
        tw.line()
        tw.line("%s:%d" % (self.filename, self.firstlineno + 1))
def fail_fixturefunc(fixturefunc, msg):
    """Fail the current test, quoting the fixture function's source."""
    source_file, lineno = getfslineno(fixturefunc)
    location = "{}:{}".format(source_file, lineno + 1)
    source = _pytest._code.Source(fixturefunc)
    message = msg + ":\n\n" + str(source.indent()) + "\n" + location
    fail(message, pytrace=False)
def call_fixture_func(fixturefunc, request, kwargs):
    """Invoke the fixture function and return its value.

    Generator (yield) fixtures are advanced to their first yield; their
    teardown is registered as a finalizer on the request.
    """
    if is_generator(fixturefunc):
        generator = fixturefunc(**kwargs)
        res = next(generator)
        request.addfinalizer(
            functools.partial(_teardown_yield_fixture, fixturefunc, generator)
        )
    else:
        res = fixturefunc(**kwargs)
    return res
def _teardown_yield_fixture(fixturefunc, it):
    """Executes the teardown of a fixture function by advancing the iterator after the
    yield and ensure the iteration ends (if not it means there is more than one yield in the function)"""
    exhausted = False
    try:
        next(it)
    except StopIteration:
        exhausted = True
    if not exhausted:
        fail_fixturefunc(
            fixturefunc, "yield_fixture function has more than one 'yield'"
        )
class FixtureDef(object):
    """ A container for a factory definition. """

    def __init__(
        self,
        fixturemanager,
        baseid,
        argname,
        func,
        scope,
        params,
        unittest=False,
        ids=None,
    ):
        # manager that discovered this fixture; used for session/hook access
        self._fixturemanager = fixturemanager
        # node id of the collector the fixture was defined at ("" = plugin)
        self.baseid = baseid or ""
        self.has_location = baseid is not None
        # the fixture factory function itself
        self.func = func
        self.argname = argname
        self.scope = scope
        self.scopenum = scope2index(
            scope or "function",
            descr="Fixture '{}'".format(func.__name__),
            where=baseid,
        )
        self.params = params
        # names of fixtures this factory itself requests
        self.argnames = getfuncargnames(func, is_method=unittest)
        self.unittest = unittest
        self.ids = ids
        self._finalizers = []

    def addfinalizer(self, finalizer):
        # finalizers are run LIFO by finish()
        self._finalizers.append(finalizer)

    def finish(self, request):
        """Run all registered finalizers (LIFO) and invalidate the cache.

        The first finalizer exception is re-raised after all finalizers
        have run; the cached value is dropped even on failure.
        """
        exceptions = []
        try:
            while self._finalizers:
                try:
                    func = self._finalizers.pop()
                    func()
                except:  # noqa
                    exceptions.append(sys.exc_info())
            if exceptions:
                e = exceptions[0]
                del exceptions  # ensure we don't keep all frames alive because of the traceback
                six.reraise(*e)
        finally:
            hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
            hook.pytest_fixture_post_finalizer(fixturedef=self, request=request)
            # even if finalization fails, we invalidate
            # the cached fixture value and remove
            # all finalizers because they may be bound methods which will
            # keep instances alive
            if hasattr(self, "cached_result"):
                del self.cached_result
            self._finalizers = []

    def execute(self, request):
        """Return the fixture value, reusing the cache when the parametrize
        index matches, otherwise tearing down and recomputing via the
        pytest_fixture_setup hook."""
        # get required arguments and register our own finish()
        # with their finalization
        for argname in self.argnames:
            fixturedef = request._get_active_fixturedef(argname)
            if argname != "request":
                fixturedef.addfinalizer(functools.partial(self.finish, request=request))

        my_cache_key = request.param_index
        cached_result = getattr(self, "cached_result", None)
        if cached_result is not None:
            # cached_result is (value, cache_key, exc_info)
            result, cache_key, err = cached_result
            if my_cache_key == cache_key:
                if err is not None:
                    six.reraise(*err)
                else:
                    return result
            # we have a previous but differently parametrized fixture instance
            # so we need to tear it down before creating a new one
            self.finish(request)
            assert not hasattr(self, "cached_result")

        hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
        return hook.pytest_fixture_setup(fixturedef=self, request=request)

    def __repr__(self):
        return "<FixtureDef argname=%r scope=%r baseid=%r>" % (
            self.argname,
            self.scope,
            self.baseid,
        )
def resolve_fixture_function(fixturedef, request):
    """Gets the actual callable that can be called to obtain the fixture value, dealing with unittest-specific
    instances and bound methods.
    """
    fixturefunc = fixturedef.func
    instance = request.instance
    if fixturedef.unittest:
        if instance is not None:
            # bind the unbound method to the TestCase instance
            fixturefunc = fixturefunc.__get__(instance)
    elif instance is not None:
        # the fixture function needs to be bound to the actual
        # request.instance so that code working with "fixturedef" behaves
        # as expected.
        fixturefunc = getimfunc(fixturedef.func)
        if fixturefunc != fixturedef.func:
            fixturefunc = fixturefunc.__get__(instance)
    return fixturefunc
def pytest_fixture_setup(fixturedef, request):
    """Execution of fixture setup: resolve dependency fixture values, call
    the fixture function, and cache its result (or the raised exception)."""
    kwargs = {}
    for dep_name in fixturedef.argnames:
        dep_def = request._get_active_fixturedef(dep_name)
        dep_value, dep_cache_key, dep_exc = dep_def.cached_result
        request._check_scope(dep_name, request.scope, dep_def.scope)
        kwargs[dep_name] = dep_value

    fixturefunc = resolve_fixture_function(fixturedef, request)
    cache_key = request.param_index
    try:
        value = call_fixture_func(fixturefunc, request, kwargs)
    except TEST_OUTCOME:
        # cache the failure so re-requests re-raise instead of re-running
        fixturedef.cached_result = (None, cache_key, sys.exc_info())
        raise
    fixturedef.cached_result = (value, cache_key, None)
    return value
def _ensure_immutable_ids(ids):
    """Normalize a fixture ``ids`` argument: leave None and callables
    untouched, freeze any iterable into a tuple."""
    if ids is None or callable(ids):
        return ids
    return tuple(ids)
def wrap_function_to_error_out_if_called_directly(function, fixture_marker):
    """Wrap the given fixture function so we can raise an error about it being called directly,
    instead of used as an argument in a test function.
    """
    display_name = fixture_marker.name or function.__name__
    message = FIXTURE_FUNCTION_CALL.format(name=display_name)

    @six.wraps(function)
    def result(*args, **kwargs):
        fail(message, pytrace=False)

    # keep reference to the original function in our own custom attribute so we don't unwrap
    # further than this point and lose useful wrappings like @mock.patch (#3774)
    result.__pytest_wrapped__ = _PytestWrapper(function)

    return result
@attr.s(frozen=True)
class FixtureFunctionMarker(object):
    """Immutable marker attached to fixture functions as their
    ``_pytestfixturefunction`` attribute (attrs-generated)."""

    scope = attr.ib()
    # params frozen to a tuple so the marker stays immutable
    params = attr.ib(converter=attr.converters.optional(tuple))
    autouse = attr.ib(default=False)
    ids = attr.ib(default=None, converter=_ensure_immutable_ids)
    name = attr.ib(default=None)

    def __call__(self, function):
        """Attach this marker to *function*, wrapping it so that calling the
        fixture function directly fails with a helpful message."""
        if isclass(function):
            raise ValueError("class fixtures not supported (maybe in the future)")

        if getattr(function, "_pytestfixturefunction", False):
            raise ValueError(
                "fixture is being applied more than once to the same function"
            )

        function = wrap_function_to_error_out_if_called_directly(function, self)

        name = self.name or function.__name__
        if name == "request":
            # "request" is reserved for the request object itself
            warnings.warn(FIXTURE_NAMED_REQUEST)
        function._pytestfixturefunction = self
        return function
def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
    """Decorator to mark a fixture factory function.

    This decorator can be used, with or without parameters, to define a
    fixture function.

    The name of the fixture function can later be referenced to cause its
    invocation ahead of running tests: test
    modules or classes can use the ``pytest.mark.usefixtures(fixturename)``
    marker.

    Test functions can directly use fixture names as input
    arguments in which case the fixture instance returned from the fixture
    function will be injected.

    Fixtures can provide their values to test functions using ``return`` or ``yield``
    statements. When using ``yield`` the code block after the ``yield`` statement is executed
    as teardown code regardless of the test outcome, and must yield exactly once.

    :arg scope: the scope for which this fixture is shared, one of
        ``"function"`` (default), ``"class"``, ``"module"``,
        ``"package"`` or ``"session"``.
        ``"package"`` is considered **experimental** at this time.

    :arg params: an optional list of parameters which will cause multiple
        invocations of the fixture function and all of the tests
        using it.

    :arg autouse: if True, the fixture func is activated for all tests that
        can see it. If False (the default) then an explicit
        reference is needed to activate the fixture.

    :arg ids: list of string ids each corresponding to the params
        so that they are part of the test id. If no ids are provided
        they will be generated automatically from the params.

    :arg name: the name of the fixture. This defaults to the name of the
        decorated function. If a fixture is used in the same module in
        which it is defined, the function name of the fixture will be
        shadowed by the function arg that requests the fixture; one way
        to resolve this is to name the decorated function
        ``fixture_<fixturename>`` and then use
        ``@pytest.fixture(name='<fixturename>')``.
    """
    # used as ``@pytest.fixture`` without parentheses: scope is the function
    direct_decoration = callable(scope) and params is None and autouse is False
    if direct_decoration:
        marker = FixtureFunctionMarker("function", params, autouse, name=name)
        return marker(scope)
    if params is not None and not isinstance(params, (list, tuple)):
        # freeze generators/iterables so params can be iterated repeatedly
        params = list(params)
    return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None):
    """ (return a) decorator to mark a yield-fixture factory function.

    .. deprecated:: 3.0
        Use :py:func:`pytest.fixture` directly instead.
    """
    kwargs = dict(scope=scope, params=params, autouse=autouse, ids=ids, name=name)
    return fixture(**kwargs)
# marker applied to fixtures defined via the legacy "funcarg" prefix convention
defaultfuncargprefixmarker = fixture()
@fixture(scope="session")
def pytestconfig(request):
    """Session-scoped fixture that returns the :class:`_pytest.config.Config` object.

    Example::

        def test_foo(pytestconfig):
            if pytestconfig.getoption("verbose"):
                ...

    """
    config = request.config
    return config
class FixtureManager(object):
    """
    pytest fixtures definitions and information is stored and managed
    from this class.

    During collection fm.parsefactories() is called multiple times to parse
    fixture function definitions into FixtureDef objects and internal
    data structures.

    During collection of test functions, metafunc-mechanics instantiate
    a FuncFixtureInfo object which is cached per node/func-name.
    This FuncFixtureInfo object is later retrieved by Function nodes
    which themselves offer a fixturenames attribute.

    The FuncFixtureInfo object holds information about fixtures and FixtureDefs
    relevant for a particular function. An initial list of fixtures is
    assembled like this:

    - ini-defined usefixtures
    - autouse-marked fixtures along the collection chain up from the function
    - usefixtures markers at module/class/function level
    - test function funcargs

    Subsequently the funcfixtureinfo.fixturenames attribute is computed
    as the closure of the fixtures needed to setup the initial fixtures,
    i. e. fixtures needed by fixture functions themselves are appended
    to the fixturenames list.

    Upon the test-setup phases all fixturenames are instantiated, retrieved
    by a lookup of their FuncFixtureInfo.
    """

    FixtureLookupError = FixtureLookupError
    FixtureLookupErrorRepr = FixtureLookupErrorRepr

    def __init__(self, session):
        self.session = session
        self.config = session.config
        # fixture name -> ordered list of FixtureDefs (later entries shadow)
        self._arg2fixturedefs = {}
        self._holderobjseen = set()
        self._arg2finish = {}
        # (nodeid, autouse-names) pairs; seeded with ini-level usefixtures
        self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
        session.config.pluginmanager.register(self, "funcmanage")

    def getfixtureinfo(self, node, func, cls, funcargs=True):
        """Build the FuncFixtureInfo for *func* collected at *node*."""
        if funcargs and not getattr(node, "nofuncargs", False):
            argnames = getfuncargnames(func, cls=cls)
        else:
            argnames = ()
        usefixtures = flatten(
            mark.args for mark in node.iter_markers(name="usefixtures")
        )
        initialnames = tuple(usefixtures) + argnames
        fm = node.session._fixturemanager
        initialnames, names_closure, arg2fixturedefs = fm.getfixtureclosure(
            initialnames, node
        )
        return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs)

    def pytest_plugin_registered(self, plugin):
        """Parse fixtures defined by a newly registered plugin/conftest."""
        nodeid = None
        try:
            p = py.path.local(plugin.__file__).realpath()
        except AttributeError:
            # plugin has no __file__ (e.g. a class instance); no location
            pass
        else:
            # construct the base nodeid which is later used to check
            # what fixtures are visible for particular tests (as denoted
            # by their test id)
            if p.basename.startswith("conftest.py"):
                nodeid = p.dirpath().relto(self.config.rootdir)
                if p.sep != nodes.SEP:
                    nodeid = nodeid.replace(p.sep, nodes.SEP)

        self.parsefactories(plugin, nodeid)

    def _getautousenames(self, nodeid):
        """ return a tuple of fixture names to be used. """
        # NOTE(review): docstring says tuple but a list is returned
        autousenames = []
        for baseid, basenames in self._nodeid_and_autousenames:
            if nodeid.startswith(baseid):
                if baseid:
                    # guard against prefix-only matches ("a" vs "abc")
                    i = len(baseid)
                    nextchar = nodeid[i : i + 1]
                    if nextchar and nextchar not in ":/":
                        continue
                autousenames.extend(basenames)
        return autousenames

    def getfixtureclosure(self, fixturenames, parentnode):
        # collect the closure of all fixtures , starting with the given
        # fixturenames as the initial set.  As we have to visit all
        # factory definitions anyway, we also return an arg2fixturedefs
        # mapping so that the caller can reuse it and does not have
        # to re-discover fixturedefs again for each fixturename
        # (discovering matching fixtures for a given name/node is expensive)

        parentid = parentnode.nodeid
        fixturenames_closure = self._getautousenames(parentid)

        def merge(otherlist):
            # append only names not yet in the closure (order-preserving)
            for arg in otherlist:
                if arg not in fixturenames_closure:
                    fixturenames_closure.append(arg)

        merge(fixturenames)

        # at this point, fixturenames_closure contains what we call "initialnames",
        # which is a set of fixturenames the function immediately requests. We
        # need to return it as well, so save this.
        initialnames = tuple(fixturenames_closure)

        arg2fixturedefs = {}
        lastlen = -1
        # iterate to a fixed point: each pass may pull in new dependencies
        while lastlen != len(fixturenames_closure):
            lastlen = len(fixturenames_closure)
            for argname in fixturenames_closure:
                if argname in arg2fixturedefs:
                    continue
                fixturedefs = self.getfixturedefs(argname, parentid)
                if fixturedefs:
                    arg2fixturedefs[argname] = fixturedefs
                    merge(fixturedefs[-1].argnames)

        def sort_by_scope(arg_name):
            try:
                fixturedefs = arg2fixturedefs[arg_name]
            except KeyError:
                return scopes.index("function")
            else:
                return fixturedefs[-1].scopenum

        # broader-scoped fixtures first so they are set up before narrower ones
        fixturenames_closure.sort(key=sort_by_scope)
        return initialnames, fixturenames_closure, arg2fixturedefs

    def pytest_generate_tests(self, metafunc):
        """Indirectly parametrize tests for every parametrized fixture that
        is not already covered by an explicit parametrize marker."""
        for argname in metafunc.fixturenames:
            faclist = metafunc._arg2fixturedefs.get(argname)
            if faclist:
                fixturedef = faclist[-1]
                if fixturedef.params is not None:
                    markers = list(metafunc.definition.iter_markers("parametrize"))
                    for parametrize_mark in markers:
                        if "argnames" in parametrize_mark.kwargs:
                            argnames = parametrize_mark.kwargs["argnames"]
                        else:
                            argnames = parametrize_mark.args[0]

                        if not isinstance(argnames, (tuple, list)):
                            argnames = [
                                x.strip() for x in argnames.split(",") if x.strip()
                            ]
                        if argname in argnames:
                            break
                    else:
                        # for-else: no parametrize marker covers argname
                        metafunc.parametrize(
                            argname,
                            fixturedef.params,
                            indirect=True,
                            scope=fixturedef.scope,
                            ids=fixturedef.ids,
                        )
            else:
                continue  # will raise FixtureLookupError at setup time

    def pytest_collection_modifyitems(self, items):
        # separate parametrized setups
        items[:] = reorder_items(items)

    def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
        """Scan *node_or_obj* for fixture-marked callables and register them."""
        if nodeid is not NOTSET:
            holderobj = node_or_obj
        else:
            holderobj = node_or_obj.obj
            nodeid = node_or_obj.nodeid
        if holderobj in self._holderobjseen:
            return

        self._holderobjseen.add(holderobj)
        autousenames = []
        for name in dir(holderobj):
            # The attribute can be an arbitrary descriptor, so the attribute
            # access below can raise. safe_getatt() ignores such exceptions.
            obj = safe_getattr(holderobj, name, None)
            marker = getfixturemarker(obj)
            if not isinstance(marker, FixtureFunctionMarker):
                # magic globals  with __getattr__ might have got us a wrong
                # fixture attribute
                continue

            if marker.name:
                name = marker.name

            # during fixture definition we wrap the original fixture function
            # to issue a warning if called directly, so here we unwrap it in order to not emit the warning
            # when pytest itself calls the fixture function
            if six.PY2 and unittest:
                # hack on Python 2 because of the unbound methods
                obj = get_real_func(obj)
            else:
                obj = get_real_method(obj, holderobj)

            fixture_def = FixtureDef(
                self,
                nodeid,
                name,
                obj,
                marker.scope,
                marker.params,
                unittest=unittest,
                ids=marker.ids,
            )

            faclist = self._arg2fixturedefs.setdefault(name, [])
            if fixture_def.has_location:
                faclist.append(fixture_def)
            else:
                # fixturedefs with no location are at the front
                # so this inserts the current fixturedef after the
                # existing fixturedefs from external plugins but
                # before the fixturedefs provided in conftests.
                i = len([f for f in faclist if not f.has_location])
                faclist.insert(i, fixture_def)
            if marker.autouse:
                autousenames.append(name)

        if autousenames:
            self._nodeid_and_autousenames.append((nodeid or "", autousenames))

    def getfixturedefs(self, argname, nodeid):
        """
        Gets a list of fixtures which are applicable to the given node id.

        :param str argname: name of the fixture to search for
        :param str nodeid: full node id of the requesting test.
        :return: list[FixtureDef]
        """
        try:
            fixturedefs = self._arg2fixturedefs[argname]
        except KeyError:
            return None
        return tuple(self._matchfactories(fixturedefs, nodeid))

    def _matchfactories(self, fixturedefs, nodeid):
        # yield only fixture definitions visible from *nodeid*
        for fixturedef in fixturedefs:
            if nodes.ischildnode(fixturedef.baseid, nodeid):
                yield fixturedef
| hackebrot/pytest | src/_pytest/fixtures.py | Python | mit | 49,942 |
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from tapi_server.models.base_model_ import Model
from tapi_server.models.tapi_odu_odu_named_payload_type import TapiOduOduNamedPayloadType # noqa: F401,E501
from tapi_server import util
class TapiOduOduPayloadType(Model):
    """NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).

    Do not edit the class manually.
    """

    def __init__(self, hex_payload_type=None, named_payload_type=None):  # noqa: E501
        """TapiOduOduPayloadType - a model defined in OpenAPI

        :param hex_payload_type: The hex_payload_type of this TapiOduOduPayloadType.  # noqa: E501
        :type hex_payload_type: int
        :param named_payload_type: The named_payload_type of this TapiOduOduPayloadType.  # noqa: E501
        :type named_payload_type: TapiOduOduNamedPayloadType
        """
        # attribute name -> declared OpenAPI type (used for deserialization)
        self.openapi_types = {
            'hex_payload_type': int,
            'named_payload_type': TapiOduOduNamedPayloadType
        }

        # python attribute name -> JSON key in the wire format
        self.attribute_map = {
            'hex_payload_type': 'hex-payload-type',
            'named_payload_type': 'named-payload-type'
        }

        self._hex_payload_type = hex_payload_type
        self._named_payload_type = named_payload_type

    @classmethod
    def from_dict(cls, dikt) -> 'TapiOduOduPayloadType':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The tapi.odu.OduPayloadType of this TapiOduOduPayloadType.  # noqa: E501
        :rtype: TapiOduOduPayloadType
        """
        return util.deserialize_model(dikt, cls)

    @property
    def hex_payload_type(self):
        """Gets the hex_payload_type of this TapiOduOduPayloadType.

        none  # noqa: E501

        :return: The hex_payload_type of this TapiOduOduPayloadType.
        :rtype: int
        """
        return self._hex_payload_type

    @hex_payload_type.setter
    def hex_payload_type(self, hex_payload_type):
        """Sets the hex_payload_type of this TapiOduOduPayloadType.

        none  # noqa: E501

        :param hex_payload_type: The hex_payload_type of this TapiOduOduPayloadType.
        :type hex_payload_type: int
        """
        # no validation performed; generated setter stores the value as-is
        self._hex_payload_type = hex_payload_type

    @property
    def named_payload_type(self):
        """Gets the named_payload_type of this TapiOduOduPayloadType.

        :return: The named_payload_type of this TapiOduOduPayloadType.
        :rtype: TapiOduOduNamedPayloadType
        """
        return self._named_payload_type

    @named_payload_type.setter
    def named_payload_type(self, named_payload_type):
        """Sets the named_payload_type of this TapiOduOduPayloadType.

        :param named_payload_type: The named_payload_type of this TapiOduOduPayloadType.
        :type named_payload_type: TapiOduOduNamedPayloadType
        """
        # no validation performed; generated setter stores the value as-is
        self._named_payload_type = named_payload_type
| karthik-sethuraman/ONFOpenTransport | RI/flask_server/tapi_server/models/tapi_odu_odu_payload_type.py | Python | apache-2.0 | 3,032 |
#
# -*- coding: utf-8 -*-
# import subman fixture
# override plugin manager with one that provides
# the ostree content plugin
# test tree format
#
# test repo model
#
# test constructing from Content models
# ignores wrong content type
import ConfigParser
import mock
from nose.plugins.skip import SkipTest
import fixture
import subprocess
from subscription_manager.model import EntitlementSource, Entitlement, \
find_content
from subscription_manager.model.ent_cert import EntitlementCertContent
from subscription_manager.plugin.ostree import config
from subscription_manager.plugin.ostree import model
from subscription_manager.plugin.ostree import action_invoker
from rhsm import certificate2
class StubPluginManager(object):
    """Minimal plugin-manager stand-in; tests only need a placeholder object."""

    pass
class TestOstreeGIWrapperError(fixture.SubManFixture):
    """Tests for model.OstreeGIWrapperError construction and formatting."""

    def test(self):
        """Error built from returncode+cmd exposes both and embeds the
        joined command string in its message and str()."""
        returncode = 1
        cmd = ['python', '/usr/bin/gi_wrapper.py', '--deployed-origin']
        err = model.OstreeGIWrapperError(returncode=returncode,
                                         cmd=cmd)
        # assertIsInstance/assertEqual/assertIn give better failure output
        # than assertTrue(...)/the deprecated assertEquals alias
        self.assertIsInstance(err, model.OstreeGIWrapperError)
        self.assertEqual(err.returncode, returncode)
        self.assertEqual(err.cmd, cmd)
        cmd_string = ' '.join(cmd)
        self.assertEqual(err.cmd_string, cmd_string)
        self.assertIn(cmd_string, err.msg)
        self.assertIn(err.base_msg, err.msg)
        self.assertIn(err.__class__.__name__, str(err))
        self.assertIn(cmd_string, str(err))

    def test_form_called_process_error(self):
        """from_called_process_error copies cmd from the CalledProcessError."""
        returncode = 1
        cmd = "/bin/example-command --foo /tmp/nothing"
        called_process_error = subprocess.CalledProcessError(returncode, cmd)
        err = model.OstreeGIWrapperError.from_called_process_error(called_process_error)
        self.assertIsInstance(err, model.OstreeGIWrapperError)
        self.assertEqual(err.cmd, cmd)
        self.assertIn(err.base_msg, err.msg)
class TestOstreeRemoteNameFromSection(fixture.SubManFixture):
    """Tests for OstreeRemote.name_from_section parsing of config headers."""

    def test_normal(self):
        """A well-formed section name yields the unquoted remote name."""
        sn = r'remote "awesomeos-foo-container"'
        name = model.OstreeRemote.name_from_section(sn)
        # assertIsNotNone/assertEqual/assertNotIn replace the weaker
        # assertTrue/assertFalse and deprecated assertEquals forms
        self.assertIsNotNone(name)
        self.assertEqual(name, "awesomeos-foo-container")
        # no quotes in the name
        self.assertNotIn('"', name)

    def test_spaces(self):
        """Names with spaces parse; ostree itself does not forbid them."""
        # We consider remote names to be content labels, so
        # shouldn't have space, but afaik ostree doesn't care
        sn = r'remote "awesome os container"'
        name = model.OstreeRemote.name_from_section(sn)
        self.assertIsNotNone(name)
        self.assertEqual(name, "awesome os container")
        self.assertNotIn('"', name)

    def test_no_remote_keyword(self):
        """Missing 'remote' keyword is a parse error."""
        sn = r'"some-string-that-is-wrong"'
        self.assert_name_error(sn)

    def test_no_quoted(self):
        """Unquoted name is a parse error."""
        sn = r'remote not-a-real-name'
        self.assert_name_error(sn)

    def test_open_quote(self):
        """Unterminated quote is a parse error."""
        sn = r'remote "strine-with-open-quote'
        self.assert_name_error(sn)

    def test_empty_quote(self):
        """Empty quoted name is a parse error."""
        sn = r'remote ""'
        self.assert_name_error(sn)

    def assert_name_error(self, sn):
        """Helper: parsing *sn* must raise RemoteSectionNameParseError."""
        self.assertRaises(model.RemoteSectionNameParseError,
                          model.OstreeRemote.name_from_section,
                          sn)
class TestOstreeRemote(fixture.SubManFixture):
    """Tests for OstreeRemote.from_config_section and map_gpg."""

    section_name = r'remote "awesomeos-content"'
    example_url = 'http://example.com.not.real/content'

    def assert_remote(self, remote):
        """Helper: *remote* must be an OstreeRemote instance."""
        # assertIsInstance reports the actual type on failure
        self.assertIsInstance(remote, model.OstreeRemote)

    def test(self):
        """Basic url/gpg-verify items land on the remote."""
        items = {'url': self.example_url,
                 'gpg-verify': 'true'}
        ostree_remote = \
            model.OstreeRemote.from_config_section(self.section_name,
                                                   items)
        self.assert_remote(ostree_remote)
        # assertEqual instead of the deprecated assertEquals alias
        self.assertEqual('true', ostree_remote.gpg_verify)
        self.assertEqual(self.example_url, ostree_remote.url)

    def test_other_items(self):
        """Unknown keys are preserved; dashed keys become underscore attrs."""
        items = {'url': self.example_url,
                 'a_new_key': 'a_new_value',
                 'tls-client-cert-path': '/etc/some/path',
                 'tls-client-key-path': '/etc/some/path-key',
                 'tls-ca-path': '/etc/rhsm/ca/redhat-uep.pem',
                 'gpg-verify': 'true',
                 'blip': 'baz'}
        ostree_remote = \
            model.OstreeRemote.from_config_section(self.section_name,
                                                   items)
        self.assert_remote(ostree_remote)
        # .url and data['url'] work
        self.assertEqual(self.example_url, ostree_remote.url)
        self.assertEqual(self.example_url, ostree_remote.data['url'])

        # assertIn/assertNotIn replace assertTrue/assertFalse('x' in y)
        self.assertIn('a_new_key', ostree_remote)
        self.assertEqual('a_new_value', ostree_remote.data['a_new_key'])

        self.assertIn('gpg_verify', ostree_remote)
        self.assertTrue(hasattr(ostree_remote, 'gpg_verify'))
        self.assertEqual('true', ostree_remote.gpg_verify)
        # the dashed form must not leak through
        self.assertNotIn('gpg-verify', ostree_remote)
        self.assertFalse(hasattr(ostree_remote, 'gpg-verify'))

        self.assertTrue(hasattr(ostree_remote, 'tls_ca_path'))
        self.assertEqual('/etc/rhsm/ca/redhat-uep.pem', ostree_remote.tls_ca_path)

    def test_repr(self):
        """repr() mentions the key fields and the url."""
        # we use the dict repr now though
        items = {'url': self.example_url,
                 'a_new_key': 'a_new_value',
                 'gpg-verify': 'true',
                 'blip': 'baz'}
        ostree_remote = \
            model.OstreeRemote.from_config_section(self.section_name,
                                                   items)
        repr_str = repr(ostree_remote)
        self.assertIsInstance(repr_str, basestring)
        self.assertIn('name', repr_str)
        self.assertIn('gpg_verify', repr_str)
        self.assertIn(self.example_url, repr_str)

    def test_map_gpg(self):
        """map_gpg: None and file:/http urls enable verify; bare 'http://' disables."""
        content = mock.Mock()

        content.gpg = None
        self.assertTrue(model.OstreeRemote.map_gpg(content))

        # any file url is considered enabled
        content.gpg = "file:///path/to/key"
        self.assertTrue(model.OstreeRemote.map_gpg(content))

        # special case the null url of "http://"
        content.gpg = "http://"
        self.assertFalse(model.OstreeRemote.map_gpg(content))

        # regular urls are "enabled"
        content.gpg = "http://some.example.com/not/blip"
        self.assertTrue(model.OstreeRemote.map_gpg(content))
class TestOstreeRemoteFromEntCertContent(fixture.SubManFixture):
    """Tests for OstreeRemote.from_ent_cert_content, mainly gpg handling."""

    def _content(self):
        # Minimal ostree content object as found in an entitlement cert.
        content = certificate2.Content(content_type="ostree",
                                       name="content-name",
                                       label="content-test-label",
                                       vendor="Test Vendor",
                                       url="/test.url/test",
                                       gpg="file:///file/gpg/key")
        return content

    def _cert(self):
        # Mock entitlement cert supplying client cert/key paths.
        cert = mock.Mock()
        cert.path = "/path"
        cert.key_path.return_value = "/key/path"
        return cert

    def test(self):
        remote = self._ent_content_remote('file://path/to/key')
        self.assertTrue(remote.gpg_verify)

    def test_gpg_http(self):
        # the bare "http://" url means gpg verification is disabled
        remote = self._ent_content_remote('http://')
        self.assertFalse(remote.gpg_verify)

    def test_gpg_anything(self):
        remote = self._ent_content_remote('anything')
        self.assertTrue(remote.gpg_verify)

    def test_gpg_none(self):
        remote = self._ent_content_remote(None)
        self.assertTrue(remote.gpg_verify)

    def test_gpg_empty_string(self):
        remote = self._ent_content_remote("")
        self.assertTrue(remote.gpg_verify)

    def test_gpg_no_attr(self):
        # content without a gpg attribute at all defaults to verify enabled
        content = self._content()
        cert = self._cert()
        ent_cert_content = EntitlementCertContent.from_cert_content(
            content, cert)
        del ent_cert_content.gpg
        remote = model.OstreeRemote.from_ent_cert_content(ent_cert_content)
        self.assertTrue(remote.gpg_verify)

    def _ent_content_remote(self, gpg):
        # Build an OstreeRemote from cert content carrying the given gpg url.
        content = self._content()
        content.gpg = gpg
        cert = self._cert()
        ent_cert_content = EntitlementCertContent.from_cert_content(
            content, cert)
        remote = model.OstreeRemote.from_ent_cert_content(ent_cert_content)
        return remote
class TestOstreeRemotes(fixture.SubManFixture):
    """Tests for the OstreeRemotes collection."""

    def test(self):
        # A fresh collection exposes its backing 'data' attribute.
        osr = model.OstreeRemotes()
        self.assertTrue(hasattr(osr, 'data'))

    def test_add_empty_ostree_remote(self):
        # NOTE: renamed from 'test_add_emtpty_ostree_remote' (typo fix);
        # unittest discovers test methods by prefix, so no caller breaks.
        remote = model.OstreeRemote()
        remotes = model.OstreeRemotes()
        remotes.add(remote)
        self.assertTrue(remote in remotes)

    def test_add_ostree_remote(self):
        # A populated remote is also found via membership.
        remote = model.OstreeRemote()
        remote.url = 'http://example.com/test'
        remote.name = 'awesomeos-remote'
        remote.gpg_verify = 'true'
        remotes = model.OstreeRemotes()
        remotes.add(remote)
        self.assertTrue(remote in remotes)
class BaseOstreeKeyFileTest(fixture.SubManFixture):
    """Setup env for testing ostree keyfiles ('config' and '.origin')."""
    # NOTE: the docstring previously sat *after* this attribute, where it
    # was a discarded string expression rather than the class docstring;
    # it is now a real docstring and the redundant 'pass' is gone.
    repo_cfg = ""
class TestOstreeRepoConfig(BaseOstreeKeyFileTest):
    """Load/save round trips for OstreeRepoConfig."""
    repo_cfg = """
[remote "test-remote"]
url = https://blip.example.com
"""

    def setUp(self):
        super(TestOstreeRepoConfig, self).setUp()
        # Back the config with a real temp file.
        self.repo_cfg_path = self.write_tempfile(self.repo_cfg)
        self.repo_config = model.OstreeRepoConfig(
            repo_file_path=self.repo_cfg_path.name)

    def test_save(self):
        # save() after load() should succeed
        self.repo_config.load()
        self.repo_config.save()

    def test_save_no_store(self):
        # saving without loading first should not blow up either
        self.repo_config.save()
class TestOstreeConfigRepoFileWriter(BaseOstreeKeyFileTest):
    """Verify OstreeConfigFileWriter persists via the repo file's save()."""
    repo_cfg = """
[remote "test-remote"]
url = https://blip.example.com
"""

    def setUp(self):
        super(TestOstreeConfigRepoFileWriter, self).setUp()
        self.repo_cfg_path = self.write_tempfile(self.repo_cfg)
        self.repo_config = model.OstreeRepoConfig(
            repo_file_path=self.repo_cfg_path.name)
        self.repo_config.load()

    def test_save(self):
        mock_repo_file = mock.Mock()
        rfw = model.OstreeConfigFileWriter(mock_repo_file)
        rfw.save(self.repo_config)
        # the writer must delegate to the underlying repo file's save()
        self.assertTrue(mock_repo_file.save.called)
class TestKeyFileConfigParser(BaseOstreeKeyFileTest):
    """KeyFileConfigParser behaviour on an empty keyfile."""
    repo_cfg = ""

    def test_defaults(self):
        """An empty keyfile must not inherit any rhsm default values."""
        tmpfile = self.write_tempfile(self.repo_cfg)
        keyfile = config.KeyFileConfigParser(tmpfile.name)
        # There are no defaults, make sure the rhsm ones are skipped
        self.assertFalse(keyfile.has_default('section', 'prop'))
        self.assertEquals(keyfile.defaults(), {})

    def test_items(self):
        """An unknown section reports no items."""
        tmpfile = self.write_tempfile(self.repo_cfg)
        keyfile = config.KeyFileConfigParser(tmpfile.name)
        self.assertEquals(keyfile.items('section'), [])
class TestOstreeRepoConfigUpdates(BaseOstreeKeyFileTest):
    """Tests for OstreeConfigUpdates apply()/save()."""
    repo_cfg = """
[section_one]
akey = 1
foo = bar
[section_two]
last_key = blippy
"""

    def test_init(self):
        fid = self.write_tempfile(self.repo_cfg)
        ostree_config = model.OstreeRepoConfig(repo_file_path=fid.name)
        new_ostree_config = model.OstreeRepoConfig(repo_file_path=fid.name)
        updates = model.OstreeConfigUpdates(ostree_config, new_ostree_config)
        updates.apply()
        # after apply(), the original config reflects the new one
        self.assertEquals(updates.orig, updates.new)
        updates.save()
class TestOstreeConfigUpdatesBuilder(BaseOstreeKeyFileTest):
    """Build config updates from a set of entitled ostree contents."""
    repo_cfg = """
[section_one]
akey = 1
foo = bar
[section_two]
last_key = blippy
"""

    def test_init(self):
        fid = self.write_tempfile(self.repo_cfg)
        content_set = set()
        ostree_config = model.OstreeConfig(repo_file_path=fid.name)
        # one mock entitled content; gpg=None maps to gpg_verify True
        mock_content = mock.Mock()
        mock_content.url = "/path/from/base/url"
        mock_content.name = "mock-content-example"
        mock_content.gpg = None
        mock_ent_cert = mock.Mock()
        mock_ent_cert.path = "/somewhere/etc/pki/entitlement/123123.pem"
        mock_content.cert = mock_ent_cert
        content_set.add(mock_content)
        updates_builder = model.OstreeConfigUpdatesBuilder(ostree_config, content_set)
        updates = updates_builder.build()
        # the content becomes a remote in the new config
        self.assertTrue(len(updates.new.remotes))
        self.assertTrue(isinstance(updates.new.remotes[0], model.OstreeRemote))
        self.assertEquals(updates.new.remotes[0].url, mock_content.url)
        #self.assertEquals(updates.new.remotes[0].name, mock_content.name)
        self.assertEquals(updates.new.remotes[0].gpg_verify, True)
class TestKeyFileConfigParserSample(BaseOstreeKeyFileTest):
    """Parse a small two-section keyfile sample."""
    repo_cfg = """
[section_one]
akey = 1
foo = bar
[section_two]
last_key = blippy
"""

    def test_sections(self):
        """Both sections, and only those, are discovered."""
        tmpfile = self.write_tempfile(self.repo_cfg)
        parser = config.KeyFileConfigParser(tmpfile.name)
        found_sections = parser.sections()
        self.assert_items_equals(found_sections, ['section_one', 'section_two'])
        self.assertEquals(len(found_sections), 2)

    def test_items(self):
        """section_one carries exactly its two key/value pairs."""
        tmpfile = self.write_tempfile(self.repo_cfg)
        parser = config.KeyFileConfigParser(tmpfile.name)
        self.assertEquals(len(parser.items('section_one')), 2)
class TestReplaceRefspecRemote(fixture.SubManFixture):
    """Tests for config.replace_refspec_remote."""

    def test_successful_replace(self):
        """The remote part before ':' is swapped for the new remote."""
        old_refspec = 'awesomeos-controller:awesomeos-controller/awesomeos8/x86_64/controller/docker'
        result = config.replace_refspec_remote(old_refspec, 'newremote')
        self.assertEquals(
            'newremote:awesomeos-controller/awesomeos8/x86_64/controller/docker',
            result)

    def test_empty_remote(self):
        """Replacement also works when the original remote part is empty."""
        old_refspec = ':awesomeos-controller/awesomeos8/x86_64/controller/docker'
        result = config.replace_refspec_remote(old_refspec, 'newremote')
        self.assertEquals(
            'newremote:awesomeos-controller/awesomeos8/x86_64/controller/docker',
            result)

    def test_bad_refspec(self):
        """A string that is not a refspec raises RefspecFormatException."""
        self.assertRaises(config.RefspecFormatException,
                          config.replace_refspec_remote,
                          'ImNotARefSpec', 'newremote')
class TestOstreeOriginUpdater(BaseOstreeKeyFileTest):
    """Tests for OstreeOriginUpdater rewriting the deployed .origin refspec."""
    # Multiple remotes, one matches ref:
    multi_repo_cfg = """
[remote "lame-ostree"]
url=http://lame.example.com.not.real/
branches=lame-ostree/hyperlame/x86_64/controller/docker;
gpg-verify=false
[remote "awesome-ostree"]
url=http://awesome.example.com.not.real/
branches=awesome-ostree/awesome7/x86_64/controller/docker;
gpg-verify=false
"""
    # Multiple remotes, none match ref:
    mismatched_multi_repo_cfg = """
[remote "lame-ostree"]
url=http://lame.example.com.not.real/
branches=lame-ostree/hyperlame/x86_64/controller/docker;
gpg-verify=false
[remote "awesome-ostree"]
url=http://awesome.example.com.not.real/
branches=awesome-ostree/awesome7/x86_64/controller/docker;
gpg-verify=true
"""

    def _setup_config(self, repo_cfg, origin_cfg):
        # Build an updater over tempfile-backed repo/origin configs, and
        # stub the gi_wrapper call that locates the deployed origin file.
        self.repo_cfg_path = self.write_tempfile(repo_cfg)
        self.repo_config = model.OstreeRepoConfig(
            repo_file_path=self.repo_cfg_path.name)
        self.repo_config.load()
        self.updater = model.OstreeOriginUpdater(self.repo_config)
        self.origin_cfg_path = self.write_tempfile(origin_cfg)
        self.original_get_deployed_origin = self.updater._get_deployed_origin
        self.updater._get_deployed_origin = mock.Mock(
            return_value=self.origin_cfg_path.name)

    def _assert_origin(self, origin_parser, expected_remote):
        # The refspec's remote part (before ':') must be expected_remote.
        self.assertTrue(origin_parser.has_section('origin'))
        self.assertTrue('refspec' in origin_parser.options('origin'))
        self.assertTrue(origin_parser.get('origin', 'refspec').
                        startswith(expected_remote + ":"))

    # Verify we either never had, or have since removed the unconfigured-state option
    def _assert_no_unconfigured_option(self, origin_parser):
        self.assertTrue(origin_parser.has_section('origin'))
        self.assertFalse('unconfigured-state' in origin_parser.options('origin'))

    def test_one_remote_matching_ref(self):
        repo_cfg = """
[remote "awesomeos-ostree-next-ostree"]
url = https://awesome.cdn/content/awesomeos/next/10/ostree/repo
gpg-verify = false
tls-client-cert-path = /etc/pki/entitlement/12345.pem
tls-client-key-path = /etc/pki/entitlement/12345-key.pem
"""
        origin_cfg = """
[origin]
refspec=awesomeos-ostree-next-ostree:awesomeos-atomic/10.0-buildmaster/x86_64/standard
"""
        self._setup_config(repo_cfg, origin_cfg)
        self.updater.run()
        # Reload the origin file and make sure it looks right:
        new_origin = config.KeyFileConfigParser(
            self.origin_cfg_path.name)
        self._assert_origin(new_origin, 'awesomeos-ostree-next-ostree')
        self._assert_no_unconfigured_option(new_origin)

    # If the ref is mismatched, but we only have one:
    def test_one_remote_mismatched_ref(self):
        repo_cfg = """
[remote "awesomeos-atomic-ostree"]
url=http://awesome.example.com.not.real/
gpg-verify=false
"""
        origin_cfg = """
[origin]
refspec=origremote:thisisnotthesamewords/awesomeos8/x86_64/controller/docker
"""
        self._setup_config(repo_cfg, origin_cfg)
        self.updater.run()
        new_origin = config.KeyFileConfigParser(
            self.origin_cfg_path.name)
        # FIXME: For now, we pick the first one.
        self._assert_origin(new_origin, 'awesomeos-atomic-ostree')
        self._assert_no_unconfigured_option(new_origin)

    # If the ref is mismatched, but we only have one, and verify
    # we remove the unconfigured state
    def test_one_remote_mismatched_ref_remove_unconfigured(self):
        repo_cfg = """
[remote "awesomeos-atomic-ostree"]
url=http://awesome.example.com.not.real/
gpg-verify=false
"""
        # ostree origins will have 'unconfigured-state' if they need setup
        origin_cfg = """
[origin]
refspec=origremote:thisisnotthesamewords/awesomeos8/x86_64/controller/docker
unconfigured-state=Use "subscription-manager register" to enable online updates
"""
        self._setup_config(repo_cfg, origin_cfg)
        self.updater.run()
        new_origin = config.KeyFileConfigParser(
            self.origin_cfg_path.name)
        # FIXME: For now, we pick the first one.
        self._assert_origin(new_origin, 'awesomeos-atomic-ostree')
        self._assert_no_unconfigured_option(new_origin)

    # If the ref is mismatched, but we only have one:
    def test_no_remotes(self):
        repo_cfg = ""
        origin_cfg = """
[origin]
refspec=origremote:thisisnotthesamewords/awesomeos8/x86_64/controller/docker
"""
        self._setup_config(repo_cfg, origin_cfg)
        self.updater.run()
        new_origin = config.KeyFileConfigParser(
            self.origin_cfg_path.name)
        # No remotes, we don't change the origin at all
        self._assert_origin(new_origin, 'origremote')
        self._assert_no_unconfigured_option(new_origin)

    # If the ref is mismatched, but we only have one:
    def test_no_remotes_unconfigured(self):
        repo_cfg = ""
        origin_cfg = """
[origin]
refspec=origremote:thisisnotthesamewords/awesomeos8/x86_64/controller/docker
unconfigured-state=Use "subscription-manager register" to enable online updates
"""
        self._setup_config(repo_cfg, origin_cfg)
        self.updater.run()
        new_origin = config.KeyFileConfigParser(
            self.origin_cfg_path.name)
        # No remotes, we don't change the origin at all
        self._assert_origin(new_origin, 'origremote')
        # with no remotes, unconfigured-state is also left untouched
        self.assertTrue('unconfigured-state' in new_origin.options('origin'))

    def test_multi_remote_matching_ref(self):
        repo_cfg = """
[remote "lame-ostree"]
url=https://lame.example.com.not.real/
gpg-verify=false
[remote "awesome-ostree"]
url=https://awesome.example.com.not.real:9999/foo/repo
gpg-verify=false
"""
        origin_cfg = """
[origin]
refspec=origremote:awesome-ostree/awesomeos8/x86_64/controller/docker
"""
        self._setup_config(repo_cfg, origin_cfg)
        self.updater.run()
        # Reload the origin file and make sure it looks right:
        new_origin = config.KeyFileConfigParser(
            self.origin_cfg_path.name)
        self._assert_origin(new_origin, 'awesome-ostree')
        self._assert_no_unconfigured_option(new_origin)

    def test_multi_remote_mismatched_ref(self):
        repo_cfg = """
[remote "lame-ostree"]
url=http://lame.example.com.not.real/
gpg-verify=false
[remote "awesome-ostree"]
url=http://awesome.example.com.not.real/
gpg-verify=false
"""
        origin_cfg = """
[origin]
refspec=origremote:thisisnoteitherofthose/awesomeos8/x86_64/controller/docker
"""
        self._setup_config(repo_cfg, origin_cfg)
        self.updater.run()
        new_origin = config.KeyFileConfigParser(
            self.origin_cfg_path.name)
        # Remote should have been left alone
        self._assert_origin(new_origin, 'awesome-ostree')
        self._assert_no_unconfigured_option(new_origin)

    def test_gi_wrapper_script_error(self):
        # When the gi_wrapper helper fails, run() should bail out quietly.
        repo_cfg = """
[remote "awesome-ostree"]
url=http://awesome.example.com.not.real/
branches=awesome-ostree/awesome7/x86_64/controller/docker;
gpg-verify=false
"""
        origin_cfg = """
[origin]
refspec=origremote:awesome-ostree/awesomeos8/x86_64/controller/docker
"""
        self.repo_cfg_path = self.write_tempfile(repo_cfg)
        self.repo_config = model.OstreeRepoConfig(
            repo_file_path=self.repo_cfg_path.name)
        self.repo_config.load()
        self.updater = model.OstreeOriginUpdater(self.repo_config)
        self.origin_cfg_path = self.write_tempfile(origin_cfg)
        spe = subprocess.CalledProcessError(returncode=1,
                                            cmd=['gi_wrapper.py', '--some-options'])
        self.updater._get_deployed_origin = \
            mock.Mock(side_effect=model.OstreeGIWrapperError.from_called_process_error(spe))
        res = self.updater.run()
        self.assertTrue(res is None)

    def test_get_deployed_origin(self):
        if not hasattr(subprocess, 'check_output'):
            # We wouldn't need gi_wrapper on rhel6
            # a) There is no ostree on rhel6
            # b) rhel6 is pygtk not pygobject
            raise SkipTest('This version of python does not have subprocess.check_output, so ostree gi_wrapper wont work.')
        repo_cfg = """
[remote "awesome-ostree"]
url=http://awesome.example.com.not.real/
branches=awesome-ostree/awesome7/x86_64/controller/docker;
gpg-verify=false
"""
        origin_cfg = """
[origin]
refspec=origremote:awesome-ostree/awesomeos8/x86_64/controller/docker
"""
        self.repo_cfg_path = self.write_tempfile(repo_cfg)
        self.repo_config = model.OstreeRepoConfig(
            repo_file_path=self.repo_cfg_path.name)
        self.repo_config.load()
        self.updater = model.OstreeOriginUpdater(self.repo_config)
        self.origin_cfg_path = self.write_tempfile(origin_cfg)
        # a failing check_output must surface as OstreeGIWrapperError
        sub_mock = mock.Mock(side_effect=subprocess.CalledProcessError(1, 'gi_wrapper.py'))
        with mock.patch('subscription_manager.plugin.ostree.model.subprocess.check_output', sub_mock):
            self.assertRaises(model.OstreeGIWrapperError, self.updater._get_deployed_origin)
class BaseOstreeOriginFileTest(BaseOstreeKeyFileTest):
    """Base of tests for ostree *.origin config files."""

    def _of_cfg(self):
        """Write repo_cfg to a temp file and parse it as a keyfile."""
        tmpfile = self.write_tempfile(self.repo_cfg)
        self._of_cfg_instance = config.KeyFileConfigParser(tmpfile.name)
        return self._of_cfg_instance
class TestOriginFileConfigParserEmpty(BaseOstreeOriginFileTest):
    """Test if a .origin file is empty."""
    repo_cfg = ""

    def test_has_origin(self):
        """An empty origin file has no [origin] and no sections at all."""
        parser = self._of_cfg()
        self.assertFalse(parser.has_section('origin'))
        self.assertEquals(parser.sections(), [])
class TestOriginFileConfigParser(BaseOstreeOriginFileTest):
    """Test a normalish .origin file."""
    repo_cfg = """
[origin]
refspec=awesomeos-controller:awesomeos-controller/awesomeos8/x86_64/controller/docker
"""

    def test_has_origin(self):
        """The parser finds exactly one section: [origin]."""
        parser = self._of_cfg()
        self.assertTrue(parser.has_section('origin'))
        self.assertEquals(len(parser.sections()), 1)

    def test_has_refspec(self):
        """The refspec option is present and names the expected remote."""
        parser = self._of_cfg()
        refspec = parser.get('origin', 'refspec')
        self.assertTrue(refspec)
        self.assertTrue("awesomeos-controller" in refspec)
class TestOstreeConfigFile(BaseOstreeOriginFileTest):
    """BaseOstreeConfigFile can be constructed from an origin keyfile."""
    repo_cfg = """
[origin]
refspec=awesomeos-controller:awesomeos-controller/awesomeos8/x86_64/controller/docker
"""

    def test_init(self):
        """Constructing from a real file yields a BaseOstreeConfigFile."""
        tmpfile = self.write_tempfile(self.repo_cfg)
        cfg_file = config.BaseOstreeConfigFile(tmpfile.name)
        self.assertTrue(isinstance(cfg_file, config.BaseOstreeConfigFile))
class BaseOstreeRepoFileTest(BaseOstreeKeyFileTest):
    """Shared helpers/assertions for ostree repo 'config' file tests."""
    repo_cfg = ""

    def _rf_cfg(self):
        # Parse repo_cfg from a temp file; keep refs for later asserts.
        self.fid = self.write_tempfile(self.repo_cfg)
        self._rf_cfg_instance = config.KeyFileConfigParser(self.fid.name)
        return self._rf_cfg_instance

    def test_init(self):
        rf_cfg = self._rf_cfg()
        self.assertTrue(isinstance(rf_cfg, config.KeyFileConfigParser))

    def _verify_core(self, rf_cfg):
        # Every ostree repo config needs a [core] with version and mode.
        self.assertTrue(rf_cfg.has_section('core'))
        self.assertTrue('repo_version' in rf_cfg.options('core'))
        self.assertTrue('mode' in rf_cfg.options('core'))

    def _verify_remote(self, rf_cfg, remote_section):
        # A remote section must carry url, gpg-verify and tls client paths.
        self.assertTrue(remote_section in rf_cfg.sections())
        options = rf_cfg.options(remote_section)
        self.assertFalse(options == [])
        self.assertTrue(rf_cfg.has_option(remote_section, 'url'))
        url = rf_cfg.get(remote_section, 'url')
        self.assertTrue(url is not None)
        self.assertTrue(isinstance(url, basestring))
        self.assertTrue(' ' not in url)
        self.assertTrue(rf_cfg.has_option(remote_section, 'gpg-verify'))
        gpg_verify = rf_cfg.get(remote_section, 'gpg-verify')
        self.assertTrue(gpg_verify is not None)
        # raw config strings, not booleans, at this layer
        self.assertTrue(gpg_verify in ('true', 'false'))
        self.assertTrue(rf_cfg.has_option(remote_section, 'tls-client-cert-path'))
        self.assertTrue(rf_cfg.has_option(remote_section, 'tls-client-key-path'))
        cert_path = rf_cfg.get(remote_section, 'tls-client-cert-path')
        key_path = rf_cfg.get(remote_section, 'tls-client-key-path')
        self.assertTrue(cert_path is not None)
        self.assertTrue(key_path is not None)
        # Could be, but not for now
        self.assertTrue(cert_path != key_path)
        self.assertTrue('/etc/pki/entitlement' in cert_path)
        self.assertTrue('/etc/pki/entitlement' in key_path)
class TestSampleOstreeRepofileConfigParser(BaseOstreeRepoFileTest):
    """Parse a realistic sample ostree repo config file."""
    repo_cfg = """
[core]
repo_version=1
mode=bare
[remote "awesome-ostree-controller"]
url = https://awesome.example.com.not.real/
tls-client-cert-path = /etc/pki/entitlement/12345.pem
tls-client-key-path = /etc/pki/entitlement/12345-key.pem
gpg-verify = true
"""

    def test_for_no_rhsm_defaults(self):
        """Verify that the rhsm defaults didn't sneak into the config, which
        would be easy since we subclass the rhsm config parser.
        """
        rf_cfg = self._rf_cfg()
        sections = rf_cfg.sections()
        self.assertFalse('rhsm' in sections)
        self.assertFalse('server' in sections)
        self.assertFalse('rhsmcertd' in sections)

    def test_core(self):
        rf_cfg = self._rf_cfg()
        self._verify_core(rf_cfg)

    def test_remote(self):
        rf_cfg = self._rf_cfg()
        # option values are raw strings at this layer
        self.assertEqual('true', rf_cfg.get('remote "awesome-ostree-controller"',
                                            'gpg-verify'))
class TestOstreeRepofileConfigParserNotAValidFile(BaseOstreeRepoFileTest):
    """Feeding garbage (non-keyfile) data must raise a parser error."""
    # Deliberately broken content: HTML fragments plus non-ASCII prose.
    repo_cfg = """
id=inrozxa width=100% height=100%>
<param name=movie value="welcom
ಇದು ಮಾನ್ಯ ಸಂರಚನಾ ಕಡತದ ಅಲ್ಲ. ನಾನು ಮಾಡಲು ಪ್ರಯತ್ನಿಸುತ್ತಿರುವ ಖಚಿತವಿಲ್ಲ, ಆದರೆ ನಾವು ಈ
ಪಾರ್ಸ್ ಹೇಗೆ ಕಲ್ಪನೆಯೂ ಇಲ್ಲ. ನೀವು ಡಾಕ್ಸ್ ಓದಲು ಬಯಸಬಹುದು.
"""

    def test_init(self):
        # just expect any config parser ish error atm,
        # rhsm.config can raise a variety of exceptions all
        # subclasses from ConfigParser.Error
        self.assertRaises(ConfigParser.Error, self._rf_cfg)
class TestOstreeRepoFileOneRemote(BaseOstreeRepoFileTest):
    """RepoFile behaviour with a single configured remote."""
    repo_cfg = """
[core]
repo_version=1
mode=bare
[remote "awesome-ostree-controller"]
url = http://awesome.example.com.not.real/
gpg-verify = false
tls-client-cert-path = /etc/pki/entitlement/12345.pem
tls-client-key-path = /etc/pki/entitlement/12345-key.pem
"""

    @mock.patch('subscription_manager.plugin.ostree.config.RepoFile._get_config_parser')
    def test_remote_sections(self, mock_get_config_parser):
        mock_get_config_parser.return_value = self._rf_cfg()
        rf = config.RepoFile('')
        remotes = rf.remote_sections()
        self.assertTrue('remote "awesome-ostree-controller"' in remotes)
        self.assertFalse('core' in remotes)
        self.assertFalse('rhsm' in remotes)

    @mock.patch('subscription_manager.plugin.ostree.config.RepoFile._get_config_parser')
    def test_section_is_remote(self, mock_get_config_parser):
        mock_get_config_parser.return_value = self._rf_cfg()
        rf = config.RepoFile('')
        # any section of the form 'remote "..."' counts as a remote
        self.assertTrue(rf.section_is_remote('remote "awesome-ostree-controller"'))
        self.assertTrue(rf.section_is_remote('remote "rhsm-ostree"'))
        self.assertTrue(rf.section_is_remote('remote "localinstall"'))
        self.assertFalse(rf.section_is_remote('rhsm'))
        self.assertFalse(rf.section_is_remote('core'))
        # string from config file is "false", not boolean False yet
        self.assertEquals('false',
                          rf.config_parser.get('remote "awesome-ostree-controller"', 'gpg-verify'))

    @mock.patch('subscription_manager.plugin.ostree.config.RepoFile._get_config_parser')
    def test_section_set_remote(self, mock_get_config_parser):
        mock_get_config_parser.return_value = self._rf_cfg()
        rf = config.RepoFile('')
        remote = model.OstreeRemote()
        remote.url = "/some/path"
        remote.name = "awesomeos-remote"
        remote.gpg_verify = 'true'
        remote.tls_client_cert_path = "/etc/pki/entitlement/54321.pem"
        remote.tls_client_key_path = "/etc/pki/entitlement/54321-key.pem"
        rf.set_remote(remote)
        # NOTE(review): this proxy value presumably comes from the test
        # fixture's rhsm proxy config -- confirm against SubManFixture.
        expected_proxy = "http://proxy_user:proxy_password@notaproxy.grimlock.usersys.redhat.com:3128"
        repo_proxy_uri = rf.config_parser.get('remote "awesomeos-remote"', 'proxy')
        self.assertEquals(expected_proxy, repo_proxy_uri)

    # NOTE(review): missing 'test_' prefix, so unittest never runs this and
    # it asserts nothing -- looks like dead/disabled code; confirm intent.
    @mock.patch('subscription_manager.plugin.ostree.config.RepoFile._get_config_parser')
    def section_set_remote(self, mock_get_config_parser):
        mock_get_config_parser.return_value = self._rf_cfg()
        rf = config.RepoFile('')
        remote = model.OstreeRemote()
        remote.url = "/some/path"
        remote.name = "awesomeos-remote"
        remote.gpg_verify = 'true'
        remote.tls_client_cert_path = "/etc/pki/entitlement/54321.pem"
        remote.tls_client_key_path = "/etc/pki/entitlement/54321-key.pem"
        rf.set_remote(remote)
class TestOstreeRepoFileNoRemote(BaseOstreeRepoFileTest):
    """RepoFile behaviour when the config contains no remotes at all."""
    repo_cfg = """
[core]
repo_version=1
mode=bare
"""

    @mock.patch('subscription_manager.plugin.ostree.config.RepoFile._get_config_parser')
    def test_remote_sections(self, mock_get_config_parser):
        mock_get_config_parser.return_value = self._rf_cfg()
        # NOTE(review): other tests construct config.RepoFile('') -- confirm
        # the no-arg form here is intentional and not a latent TypeError.
        rf = config.RepoFile()
        remotes = rf.remote_sections()
        self.assertFalse('remote "awesmome-ostree-controller"' in remotes)
        self.assertFalse('core' in remotes)
        self.assertFalse('rhsm' in remotes)
        self.assertEquals(remotes, [])
class TestOstreeRepoFileMultipleRemotes(BaseOstreeRepoFileTest):
    """RepoFile with two distinct remotes configured."""
    repo_cfg = """
[core]
repo_version=1
mode=bare
[remote "awesomeos-7-controller"]
url = https://awesome.example.com.not.real/repo/awesomeos7/
gpg-verify = false
tls-client-cert-path = /etc/pki/entitlement/12345.pem
tls-client-key-path = /etc/pki/entitlement/12345-key.pem
[remote "awesomeos-6-controller"]
url = https://awesome.example.com.not.real/repo/awesomeos6/
gpg-verify = false
tls-client-cert-path = /etc/pki/entitlement/12345.pem
tls-client-key-path = /etc/pki/entitlement/12345-key.pem
"""

    @mock.patch('subscription_manager.plugin.ostree.config.RepoFile._get_config_parser')
    def test_remote_sections(self, mock_get_config_parser):
        mock_get_config_parser.return_value = self._rf_cfg()
        rf = config.RepoFile('')
        remotes = rf.remote_sections()
        self.assertTrue('remote "awesomeos-7-controller"' in remotes)
        self.assertTrue('remote "awesomeos-6-controller"' in remotes)
        self.assertFalse('core' in remotes)
        self.assertFalse('rhsm' in remotes)
        # each remote must be fully formed (url, gpg-verify, tls paths)
        for remote in remotes:
            self._verify_remote(self._rf_cfg_instance, remote)
# Unsure what we should do in this case, if we dont throw
# an error on read, we will likely squash the dupes to one
# remote on write. Which is ok?
class TestOstreeRepoFileNonUniqueRemotes(BaseOstreeRepoFileTest):
    """RepoFile with the same remote section listed twice (duplicates)."""
    repo_cfg = """
[core]
repo_version=1
mode=bare
[remote "awesomeos-7-controller"]
url=http://awesome.example.com.not.real/repo/awesomeos7/
gpg-verify=false
tls-client-cert-path = /etc/pki/entitlement/12345.pem
tls-client-key-path = /etc/pki/entitlement/12345-key.pem
[remote "awesomeos-7-controller"]
url=http://awesome.example.com.not.real/repo/awesomeos7/
gpg-verify=false
tls-client-cert-path = /etc/pki/entitlement/12345.pem
tls-client-key-path = /etc/pki/entitlement/12345-key.pem
"""

    @mock.patch('subscription_manager.plugin.ostree.config.RepoFile._get_config_parser')
    def test_remote_sections(self, mock_get_config_parser):
        mock_get_config_parser.return_value = self._rf_cfg()
        rf = config.RepoFile('')
        remotes = rf.remote_sections()
        # the duplicated section still shows up (possibly squashed to one)
        self.assertTrue('remote "awesomeos-7-controller"' in remotes)
        self.assertFalse('core' in remotes)
        self.assertFalse('rhsm' in remotes)
        for remote in remotes:
            self._verify_remote(self._rf_cfg_instance, remote)
class TestOstreeRepofileAddSectionWrite(BaseOstreeRepoFileTest):
    """Add a new remote section and persist it through save()."""
    repo_cfg = ""

    def test_add_remote(self):
        fid = self.write_tempfile(self.repo_cfg)
        rf_cfg = config.KeyFileConfigParser(fid.name)
        remote_name = 'remote "awesomeos-8-container"'
        url = "https://example.com.not.real/repo"
        gpg_verify = "true"
        rf_cfg.add_section(remote_name)
        self.assertTrue(rf_cfg.has_section(remote_name))
        rf_cfg.save()
        # re-read the raw file after each save to verify persistence
        new_contents = open(fid.name, 'r').read()
        self.assertTrue('awesomeos-8' in new_contents)
        rf_cfg.set(remote_name, 'url', url)
        rf_cfg.save()
        new_contents = open(fid.name, 'r').read()
        self.assertTrue(url in new_contents)
        rf_cfg.set(remote_name, 'gpg-verify', gpg_verify)
        rf_cfg.save()
        new_contents = open(fid.name, 'r').read()
        self.assertTrue('gpg-verify' in new_contents)
        self.assertTrue(gpg_verify in new_contents)
        # the keyfile writer emits 'key = value' with spaces
        self.assertTrue('gpg-verify = true' in new_contents)
        # and a fresh parser round-trips the saved values
        new_rf_cfg = config.KeyFileConfigParser(fid.name)
        self.assertTrue(new_rf_cfg.has_section(remote_name))
        self.assertEquals(new_rf_cfg.get(remote_name, 'url'), url)
class TestOstreeRepoFileRemoveSectionSave(BaseOstreeRepoFileTest):
    """Remove a remote section and verify it is gone after save()."""
    repo_cfg = """
[core]
repo_version=1
mode=bare
[remote "awesomeos-7-controller"]
url = https://awesome.example.com.not.real/repo/awesomeos7/
gpg-verify = false
[remote "awesomeos-6-controller"]
url = https://awesome.example.com.not.real/repo/awesomeos6/
gpg-verify = true
"""

    def test_remove_section(self):
        fid = self.write_tempfile(self.repo_cfg)
        rf_cfg = config.KeyFileConfigParser(fid.name)
        remote_to_remove = 'remote "awesomeos-7-controller"'
        self.assertTrue(rf_cfg.has_section(remote_to_remove))
        rf_cfg.remove_section(remote_to_remove)
        self.assertFalse(rf_cfg.has_section(remote_to_remove))
        rf_cfg.save()
        self.assertFalse(rf_cfg.has_section(remote_to_remove))
        # the raw file must no longer mention the removed remote
        new_contents = open(fid.name, 'r').read()
        self.assertFalse(remote_to_remove in new_contents)
        # only the removed remote had 'gpg-verify = false' in this sample
        self.assertFalse('gpg-verify = false' in new_contents)
        new_rf_cfg = config.KeyFileConfigParser(fid.name)
        self.assertFalse(new_rf_cfg.has_section(remote_to_remove))
class TestOsTreeContents(fixture.SubManFixture):
    """find_content filtering of ostree vs. other content types."""

    def create_content(self, content_type, name):
        """ Create dummy entitled content object. """
        content = certificate2.Content(
            content_type=content_type,
            name="mock_content_%s" % name,
            label=name,
            enabled=True,
            required_tags=[],
            gpg="path/to/gpg",
            url="http://mock.example.com/%s/" % name)
        return EntitlementCertContent.from_cert_content(content)

    def test_ent_source(self):
        # only the ostree content should be found, not the yum one
        yc = self.create_content("yum", "yum_content")
        oc = self.create_content("ostree", "ostree_content")
        ent1 = Entitlement(contents=[yc])
        ent2 = Entitlement(contents=[oc])
        ent_src = EntitlementSource()
        ent_src._entitlements = [ent1, ent2]
        contents = find_content(ent_src,
                                content_type=action_invoker.OSTREE_CONTENT_TYPE)
        self.assertEquals(len(contents), 1)
        for content in contents:
            self.assertEquals(content.content_type,
                              action_invoker.OSTREE_CONTENT_TYPE)

    def test_ent_source_product_tags(self):
        yc = self.create_content("yum", "yum_content")
        oc = self.create_content("ostree", "ostree_content")
        ent1 = Entitlement(contents=[yc])
        ent2 = Entitlement(contents=[oc])
        ent_src = EntitlementSource()
        ent_src._entitlements = [ent1, ent2]
        # faux product_tags to hit find_content, but no content tags
        ent_src.product_tags = ['awesomeos-ostree-1', 'awesomeos-ostree-super']
        contents = find_content(ent_src,
                                content_type=action_invoker.OSTREE_CONTENT_TYPE)
        self.assertEquals(len(contents), 1)
        for content in contents:
            self.assertEquals(content.content_type,
                              action_invoker.OSTREE_CONTENT_TYPE)

    def test_ent_source_product_tags_and_content_tags(self):
        oc = self.create_content("ostree", "ostree_content")
        oc.tags = ['awesomeos-ostree-1']
        ent = Entitlement(contents=[oc])
        ent_src = EntitlementSource()
        ent_src._entitlements = [ent]
        # faux product_tags to hit find_content, but no content tags
        ent_src.product_tags = ['awesomeos-ostree-1', 'awesomeos-ostree-super']
        contents = find_content(ent_src,
                                content_type=action_invoker.OSTREE_CONTENT_TYPE)
        # (removed a leftover debugging print of 'contents' here)
        self.assertEquals(len(contents), 1)
        for content in contents:
            self.assertEquals(content.content_type,
                              action_invoker.OSTREE_CONTENT_TYPE)
class TestContentUpdateActionReport(fixture.SubManFixture):
    """Tests for OstreeContentUpdateActionReport."""

    def test_empty(self):
        """A fresh report starts with no remote updates."""
        report = action_invoker.OstreeContentUpdateActionReport()
        self.assertEquals(report.remote_updates, [])

    def test_print_empty(self):
        """Even an empty report renders to non-empty text."""
        report = action_invoker.OstreeContentUpdateActionReport()
        rendered = "%s" % report
        self.assertTrue(rendered != "")

    def test_updates_empty(self):
        """updates() counts zero changes on a fresh report."""
        report = action_invoker.OstreeContentUpdateActionReport()
        self.assertEquals(report.updates(), 0)

    def test_report(self):
        """A report holding one updated remote still renders to text."""
        remote = model.OstreeRemote()
        remote.url = "http://example.com"
        remote.name = "example-remote"
        remote.gpg_verify = "true"
        remotes = model.OstreeRemotes()
        remotes.add(remote)
        report = action_invoker.OstreeContentUpdateActionReport()
        report.remote_updates = remotes
        rendered = "%s" % report
        # FIXME: validate format
        self.assertTrue(rendered != "")
class TestOstreeContentUpdateActionCommand(fixture.SubManFixture):
    """Tests for OstreeContentUpdateActionCommand.perform()."""
    repo_cfg = """
[core]
repo_version=1
mode=bare
[remote "awesome-ostree-controller"]
url=http://awesome.example.com.not.real/
branches=awesome-ostree-controller/awesome7/x86_64/controller/docker;
gpg-verify=true
[remote "another-awesome-ostree-controller"]
url=http://another-awesome.example.com.not.real/
branches=another-awesome-ostree-controller/awesome7/x86_64/controller/docker;
gpg-verify=true
"""

    @mock.patch("subscription_manager.plugin.ostree.model.OstreeConfigFileStore")
    def test_empty(self, mock_file_store):
        # FIXME: This does no validation
        mock_repo_file = mock.Mock()
        mock_repo_file.get_core.return_value = {}
        mock_file_store.load.return_value = mock_repo_file
        ent_src = EntitlementSource()
        action = action_invoker.OstreeContentUpdateActionCommand(
            ent_source=ent_src)
        # avoid touching the deployed origin file during the test
        action.update_origin_file = mock.Mock()
        action.perform()
| nguyenfilip/subscription-manager | test/test_ostree_content_plugin.py | Python | gpl-2.0 | 41,650 |
# -*- coding: utf-8 -*-
from openprocurement.tender.core.utils import optendersresource
from openprocurement.tender.openeu.views.complaint_document import TenderEUComplaintDocumentResource
# ESCO subclasses the EU complaint-document resource without overriding
# anything; only the route name, paths and procurementMethodType differ.
@optendersresource(name='esco:Tender Complaint Documents',
                   collection_path='/tenders/{tender_id}/complaints/{complaint_id}/documents',
                   path='/tenders/{tender_id}/complaints/{complaint_id}/documents/{document_id}',
                   procurementMethodType='esco',
                   description="Tender ESCO Complaint documents")
class TenderESCOComplaintDocumentResource(TenderEUComplaintDocumentResource):
    """ Tender ESCO Complaint Document Resource """
| openprocurement/openprocurement.tender.esco | openprocurement/tender/esco/views/complaint_document.py | Python | apache-2.0 | 688 |
#!/usr/bin/python
# android-build.py
# Build android
import sys
import os, os.path
import shutil
from optparse import OptionParser
CPP_SAMPLES = ['cpp-empty-test', 'cpp-tests']
LUA_SAMPLES = ['lua-empty-test', 'lua-tests']
ALL_SAMPLES = CPP_SAMPLES + LUA_SAMPLES
def get_num_of_cpu():
    ''' The build process can be accelerated by running multiple concurrent job processes using the -j-option.

    Returns the number of CPUs usable for parallel ndk-build jobs, or 1
    when the count cannot be determined.
    '''
    try:
        # multiprocessing is in the standard library (Python >= 2.6) and is
        # portable, unlike the previous numpy.distutils.cpuinfo probe which
        # dragged in numpy just to count CPUs.
        import multiprocessing
        return multiprocessing.cpu_count()
    except Exception:
        print("Can't know cpuinfo, use default 1 cpu")
        return 1
def check_environment_variables():
    ''' Checking the environment NDK_ROOT, which will be used for building.

    Returns the NDK_ROOT value; aborts the script when it is not set.
    '''
    ndk_root = os.environ.get('NDK_ROOT')
    if ndk_root is None:
        print("NDK_ROOT not defined. Please define NDK_ROOT in your environment")
        sys.exit(1)
    return ndk_root
def check_environment_variables_sdk():
    ''' Checking the environment ANDROID_SDK_ROOT, which will be used for building.

    Returns the ANDROID_SDK_ROOT value; aborts the script when it is not set.
    '''
    sdk_root = os.environ.get('ANDROID_SDK_ROOT')
    if sdk_root is None:
        print("ANDROID_SDK_ROOT not defined. Please define ANDROID_SDK_ROOT in your environment")
        sys.exit(1)
    return sdk_root
def select_toolchain_version():
    '''Because ndk-r8e uses gcc4.6 as default. gcc4.6 doesn't support c++11. So we should select gcc4.7 when
    using ndk-r8e. But gcc4.7 is removed in ndk-r9, so we should determine whether gcc4.7 exist.
    Conclution:
        ndk-r8e -> use gcc4.7
        ndk-r9  -> use gcc4.8
    '''
    ndk_root = check_environment_variables()
    # Prefer the newest available gcc; fall back to 4.7 for ndk-r8e.
    for version in ('4.8', '4.7'):
        toolchain_dir = os.path.join(ndk_root,
                                     "toolchains/arm-linux-androideabi-" + version)
        if os.path.isdir(toolchain_dir):
            os.environ['NDK_TOOLCHAIN_VERSION'] = version
            print("The Selected NDK toolchain version was %s !" % version)
            return
    print("Couldn't find the gcc toolchain.")
    exit(1)
def caculate_built_samples(args):
    ''' Compute the sampels to be built
    'cpp' for short of all cpp tests
    'lua' for short of all lua tests

    Returns a new list of unique target names.  The caller's *args* list is
    left untouched (the old implementation removed elements from it and, for
    'all', handed back the module-level ALL_SAMPLES list itself, letting
    callers mutate the shared constant).
    '''
    if 'all' in args:
        return list(ALL_SAMPLES)  # copy: never expose the module-level list
    remaining = list(args)  # work on a copy so the caller's list survives
    targets = []
    if 'cpp' in remaining:
        targets += CPP_SAMPLES
        remaining.remove('cpp')
    if 'lua' in remaining:
        targets += LUA_SAMPLES
        remaining.remove('lua')
    targets += remaining
    # remove duplicate elements, for example
    # python android-build.py cpp hellocpp
    return list(set(targets))
def do_build(cocos_root, ndk_root, app_android_root, ndk_build_param,sdk_root,android_platform,build_mode):
    # Build the native library with ndk-build; when an android platform was
    # requested, also update the SDK projects and build the apk with ant.
    ndk_path = os.path.join(ndk_root, "ndk-build")
    # windows should use ";" to seperate module paths
    platform = sys.platform
    if platform == 'win32':
        ndk_module_path = 'NDK_MODULE_PATH=%s;%s/external;%s/cocos' % (cocos_root, cocos_root, cocos_root)
    else:
        ndk_module_path = 'NDK_MODULE_PATH=%s:%s/external:%s/cocos' % (cocos_root, cocos_root, cocos_root)
    num_of_cpu = get_num_of_cpu()
    # -j<N> parallelizes the native build across the detected CPUs.
    if ndk_build_param == None:
        command = '%s -j%d -C %s %s' % (ndk_path, num_of_cpu, app_android_root, ndk_module_path)
    else:
        command = '%s -j%d -C %s %s %s' % (ndk_path, num_of_cpu, app_android_root, ndk_build_param, ndk_module_path)
    print command
    if os.system(command) != 0:
        raise Exception("Build dynamic library for project [ " + app_android_root + " ] fails!")
    elif android_platform is not None:
        # Point the cocos java library project and the app project at the
        # requested platform, then produce the apk via ant.
        sdk_tool_path = os.path.join(sdk_root, "tools/android")
        cocoslib_path = os.path.join(cocos_root, "cocos/platform/android/java")
        command = '%s update lib-project -t %s -p %s' % (sdk_tool_path,android_platform,cocoslib_path)
        if os.system(command) != 0:
            raise Exception("update cocos lib-project [ " + cocoslib_path + " ] fails!")
        command = '%s update project -t %s -p %s -s' % (sdk_tool_path,android_platform,app_android_root)
        if os.system(command) != 0:
            raise Exception("update project [ " + app_android_root + " ] fails!")
        buildfile_path = os.path.join(app_android_root, "build.xml")
        command = 'ant clean %s -f %s -Dsdk.dir=%s' % (build_mode,buildfile_path,sdk_root)
        # NOTE(review): the ant exit status is ignored here, so a failed apk
        # build does not abort the script.
        os.system(command)
def copy_files(src, dst):
    '''Recursively copy the contents of *src* into the existing directory *dst*.

    Hidden files (leading ".") and files ending with ".gz" are skipped,
    because Android can not package files that end with ".gz".
    Subdirectories are created fresh inside *dst* and copied recursively.
    '''
    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        if os.path.isdir(src_path):
            dst_subdir = os.path.join(dst, entry)
            os.mkdir(dst_subdir)
            copy_files(src_path, dst_subdir)
        elif (not entry.startswith('.') and not entry.endswith('.gz')
              and os.path.isfile(src_path)):
            shutil.copy(src_path, dst)
def copy_resources(target, app_android_root):
    '''Populate <app_android_root>/assets with the resources *target* needs.

    cpp samples copy ../Resources; lua samples additionally copy their lua
    sources, the shared binding scripts and the luasocket lua files.
    '''
    # remove app_android_root/assets if it exists
    assets_dir = os.path.join(app_android_root, "assets")
    if os.path.isdir(assets_dir):
        shutil.rmtree(assets_dir)
    os.mkdir(assets_dir)
    # copy resources(cpp samples)
    if target in CPP_SAMPLES:
        resources_dir = os.path.join(app_android_root, "../Resources")
        if os.path.isdir(resources_dir):
            copy_files(resources_dir, assets_dir)
    # lua samples should copy lua script
    if target in LUA_SAMPLES:
        resources_dir = os.path.join(app_android_root, "../../res")
        assets_res_dir = os.path.join(assets_dir, "res")
        os.mkdir(assets_res_dir)
        # lua-tests borrows its res/ from cpp-tests instead (handled below).
        if target != "lua-tests":
            copy_files(resources_dir, assets_res_dir)
        src_dir = os.path.join(app_android_root, "../../src")
        assets_src_dir = os.path.join(assets_dir, "src")
        os.mkdir(assets_src_dir)
        copy_files(src_dir, assets_src_dir)
        # shared lua-binding helper scripts go directly into assets/
        common_script_dir = os.path.join(app_android_root, "../../../../cocos/scripting/lua-bindings/script")
        copy_files(common_script_dir, assets_dir)
        # flatten every *.lua from the luasocket tree into assets/
        luasocket_script_dir = os.path.join(app_android_root, "../../../../external/lua/luasocket")
        for root, dirs, files in os.walk(luasocket_script_dir):
            for f in files:
                if os.path.splitext(f)[1] == '.lua':
                    fall = os.path.join(root, f)
                    shutil.copy(fall, assets_dir)
        # lua-tests shared resources with cpp-tests
        if target == "lua-tests":
            resources_cocosbuilder_res_dir = os.path.join(resources_dir, "cocosbuilderRes")
            assets_cocosbuilder_res_dir = os.path.join(assets_res_dir, "cocosbuilderRes")
            os.mkdir(assets_cocosbuilder_res_dir)
            copy_files(resources_cocosbuilder_res_dir, assets_cocosbuilder_res_dir)
            resources_dir = os.path.join(app_android_root, "../../../cpp-tests/Resources")
            copy_files(resources_dir, assets_res_dir)
def build_samples(target,ndk_build_param,android_platform,build_mode):
    '''Resolve the requested targets, copy their resources and build each one.

    *target* is the raw argument list from the command line; the other three
    parameters come straight from the parsed options.
    '''
    ndk_root = check_environment_variables()
    sdk_root = None
    select_toolchain_version()
    build_targets = caculate_built_samples(target)
    current_dir = os.path.dirname(os.path.realpath(__file__))
    cocos_root = os.path.join(current_dir, "..")
    if android_platform is not None:
        # An SDK is only required when an apk is to be produced.
        sdk_root = check_environment_variables_sdk()
        if android_platform.isdigit():
            android_platform = 'android-'+android_platform
        else:
            print 'please use vaild android platform'
            exit(1)
    # Anything other than 'release' falls back to a debug build.
    if build_mode is None:
        build_mode = 'debug'
    elif build_mode != 'release':
        build_mode = 'debug'
    app_android_root = ''
    target_proj_path_map = {
        "cpp-empty-test": "tests/cpp-empty-test/proj.android",
        "cpp-tests": "tests/cpp-tests/proj.android",
        "lua-empty-test": "tests/lua-empty-test/project/proj.android",
        "lua-tests": "tests/lua-tests/project/proj.android"
    }
    for target in build_targets:
        if target in target_proj_path_map:
            app_android_root = os.path.join(cocos_root, target_proj_path_map[target])
        else:
            # Unknown names are reported but do not abort the other builds.
            print 'unknown target: %s' % target
            continue
        copy_resources(target, app_android_root)
        do_build(cocos_root, ndk_root, app_android_root, ndk_build_param,sdk_root,android_platform,build_mode)
# -------------- main --------------
if __name__ == '__main__':
    # Parse command-line options and dispatch to build_samples().
    usage = """
This script is mainy used for building tests built-in with cocos2d-x.
Usage: %prog [options] [cpp-empty-test|cpp-tests|lua-empty-test|lua-tests|cpp|lua|all]
If you are new to cocos2d-x, I recommend you start with cpp-empty-test, lua-empty-test.
You can combine these targets like this:
python android-build.py -p 10 cpp-empty-test lua-empty-test
Note: You should install ant to generate apk while building the andriod tests. But it is optional. You can generate apk with eclipse.
"""
    parser = OptionParser(usage=usage)
    parser.add_option("-n", "--ndk", dest="ndk_build_param",
                      help='Parameter for ndk-build')
    parser.add_option("-p", "--platform", dest="android_platform",
                      help='Parameter for android-update. Without the parameter,the script just build dynamic library for the projects. Valid android-platform are:[10|11|12|13|14|15|16|17|18|19]')
    parser.add_option("-b", "--build", dest="build_mode",
                      help='The build mode for java project,debug[default] or release. Get more information,please refer to http://developer.android.com/tools/building/building-cmdline.html')
    (opts, args) = parser.parse_args()
    if len(args) == 0:
        # No targets given: show usage and fail.
        parser.print_help()
        sys.exit(1)
    else:
        try:
            build_samples(args, opts.ndk_build_param,opts.android_platform,opts.build_mode)
        except Exception as e:
            print e
            sys.exit(1)
| cmdwin32/tileMapHomework | tillmap/cocos2d/build/android-build.py | Python | unlicense | 10,110 |
#!/usr/bin/env python
"""Packaging script for bottle (single-module WSGI framework)."""

import sys

# Prefer setuptools; fall back to plain distutils when it is unavailable.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

if sys.version_info < (2, 6):
    raise NotImplementedError("Sorry, you need at least Python 2.6 or Python 3.2+ to use bottle.")

# Metadata (version, docstring, author) is read from the module itself so it
# is defined in exactly one place.
import bottle

setup(name='bottle',
      version=bottle.__version__,
      description='Fast and simple WSGI-framework for small web-applications.',
      long_description=bottle.__doc__,
      author=bottle.__author__,
      author_email='marc@gsites.de',
      url='http://bottlepy.org/',
      py_modules=['bottle'],
      scripts=['bottle.py'],
      license='MIT',
      platforms='any',
      classifiers=['Development Status :: 4 - Beta',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: MIT License',
                   'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
                   'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
                   'Topic :: Internet :: WWW/HTTP :: WSGI',
                   'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
                   'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware',
                   'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
                   'Topic :: Software Development :: Libraries :: Application Frameworks',
                   'Programming Language :: Python :: 2.5',
                   'Programming Language :: Python :: 2.6',
                   'Programming Language :: Python :: 2.7',
                   'Programming Language :: Python :: 3',
                   'Programming Language :: Python :: 3.2',
                   'Programming Language :: Python :: 3.3',
                   ],
      )
| taisa007/bottle-ja | setup.py | Python | mit | 1,742 |
"""
Unit tests for the NeuroTools.signals module
"""
import matplotlib
matplotlib.use('Agg')
from NeuroTools import io
import NeuroTools.signals.spikes as spikes
import NeuroTools.signals.analogs as analogs
from NeuroTools.signals.pairs import *
import numpy, unittest, os
from NeuroTools.__init__ import check_numpy_version, check_dependency
newnum = check_numpy_version()
ENABLE_PLOTS = check_dependency('pylab')
if ENABLE_PLOTS:
import pylab
def arrays_are_equal(a, b):
    """Return True when *a* and *b* hold the same elements, ignoring order.

    Works for plain Python sequences (``==`` yields a bool) and for numpy
    arrays (``==`` yields an elementwise array, reduced with ``.all()``).
    Both arguments are sorted in place, i.e. they are mutated.
    """
    a.sort()
    b.sort()
    comparison = a == b
    if isinstance(comparison, bool):
        return comparison
    return comparison.all()  # numpy array of elementwise comparisons
class SpikeTrainTest(unittest.TestCase):
    """Unit tests for spikes.SpikeTrain: construction, slicing and statistics.

    Several tests generate Poisson spike trains by cumulatively summing
    exponential inter-spike intervals (rate = 1/param), scaled by 1000 to
    convert seconds to ms; the statistical assertions therefore use loose
    bounds around the expected rate.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testCreateSpikeTrain(self):
        spk = spikes.SpikeTrain(numpy.arange(0,110,10))
        assert (spk.t_start == 0) and (spk.t_stop == 100)
        self.assert_( arrays_are_equal(spk.spike_times, numpy.arange(0,110,10)) )
    def testCreateSpikeTrainFromList(self):
        spk = spikes.SpikeTrain(range(0,110,10))
        assert (spk.t_start == 0) and (spk.t_stop == 100)
        self.assert_( arrays_are_equal(spk.spike_times, numpy.arange(0,110,10)) )
    def testCreateSpikeTrainFull(self):
        spk = spikes.SpikeTrain(numpy.arange(0,110,10), 0, 100)
        assert (spk.t_start == 0) and (spk.t_stop == 100)
    def testCreateWithTStartOnly(self):
        # Spikes earlier than t_start must be discarded.
        spk = spikes.SpikeTrain(numpy.arange(0,110,10), t_start=20)
        assert (spk.t_start == 20) and (spk.t_stop == 100)
        assert arrays_are_equal( spk.spike_times, numpy.arange(20, 110, 10) )
    def testCreateWithTStopOnly(self):
        # Spikes later than t_stop must be discarded.
        spk = spikes.SpikeTrain(numpy.arange(0,110,10), t_stop=70)
        assert (spk.t_start == 0) and (spk.t_stop == 70)
        assert arrays_are_equal( spk.spike_times, numpy.arange(0, 80, 10) )
    def testCreateSpikeSmallWrongTimes(self):
        # t_start > t_stop is rejected.
        self.assertRaises(Exception, spikes.SpikeTrain, numpy.arange(0,110,10), 20, 10)
    def testCreateSpikeTrainNegativeTstart(self):
        self.assertRaises(ValueError, spikes.SpikeTrain, numpy.arange(0,110,10), -20, 10)
    def testCreateSpikeTrainNegativeSpikeTime(self):
        self.assertRaises(ValueError, spikes.SpikeTrain, numpy.arange(-100,110,10))
    def testCreateWithInvalidValuesInList(self):
        self.assertRaises(ValueError, spikes.SpikeTrain, [0.0, "elephant", 0.3, -0.6, 0.15])
    def testCopy(self):
        spk = spikes.SpikeTrain(numpy.arange(0,110,10), 0, 100)
        spk2 = spk.copy()
        assert spk.is_equal(spk2)
    def testDuration(self):
        spk = spikes.SpikeTrain(numpy.arange(0,110,10), 0, 100)
        assert spk.duration() == 100
    def testMerge(self):
        spk = spikes.SpikeTrain(numpy.arange(0,110,10))
        spk2 = spikes.SpikeTrain(numpy.arange(100,210,10))
        spk.merge(spk2)
        assert (spk.t_stop == 200) and (len(spk) == 22)
    def testTimeAxis(self):
        # Histogram edge count differs between old and new numpy versions.
        spk = spikes.SpikeTrain(numpy.arange(0,1010,10))
        if newnum:
            assert len(spk.time_axis(100)) == 11
        else:
            assert len(spk.time_axis(100)) == 10
    def testAddOffset(self):
        spk = spikes.SpikeTrain(numpy.arange(0,1010,10))
        spk.time_offset(50)
        assert (spk.t_start == 50) and (spk.t_stop == 1050) and numpy.all(spk.spike_times == numpy.arange(50,1060,10))
    def testTime_Slice(self):
        spk1 = spikes.SpikeTrain(numpy.arange(0,1010,10))
        spk1 = spk1.time_slice(250, 750)
        assert len(numpy.extract((spk1.spike_times < 250) | (spk1.spike_times > 750), spk1.spike_times)) == 0
        spk2 = spikes.SpikeTrain([0.0, 0.1, 0.3, 0.6, 0.15])
        self.assert_( arrays_are_equal(spikes.SpikeTrain([0.15, 0.3]).spike_times,
                                       spk2.time_slice(0.11,0.4).spike_times) ) # should not include 0.1
        self.assert_( arrays_are_equal(spikes.SpikeTrain([0.1, 0.15, 0.3]).spike_times,
                                       spk2.time_slice(0.10,0.4).spike_times) ) # should include 0.1
    def testIsi(self):
        # Regularly spaced spikes -> constant inter-spike interval.
        spk = spikes.SpikeTrain(numpy.arange(0,200,10))
        assert numpy.all(spk.isi() == 10)
    def testMeanRate(self):
        poisson_param = 1./40
        isi = numpy.random.exponential(poisson_param, 1000)
        poisson_times = numpy.cumsum(isi)*1000. # To convert the spikes_time in ms
        spk = spikes.SpikeTrain(poisson_times)
        assert 35 < spk.mean_rate() < 45
    # def testMeanRateParams(self):
    #     # TODO: Failing
    #     poisson_param = 1./40
    #     isi = numpy.random.exponential(poisson_param, 1000)
    #     poisson_times = numpy.cumsum(isi)*1000. # To convert the spikes_time in ms
    #     spk1 = spikes.SpikeTrain(poisson_times,t_start=0,t_stop=5000)
    #     spk2 = spikes.SpikeTrain(range(10), t_stop=10)
    #     assert 30 < spk1.mean_rate() < 50
    #     self.assertEqual(spk2.mean_rate(), 1000.0)
    #     self.assertAlmostEqual(spk2.mean_rate(t_stop=4.99999999999), 1000.0, 6)
    #     self.assertEqual(spk2.mean_rate(t_stop=5.0), 1200.0)
    def testCvIsi(self):
        # CV of ISIs ~ 1 for a Poisson process, 0 for a perfectly regular one.
        poisson_param = 1./40
        isi = numpy.random.exponential(poisson_param, 1000)
        poisson_times = numpy.cumsum(isi)*1000. # To convert the spikes_time in ms
        spk1 = spikes.SpikeTrain(poisson_times)
        spk2 = spikes.SpikeTrain(range(10), t_stop=10)
        assert 0.9 < spk1.cv_isi() < 1.1
        self.assertEqual(spk2.cv_isi(), 0)
    # def testCvKL(self):
    #     # TODO: failing
    #     poisson_param = 1./10 # 1 / firing_frequency
    #     isi = numpy.random.exponential(poisson_param, 1000)
    #     poisson_times = numpy.cumsum(isi)*1000. # To convert the spikes_time in ms
    #     spk1 = spikes.SpikeTrain(poisson_times)
    #     assert 0.9 < spk1.cv_kl(bins = 1000) < 1.1
    #     # does not depend on bin size
    #     assert 0.9 < spk1.cv_kl(bins = 100) < 1.1
    #     # does not depend on time
    #     poisson_param = 1./4
    #     isi = numpy.random.exponential(poisson_param, 1000)
    #     poisson_times = numpy.cumsum(isi)*1000. # To convert the spikes_time in ms
    #     spk1 = spikes.SpikeTrain(poisson_times)
    #     assert 0.9 < spk1.cv_kl() < 1.1
    #     spk2 = spikes.SpikeTrain(range(10), t_stop=10)
    #     self.assertEqual(spk2.cv_isi(), 0)
    def testHistogram(self):
        poisson_param = 1./40
        isi = numpy.random.exponential(poisson_param, 1000)
        poisson_times = numpy.cumsum(isi)*1000. # To convert the spikes_time in ms
        spk = spikes.SpikeTrain(poisson_times)
        hist = spk.time_histogram(5000)
        N = len(hist) - 1
        assert numpy.all((30 < hist[0:N]) & (hist[0:N] < 60))
    def testVictorPurpuraDistance(self):
        # Distance between similar-rate trains < distance to a faster train;
        # distance of a train to itself is 0.
        poisson_param = 1./40
        isi = numpy.random.exponential(poisson_param, 20)
        poisson_times = numpy.cumsum(isi)*1000. # To convert the spikes_time in ms
        spk = spikes.SpikeTrain(poisson_times)
        isi = numpy.random.exponential(poisson_param, 20)
        poisson_times = numpy.cumsum(isi)*1000. # To convert the spikes_time in ms
        spk2 = spikes.SpikeTrain(poisson_times)
        poisson_param = 1./5
        isi = numpy.random.exponential(poisson_param, 20)
        poisson_times = numpy.cumsum(isi)*1000. # To convert the spikes_time in ms
        spk3 = spikes.SpikeTrain(poisson_times)
        assert (spk.distance_victorpurpura(spk2,0.1) < spk.distance_victorpurpura(spk3,0.1)) \
           and (spk.distance_victorpurpura(spk, 0.1) == 0)
    def testKreuzDistance(self):
        # Same structure as the Victor-Purpura test, for the Kreuz distance.
        poisson_param = 1./40
        isi = numpy.random.exponential(poisson_param, 20)
        poisson_times = numpy.cumsum(isi)*1000. # To convert the spikes_time in ms
        spk = spikes.SpikeTrain(poisson_times)
        isi = numpy.random.exponential(poisson_param, 20)
        poisson_times = numpy.cumsum(isi)*1000. # To convert the spikes_time in ms
        spk2 = spikes.SpikeTrain(poisson_times)
        poisson_param = 1./5
        isi = numpy.random.exponential(poisson_param, 20)
        poisson_times = numpy.cumsum(isi)*1000. # To convert the spikes_time in ms
        spk3 = spikes.SpikeTrain(poisson_times)
        assert (spk.distance_kreuz(spk2) < spk.distance_kreuz(spk3)) and (spk.distance_kreuz(spk) == 0)
    def testFanoFactorIsi(self):
        # A perfectly regular train has zero ISI variance.
        spk = spikes.SpikeTrain(numpy.arange(0,1010,10))
        assert spk.fano_factor_isi() == 0.
class SpikeListTest(unittest.TestCase):
    """Unit tests for spikes.SpikeList: container behavior, statistics and I/O.

    setUp builds 10 Poisson trains of ~10 Hz each (exponential ISIs,
    cumulatively summed and scaled to ms); statistical assertions use loose
    bounds around that rate.  Several I/O tests write temporary files
    (tmp.txt / tmp.pickle) into the working directory.
    """
    def setUp(self):
        self.spikes = []
        nb_cells = 10
        frequencies = [nb_cells for _ in xrange(10)]
        for idx in xrange(nb_cells):
            param = 1. / frequencies[idx]
            isi = numpy.random.exponential(param, 1000)
            pspikes = numpy.cumsum(isi) * 1000. # convert to ms
            for spike in pspikes:
                self.spikes.append((idx, spike))
        self.spk = spikes.SpikeList(self.spikes, range(10))
    def tearDown(self):
        pass
    def testCreateSpikeList(self):
        assert len(self.spk) == 10
        assert numpy.all(self.spk.id_list == numpy.arange(10))
    def testGetItem(self):
        assert isinstance(self.spk[0], spikes.SpikeTrain)
    def testSetItemWrongType(self):
        # Only SpikeTrain instances may be stored.
        self.assertRaises(Exception, self.spk.__setitem__,
                          0, numpy.arange(100))
    def testSetItem(self):
        spktrain = spikes.SpikeTrain(numpy.arange(10))
        self.spk[11] = spktrain
        assert len(self.spk) == 11
    def testGetSlice(self):
        assert len(self.spk[0:5]) == 5
    def testAppend(self):
        # Appending to an already-used id must fail.
        spktrain = spikes.SpikeTrain(numpy.arange(10))
        self.assertRaises(Exception, self.spk.append, 0, spktrain)
    def testConcatenate(self):
        # Concatenating a list with overlapping ids must fail.
        self.assertRaises(Exception, self.spk.concatenate, self.spk)
    def testMerge(self):
        spk2 = spikes.SpikeList(self.spikes, range(50,60))
        self.spk.merge(spk2)
        assert len(self.spk) == 20
    def testId_SliceInt(self):
        assert len(self.spk.id_slice(5)) == 5
    def testCopy(self):
        spk2 = self.spk.copy()
        assert len(spk2) == len(self.spk) and (spk2[0].is_equal(self.spk[0]))
    def testId_SliceList(self):
        assert numpy.all(self.spk.id_slice([0,1,2,3]).id_list == [0,1,2,3])
    def testTime_Slice(self):
        spk = spikes.SpikeList(self.spikes,range(10), t_start=0)
        new_spk = spk.time_slice(0, 1000.)
        assert (new_spk.t_start == spk.t_start) and (new_spk.t_stop == 1000.)
    def testAddOffset(self):
        spk2 = self.spk.time_slice(0,1000)
        spk2.time_offset(100)
        assert (spk2.t_start == 100) and (spk2.t_stop == 1100)
    def testFirstSpikeTime(self):
        assert self.spk.first_spike_time() >= self.spk.t_start
    def testLastSpikeTime(self):
        assert self.spk.last_spike_time() <= self.spk.t_stop
    def testSelect_Ids(self):
        # One slow cell (5 Hz) among two fast (40 Hz): the rate predicate
        # should select exactly that one.
        spks = []
        nb_cells = 3
        frequencies = [5, 40, 40]
        for idx in xrange(nb_cells):
            param = 1. / frequencies[idx]
            isi = numpy.random.exponential(param, 100)
            pspikes = numpy.cumsum(isi) * 1000. # convert to ms
            for spike in pspikes:
                spks.append((idx, spike))
        spk = spikes.SpikeList(spks,range(3),0,1000)
        assert len(spk.select_ids("cell.mean_rate() < 20")) == 1
    def testIsis(self):
        pass
    def testCV_Isis(self):
        assert 0.8 < numpy.mean(self.spk.cv_isi()) < 1.2
    def testCVKL(self):
        assert 0.8 < numpy.mean(self.spk.cv_kl()) < 1.2
    def testCVLocal(self):
        assert 0.8 < self.spk.cv_local() < 1.2
    def testMeanRate(self):
        assert 5 < self.spk.mean_rate() < 15
    def testMeanRates(self):
        correct = True
        rates = self.spk.mean_rates()
        for idx in xrange(len(self.spk.id_list)):
            if not(5 < rates[idx] < 15):
                correct = False
        assert correct
    def testMeanRateStd(self):
        assert self.spk.mean_rate_std() >= 0
    def testMeanRateVarianceAndCovariance(self):
        # Covariance of a list with itself equals its variance.
        assert (abs(self.spk.mean_rate_variance(10) - self.spk.mean_rate_covariance(self.spk, 10)) < 0.01)
    def testSaveWrongFormat(self):
        self.assertRaises(Exception, self.spk.save, 2.3)
    def testSaveAndLoadTxt(self):
        self.spk.save("tmp.txt")
        spk2 = spikes.load_spikelist("tmp.txt")
        assert len(spk2) == len(self.spk)
    def testSaveAndLoadTxtTimePart(self):
        self.spk.save("tmp.txt")
        spk2 = spikes.load_spikelist("tmp.txt", t_start = 0, t_stop= 50)
        assert (spk2.t_start == 0) and (spk2.t_stop == 50)
    def testSaveAndLoadTxtIdsPart(self):
        self.spk.save("tmp.txt")
        spk2 = spikes.load_spikelist("tmp.txt", id_list=[1,2,3])
        assert numpy.all(spk2.id_list == [1,2,3])
    def testSaveAndLoadTxtIdsPartInt(self):
        # An integer id_list selects the first N ids.
        file = io.StandardTextFile("tmp.txt")
        self.spk.save(file)
        spk2 = spikes.load_spikelist(file, id_list=5)
        assert numpy.all(spk2.id_list == [0,1,2,3,4])
    def testSaveAndLoadPickle(self):
        file = io.StandardPickleFile("tmp.pickle")
        self.spk.save(file)
        spk2 = spikes.load_spikelist(file)
        assert len(spk2) == len(self.spk)
    def testSaveAndLoadPickleTimePart(self):
        file = io.StandardPickleFile("tmp.pickle")
        self.spk.save(file)
        spk2 = spikes.load_spikelist(file, t_start = 0, t_stop= 50)
        assert (spk2.t_start == 0) and (spk2.t_stop == 50)
    # def testSaveAndLoadPickleIdsPart(self):
    #     file = io.StandardPickleFile("tmp.pickle")
    #     self.spk.save(file)
    #     spk2 = spikes.load_spikelist(file, id_list=[1,2,3])
    #     assert numpy.all(spk2.id_list == [1,2,3])
    # def testSaveAndLoadPickleIdsPartInt(self):
    #     file = io.StandardPickleFile("tmp.pickle")
    #     self.spk.save(file)
    #     spk2 = spikes.load_spikelist(file, id_list=5)
    #     assert numpy.all(spk2.id_list == [0,1,2,3,4])
    def testPairwise_Pearson_CorrCoeff(self):
        x1,y1 = self.spk.pairwise_pearson_corrcoeff(10, time_bin=1.)
        assert x1 < y1
    def testRawData(self):
        data = self.spk.raw_data()
        assert (data.shape[0] > 0) and (data.shape[1] == 2)
    # def testVictorPurpuraDistance(self):
    #     # TODO: failing
    #     d_spike = self.spk.distance_victorpurpura(5, cost=0.2)
    #     d_rate = self.spk.distance_victorpurpura(5, cost=0.8)
    #     d_self = self.spk.distance_victorpurpura(10, cost = 0.5)
    #     assert (d_rate != d_spike) and d_self == 0
    # def testKreuzDistance(self):
    #     # TODO: failing
    #     d_self = self.spk.distance_kreuz(10)
    #     assert d_self == 0
    def testCrossCorrZero(self):
        # Auto-pairs must correlate more strongly than random pairs.
        cc1 = self.spk.pairwise_cc_zero(5, AutoPairs(self.spk, self.spk), time_bin=0.1)
        cc2 = self.spk.pairwise_cc_zero(5, RandomPairs(self.spk, self.spk), time_bin=0.1)
        assert (0 <= cc1 <= 1) and (cc1 > cc2)
    # def testFanoFactor(self):
    #     # TODO: failing
    #     assert 0.9 < self.spk.fano_factor(5) < 1.1
    def testIdOffset(self):
        self.spk.id_offset(100)
        assert numpy.all(self.spk.id_list == numpy.arange(100,110))
class LoadSpikeListTest(unittest.TestCase):
    """Tests for loading SpikeLists from disk (spikes.load_spikelist / load).

    setUp writes a 50-cell Poisson spike list to tmp2.txt in the working
    directory; each test re-loads that file with different filters.
    """
    def setUp(self):
        self.spikes=[]
        nb_cells = 50
        frequencies = numpy.random.uniform(0, 50, nb_cells)
        for idx in xrange(nb_cells):
            param = 1./frequencies[idx]
            isi = numpy.random.exponential(param, 100)
            pspikes = numpy.cumsum(isi)*1000. # To convert the spikes_time in ms
            for spike in pspikes:
                self.spikes.append((idx, spike))
        self.spk = spikes.SpikeList(self.spikes, range(nb_cells), 0, 4000)
        self.spk.save("tmp2.txt")
    def testLoadSpikeList(self):
        spk = spikes.load_spikelist("tmp2.txt")
        assert (len(spk) == 50) and (spk.mean_rate() > 0)
    def testLoadSpikeListWithIds(self):
        spk = spikes.load_spikelist("tmp2.txt", id_list=range(30,40))
        assert (len(spk) == 10) and (spk.mean_rate() > 0)
    def testLoadSpikeListWithTime(self):
        spk = spikes.load_spikelist("tmp2.txt", id_list=range(30,40), t_start=0, t_stop=100)
        assert (len(spk) == 10) and (spk.mean_rate() > 0) and (spk.time_parameters() == (0,100))
    def testLoadForSpikeList(self):
        # The generic load() entry point with 's' must behave like load_spikelist().
        spk = spikes.load("tmp2.txt",'s')
        assert (len(spk) == 50) and (spk.mean_rate() > 0)
# TODO: Evaluate if pyNN should be integrated, and how
# class PyNNInterface(unittest.TestCase):
# def setUp(self):
# if not os.path.exists("Simulation"):
# os.mkdir("Simulation")
# else:
# os.system("rm -rf Simulation/*.*")
# import pyNN.nest2 as pynn
# import pyNN.recording as rec
# pynn.setup()
# p1 = pynn.Population(10, pynn.IF_cond_exp)
# pynn.Projection(p1,p1, pynn.AllToAllConnector(weights=0.1))
# stim = pynn.Population(1, pynn.SpikeSourcePoisson)
# pynn.Projection(stim, p1, pynn.AllToAllConnector(weights=0.1))
# p2 = pynn.Population(10, pynn.IF_cond_exp)
# pynn.Projection(p1,p2, pynn.AllToAllConnector(weights=0.1))
# p1.record_v()
# p1.record()
# p1.record_c()
# p2.record_v()
# p2.record()
# p2.record_c()
# pynn.run(100)
# p1.printSpikes("Simulation/p1.spikes")
# p1.print_v("Simulation/p1.v")
# p1.print_c("Simulation/p1.c")
# p2.printSpikes("Simulation/p2.spikes")
# p2.print_v("Simulation/p2.v")
# p2.print_c("Simulation/p2.c")
# def testLoadFirstPopulationData(self):
# spks = spikes.load("Simulation/p1.spikes",'s')
# vm = spikes.load("Simulation/p1.v",'v')
# ge, gi = spikes.load("Simulation/p1.c",'g')
# assert len(spks) == 10 and len(vm) == 10 and len(ge) == 10 and len(gi) == 10
# def testLoadSecondPopulationData(self):
# spks = spikes.load("Simulation/p2.spikes",'s')
# vm = spikes.load("Simulation/p2.v",'v')
# ge, gi = spikes.load("Simulation/p2.c",'g')
# assert len(spks) == 10 and len(vm) == 10 and len(ge) == 10 and len(gi) == 10
class SpikeListGraphicTest(unittest.TestCase):
    """Smoke tests for SpikeList plotting methods.

    Each test renders onto the Agg backend (selected at module import) and
    writes a PNG/MPG into ./Plots; the assertions are implicit — the test
    passes when no plotting call raises.
    """
    def setUp(self):
        self.spikes=[]
        nb_cells = 50
        frequencies = numpy.random.uniform(0, 50, 50)
        for idx in xrange(nb_cells):
            param = 1./frequencies[idx]
            isi = numpy.random.exponential(param, 100)
            pspikes = numpy.cumsum(isi)*1000. # To convert the spikes_time in ms
            for spike in pspikes:
                self.spikes.append((idx, spike))
        self.spk = spikes.SpikeList(self.spikes, range(50), 0, 4000)
        # Output directory for the generated figures; may already exist.
        try:
            os.mkdir("Plots")
        except Exception:
            pass
    def testGraphics(self):
        self.spk.isi_hist(display=pylab.subplot(221), kwargs={'color':'red'})
        self.spk.cv_isi_hist(10, display=pylab.subplot(222))
        self.spk.rate_distribution(20, normalize=True, display=pylab.subplot(223), kwargs={})
        self.spk.firing_rate(100, display=pylab.subplot(224),kwargs={'linewidth':2})
        pylab.savefig("Plots/SpikeList_various.png")
        pylab.close()
    def testRasterPlots(self):
        self.spk.raster_plot(id_list = 30, display=pylab.subplot(221), kwargs={'color':'red'})
        self.spk.raster_plot(id_list = range(50), display=pylab.subplot(222))
        self.spk.raster_plot(t_start = 200, display=pylab.subplot(223), kwargs={})
        self.spk.raster_plot(t_stop = 1000,display=pylab.subplot(224),kwargs={'marker':'+'})
        pylab.savefig("Plots/SpikeList_rasters.png")
        pylab.close()
    def testActivityMap(self):
        self.spk.dimensions = [5, 10]
        self.spk.activity_map(t_start = 1000, t_stop = 2000, display=pylab.subplot(211), kwargs={'interpolation':'bicubic'})
        positions = pylab.rand(2, 50)
        self.spk.activity_map(float_positions = positions, display=pylab.subplot(212))
        pylab.savefig("Plots/SpikeList_activitymaps.png")
        pylab.close()
    def testPairwiseCC(self):
        self.spk.pairwise_cc(50, time_bin=10., average=True, display=pylab.subplot(221))
        self.spk.pairwise_cc(50, time_bin=10., average=False, display=pylab.subplot(222))
        self.spk.pairwise_cc(50, RandomPairs(self.spk, self.spk), time_bin=10., average=True, display=pylab.subplot(223))
        self.spk.pairwise_cc(50, AutoPairs(self.spk, self.spk), time_bin=10., display=pylab.subplot(224))
        pylab.savefig("Plots/SpikeList_pairwise_cc.png")
        pylab.close()
    def testPairwiseCCZero(self):
        self.spk.pairwise_cc_zero(50, time_bin=10., time_window=100, display=pylab.subplot(311))
        self.spk.pairwise_cc_zero(50, RandomPairs(self.spk, self.spk), time_bin=10., time_window=200, display=pylab.subplot(312))
        self.spk.pairwise_cc_zero(50, AutoPairs(self.spk, self.spk), time_bin=10., time_window=200, display=pylab.subplot(313))
        pylab.savefig("Plots/SpikeList_pairwise_cc_zero.png")
        pylab.close()
    def testActivityMovie(self):
        self.spk.dimensions = [5, 10]
        self.spk.activity_movie(t_start = 1000, t_stop = 2000, time_bin = 100, kwargs={'interpolation':'bicubic'},
                                output="Plots/SpikeList_activitymovie.mpg")
if __name__ == "__main__":
    # Discover and run every TestCase defined in this module.
    unittest.main()
| NeuralEnsemble/NeuroTools | test/test_spikes.py | Python | gpl-2.0 | 21,780 |
import re
import subprocess
import os
def get_git_info(path='.', abort_dirty=True):
    """Return ``{'url': ..., 'commit': ...}`` for the git checkout at *path*.

    Returns None when *path* is not a git working tree, or — with
    *abort_dirty* (the default) — when the tree has uncommitted changes.
    """
    if not is_git(path):
        return None
    if abort_dirty and not is_clean(path):
        return None
    return {
        'url': get_repo_url(path),
        'commit': get_commit(path),
    }
def is_git(path='.'):
    """Return True when *path* lies inside a git working tree.

    Determined by whether ``git status`` exits successfully; all git output
    is discarded.
    """
    status = subprocess.Popen(
        ['git', 'status'],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.DEVNULL,
        cwd=path)
    return status.wait() == 0
def is_clean(path='.'):
    """Return True when the working tree at *path* has no uncommitted changes.

    Uses ``git status -s``, whose output is empty for a clean tree.
    Returns False when the git command fails.
    """
    p = subprocess.Popen(
        ['git', 'status', '-s'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=path)
    stdout, _ = p.communicate()
    if not p.returncode == 0:
        return False
    # Popen without text mode yields bytes, so compare against b''.
    # (The previous comparison with the str '' was always False on
    # Python 3, which made every tree look dirty.)
    return stdout.strip() == b''
def get_repo_url(path='.', remove_user=True):
    """Return the ``origin`` remote URL of the repo at *path*, or None.

    With *remove_user* (the default), embedded ``user[:password]@``
    credentials are stripped from the URL.
    """
    p = subprocess.Popen(
        ['git', 'config', '--get', 'remote.origin.url'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=path)
    stdout, _ = p.communicate()
    if p.returncode != 0:
        return None
    # Decode once so both branches return str.  (Previously the
    # remove_user=False path returned raw bytes while the default path
    # returned str — an inconsistent return type.)
    url = stdout.strip().decode('utf-8')
    if remove_user:
        url = re.sub('(?<=://).*@', '', url)
    return url
def get_branch(path='.'):
    """Return the current branch name of the repo at *path*, or None on failure."""
    p = subprocess.Popen(
        ['git', 'rev-parse', '--abbrev-ref', 'HEAD'],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        cwd=path)
    stdout, _ = p.communicate()
    # Fixed inverted check: the old code returned None when git *succeeded*
    # and the git error text when it failed (cf. get_commit, which tests != 0).
    if p.returncode != 0:
        return None
    return stdout.strip().decode('utf8')
def get_commit(path='.'):
    """Return the HEAD commit hash of the repo at *path*, or None on failure."""
    proc = subprocess.Popen(
        ['git', 'rev-parse', 'HEAD'],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        cwd=path)
    out, _ = proc.communicate()
    return None if proc.returncode != 0 else out.strip().decode('utf8')
def get_my_repo_url():
    """Return the origin URL of the checkout containing this file.

    Falls back to the canonical upstream URL when it cannot be determined.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    url = get_repo_url(here)
    if url is None:
        url = "https://github.com/studioml/studio"
    return url
def get_my_branch():
    """Return the branch of the checkout containing this file, defaulting to master."""
    here = os.path.dirname(os.path.realpath(__file__))
    branch = get_branch(here)
    return "master" if branch is None else branch
def get_my_checkout_target():
    """Return something ``git checkout`` can use for this checkout.

    Prefers the exact HEAD commit hash; falls back to the branch name when
    the hash cannot be determined.
    """
    here = os.path.dirname(os.path.realpath(__file__))
    commit = get_commit(here)
    return get_my_branch() if commit is None else commit
| studioml/studio | studio/git_util.py | Python | apache-2.0 | 2,399 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: imagenet-resnet.py
import cv2
import sys
import argparse
import numpy as np
import os
import multiprocessing
import tensorflow as tf
from tensorflow.contrib.layers import variance_scaling_initializer
from tensorpack import *
from tensorpack.utils.stats import RatioCounter
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
TOTAL_BATCH_SIZE = 256
INPUT_SHAPE = 224
DEPTH = None
class Model(ModelDesc):
    """Pre-activation ResNet (depth 18/34/50/101) for ImageNet classification.

    The depth is selected via the module-level global ``DEPTH`` (set in
    ``__main__`` from ``--depth``).
    """
    def __init__(self, data_format='NCHW'):
        # NCHW is faster but GPU-only in TensorFlow; fail early when no GPU.
        if data_format == 'NCHW':
            assert tf.test.is_gpu_available()
        self.data_format = data_format
    def _get_inputs(self):
        # uint8 instead of float32 is used as input type to reduce copy overhead.
        # It might hurt the performance a liiiitle bit.
        # The pretrained models were trained with float32.
        return [InputDesc(tf.uint8, [None, INPUT_SHAPE, INPUT_SHAPE, 3], 'input'),
                InputDesc(tf.int32, [None], 'label')]
    def _build_graph(self, inputs):
        """Build preprocessing, the residual tower, loss and summaries."""
        image, label = inputs
        # rescale uint8 pixels to [0, 1]
        image = tf.cast(image, tf.float32) * (1.0 / 255)
        # Wrong mean/std are used for compatibility with pre-trained models.
        # Should actually add a RGB-BGR conversion here.
        image_mean = tf.constant([0.485, 0.456, 0.406], dtype=tf.float32)
        image_std = tf.constant([0.229, 0.224, 0.225], dtype=tf.float32)
        image = (image - image_mean) / image_std
        if self.data_format == 'NCHW':
            image = tf.transpose(image, [0, 3, 1, 2])
        def shortcut(l, n_in, n_out, stride):
            # Identity shortcut when channels match; 1x1 projection otherwise.
            if n_in != n_out:
                return Conv2D('convshortcut', l, n_out, 1, stride=stride)
            else:
                return l
        def basicblock(l, ch_out, stride, preact):
            # Two 3x3 convs. `preact` controls where BN+ReLU is applied:
            # 'both_preact' applies it before the split (shortcut sees it too),
            # 'default' applies it to the residual branch only,
            # 'no_preact' skips it (first block after conv0, already BNReLU'd).
            # NOTE(review): index 1 is the channel axis only under NCHW --
            # confirm correctness for NHWC runs.
            ch_in = l.get_shape().as_list()[1]
            if preact == 'both_preact':
                l = BNReLU('preact', l)
                input = l
            elif preact != 'no_preact':
                input = l
                l = BNReLU('preact', l)
            else:
                input = l
            l = Conv2D('conv1', l, ch_out, 3, stride=stride, nl=BNReLU)
            l = Conv2D('conv2', l, ch_out, 3)
            return l + shortcut(input, ch_in, ch_out, stride)
        def bottleneck(l, ch_out, stride, preact):
            # 1x1 -> 3x3 -> 1x1 bottleneck; output has ch_out * 4 channels.
            ch_in = l.get_shape().as_list()[1]
            if preact == 'both_preact':
                l = BNReLU('preact', l)
                input = l
            elif preact != 'no_preact':
                input = l
                l = BNReLU('preact', l)
            else:
                input = l
            l = Conv2D('conv1', l, ch_out, 1, nl=BNReLU)
            l = Conv2D('conv2', l, ch_out, 3, stride=stride, nl=BNReLU)
            l = Conv2D('conv3', l, ch_out * 4, 1)
            return l + shortcut(input, ch_in, ch_out * 4, stride)
        def layer(l, layername, block_func, features, count, stride, first=False):
            # One residual group: block0 may downsample; the rest keep stride 1.
            with tf.variable_scope(layername):
                with tf.variable_scope('block0'):
                    l = block_func(l, features, stride,
                                   'no_preact' if first else 'both_preact')
                for i in range(1, count):
                    with tf.variable_scope('block{}'.format(i)):
                        l = block_func(l, features, 1, 'default')
                return l
        # depth -> (blocks per group, block type)
        cfg = {
            18: ([2, 2, 2, 2], basicblock),
            34: ([3, 4, 6, 3], basicblock),
            50: ([3, 4, 6, 3], bottleneck),
            101: ([3, 4, 23, 3], bottleneck)
        }
        defs, block_func = cfg[DEPTH]
        with argscope(Conv2D, nl=tf.identity, use_bias=False,
                      W_init=variance_scaling_initializer(mode='FAN_OUT')), \
                argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm], data_format=self.data_format):
            logits = (LinearWrap(image)
                      .Conv2D('conv0', 64, 7, stride=2, nl=BNReLU)
                      .MaxPooling('pool0', shape=3, stride=2, padding='SAME')
                      .apply(layer, 'group0', block_func, 64, defs[0], 1, first=True)
                      .apply(layer, 'group1', block_func, 128, defs[1], 2)
                      .apply(layer, 'group2', block_func, 256, defs[2], 2)
                      .apply(layer, 'group3', block_func, 512, defs[3], 2)
                      .BNReLU('bnlast')
                      .GlobalAvgPooling('gap')
                      .FullyConnected('linear', 1000, nl=tf.identity)())
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label)
        loss = tf.reduce_mean(loss, name='xentropy-loss')
        # training-set top-1/top-5 error summaries
        wrong = prediction_incorrect(logits, label, 1, name='wrong-top1')
        add_moving_summary(tf.reduce_mean(wrong, name='train-error-top1'))
        wrong = prediction_incorrect(logits, label, 5, name='wrong-top5')
        add_moving_summary(tf.reduce_mean(wrong, name='train-error-top5'))
        # L2 weight decay on all conv/fc kernels
        wd_cost = regularize_cost('.*/W', l2_regularizer(1e-4), name='l2_regularize_loss')
        add_moving_summary(loss, wd_cost)
        self.cost = tf.add_n([loss, wd_cost], name='cost')
    def _get_optimizer(self):
        """SGD with Nesterov momentum; lr is a schedulable scalar variable."""
        lr = get_scalar_var('learning_rate', 0.1, summary=True)
        return tf.train.MomentumOptimizer(lr, 0.9, use_nesterov=True)
def get_data(train_or_test):
    """Build the ILSVRC12 dataflow for 'train' or 'val'.

    Reads the module globals ``args.data`` (dataset dir) and ``BATCH_SIZE``
    (set in ``__main__``). Training uses GoogLeNet-style random crop plus
    color augmentation; validation uses resize-shortest-edge + center crop.
    """
    isTrain = train_or_test == 'train'
    datadir = args.data
    ds = dataset.ILSVRC12(datadir, train_or_test,
                          shuffle=True if isTrain else False, dir_structure='original')
    if isTrain:
        class Resize(imgaug.ImageAugmentor):
            """
            crop 8%~100% of the original image
            See `Going Deeper with Convolutions` by Google.
            """
            def _augment(self, img, _):
                h, w = img.shape[:2]
                area = h * w
                # try up to 10 random crops; fall back to a plain resize
                for _ in range(10):
                    targetArea = self.rng.uniform(0.08, 1.0) * area
                    aspectR = self.rng.uniform(0.75, 1.333)
                    ww = int(np.sqrt(targetArea * aspectR))
                    hh = int(np.sqrt(targetArea / aspectR))
                    if self.rng.uniform() < 0.5:
                        ww, hh = hh, ww
                    if hh <= h and ww <= w:
                        x1 = 0 if w == ww else self.rng.randint(0, w - ww)
                        y1 = 0 if h == hh else self.rng.randint(0, h - hh)
                        out = img[y1:y1 + hh, x1:x1 + ww]
                        out = cv2.resize(out, (224, 224), interpolation=cv2.INTER_CUBIC)
                        return out
                out = cv2.resize(img, (224, 224), interpolation=cv2.INTER_CUBIC)
                return out
        augmentors = [
            Resize(),
            imgaug.RandomOrderAug(
                [imgaug.Brightness(30, clip=False),
                 imgaug.Contrast((0.8, 1.2), clip=False),
                 imgaug.Saturation(0.4, rgb=False),
                 # rgb-bgr conversion
                 imgaug.Lighting(0.1,
                                 eigval=[0.2175, 0.0188, 0.0045][::-1],
                                 eigvec=np.array(
                                     [[-0.5675, 0.7192, 0.4009],
                                      [-0.5808, -0.0045, -0.8140],
                                      [-0.5836, -0.6948, 0.4203]],
                                     dtype='float32')[::-1, ::-1]
                                 )]),
            imgaug.Clip(),
            imgaug.Flip(horiz=True),
            imgaug.ToUint8()
        ]
    else:
        augmentors = [
            imgaug.ResizeShortestEdge(256),
            imgaug.CenterCrop((224, 224)),
            imgaug.ToUint8()
        ]
    ds = AugmentImageComponent(ds, augmentors, copy=False)
    if isTrain:
        # parallel augmentation workers over ZMQ (training only)
        ds = PrefetchDataZMQ(ds, min(20, multiprocessing.cpu_count()))
    ds = BatchData(ds, BATCH_SIZE, remainder=not isTrain)
    return ds
def get_config(fake=False, data_format='NCHW'):
    """Assemble the tensorpack TrainConfig for ImageNet training.

    Args:
        fake: if True, train/validate on in-memory fake data (benchmarking).
        data_format: 'NCHW' (GPU) or 'NHWC', forwarded to the Model.
    """
    if fake:
        dataset_train = dataset_val = FakeData(
            [[64, 224, 224, 3], [64]], 1000, random=False, dtype='uint8')
    else:
        dataset_train = get_data('train')
        dataset_val = get_data('val')
    return TrainConfig(
        model=Model(data_format=data_format),
        dataflow=dataset_train,
        callbacks=[
            ModelSaver(),
            InferenceRunner(dataset_val, [
                ClassificationError('wrong-top1', 'val-error-top1'),
                ClassificationError('wrong-top5', 'val-error-top5')]),
            # step learning-rate schedule: (epoch, new lr)
            ScheduledHyperParamSetter('learning_rate',
                                      [(30, 1e-2), (60, 1e-3), (85, 1e-4), (95, 1e-5)]),
            # allows manual lr override at runtime via the hyperparam file
            HumanHyperParamSetter('learning_rate'),
        ],
        steps_per_epoch=5000,
        max_epoch=110,
    )
def eval_on_ILSVRC12(model_file, data_dir):
    """Evaluate a trained checkpoint on the ILSVRC12 validation set.

    Prints the top-1 and top-5 *error* rates.

    Args:
        model_file: checkpoint to load.
        data_dir: kept for interface compatibility; currently unused --
            get_data() reads the dataset dir from the module-level ``args``.
    """
    ds = get_data('val')
    pred_config = PredictConfig(
        model=Model(),
        session_init=get_model_loader(model_file),
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    pred = SimpleDatasetPredictor(pred_config, ds)
    # The network outputs are 0/1 "wrong" indicators, so these counters
    # accumulate error ratios, not accuracies -- name them accordingly.
    err1, err5 = RatioCounter(), RatioCounter()
    for o in pred.get_result():
        batch_size = o[0].shape[0]
        err1.feed(o[0].sum(), batch_size)
        err5.feed(o[1].sum(), batch_size)
    print("Top1 Error: {}".format(err1.ratio))
    print("Top5 Error: {}".format(err5.ratio))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.', required=True)
    parser.add_argument('--data', help='ILSVRC dataset dir')
    parser.add_argument('--load', help='load model')
    parser.add_argument('--fake', help='use fakedata to test or benchmark this model', action='store_true')
    parser.add_argument('--data_format', help='specify NCHW or NHWC',
                        type=str, default='NCHW')
    parser.add_argument('-d', '--depth', help='resnet depth',
                        type=int, default=18, choices=[18, 34, 50, 101])
    parser.add_argument('--eval', action='store_true')
    args = parser.parse_args()
    # DEPTH is read by Model._build_graph() to pick the residual-block config.
    DEPTH = args.depth
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    if args.eval:
        BATCH_SIZE = 128    # something that can run on one gpu
        eval_on_ILSVRC12(args.load, args.data)
        sys.exit()
    # Keep the total batch size constant regardless of the GPU count.
    NR_GPU = get_nr_gpu()
    BATCH_SIZE = TOTAL_BATCH_SIZE // NR_GPU
    logger.set_logger_dir(
        os.path.join('train_log', 'imagenet-resnet-d' + str(DEPTH)))
    logger.info("Running on {} GPUs. Batch size per GPU: {}".format(NR_GPU, BATCH_SIZE))
    config = get_config(fake=args.fake, data_format=args.data_format)
    if args.load:
        config.session_init = SaverRestore(args.load)
    config.nr_tower = NR_GPU
    SyncMultiGPUTrainerParameterServer(config).train()
| haamoon/tensorpack | examples/ResNet/imagenet-resnet.py | Python | apache-2.0 | 10,934 |
import os
import django
from channels.routing import get_default_application
# The settings module must be configured before django.setup() populates the
# app registry, and setup() must run before Channels builds the routing.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timestrap.settings.docker")
django.setup()
# ASGI entry point consumed by the ASGI server (e.g. Daphne).
application = get_default_application()
| cdubz/timestrap | timestrap/asgi.py | Python | bsd-2-clause | 217 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example LatestOnlyOperator and TriggerRule interactions
"""
import datetime as dt
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.latest_only_operator import LatestOnlyOperator
from airflow.utils.dates import days_ago
from airflow.utils.trigger_rule import TriggerRule
dag = DAG(
    dag_id='latest_only_with_trigger',
    schedule_interval=dt.timedelta(hours=4),
    start_date=days_ago(2),
    tags=['example']
)
# LatestOnlyOperator skips its downstream tasks on any run that is not the
# most recent scheduled run.
latest_only = LatestOnlyOperator(task_id='latest_only', dag=dag)
task1 = DummyOperator(task_id='task1', dag=dag)
task2 = DummyOperator(task_id='task2', dag=dag)
# task3 uses the default ALL_SUCCESS rule, so it is skipped whenever task1 is.
task3 = DummyOperator(task_id='task3', dag=dag)
# ALL_DONE lets task4 run even when an upstream task was skipped by latest_only.
task4 = DummyOperator(task_id='task4', dag=dag, trigger_rule=TriggerRule.ALL_DONE)
latest_only >> task1 >> [task3, task4]
task2 >> [task3, task4]
| wileeam/airflow | airflow/example_dags/example_latest_only_with_trigger.py | Python | apache-2.0 | 1,630 |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
from Plugins.Extensions.OpenWebif.local import tstrings
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.426455
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/mobile/movies.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class movies(Template):
    """Cheetah-generated template class: renders the OpenWebif mobile movie list.

    NOTE(review): this module is autogenerated from mobile/movies.tmpl --
    edit the template and recompile; manual changes here will be lost.
    """
    ##################################################
    ## CHEETAH GENERATED METHODS
    def __init__(self, *args, **KWs):
        super(movies, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)
    def respond(self, trans=None):
        """Render the page; expects $directory, $bookmarks, $movies, $tstrings."""
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        write(u'''<html>\r
<head>\r
\t<title>OpenWebif</title>\r
\t<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />\r
\t<meta name="viewport" content="user-scalable=no, width=device-width"/>\r
\t<meta name="apple-mobile-web-app-capable" content="yes" />\r
\t<link rel="stylesheet" type="text/css" href="/css/jquery.mobile-1.0.min.css" media="screen"/>\r
\t<link rel="stylesheet" type="text/css" href="/css/iphone.css" media="screen"/>\r
\t<script src="/js/jquery-1.6.2.min.js"></script>\r
\t<script src="/js/jquery.mobile-1.0.min.js"></script>\r
</head>\r
<body> \r
\t<div data-role="page">\r
\r
\t\t<div id="header">\r
\t\t\t<div class="button" onClick="history.back()">''')
        _v = VFFSL(SL,"tstrings",True)['back'] # u"$tstrings['back']" on line 17, col 49
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['back']")) # from line 17, col 49.
        write(u'''</div>\r
\t\t\t<h1><a style="color:#FFF;text-decoration:none;" href=\'/mobile\'>OpenWebif</a></h1>
\t\t\t<h2>''')
        _v = VFFSL(SL,"tstrings",True)['movies'] # u"$tstrings['movies']" on line 19, col 8
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['movies']")) # from line 19, col 8.
        write(u'''</h2>\r
\t\t</div>\r
\r
\t\t<div data-role="fieldcontain">\r
\t\t <select name="select-choice-1" id="select-choice-moviedir" onChange="window.location.href=\'/mobile/movies?dirname=\'+escape(options[selectedIndex].value);">\r
\t\t\t <option value="''')
        _v = VFFSL(SL,"directory",True) # u'$directory' on line 24, col 21
        if _v is not None: write(_filter(_v, rawExpr=u'$directory')) # from line 24, col 21.
        write(u'''">''')
        _v = VFFSL(SL,"directory",True) # u'$directory' on line 24, col 33
        if _v is not None: write(_filter(_v, rawExpr=u'$directory')) # from line 24, col 33.
        write(u'''</option>\r
''')
        for bookmark in VFFSL(SL,"bookmarks",True): # generated from line 25, col 6
            write(u'''\t\t\t <option value="''')
            _v = VFFSL(SL,"bookmark",True) # u'$bookmark' on line 26, col 21
            if _v is not None: write(_filter(_v, rawExpr=u'$bookmark')) # from line 26, col 21.
            write(u'''">''')
            _v = VFFSL(SL,"bookmark",True) # u'$bookmark' on line 26, col 32
            if _v is not None: write(_filter(_v, rawExpr=u'$bookmark')) # from line 26, col 32.
            write(u'''</option>\r
''')
        write(u'''\t\t </select>\r
\t\t</div>\r
\r
\t\t<div id="contentContainer">\r
\t\t\t<ul data-role="listview" data-inset="true" data-theme="d">\r
\t\t\t\t<li data-role="list-divider" role="heading" data-theme="b">''')
        _v = VFFSL(SL,"tstrings",True)['movies'] # u"$tstrings['movies']" on line 33, col 64
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['movies']")) # from line 33, col 64.
        write(u'''</li>\r
''')
        for movie in VFFSL(SL,"movies",True): # generated from line 34, col 5
            if VFFSL(SL,"movie.eventname",True) != "": # generated from line 35, col 5
                write(u'''\t\t\t\t<li>''')
                _v = VFFSL(SL,"movie.eventname",True) # u'$movie.eventname' on line 36, col 9
                if _v is not None: write(_filter(_v, rawExpr=u'$movie.eventname')) # from line 36, col 9.
                write(u'''</li>\r
''')
            else: # generated from line 37, col 5
                write(u'''\t\t\t\t<li>''')
                _v = VFFSL(SL,"movie.filename",True) # u'$movie.filename' on line 38, col 9
                if _v is not None: write(_filter(_v, rawExpr=u'$movie.filename')) # from line 38, col 9.
                write(u'''</li>\r
''')
        write(u'''\t\t\t</ul>\r
\t\t</div>\r
\r
\t\t<div id="footer">\r
\t\t\t<p>OpenWebif Mobile</p>\r
\t\t\t<a onclick="document.location.href=\'/index?mode=fullpage\';return false;" href="#">''')
        _v = VFFSL(SL,"tstrings",True)['show_full_openwebif'] # u"$tstrings['show_full_openwebif']" on line 46, col 86
        if _v is not None: write(_filter(_v, rawExpr=u"$tstrings['show_full_openwebif']")) # from line 46, col 86.
        write(u'''</a>\r
\t\t</div>\r
\t\t\r
\t</div>\r
</body>\r
</html>\r
''')
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""
    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES
    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_movies= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing to the generated class exactly once.
if not hasattr(movies, '_initCheetahAttributes'):
    templateAPIClass = getattr(movies, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(movies)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=movies()).run()
| MOA-2011/enigma2-plugin-extensions-openwebif | plugin/controllers/views/mobile/movies.py | Python | gpl-2.0 | 8,464 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse_lazy
from django.conf import settings
from rest_framework import serializers
from rest_flex_fields import FlexFieldsModelSerializer
from rest_flex_fields.serializers import FlexFieldsSerializerMixin
from easy_thumbnails.templatetags.thumbnail import thumbnail_url
from profiles.apiv2.serializers import ProfileSerializer
from ..models import (
Artist,
Label,
Release,
Media,
Playlist,
PlaylistItem,
PlaylistItemPlaylist,
)
SITE_URL = getattr(settings, "SITE_URL")
class ImageSerializer(serializers.ImageField):
    """Image field rendered as an absolute URL to a 240px thumbnail."""

    def to_representation(self, instance):
        # An unset/empty image serializes to None rather than a broken URL.
        if instance:
            thumb = thumbnail_url(instance, "thumbnail_240")
            return "{}{}".format(SITE_URL, thumb)
        return None
class ArtistSerializer(
    FlexFieldsModelSerializer, serializers.HyperlinkedModelSerializer
):
    """API representation of an Artist with hyperlinked detail and thumbnail."""
    url = serializers.HyperlinkedIdentityField(
        view_name="api:artist-detail", lookup_field="uuid"
    )
    # content-type tag so clients can distinguish object kinds in mixed lists
    ct = serializers.CharField(source="get_ct")
    detail_url = serializers.URLField(source="get_absolute_url")
    image = ImageSerializer(source="main_image")
    class Meta:
        model = Artist
        depth = 1
        fields = ["url", "ct", "created", "updated", "id", "detail_url", "uuid", "name", "image"]
class LabelSerializer(
    FlexFieldsModelSerializer, serializers.HyperlinkedModelSerializer
):
    """API representation of a Label (record label), mirrors ArtistSerializer."""
    url = serializers.HyperlinkedIdentityField(
        view_name="api:label-detail", lookup_field="uuid"
    )
    ct = serializers.CharField(source="get_ct")
    detail_url = serializers.URLField(source="get_absolute_url")
    image = ImageSerializer(source="main_image")
    class Meta:
        model = Label
        depth = 1
        fields = ["url", "ct", "created", "updated", "id", "detail_url", "uuid", "name", "image"]
class MediaSerializer(serializers.HyperlinkedModelSerializer):
    """API representation of a Media (track), including stream/waveform URLs."""
    url = serializers.HyperlinkedIdentityField(
        view_name="api:media-detail", lookup_field="uuid"
    )
    ct = serializers.CharField(source="get_ct")
    detail_url = serializers.URLField(source="get_absolute_url")
    duration = serializers.FloatField(source="master_duration")
    artist = serializers.HyperlinkedRelatedField(
        many=False, read_only=True, view_name="api:artist-detail", lookup_field="uuid"
    )
    release = serializers.HyperlinkedRelatedField(
        many=False, read_only=True, view_name="api:release-detail", lookup_field="uuid"
    )
    artist_display = serializers.CharField(source="get_artist_display")
    release_display = serializers.SerializerMethodField()
    # cover art comes from the parent release, not the track itself
    image = ImageSerializer(source="release.main_image")
    def get_release_display(self, obj, **kwargs):
        """Return the parent release name, or None for orphan tracks."""
        return obj.release.name if obj.release else None
    assets = serializers.SerializerMethodField()
    def get_assets(self, obj, **kwargs):
        """Build absolute stream and waveform URLs for the player."""
        # TODO: properly serialize assets
        stream_url = reverse_lazy(
            "mediaasset-format",
            kwargs={"media_uuid": obj.uuid, "quality": "default", "encoding": "mp3"},
        )
        waveform_url = reverse_lazy(
            "mediaasset-waveform", kwargs={"media_uuid": obj.uuid, "type": "w"}
        )
        assets = {
            "stream": "{}{}".format(SITE_URL, stream_url),
            "waveform": "{}{}".format(SITE_URL, waveform_url),
        }
        # TODO: check if this is a good idea...
        # request asset generation for media
        # print('request asset generation for {}'.format(obj))
        # Format.objects.get_or_create_for_media(media=obj)
        # Waveform.objects.get_or_create_for_media(media=obj, type=Waveform.WAVEFORM)
        return assets
    class Meta:
        model = Media
        depth = 1
        fields = [
            "url",
            "ct",
            "created",
            "updated",
            "id",
            "detail_url",
            "uuid",
            "image",
            "name",
            "duration",
            "assets",
            "isrc",
            "artist_display",
            "release_display",
            "artist",
            "release",
        ]
class ReleaseSerializer(
    FlexFieldsSerializerMixin, serializers.HyperlinkedModelSerializer
):
    """API representation of a Release including its tracklist (`media`)."""
    url = serializers.HyperlinkedIdentityField(
        view_name="api:release-detail", lookup_field="uuid"
    )
    ct = serializers.CharField(source="get_ct")
    image = ImageSerializer(source="main_image")
    detail_url = serializers.URLField(source="get_absolute_url")
    releasedate = serializers.CharField(source="releasedate_approx")
    media = MediaSerializer(many=True, read_only=True, source="get_media")
    artist_display = serializers.CharField(source="get_artist_display")
    # label = serializers.HyperlinkedRelatedField(
    #     many=False,
    #     read_only=True,
    #     view_name='api:label-detail', lookup_field="uuid"
    # )
    label = LabelSerializer(
        read_only=True,
    )
    # TODO: `items` is used for player only. find a way to unify this.
    items = serializers.SerializerMethodField()
    def get_items(self, obj, **kwargs):
        """Serialize each track wrapped as {"content": ...} for the player."""
        items = []
        for media in obj.get_media():
            serializer = MediaSerializer(
                media, context={"request": self.context["request"]}
            )
            items.append({"content": serializer.data})
        return items
    class Meta:
        model = Release
        depth = 1
        fields = [
            "url",
            "ct",
            "uuid",
            "created",
            "updated",
            "id",
            "detail_url",
            "name",
            "image",
            "releasedate",
            "artist_display",
            "media",
            "label",
            # TODO: `items` is used for player only. find a way to unify this.
            "items",
        ]
    # expandable_fields = {
    #     'label': (LabelSerializer, {'read_only': True})
    # }
class PlaylistItemField(serializers.RelatedField):
    """
    A custom field to use for the `item` generic relationship.
    """
    def to_representation(self, value):
        """
        Serialize tagged objects to a simple textual representation.
        """
        # NOTE(review): both branches test `isinstance(value, Media)`, so the
        # `elif` (apparently meant for Jingle objects, per its body) is dead
        # code; any non-Media item raises below. Confirm the intended type.
        if isinstance(value, Media):
            # return 'Media: {}'.format(value.pk)
            serializer = MediaSerializer(
                value, context={"request": self.context["request"]}
            )
        elif isinstance(value, Media):
            return "Jingle: {}".format(value.pk)
        else:
            raise Exception("Unexpected type of tagged object")
        return serializer.data
class PlaylistItemSerializer(serializers.ModelSerializer):
    """Serializer exposing the generic `content_object` behind a playlist item."""
    # http://www.django-rest-framework.org/api-guide/relations/#generic-relationships
    content = PlaylistItemField(read_only=True, source="content_object")
    class Meta:
        model = PlaylistItem
        depth = 1
        fields = ["content"]
class PlaylistItemPlaylistSerializer(serializers.ModelSerializer):
    """Serializer for the playlist<->item through model (position + mix cues)."""
    # item = PlaylistItemSerializer(read_only=True)
    content = serializers.SerializerMethodField()
    def get_content(self, obj, **kwargs):
        """Serialize the generic content object behind this playlist entry."""
        # TODO: implement for `Jingle`
        # NOTE(review): both branches test `isinstance(..., Media)` and have
        # identical bodies, so the `elif` is unreachable; any non-Media
        # content raises. Presumably the second branch was meant for Jingle.
        if isinstance(obj.item.content_object, Media):
            serializer = MediaSerializer(
                instance=Media.objects.get(pk=obj.item.content_object.pk),
                many=False,
                context={"request": self.context["request"]},
            )
        elif isinstance(obj.item.content_object, Media):
            serializer = MediaSerializer(
                instance=Media.objects.get(pk=obj.item.content_object.pk),
                many=False,
                context={"request": self.context["request"]},
            )
        else:
            raise Exception("Unexpected type of tagged object")
        return serializer.data
    class Meta:
        model = PlaylistItemPlaylist
        depth = 1
        fields = [
            # 'item',
            "content",
            "position",
            "cue_in",
            "cue_out",
            "fade_in",
            "fade_out",
            "fade_cross",
        ]
class PlaylistSerializer(FlexFieldsModelSerializer):
    """API representation of a Playlist with ordered items and scheduling dayparts."""
    url = serializers.HyperlinkedIdentityField(
        view_name="api:playlist-detail", lookup_field="uuid"
    )
    ct = serializers.CharField(source="get_ct")
    image = ImageSerializer(source="main_image")
    detail_url = serializers.URLField(source="get_absolute_url")
    items = PlaylistItemPlaylistSerializer(source="playlist_items", many=True)
    tags = serializers.StringRelatedField(many=True)
    # NOTE(review): `source` is ignored on SerializerMethodField -- the value
    # comes from get_user() below; the argument is harmless but misleading.
    user = serializers.SerializerMethodField(source="user")
    item_appearances = serializers.SerializerMethodField()
    dayparts = serializers.SerializerMethodField()
    def get_user(self, obj):
        """Serialize the owning user's profile; implicit None when absent."""
        if not (obj.user and getattr(obj.user, "profile")):
            return
        return ProfileSerializer(obj.user.profile, context=self.context).data
    def get_item_appearances(self, obj, **kwargs):
        """Return compact "<ct>:<uuid>" identifiers for all playlist items."""
        items = [
            "{}:{}".format(co.content_object.get_ct(), co.content_object.uuid)
            for co in obj.get_items()
        ]
        return items
    def get_dayparts(self, obj, **kwargs):
        """Return active scheduling dayparts as plain dicts."""
        return [
            {"day": dp.day, "start": dp.time_start, "end": dp.time_end}
            for dp in obj.dayparts.active()
        ]
    class Meta:
        model = Playlist
        depth = 1
        fields = [
            "url",
            "ct",
            "uuid",
            "id",
            "detail_url",
            "name",
            "series_display",
            "image",
            "tags",
            "user",
            "mixdown_file",
            "items",
            "item_appearances",
            "num_media",
            "duration",
            "dayparts",
        ]
| hzlf/openbroadcast.org | website/apps/alibrary/apiv2/serializers.py | Python | gpl-3.0 | 9,901 |
"""Lookup tables for the GUI core-protocol message opcodes.

``OPCODE_RECV`` maps opcode (as a string) -> message name for messages the
GUI receives from the core; ``OPCODE_SENT`` does the same for messages the
GUI sends.  ``OPCODE()`` performs the reverse lookup on ``OPCODE_SENT``.
"""


def OPCODE(value):
    """Return the integer opcode for the sent-message name *value*.

    Returns None when *value* is not a known message name (same as the
    original fall-through behaviour).

    Fixes over the previous implementation: ``dict.iteritems()`` (Python 2
    only) replaced with ``items()``; the fragile ``is`` identity comparison
    on strings (worked only via small-string interning) replaced with
    ``==``; the unreachable ``break`` after ``return`` removed.
    """
    for code, name in OPCODE_SENT.items():
        if name == value:
            return int(code)
    return None


# Messages received from the core (opcode string -> message name).
OPCODE_RECV = {
    "0": "CoreProtocol",
    "1": "OptionsInfo",
    "3": "DefineSearches",
    "4": "ResultInfo",
    "5": "SearchResult",
    "9": "FileUpdateAvailability",
    "10": "FileAddSource",
    "12": "ServerUser",
    "13": "ServerState",
    "15": "ClientInfo",
    "16": "ClientState",
    "19": "ConsoleMessage",
    "20": "NetworkInfo",
    "21": "UserInfo",
    "22": "RoomInfo",
    "23": "RoomMessage",
    "24": "RoomAddUser",
    "26": "ServerInfo",
    "27": "MessageFromClient",
    "28": "ConnectedServers",
    "31": "RoomInfo",
    "34": "SharedFileUpload",
    "35": "SharedFileUnshared",
    "36": "AddSectionOption",
    "38": "AddPluginOption",
    "46": "FileDownloadUpdate",
    "47": "BadPassword",
    "48": "SharedFileInfo",
    "49": "ClientStats",
    "50": "FileRemoveSource",
    "51": "CleanTables",
    "52": "FileInfo",
    "53": "DownloadingFiles",
    "54": "DownloadedFiles",
    "55": "Uploaders",
    "56": "Pending",
    "57": "Search",
    "58": "Version",
    "59": "Stats",
}

# Messages sent to the core (opcode string -> message name).  Note some
# names appear twice ("GetServerUsers", "CloseSearch"); OPCODE() returns
# the first (lowest-opcode) match, preserving historical behaviour.
OPCODE_SENT = {
    "0": "ProtocolVersion",
    "1": "ConnectMore",
    "2": "CleanOldServers",
    "3": "KillServer",
    "4": "ExtendedSearch",
    "8": "DlLink",
    "9": "RemoveServer",
    "10": "SaveOptions",
    "11": "RemoveDownload",
    "12": "GetServerUsers",
    "13": "SaveFileAs",
    "14": "AddClientFriend",
    "15": "AddUserFriend",
    "16": "RemoveFriend",
    "17": "RemoveAllFriends",
    "18": "FindFriend",
    "19": "ViewUsers",
    "20": "ConnectAll",
    "21": "ConnectServer",
    "22": "DisconnectServer",
    "23": "SwitchDownload",
    "24": "VerifyAllChunks",
    "25": "QueryFormat",
    "26": "ModifyMp3Tags",
    "27": "CloseSearch",
    "28": "SetOption",
    "29": "ConsoleCommand",
    "30": "Preview",
    "31": "ConnectFriend",
    "32": "GetServerUsers",
    "33": "GetClientFiles",
    "34": "GetFileLocations",
    "35": "GetServerInfo",
    "36": "GetClientInfo",
    "37": "GetFileInfo",
    "38": "GetUserInfo",
    "40": "EnableNetwork",
    "41": "BrowseUser",
    "42": "SearchQuery",
    "43": "MessageToClient",
    "44": "GetConnectedServers",
    "45": "GetDownloadingFiles",
    "46": "GetDownloadedFiles",
    "47": "GuiExtensions",
    "49": "RefreshUploadStats",
    "50": "Download",
    "51": "SetFilePriority",
    "52": "PassWord",
    "53": "CloseSearch",
    "54": "AddServer",
    "55": "MessageVersions",
    "56": "RenameFile",
    "57": "GetUploaders",
    "58": "GetPending",
    "59": "GetSearches",
    "60": "GetSearch",
    "61": "ConnectClient",
    "62": "DisconnectClient",
    "63": "NetworkMessage",
    "64": "InterestedInSources",
    "65": "GetVersion",
    "68": "GetStats",
}
| tassia/DonkeySurvey | src/GUIProtoDefinitions.py | Python | gpl-3.0 | 3,753 |
x, y = int(5), int(4)  # fixture: tuple assignment where both targets receive call results
| python-security/pyt | examples/example_inputs/assignment_multiple_assign_call.py | Python | gpl-2.0 | 22 |
# Not used now
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
    """Base configuration shared by all environments; subclasses override."""
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    SSL_DISABLE = False
    # NOTE(review): SQLALCHEMY_COMMIT_ON_TEARDOWN is deprecated/removed in
    # newer Flask-SQLAlchemy releases -- confirm the pinned version honours it.
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    SQLALCHEMY_RECORD_QUERIES = True
    MAIL_SERVER = 'smtp.googlemail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    FLASKY_MAIL_SUBJECT_PREFIX = '[Flasky]'
    FLASKY_MAIL_SENDER = 'Flasky Admin <flasky@example.com>'
    FLASKY_ADMIN = os.environ.get('FLASKY_ADMIN')
    FLASKY_POSTS_PER_PAGE = 20
    FLASKY_FOLLOWERS_PER_PAGE = 50
    FLASKY_COMMENTS_PER_PAGE = 30
    # queries slower than this many seconds are reported as slow
    FLASKY_SLOW_DB_QUERY_TIME=0.5
    @staticmethod
    def init_app(app):
        """Environment-specific app initialisation hook; base does nothing."""
        pass
class DevelopmentConfig(Config):
    """Local development: debug on, dev SQLite DB unless DEV_DATABASE_URL set."""
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
class TestingConfig(Config):
    """Test runs: separate DB and CSRF disabled so form posts work in tests."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = os.environ.get('TEST_DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data-test.sqlite')
    WTF_CSRF_ENABLED = False
class ProductionConfig(Config):
    """Production: real database plus error e-mails to the admin address."""
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'data.sqlite')
    @classmethod
    def init_app(cls, app):
        """Attach an SMTP handler that e-mails ERROR-level records to the admin."""
        Config.init_app(app)
        # email errors to the administrators
        import logging
        from logging.handlers import SMTPHandler
        credentials = None
        secure = None
        if getattr(cls, 'MAIL_USERNAME', None) is not None:
            credentials = (cls.MAIL_USERNAME, cls.MAIL_PASSWORD)
            if getattr(cls, 'MAIL_USE_TLS', None):
                secure = ()
        mail_handler = SMTPHandler(
            mailhost=(cls.MAIL_SERVER, cls.MAIL_PORT),
            fromaddr=cls.FLASKY_MAIL_SENDER,
            toaddrs=[cls.FLASKY_ADMIN],
            subject=cls.FLASKY_MAIL_SUBJECT_PREFIX + ' Application Error',
            credentials=credentials,
            secure=secure)
        mail_handler.setLevel(logging.ERROR)
        app.logger.addHandler(mail_handler)
class HerokuConfig(ProductionConfig):
    """Heroku deployment: proxy-header fix and logging to stderr."""
    SSL_DISABLE = bool(os.environ.get('SSL_DISABLE'))
    @classmethod
    def init_app(cls, app):
        """Extend production setup with Heroku-specific WSGI/logging tweaks."""
        ProductionConfig.init_app(app)
        # handle proxy server headers
        # NOTE(review): werkzeug.contrib.fixers was removed in Werkzeug 1.0;
        # newer releases provide werkzeug.middleware.proxy_fix -- confirm the
        # pinned Werkzeug version before upgrading.
        from werkzeug.contrib.fixers import ProxyFix
        app.wsgi_app = ProxyFix(app.wsgi_app)
        # log to stderr
        import logging
        from logging import StreamHandler
        file_handler = StreamHandler()
        file_handler.setLevel(logging.WARNING)
        app.logger.addHandler(file_handler)
class UnixConfig(ProductionConfig):
    """Unix server deployment: adds syslog logging on top of production."""
    @classmethod
    def init_app(cls, app):
        """Extend production setup with a syslog handler for warnings+."""
        ProductionConfig.init_app(app)
        # log to syslog
        import logging
        from logging.handlers import SysLogHandler
        syslog_handler = SysLogHandler()
        syslog_handler.setLevel(logging.WARNING)
        app.logger.addHandler(syslog_handler)
# Registry mapping FLASK_CONFIG environment names to configuration classes.
config = {
    'development': DevelopmentConfig,
    'testing': TestingConfig,
    'production': ProductionConfig,
    'heroku': HerokuConfig,
    'unix': UnixConfig,
    'default': DevelopmentConfig
}
| sysuwangrui/Flask-BBS | config.py | Python | mit | 3,310 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
from odoo.exceptions import ValidationError
from odoo.osv import expression
try:
from cn2an import an2cn
except ImportError:
an2cn = None
class AccountMove(models.Model):
    _inherit = 'account.move'
    # Chinese VAT invoice ("fapiao") number: exactly 8 decimal digits.
    fapiao = fields.Char(string='Fapiao Number', size=8, copy=False, tracking=True)
    @api.constrains('fapiao')
    def _check_fapiao(self):
        """Validate that a fapiao number, when set, is exactly 8 digits."""
        for record in self:
            if record.fapiao and (len(record.fapiao) != 8 or not record.fapiao.isdecimal()):
                raise ValidationError(_("Fapiao number is an 8-digit number. Please enter a correct one."))
    @api.model
    def check_cn2an(self):
        """Return the cn2an converter, or None when the library is not installed."""
        return an2cn
    @api.model
    def _convert_to_amount_in_word(self, number):
        """Convert number to ``amount in words`` for Chinese financial usage."""
        # Gracefully degrade to None when the optional cn2an dependency is missing.
        if not self.check_cn2an():
            return None
        return an2cn(number, 'rmb')
    def _count_attachments(self):
        """Count attachments on this move plus its related statements/payments."""
        domains = [[('res_model', '=', 'account.move'), ('res_id', '=', self.id)]]
        statement_ids = self.line_ids.mapped('statement_id')
        payment_ids = self.line_ids.mapped('payment_id')
        # NOTE(review): recordsets (not .ids) are passed to the 'in' domains
        # below -- relies on Odoo's domain coercion; confirm on the target version.
        if statement_ids:
            domains.append([('res_model', '=', 'account.bank.statement'), ('res_id', 'in', statement_ids)])
        if payment_ids:
            domains.append([('res_model', '=', 'account.payment'), ('res_id', 'in', payment_ids)])
        return self.env['ir.attachment'].search_count(expression.OR(domains))
| jeremiahyan/odoo | addons/l10n_cn/models/account_move.py | Python | gpl-3.0 | 1,602 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pyslvs_ui/synthesis/structure_synthesis/structure_widget.ui'
#
# Created by: PyQt5 UI code generator 5.13.2
#
# WARNING! All changes made in this file will be lost!
from qtpy import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """Auto-generated UI scaffold for the structure-synthesis widget.

    Generated by pyuic5 from ``structure_widget.ui`` — regenerate from the
    .ui file instead of editing this class by hand (see the header warning).
    """

    def setupUi(self, Form):
        """Build the widget hierarchy, layouts and icons for *Form*."""
        Form.setObjectName("Form")
        Form.resize(533, 654)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/icons/number.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        Form.setWindowIcon(icon)
        self.verticalLayout = QtWidgets.QVBoxLayout(Form)
        self.verticalLayout.setObjectName("verticalLayout")
        # Top row: edge-set display line and its action buttons.
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.edges_label = QtWidgets.QLabel(Form)
        self.edges_label.setObjectName("edges_label")
        self.horizontalLayout_4.addWidget(self.edges_label)
        self.edges_text = QtWidgets.QLineEdit(Form)
        self.edges_text.setReadOnly(True)
        self.edges_text.setObjectName("edges_text")
        self.horizontalLayout_4.addWidget(self.edges_text)
        self.expr_copy = QtWidgets.QPushButton(Form)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(":/icons/copy.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.expr_copy.setIcon(icon1)
        self.expr_copy.setObjectName("expr_copy")
        self.horizontalLayout_4.addWidget(self.expr_copy)
        self.expr_add_collection = QtWidgets.QPushButton(Form)
        self.expr_add_collection.setText("")
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(":/icons/collections.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.expr_add_collection.setIcon(icon2)
        self.expr_add_collection.setObjectName("expr_add_collection")
        self.horizontalLayout_4.addWidget(self.expr_add_collection)
        self.from_mechanism_button = QtWidgets.QPushButton(Form)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(":/icons/merge_from.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.from_mechanism_button.setIcon(icon3)
        self.from_mechanism_button.setAutoDefault(True)
        self.from_mechanism_button.setDefault(True)
        self.from_mechanism_button.setObjectName("from_mechanism_button")
        self.horizontalLayout_4.addWidget(self.from_mechanism_button)
        self.verticalLayout.addLayout(self.horizontalLayout_4)
        self.line = QtWidgets.QFrame(Form)
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.verticalLayout.addWidget(self.line)
        # Main vertical splitter: number-synthesis pane (top) and
        # structure-synthesis pane (bottom).
        self.main_splitter = QtWidgets.QSplitter(Form)
        self.main_splitter.setOrientation(QtCore.Qt.Vertical)
        self.main_splitter.setObjectName("main_splitter")
        self.verticalLayoutWidget = QtWidgets.QWidget(self.main_splitter)
        self.verticalLayoutWidget.setObjectName("verticalLayoutWidget")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # NL / NJ / DOF spin boxes and options grid.
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        self.nj_label = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.nj_label.setObjectName("nj_label")
        self.gridLayout.addWidget(self.nj_label, 0, 1, 1, 1)
        self.nl_label = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.nl_label.setObjectName("nl_label")
        self.gridLayout.addWidget(self.nl_label, 0, 0, 1, 1)
        self.nl_input = QtWidgets.QSpinBox(self.verticalLayoutWidget)
        self.nl_input.setMinimum(4)
        self.nl_input.setObjectName("nl_input")
        self.gridLayout.addWidget(self.nl_input, 2, 0, 1, 1)
        self.nj_input = QtWidgets.QSpinBox(self.verticalLayoutWidget)
        self.nj_input.setMinimum(4)
        self.nj_input.setObjectName("nj_input")
        self.gridLayout.addWidget(self.nj_input, 2, 1, 1, 1)
        self.dof_label = QtWidgets.QLabel(self.verticalLayoutWidget)
        self.dof_label.setObjectName("dof_label")
        self.gridLayout.addWidget(self.dof_label, 0, 2, 1, 1)
        self.dof = QtWidgets.QSpinBox(self.verticalLayoutWidget)
        self.dof.setEnabled(False)
        self.dof.setMinimum(-99)
        self.dof.setProperty("value", 1)
        self.dof.setObjectName("dof")
        self.gridLayout.addWidget(self.dof, 2, 2, 1, 1)
        self.keep_dof = QtWidgets.QCheckBox(self.verticalLayoutWidget)
        self.keep_dof.setChecked(True)
        self.keep_dof.setObjectName("keep_dof")
        self.gridLayout.addWidget(self.keep_dof, 0, 3, 1, 1)
        self.graph_degenerate = QtWidgets.QComboBox(self.verticalLayoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.graph_degenerate.sizePolicy().hasHeightForWidth())
        self.graph_degenerate.setSizePolicy(sizePolicy)
        self.graph_degenerate.setObjectName("graph_degenerate")
        self.graph_degenerate.addItem("")
        self.graph_degenerate.addItem("")
        self.graph_degenerate.addItem("")
        self.gridLayout.addWidget(self.graph_degenerate, 2, 3, 1, 1)
        self.verticalLayout_2.addLayout(self.gridLayout)
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.number_synthesis_button = QtWidgets.QPushButton(self.verticalLayoutWidget)
        self.number_synthesis_button.setAutoDefault(True)
        self.number_synthesis_button.setObjectName("number_synthesis_button")
        self.horizontalLayout_2.addWidget(self.number_synthesis_button)
        self.assortment_clear_button = QtWidgets.QPushButton(self.verticalLayoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.assortment_clear_button.sizePolicy().hasHeightForWidth())
        self.assortment_clear_button.setSizePolicy(sizePolicy)
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(":/icons/clean.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.assortment_clear_button.setIcon(icon4)
        self.assortment_clear_button.setObjectName("assortment_clear_button")
        self.horizontalLayout_2.addWidget(self.assortment_clear_button)
        self.verticalLayout_2.addLayout(self.horizontalLayout_2)
        self.link_assortment_list = QtWidgets.QTreeWidget(self.verticalLayoutWidget)
        self.link_assortment_list.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.link_assortment_list.setIndentation(10)
        self.link_assortment_list.setObjectName("link_assortment_list")
        self.verticalLayout_2.addWidget(self.link_assortment_list)
        # Bottom pane: graph engine options, synthesis buttons, atlas list.
        self.verticalLayoutWidget_2 = QtWidgets.QWidget(self.main_splitter)
        self.verticalLayoutWidget_2.setObjectName("verticalLayoutWidget_2")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.verticalLayoutWidget_2)
        self.verticalLayout_3.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.graph_engine_text = QtWidgets.QLabel(self.verticalLayoutWidget_2)
        self.graph_engine_text.setObjectName("graph_engine_text")
        self.horizontalLayout.addWidget(self.graph_engine_text)
        self.graph_engine = QtWidgets.QComboBox(self.verticalLayoutWidget_2)
        self.graph_engine.setObjectName("graph_engine")
        self.horizontalLayout.addWidget(self.graph_engine)
        self.reload_atlas = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        self.reload_atlas.setText("")
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap(":/icons/data_update.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.reload_atlas.setIcon(icon5)
        self.reload_atlas.setObjectName("reload_atlas")
        self.horizontalLayout.addWidget(self.reload_atlas)
        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.graph_link_as_node = QtWidgets.QCheckBox(self.verticalLayoutWidget_2)
        self.graph_link_as_node.setObjectName("graph_link_as_node")
        self.horizontalLayout.addWidget(self.graph_link_as_node)
        self.graph_show_label = QtWidgets.QCheckBox(self.verticalLayoutWidget_2)
        self.graph_show_label.setChecked(True)
        self.graph_show_label.setObjectName("graph_show_label")
        self.horizontalLayout.addWidget(self.graph_show_label)
        self.verticalLayout_3.addLayout(self.horizontalLayout)
        self.horizontalLayout_5 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_5.setObjectName("horizontalLayout_5")
        self.structure_synthesis_all_button = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        self.structure_synthesis_all_button.setMaximumSize(QtCore.QSize(100, 16777215))
        self.structure_synthesis_all_button.setAutoDefault(True)
        self.structure_synthesis_all_button.setObjectName("structure_synthesis_all_button")
        self.horizontalLayout_5.addWidget(self.structure_synthesis_all_button)
        self.structure_synthesis_button = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        self.structure_synthesis_button.setObjectName("structure_synthesis_button")
        self.horizontalLayout_5.addWidget(self.structure_synthesis_button)
        self.structure_list_clear_button = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.structure_list_clear_button.sizePolicy().hasHeightForWidth())
        self.structure_list_clear_button.setSizePolicy(sizePolicy)
        self.structure_list_clear_button.setIcon(icon4)
        self.structure_list_clear_button.setObjectName("structure_list_clear_button")
        self.horizontalLayout_5.addWidget(self.structure_list_clear_button)
        self.verticalLayout_3.addLayout(self.horizontalLayout_5)
        self.structure_list = QtWidgets.QListWidget(self.verticalLayoutWidget_2)
        self.structure_list.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        self.structure_list.setIconSize(QtCore.QSize(200, 200))
        self.structure_list.setResizeMode(QtWidgets.QListView.Adjust)
        self.structure_list.setViewMode(QtWidgets.QListView.IconMode)
        self.structure_list.setUniformItemSizes(True)
        self.structure_list.setObjectName("structure_list")
        self.verticalLayout_3.addWidget(self.structure_list)
        # Save / export row.
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.save_edges = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap(":/icons/save_file.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.save_edges.setIcon(icon6)
        self.save_edges.setObjectName("save_edges")
        self.horizontalLayout_3.addWidget(self.save_edges)
        self.save_atlas = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        icon7 = QtGui.QIcon()
        icon7.addPixmap(QtGui.QPixmap(":/icons/picture.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.save_atlas.setIcon(icon7)
        self.save_atlas.setObjectName("save_atlas")
        self.horizontalLayout_3.addWidget(self.save_atlas)
        self.edges2atlas_button = QtWidgets.QPushButton(self.verticalLayoutWidget_2)
        icon8 = QtGui.QIcon()
        icon8.addPixmap(QtGui.QPixmap(":/icons/edges_to_atlas.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        self.edges2atlas_button.setIcon(icon8)
        self.edges2atlas_button.setIconSize(QtCore.QSize(40, 16))
        self.edges2atlas_button.setObjectName("edges2atlas_button")
        self.horizontalLayout_3.addWidget(self.edges2atlas_button)
        self.verticalLayout_3.addLayout(self.horizontalLayout_3)
        # Timing information row.
        self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_6.setObjectName("horizontalLayout_6")
        self.time_title_label = QtWidgets.QLabel(self.verticalLayoutWidget_2)
        self.time_title_label.setObjectName("time_title_label")
        self.horizontalLayout_6.addWidget(self.time_title_label)
        self.time_label = QtWidgets.QLabel(self.verticalLayoutWidget_2)
        self.time_label.setObjectName("time_label")
        self.horizontalLayout_6.addWidget(self.time_label)
        self.paint_time_title_label = QtWidgets.QLabel(self.verticalLayoutWidget_2)
        self.paint_time_title_label.setObjectName("paint_time_title_label")
        self.horizontalLayout_6.addWidget(self.paint_time_title_label)
        self.paint_time_label = QtWidgets.QLabel(self.verticalLayoutWidget_2)
        self.paint_time_label.setObjectName("paint_time_label")
        self.horizontalLayout_6.addWidget(self.paint_time_label)
        spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_6.addItem(spacerItem1)
        self.verticalLayout_3.addLayout(self.horizontalLayout_6)
        self.verticalLayout.addWidget(self.main_splitter)
        self.retranslateUi(Form)
        self.graph_degenerate.setCurrentIndex(1)
        self.graph_engine.setCurrentIndex(-1)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Set all user-visible (translatable) strings on the widgets."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Form"))
        self.edges_label.setText(_translate("Form", "Edge Set:"))
        self.expr_copy.setStatusTip(_translate("Form", "Copy expression."))
        self.expr_add_collection.setStatusTip(_translate("Form", "Add to collection."))
        self.from_mechanism_button.setStatusTip(_translate("Form", "Analyze current mechanism from canvas."))
        self.nj_label.setToolTip(_translate("Form", "Number of joints"))
        self.nj_label.setText(_translate("Form", "NJ (?)"))
        self.nl_label.setToolTip(_translate("Form", "Number of links"))
        self.nl_label.setText(_translate("Form", "NL (?)"))
        self.dof_label.setToolTip(_translate("Form", "Degree of freedom"))
        self.dof_label.setText(_translate("Form", "DOF (?)"))
        self.keep_dof.setStatusTip(_translate("Form", "Keep the degrees of freedom when adjusting numbers."))
        self.keep_dof.setText(_translate("Form", "Keep the DOF"))
        self.graph_degenerate.setItemText(0, _translate("Form", "Only degenerate"))
        self.graph_degenerate.setItemText(1, _translate("Form", "No degenerate"))
        self.graph_degenerate.setItemText(2, _translate("Form", "All"))
        self.number_synthesis_button.setStatusTip(_translate("Form", "Find the possible number of different joints."))
        self.number_synthesis_button.setText(_translate("Form", "Number Synthesis"))
        self.link_assortment_list.headerItem().setText(0, _translate("Form", "Link Assortment / Contracted Link Assortment"))
        self.link_assortment_list.headerItem().setText(1, _translate("Form", "Count"))
        self.graph_engine_text.setText(_translate("Form", "Engine: "))
        self.graph_engine.setStatusTip(_translate("Form", "Layout engine from NetworkX."))
        self.reload_atlas.setToolTip(_translate("Form", "Re-layout"))
        self.graph_link_as_node.setStatusTip(_translate("Form", "Show the edges as vertices."))
        self.graph_link_as_node.setText(_translate("Form", "Link as node"))
        self.graph_show_label.setText(_translate("Form", "Show labels"))
        self.structure_synthesis_all_button.setStatusTip(_translate("Form", "Find the structure of mechanism from all numbers."))
        self.structure_synthesis_all_button.setText(_translate("Form", "Find All"))
        self.structure_synthesis_button.setText(_translate("Form", "Find by Assortment"))
        self.save_edges.setStatusTip(_translate("Form", "Save the edges of atlas to text file."))
        self.save_edges.setText(_translate("Form", "Save as list"))
        self.save_atlas.setStatusTip(_translate("Form", "Save the atlas to image file."))
        self.save_atlas.setText(_translate("Form", "Save as image"))
        self.edges2atlas_button.setStatusTip(_translate("Form", "Load the edges data from text file, then save them to image files."))
        self.time_title_label.setText(_translate("Form", "Find in:"))
        self.paint_time_title_label.setText(_translate("Form", "Painted in:"))
from pyslvs_ui import icons_rc
| KmolYuan/Pyslvs-PyQt5 | pyslvs_ui/synthesis/structure_synthesis/structure_widget_ui.py | Python | agpl-3.0 | 17,172 |
import nox
PYTHON_VERSIONS = ["3.8", "3.9"]
PACKAGE = "abilian"
@nox.session(python=PYTHON_VERSIONS)
def pytest(session):
    """Run the test suite on every supported Python version."""
    # `external` expects a bool; the string "True" only worked because any
    # non-empty string is truthy.
    session.run("poetry", "install", external=True)
    session.install("psycopg2-binary")
    session.run("yarn", external=True)
    session.run("pip", "check")
    session.run("pytest", "-q")
# Plain version string ("3.8") for consistency with the other sessions.
@nox.session(python="3.8")
def lint(session):
    """Run the CI lint checks."""
    # `external=True` added: poetry lives outside the session virtualenv,
    # matching how every other session invokes it.
    session.run("poetry", "install", "-q", external=True)
    session.install("poetry", "psycopg2-binary")
    session.run("yarn", external=True)
    session.run("make", "lint-ci")
@nox.session(python="3.8")
def typeguard(session):
    """Run the test suite with typeguard runtime type checking enabled."""
    session.install("psycopg2-binary")
    # `external` expects a bool, not the string "True".
    session.run("poetry", "install", "-q", external=True)
    session.run("yarn", external=True)
    session.run("pytest", f"--typeguard-packages={PACKAGE}")
| abilian/abilian-sbe | noxfile.py | Python | lgpl-2.1 | 802 |
import glob
import json
import nltk
from nltk import word_tokenize
class DataSanitizer:
    """Filters raw review JSON files down to sufficiently long reviews."""

    @staticmethod
    def sanitize():
        """Read every raw_data/*.json file and write the reviews whose
        tokenized body exceeds 350 tokens to data.json.

        Side effects: reads raw_data/*.json, writes data.json, prints a
        summary of filtered vs. total review counts.
        """
        reviews = []
        total_reviews = 0
        raw_files_urls = glob.glob("raw_data/*.json")
        for raw_files_url in raw_files_urls:
            # Context manager guarantees the handle is closed even on a
            # JSON decode error (the original leaked the file object).
            with open(raw_files_url) as raw_file:
                raw_reviews = json.load(raw_file)
            total_reviews += len(raw_reviews)
            # Keep only reviews long enough to be useful for training.
            for review in raw_reviews:
                tokens = word_tokenize(review["review"])
                if len(tokens) > 350:
                    reviews.append(review)
        print("Filtered Reviews : ", len(reviews))
        print("Total Reviews : ", total_reviews)
        with open('data.json', 'w') as out_file:
            json.dump(reviews, out_file)
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup for tree-math."""
import setuptools
# Runtime dependency: everything else comes from the standard library.
base_requires = [
    'jax',
]
# Extra dependencies needed to run the test suite.
tests_requires = [
    'absl-py',
    'jaxlib',
    'numpy>=1.17',
    'pytest',
]
setuptools.setup(
    name='tree-math',
    description='Mathematical operations for JAX pytrees',
    # Fixed: the version string had a trailing space ('0.1.0 '), which is
    # not a valid PEP 440 version and confuses packaging tools.
    version='0.1.0',
    license='Apache 2.0',
    author='Google LLC',
    author_email='noreply@google.com',
    install_requires=base_requires,
    extras_require={
        'tests': tests_requires,
    },
    url='https://github.com/google/tree-math',
    packages=setuptools.find_packages(),
    python_requires='>=3',
)
| google/tree-math | setup.py | Python | apache-2.0 | 1,243 |
# -*- coding: utf-8 -*-
#
# rotterdam documentation build configuration file, created by
# sphinx-quickstart on Fri Aug 1 11:54:37 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinxcontrib.spelling',   # requires the sphinxcontrib-spelling package
    'sphinxcontrib.blockdiag',  # requires the sphinxcontrib-blockdiag package
    'sphinxcontrib.seqdiag'     # requires the sphinxcontrib-seqdiag package
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'rotterdam'
copyright = u'2014, the rotterdam authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE(review): keep `version`/`release` in sync with the package metadata.
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'  # classic built-in theme shipped with Sphinx
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']  # custom assets copied into the build's _static/
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'rotterdamdoc'  # output base name for the HTML Help builder
# -- Options for LaTeX output ---------------------------------------------
# Overrides for the LaTeX builder; all defaults are accepted as generated.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'rotterdam.tex', u'rotterdam Documentation',
     u'the rotterdam authors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'rotterdam', u'rotterdam Documentation',
     [u'the rotterdam authors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
# NOTE(review): the description below is still the sphinx-quickstart
# placeholder text; replace with a real one-line project description.
texinfo_documents = [
    ('index', 'rotterdam', u'rotterdam Documentation',
     u'the rotterdam authors', 'rotterdam', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for spelling output -------------------------------------------
spelling_word_list_filename = 'spelling_wordlist.txt'  # extra words accepted by sphinxcontrib-spelling
# -- Options for seqdiag, blockdiag... -------------------------------------
# Bundle a Unicode-capable font for the diagram extensions; the .ttf file
# lives next to this configuration module.
diag_fontpath = os.path.join(os.path.dirname(__file__), "DejaVuSans.ttf")
blockdiag_fontpath = diag_fontpath
seqdiag_fontpath = diag_fontpath
blockdiag_antialias = True
seqdiag_antialias = True
| lvh/rotterdam | docs/conf.py | Python | apache-2.0 | 8,780 |
from django import forms
from django.contrib.gis.geos import Point
from widgets import AddAnotherWidgetWrapper
from django.core.exceptions import ValidationError
from .models import (Site, CycleResultSet, Monitor, ProgrammeResources,
ProgrammeImage)
class SiteForm(forms.ModelForm):
    """Admin form for Site exposing the hidden PointField as two decimal inputs."""

    latitude = forms.DecimalField(
        min_value=-90,
        max_value=90,
        required=False,
    )
    longitude = forms.DecimalField(
        min_value=-180,
        max_value=180,
        required=False,
    )

    class Meta(object):
        model = Site
        exclude = []
        widgets = {'coordinates': forms.HiddenInput()}

    def __init__(self, *args, **kwargs):
        if args:  # bound form: derive the hidden Point from the two inputs
            data = args[0]
            # Use .get() so a POST that omits either field doesn't raise
            # KeyError (the original indexed `data['latitude']` directly).
            if data.get('latitude') and data.get('longitude'):
                latitude = float(data['latitude'])
                longitude = float(data['longitude'])
                data['coordinates'] = Point(longitude, latitude)
        # Unbound form for an existing instance: pre-fill the two decimal
        # fields from the stored Point (GEOS points are (lon, lat)).
        if 'instance' in kwargs and kwargs['instance'] is not None and kwargs['instance'].coordinates:
            coordinates = kwargs['instance'].coordinates.tuple
            initial = kwargs.get('initial', {})
            initial['longitude'] = coordinates[0]
            initial['latitude'] = coordinates[1]
            kwargs['initial'] = initial
        super(SiteForm, self).__init__(*args, **kwargs)
class CycleResultSetForm(forms.ModelForm):
    """Admin form for CycleResultSet; restricts monitors to the instance's partner."""

    site_option_name = forms.CharField(widget=forms.TextInput)

    class Meta(object):
        model = CycleResultSet
        exclude = []

    def __init__(self, *args, **kwargs):
        super(CycleResultSetForm, self).__init__(*args, **kwargs)
        instance = kwargs.get('instance', None)
        # Only offer monitors belonging to this result set's partner
        # (partner=None when creating a new record).
        partner = instance.partner if instance else None
        self.fields['monitors'].queryset = Monitor.objects.filter(
            partner=partner)
        self.fields['site_option_name'].help_text = (
            "This is the name of the option for this site in the form, e.g. for "
            "'Folweni clinic' it's probably 'folweni' (without the single quotes). "
            "You can find the names of options in the relevant Survey admin page.")
class CRSFromKoboForm(forms.Form):
    """Dynamic form pairing each Kobo facility with a CycleResultSet choice."""

    def __init__(self, *args, **kwargs):
        facilities = kwargs.pop('facilities')
        super(CRSFromKoboForm, self).__init__(*args, **kwargs)
        for idx, facility in enumerate(facilities):
            chooser = forms.ModelChoiceField(
                queryset=CycleResultSet.objects.order_by('site__name').all(),
                label=facility['label'])
            # Wrap the widget so a CycleResultSet can be created inline.
            chooser.widget = AddAnotherWidgetWrapper(chooser.widget,
                                                     CycleResultSet)
            self.fields['crs_%d' % idx] = chooser
            self.fields['facility_%d' % idx] = forms.CharField(
                widget=forms.HiddenInput(), initial=facility['name'])
        # Record how many facility/chooser pairs the view should read back.
        self.fields['num_facilities'] = forms.CharField(
            widget=forms.HiddenInput(), initial=len(facilities))
class ProgrammeResourcesForm(forms.ModelForm):
    """Validation form for ProgrammeResources.

    Ensures each resource has exactly the attachment type (link vs document)
    its resource type requires, and that the (programme, resource, order)
    combination is unique.
    """

    class Meta:
        model = ProgrammeResources
        exclude = ('document_extension', )

    def clean(self):
        link = self.cleaned_data.get('link')
        document = self.cleaned_data.get('document')
        order_no = self.cleaned_data.get('order')
        resource = self.cleaned_data.get('resource')
        programme = self.cleaned_data.get('programme')
        # `resource` is None when the field itself failed validation; guard
        # so we don't crash with AttributeError on `resource.name`.
        if resource is not None:
            # NOTE(review): these checks mix the spellings 'Link' and
            # 'Links' — one of them is probably wrong; confirm against the
            # Resource records in the database.
            if resource.name == 'Link' and link is None:
                raise ValidationError('Enter a link')
            if resource.name == 'Reports' and document is None:
                raise ValidationError('Upload a document')
            if resource.name == 'Survey Instrument' and document is None:
                raise ValidationError('Upload a document')
        if link and document:
            raise ValidationError(
                "You cant have an External link and a Document")
        # Exclude the row being edited so re-saving an existing resource
        # does not trip the uniqueness check against itself.
        duplicates = ProgrammeResources.objects.filter(
            order=order_no, resource=resource, programme=programme)
        if self.instance.pk:
            duplicates = duplicates.exclude(pk=self.instance.pk)
        if duplicates.exists():
            raise ValidationError(
                'A Resource already exists for this order number')
        if resource is not None:
            if resource.name == 'Links' and document:
                raise ValidationError(
                    'A resource of type Link cannot have a document, expecting a link'
                )
            if resource.name == 'Reports' and link:
                raise ValidationError(
                    'A resource of type Reports cannot have a link, expecting a document'
                )
            if resource.name == 'Survey Instrument' and link:
                raise ValidationError(
                    'A resource of type Survey Instrument cannot have a link, expecting a document'
                )
        return self.cleaned_data
class ProgrammeImageForm(forms.ModelForm):
    """Validation form for ProgrammeImage: at most one featured image per programme."""

    class Meta:
        model = ProgrammeImage
        fields = '__all__'

    def clean(self):
        featured = self.cleaned_data.get('featured')
        programme = self.cleaned_data.get('programme')
        if featured:
            others = ProgrammeImage.objects.filter(
                programme=programme, featured=True)
            # Exclude the image being edited so re-saving the current
            # featured image does not conflict with itself (the original
            # raised a validation error in that case).
            if self.instance.pk:
                others = others.exclude(pk=self.instance.pk)
            if others.exists():
                raise ValidationError(
                    "An image in this programme is already marked as a featured image"
                )
        return self.cleaned_data
| Code4SA/umibukela | umibukela/forms.py | Python | mit | 5,345 |
# Generated by Django 2.2.10 on 2020-02-04 09:02
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import main.models.user
import timezone_field.fields
class Migration(migrations.Migration):
    # Auto-generated initial migration for the `main` app: creates the custom
    # email-based User model plus a GroupExtras one-to-one extension of
    # django.contrib.auth's Group.  Do not hand-edit the field definitions.

    initial = True

    dependencies = [
        # Needs auth's Group/Permission tables in their post-0011 state.
        ('auth', '0011_update_proxy_permissions'),
    ]

    operations = [
        migrations.CreateModel(
            name='GroupExtras',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Deleting the auth.Group cascades to its extras row.
                ('group', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='extras', to='auth.Group')),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
                ('last_name', models.CharField(blank=True, max_length=150, verbose_name='last name')),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('is_email_verified', models.BooleanField(default=False)),
                # Login identifier is the (unique) email address ...
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
                # ... while username is a random hex UID generated by
                # main.models.user.uuid4_hex.
                ('username', models.CharField(default=main.models.user.uuid4_hex, error_messages={'unique': 'A user with that UID already exists.'}, help_text='Required.', max_length=150, unique=True, verbose_name='username')),
                ('password_changed_at', models.DateTimeField(blank=True, null=True)),
                ('timezone', timezone_field.fields.TimeZoneField(default='UTC')),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'permissions': (('can_view_dashboard', 'Can view dashboard'),),
            },
            managers=[
                ('objects', main.models.user.UserModelManager()),
            ],
        ),
    ]
| makeev/django-boilerplate | back/main/migrations/0001_initial.py | Python | mit | 3,330 |
# -*- coding: utf-8 -*-
#-------------------------------------------------
#-- reconstruction workbench
#--
#-- microelly 2016 v 0.1
#--
#-- GNU Lesser General Public License (LGPL)
#-------------------------------------------------
from say import *
import cv2
import reconstruction.mpl
reload(reconstruction.mpl)
# import cProfile
def context(ed, x, y):
    '''Calculate the neighbourhood context of pixel (x, y) in binary image ed.

    Returns [count, mask]:
      count -- number of set pixels among the 8 neighbours
               (4-connected "plus" + diagonal "cross"),
      mask  -- bitmask encoding *which* neighbours are set
               (weights 1,2,4,...,128 clockwise).
    Background pixels (ed[x, y] == 0) yield [0, 0].
    '''
    if ed[x, y] == 0:
        return [0, 0]
    try:
        plus = ed[x+1, y] + ed[x, y+1] + ed[x-1, y] + ed[x, y-1]
        cross = ed[x+1, y+1] + ed[x-1, y-1] + ed[x-1, y+1] + ed[x+1, y-1]
        ivals = 2*ed[x+1, y] + ed[x, y+1] + 64*ed[x-1, y] + 128*ed[x, y-1]
        ivals += 4*ed[x+1, y+1] + 32*ed[x-1, y-1] + 16*ed[x-1, y+1] + 8*ed[x+1, y-1]
    except IndexError:
        # Only right/bottom border overruns raise (negative indices wrap
        # around silently in numpy); treat border pixels as isolated.
        # Was a bare `except:` which also hid unrelated errors.
        plus = 0
        cross = 0
        ivals = 0
    return [plus + cross, ivals]
# BGR colour per connectivity class: index is the neighbour count returned
# by context() (0..8); used to paint the classified image in run().
colortab=[
    [255,255,255], # 0
    [0,255,255],   # 1
    [0,255,0],     # 2
    [255,0,255],   # 3
    [0,255,255],   # 4
    [0,0,255],     # 5
    [0,0,255],     # 6
    [0,0,255],     # 7
    [0,0,255]      # 8
]
def run(ed, cimg, ed2, showPics=True):
    '''Classify every pixel of the binary edge image ed by its connectivity.

    Side effects: paints cimg with colortab colours and writes a grey-level
    class image into ed2.  Returns [startpts, dreier, vieler]:
    end points (1 neighbour), junction candidates (3) and crossings (>=4).
    Note: startpts holds lists [x, y] while the other two hold tuples.
    '''
    startpts=[]; dreier=[]; vieler=[]
    l,w=ed.shape
    counts=[0,0,0,0,0,0,0,0,0,0]   # histogram of neighbour counts (0..8)
    for x in range(l):
        for y in range(w):
            [ix,ivals] = context(ed,x,y)
            if ix==1:
                startpts.append([x,y])
            elif ix==3:
                dreier.append((x,y))
            elif ix>=4:
                vieler.append((x,y))
            counts[ix] += 1
            cimg[x,y]=colortab[ix]
            # grey-level encoding of the class for the follow-up path search
            if ix<5:
                ed2[x,y]=ix*40
            else:
                ed2[x,y]=255
    print counts
    if showPics: cv2.imshow('context image',cimg)
    return [startpts,dreier,vieler]
# colorate the points
def colvieler(vieler, cimg):
    """Mark every crossing point with a filled blue circle in cimg.

    Note: image coordinates are (col, row), hence the (py, px) swap.
    """
    for (px, py) in vieler:
        cv2.circle(cimg, (py, px), 4, (255, 0, 0), -1)
def coldreier(dreier, cimg):
    """Mark every three-neighbour point with a small magenta circle in cimg."""
    for (px, py) in dreier:
        cv2.circle(cimg, (py, px), 2, (255, 0, 255), -1)
def colstart(startpts, cimg):
    """Mark every path end point with a small dark-cyan circle in cimg."""
    for pt in startpts:
        px, py = pt[0], pt[1]
        cv2.circle(cimg, (py, px), 2, (55, 55, 0), -1)
def runpath(ed,x,y):
''' find path starting at x,y '''
path=[(x,y)]
fin=False
while not fin:
fin=True
for p in [(x+1,y),(x+1,y+1),(x+1,y-1),(x,y+1),(x,y-1),(x-1,y),(x-1,y-1),(x-1,y+1)]:
if p not in path and ed[p[1]][p[0]]>0:
ed[p[1]][p[0]]=0
path.append(p)
fin=False
x,y = p[0],p[1]
break
return path
def findpathlist(ed, showPics=True):
    '''Generate the list of pixel paths found in the (mutated) image ed.

    Scans every pixel; each hit seeds runpath(), which consumes the path's
    pixels from ed.  Only paths longer than 4 points are kept.
    '''
    pathlist=[]
    # NOTE(review): shape is (rows, cols), so `w` here is the row count and
    # `l` the column count -- names are swapped but used consistently below.
    w,l=ed.shape
    for x in range(l):
        for y in range(w):
            if ed[y][x] :
                path=runpath(ed,x,y)
                if len(path)>4:
                    if showPics:
                        cv2.imshow('remaining points',ed)
                        cv2.waitKey(1)
                        # assumes the FreeCAD GUI (`Gui` from the star import)
                        Gui.updateGui()
                    # NOTE(review): indentation reconstructed -- append is
                    # assumed to sit under the len(path)>4 filter; confirm.
                    pathlist.append(path)
    return pathlist
def xylist(pa):
    '''Convert a point list [(x, y), ...] into [xs, ys] for matplotlib.'''
    xs = [pt[0] for pt in pa]
    ys = [pt[1] for pt in pa]
    return [xs, ys]
def part(pa,i):
    '''Create and show a FreeCAD polygon from point list pa at height z = i*10.

    Returns the newly created document object.
    FreeCAD/Part/App/Gui come from the `say` star import.
    '''
    points=[FreeCAD.Vector(p[0],p[1],i*10) for p in pa]
    pol=Part.makePolygon(points)
    Part.show(pol)
    # Part.show created the object; grab it back from the active document.
    t=App.ActiveDocument.ActiveObject
    Gui.updateGui()
    return t
'''
running mean
http://stackoverflow.com/questions/13728392/moving-average-or-running-mean
Fortunately, numpy includes a convolve function which we can use to speed things up.
The running mean is equivalent to convolving x with a vector that is N long,
with all members equal to 1/N. The numpy implementation of convolve includes
the starting transient, so you have to remove the first N-1 points:
'''
def runningMeanFast(x, N):
    """Running mean of x with window N via convolution.

    Convolves x with a length-N averaging kernel and drops the first N-1
    "starting transient" samples of the full convolution (see the comment
    block above); the tail transient is kept, as before.
    """
    kernel = np.ones(N) / float(N)
    return np.convolve(x, kernel)[N - 1:]
class PathFinder():
    """Pipeline driver: edge image -> classified pixels -> pixel paths ->
    matplotlib preview and FreeCAD polygons.

    Expects self.img (or self.fn as a fallback file name) to be set by the
    caller before run() is invoked.
    """

    def run(self, minPathPoints, showPics, obj):
        """Run the full reconstruction; returns the list of kept paths.

        minPathPoints -- minimum path length to keep for plotting/polygons
        showPics      -- show intermediate cv2 windows
        obj           -- config object; only obj.useCanny is read here
        """
        try:
            img=self.img
        except:
            # no in-memory image: fall back to loading self.fn from disk
            sayexc("kein image self.img")
            fn=self.fn
            img = cv2.imread(fn,0)
        if obj.useCanny:
            edges = cv2.Canny(img,10,255)
        else:
            edges = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        # ed2: class image (filled by run()); ed: 0/1 copy of the edges
        ed2=edges
        ed2 = 0*ed2
        ed= edges >0
        ed = 1* ed
        # classify the points
        cimg = cv2.cvtColor(edges,cv2.COLOR_GRAY2BGR)
        [startpts,dreier,vieler]=run(ed,cimg,ed2,showPics)
        colvieler(vieler,cimg)
        coldreier(dreier,cimg)
        colstart(startpts,cimg)
        self.imgOut=cimg
        self.imgOut=255-cimg   # inverted copy wins; first assignment is dead
        if showPics: cv2.imshow('Canny Edge Detection',ed2)
        pl=findpathlist(ed2,showPics)
        pl2=[]
        # draw a path map
        mplw=reconstruction.mpl.MatplotlibWidget()
        print ("processed pathes ...")
        for i,pa in enumerate(pl):
            # skip the short pathes
            if len(pa)>minPathPoints:
                [xl,yl]=xylist(pa)
                yl=np.array(yl)
                mplw.plot(xl,-yl)   # flip y so the plot matches image orientation
                print [i,len(pa)]
                pl2.append(pa)
        mplw.show()
        FreeCAD.mplw2=mplw   # keep a module-level reference so the widget survives
        sels=0
        for i,pa in enumerate(pl2):
            sels += 1
            t=part(pa,i)
        print (len(pl)," pathes found", sels," pathes selected")
        Gui.SendMsgToActiveView("ViewFit")
        Gui.activeDocument().activeView().viewBottom()
        Gui.SendMsgToActiveView("ViewFit")
        return pl2
| microelly2/reconstruction | reconstruction/pathfinder.py | Python | lgpl-3.0 | 4,547 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009, 2013 Zuza Software Foundation
#
# This file is part of Pootle.
#
# Pootle is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# Pootle is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# Pootle; if not, see <http://www.gnu.org/licenses/>.
from django import template
register = template.Library()
@register.inclusion_tag('terminology/term_edit.html', takes_context=True)
def render_term_edit(context, form):
    """Render the terminology unit edit form snippet.

    Passes the form, its unit instance, and the language pair from the
    surrounding template context through to term_edit.html.
    """
    return {
        'unit': form.instance,
        'form': form,
        'language': context['language'],
        'source_language': context['source_language'],
    }
| arky/pootle-dev | pootle/apps/pootle_terminology/templatetags/terminology_tags.py | Python | gpl-2.0 | 1,119 |
# Copyright (C) 2012,2013,2016
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
**************************************
pathintegral - nuclear quantum effects
**************************************
- method to automatically run the system including nuclear quantum effects using the Feynman path-integral
!!WARNING: THIS IS STILL AN EXPERIMENTAL FEATURE!!
This method creates, based on the supplied topology of the system, an path-integral representation with P beads.
The path-integral system is a fully classical analog, which has to be run at an effective temperature P*T.
The method needs the following parameters:
* allParticles
particles of the system
* props
particle properties
* types
types, e.g. read from the gromacs parser
* system
* exclusions
non-bonded exclusions
* integrator
* langevin
langevin integrator
* rcut
the cutoff used for the rings non-bonded interactions
* P
the Trotter Number (number of imaginary time slices)
* polymerInitR
polymer radius for setting up ring in 2d plane
* hbar
hbar in gromacs units [kJ/mol ps]
* disableVVl
disable Virtual Verlet List (slow but safe). If false, the neighbour search is based on the VirtualParticles extension, which contain
the rings. This speeds up neighbour search significantly.
"""
import copy
import math
import espressopp
from espressopp import Real3D, Int3D
def createPathintegralSystem(allParticles,
                             props,
                             types,
                             system,
                             exclusions,
                             integrator,
                             langevin,
                             rcut,
                             P,
                             polymerInitR=0.01,
                             hbar=0.063507807,
                             disableVVL=False
                             ):
    """Turn the classical system into a path-integral system with P beads.

    For every classical particle, P-1 additional "imaginary time" bead
    particles are created (each with its own shifted type id), ring-polymer
    harmonic springs are added between consecutive beads, all bonded and
    non-bonded interactions are replicated per imaginary-time slice, and the
    Langevin temperature is scaled by P.  See the module docstring for the
    meaning of each parameter.  Returns the per-slice VerletList dict when
    the VirtualVerletList optimisation is enabled, else None.
    """
    # Turns the classical system into a Pathintegral system with P beads
    numtypes = max(types)+1
    num_cla_part = len(allParticles)
    ## make a dictionary mapping property name -> column index in a particle
    ##(TODO: better to use esp++ particle ?)
    propDict = {}
    for p in props: propDict.update({p:len(propDict)})
    piParticles = []
    ringids = {}  # key: classical particle id, value: ids of its ring-polymer beads
    vptuples = []
    if not disableVVL:
        vcl = espressopp.CellList()
        ftpl = espressopp.FixedTupleList(system.storage)
        #vvl=espressopp.VirtualVerletList(system, rcut, ftpl)
        vvl = espressopp.VirtualVerletList(system, rcut, ftpl)
        # create a cell list which will store the virtual particles after domain decomposition
        vvl.setCellList(vcl)
    ## some data structures that will be useful later
    ## ringids has all imaginary time beads belonging to a classical bead pid
    ## allParticlesById is used to access particle properties by pid
    allParticlesById = {}
    for p in allParticles:
        pid = p[propDict['id']]
        ringids.update({pid:[]})
        allParticlesById.update({pid:p})
    # create the P-1 bead copies of every classical particle
    for i in xrange(1,P):
        for p in allParticles:
            pid = p[propDict['id']]
            newparticle = copy.deepcopy(p)
            # set types according to imag time index
            newparticle[propDict['type']] = newparticle[propDict['type']]+numtypes*i
            # set positions: spread the beads on a small circle in the xy plane
            newpos = newparticle[propDict['pos']]
            newpos[0] = newpos[0]+polymerInitR*math.cos(i*2*math.pi/P)-polymerInitR
            newpos[1] = newpos[1]+polymerInitR*math.sin(i*2*math.pi/P)
            newid = len(allParticles)+len(piParticles)+1
            newparticle[propDict['id']] = newid
            piParticles.append(newparticle)
            ringids[pid].append(newid)
    if not disableVVL:
        iVerletLists = {}
        # one (initially disconnected) VerletList per imaginary-time slice
        for i in xrange(1,P+1):
            iVerletLists.update({i:espressopp.VerletList(system, 0, rebuild=False)})
            iVerletLists[i].disconnect()
        ## map types to sub-verlet lists using the VirtualVerletList classical
        ## classical types are in types
        ## type at imaginary time i=t+numtypes*i
        for i in xrange(1,P+1):
            tt = []
            for j in xrange(0, numtypes):
                pitype = types[j]+numtypes*(i-1)
                tt.append(pitype)
            #print i, "mapped", tt, " to ", iVerletLists[i]
            # NOTE(review): always maps to iVerletLists[1], not [i] -- confirm
            vvl.mapTypeToVerletList(tt, iVerletLists[1])
    system.storage.addParticles(piParticles, *props)
    # store each ring in a FixedTupleList, headed by a virtual particle at
    # the ring's centre of geometry
    if not disableVVL:
        vParticles = []
        vptype = numtypes*(P+1)+1  # this is the type assigned to virtual particles
        for k, v in ringids.iteritems():
            cog = allParticlesById[k][propDict['pos']]
            for pid in v:
                # NOTE(review): adds the *classical* particle's position once
                # per bead (k, not pid) -- the beads are not in
                # allParticlesById, so this always equals the classical
                # position; confirm whether bead positions were intended.
                cog = cog+allParticlesById[k][propDict['pos']]
            cog = cog/(len(v)+1)
            #create a virtual particle for each ring
            vpprops = ['id', 'pos', 'v', 'type', 'mass', 'q']
            vpid = len(allParticles)+len(piParticles)+len(vParticles)+1
            part = [vpid ,cog,Real3D(0, 0, 0), vptype, 0, 0]
            vParticles.append(part)
            # first item in tuple is the virtual particle id:
            t = [vpid]
            t.append(k)
            t = t+v
            vptuples.append(t)
            #print "VPARTICLE", part, "TUPLE", t
        system.storage.addParticles(vParticles, *vpprops)
        #always decompose before adding tuples
        system.storage.decompose()
        for t in vptuples:
            ftpl.addTuple(t)
        extVP = espressopp.integrator.ExtVirtualParticles(system, vcl)
        extVP.addVirtualParticleTypes([vptype])
        extVP.setFixedTupleList(ftpl)
        integrator.addExtension(extVP)
    # expand non-bonded potentials: copy each classical type pair's potential
    # to the shifted type ids of every imaginary-time slice
    numInteraction = system.getNumberOfInteractions()
    for n in xrange(numInteraction):
        interaction = system.getInteraction(n)
        ## TODO: in case of VVL: clone interaction, add potential!
        print "expanding interaction", interaction
        if interaction.bondType() == espressopp.interaction.Nonbonded:
            for i in xrange(P):
                for j in xrange(numtypes):
                    for k in xrange(numtypes):
                        pot = interaction.getPotential(j, k)
                        interaction.setPotential(numtypes*i+j, numtypes*i+k, pot)
                        print "Interaction", numtypes*i+j, numtypes*i+k, pot
            if not disableVVL:
                vl = interaction.getVerletList()
                #print "VL has", vl.totalSize(),"disconnecting"
                vl.disconnect()
                interaction.setVerletList(iVerletLists[1])
        if interaction.bondType() == espressopp.interaction.Pair:
            bond_fpl = interaction.getFixedPairList()
            cla_bonds = []
            # loop over bond lists returned by each cpu
            for l in bond_fpl.getBonds():
                cla_bonds.extend(l)
            for i in xrange(1, P):
                tmp = 0
                for b in cla_bonds:
                    # create additional bonds for this imag time
                    bond_fpl.add(b[0]+num_cla_part*i, b[1]+num_cla_part*i)
                    tmp += 1
                #print "trying to add", tmp, "bonds"
                #print "i=", i, " PI BONDS", bond_fpl.size()
        if interaction.bondType() == espressopp.interaction.Angular:
            angle_ftl = interaction.getFixedTripleList()
            # loop over triple lists returned by each cpu
            cla_angles = []
            for l in angle_ftl.getTriples():
                cla_angles.extend(l)
            #print "CLA_ANGLES", cla_angles
            for i in xrange(1, P):
                for a in cla_angles:
                    # create additional angles for this imag time
                    angle_ftl.add(a[0]+num_cla_part*i,
                                  a[1]+num_cla_part*i, a[2]+num_cla_part*i)
        if interaction.bondType() == espressopp.interaction.Dihedral:
            dihedral_fql = interaction.getFixedQuadrupleList()
            cla_dihedrals = []
            for l in dihedral_fql.getQuadruples():
                cla_dihedrals.extend(l)
            for i in xrange(1, P):
                for d in cla_dihedrals:
                    # create additional dihedrals for this imag time
                    dihedral_fql.add(d[0]+num_cla_part*i,
                                     d[1]+num_cla_part*i, d[2]+num_cla_part*i, d[3]+num_cla_part*i)
    # replicate the non-bonded exclusions for every imaginary-time slice
    piexcl = []
    for i in xrange(1, P):
        for e in exclusions:
            # create additional exclusions for this imag time
            piexcl.append((e[0]+num_cla_part*i, e[1]+num_cla_part*i))
    exclusions.extend(piexcl)
    if not disableVVL:
        vvl.exclude(exclusions)
    # now we analyze how many unique different masses are in the system as we
    # have to create a harmonic spring interaction for each of them
    unique_masses = []
    for p in allParticles:
        mass = p[propDict['mass']]
        if not mass in unique_masses:
            unique_masses.append(mass)
    kineticTermInteractions = {}  # key: mass, value: corresponding harmonic spring interaction
    for m in unique_masses:
        fpl = espressopp.FixedPairList(system.storage)
        # spring constant of the ring polymer: m * P^2 * (kT)^2 / hbar^2
        k = m*P*P*langevin.temperature*langevin.temperature/(hbar*hbar)
        pot = espressopp.interaction.Harmonic(k,0.0)
        interb = espressopp.interaction.FixedPairListHarmonic(system, fpl, pot)
        system.addInteraction(interb)
        kineticTermInteractions.update({m:interb})
    for idcla, idpi in ringids.iteritems():
        p = allParticlesById[idcla]
        mass = p[propDict['mass']]
        interactionList = kineticTermInteractions[mass].getFixedPairList()  # find the appropriate interaction based on the mass
        # harmonic spring between atom at imag-time i and imag-time i+1
        for i in xrange(len(idpi)-1):
            interactionList.add(idpi[i],idpi[i+1])
        #close the ring through the classical particle
        interactionList.add(idcla,idpi[0])
        interactionList.add(idcla,idpi[len(idpi)-1])
    # instead of scaling the potentials, we scale the temperature!
    langevin.temperature = langevin.temperature*P
    if not disableVVL:
        return iVerletLists
| kkreis/espressopp | src/tools/pathintegral.py | Python | gpl-3.0 | 9,666 |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
"""Core rules for Pants to operate correctly.
These are always activated and cannot be disabled.
"""
from pants.core.goals import fmt, lint, package, repl, run, tailor, test, typecheck
from pants.core.target_types import ArchiveTarget, Files, GenericTarget, RelocatedFiles, Resources
from pants.core.target_types import rules as target_type_rules
from pants.core.util_rules import (
archive,
distdir,
external_tool,
filter_empty_sources,
pants_bin,
source_files,
stripped_source_files,
subprocess_environment,
)
from pants.goal import stats_aggregator
from pants.source import source_root
def rules():
    """All rules activated by Pants core (always on, cannot be disabled)."""
    goal_rules = [
        *fmt.rules(),
        *lint.rules(),
        *package.rules(),
        *repl.rules(),
        *run.rules(),
        *test.rules(),
        *typecheck.rules(),
        *tailor.rules(),
    ]
    util_rules = [
        *distdir.rules(),
        *filter_empty_sources.rules(),
        *pants_bin.rules(),
        *source_files.rules(),
        *stripped_source_files.rules(),
        *archive.rules(),
        *external_tool.rules(),
        *subprocess_environment.rules(),
    ]
    misc_rules = [
        *source_root.rules(),
        *target_type_rules(),
        *stats_aggregator.rules(),
    ]
    return [*goal_rules, *util_rules, *misc_rules]
def target_types():
    """Target types registered by Pants core."""
    return [
        ArchiveTarget,
        Files,
        GenericTarget,
        Resources,
        RelocatedFiles,
    ]
| jsirois/pants | src/python/pants/core/register.py | Python | apache-2.0 | 1,474 |
import sys
import maya.OpenMaya as OpenMaya
import maya.OpenMayaMPx as OpenMayaMPx
from maya.mel import eval as meval
from mesh_maya_tube import MayaTube
kPluginNodeTypeName = "tubeDeformer"
tubeDeformerId = OpenMaya.MTypeId(0x0020A52C)
class tubeDeformer(OpenMayaMPx.MPxNode):
    """Maya DG node that extrudes a tube mesh along an input curve.

    Inputs (attributes created in nodeInitializer): a path curve, an
    optional profile (mesh or NURBS curve; falls back to a circle with
    circleSegments sides), plus scale/rotate/taper/twist/growth controls.
    Output: outputMesh, built by mesh_maya_tube.MayaTube.
    """
    # class variables: attribute handles, populated by nodeInitializer()
    #firsttime == 1
    segments = OpenMaya.MObject()
    circleSegments = OpenMaya.MObject()
    curve = OpenMaya.MObject()
    profileScale = OpenMaya.MObject()
    profileRotate = OpenMaya.MObject()
    taper = OpenMaya.MObject()
    twist = OpenMaya.MObject()
    growth = OpenMaya.MObject()
    profile = OpenMaya.MObject()
    #output = OpenMaya.MObject()
    outputMesh = OpenMaya.MObject()
    fixType = OpenMaya.MObject()
    cap = OpenMaya.MObject()
    scaleCorners = OpenMaya.MObject()
    evenDistribute = OpenMaya.MObject()
    reverse = OpenMaya.MObject()

    def __init__(self):
        OpenMayaMPx.MPxNode.__init__(self)

    def compute(self,plug,dataBlock):
        """Rebuild outputMesh whenever Maya asks for it; pass through otherwise."""
        if plug == tubeDeformer.outputMesh:
            # --- read scalar inputs ---
            segmentsHandle = dataBlock.inputValue( self.segments )
            segmentsValue = segmentsHandle.asInt()
            circleSegmentsHandle = dataBlock.inputValue( self.circleSegments )
            circleSegmentsValue = circleSegmentsHandle.asInt()
            # fewer than 2 circle segments means "no fallback circle"
            if circleSegmentsValue<2: circleSegmentsValue=0
            fixHandle = dataBlock.inputValue( self.fixType )
            fixValue = fixHandle.asInt()
            capHandle = dataBlock.inputValue( self.cap )
            capValue = capHandle.asShort()
            scaleCornersHandle = dataBlock.inputValue( self.scaleCorners )
            scaleCornersValue = scaleCornersHandle.asShort()
            evenDistributeHandle = dataBlock.inputValue( self.evenDistribute )
            evenDistributeValue = evenDistributeHandle.asShort()
            reverseHandle = dataBlock.inputValue( self.reverse )
            reverseValue = reverseHandle.asShort()
            profileScaleHandle = dataBlock.inputValue( self.profileScale )
            profileScaleValue = profileScaleHandle.asDouble()
            profileRotateHandle = dataBlock.inputValue( self.profileRotate )
            profileRotateValue = profileRotateHandle.asDouble()
            taperHandle = dataBlock.inputValue( self.taper )
            taperValue = taperHandle.asDouble()
            twistHandle = dataBlock.inputValue( self.twist )
            twistValue = twistHandle.asDouble()
            growthHandle = dataBlock.inputValue( self.growth )
            growthValue = growthHandle.asDouble()
            # --- path curve ---
            curveHandle = dataBlock.inputValue( self.curve )
            curveObject = curveHandle.asNurbsCurve()
            curve_fn = OpenMaya.MFnNurbsCurve(curveObject)
            # --- profile: mesh, curve, or generated circle ---
            profileHandle = dataBlock.inputValue( self.profile )
            if profileHandle.data().hasFn(OpenMaya.MFn.kMesh):
                profileObject = profileHandle.asMesh()
                profile_fn = OpenMaya.MFnMesh(profileObject)
                circleSegmentsValue = 0
            elif profileHandle.data().hasFn(OpenMaya.MFn.kNurbsCurve):
                profileObject = profileHandle.asNurbsCurve()
                profile_fn = OpenMaya.MFnNurbsCurve(profileObject)
                circleSegmentsValue = 0
            else:
                # no profile connected: fall back to a circle cross-section
                profile_fn = 0
                circleSegmentsValue = max(circleSegmentsValue,2)
            # --- build the output mesh ---
            outputHandle = dataBlock.outputValue(self.outputMesh)
            dataCreator = OpenMaya.MFnMeshData()
            newOutputData = dataCreator.create()
            tube = MayaTube(curveFn = curve_fn, scale = profileScaleValue, profileFn = profile_fn, parent= newOutputData, scale_corners = 1-scaleCornersValue,\
                segments=segmentsValue, cylinder_segments= circleSegmentsValue, fix =fixValue, rotate = profileRotateValue,taper = taperValue,twist = twistValue,\
                cap = capValue, even = evenDistributeValue, growth = growthValue, reverse = reverseValue)
            outputHandle.setMObject(newOutputData)
            dataBlock.setClean(plug)
        else:
            return OpenMaya.kUnknownParameter
def nodeCreator():
    """Factory: wrap a fresh tubeDeformer node for Maya's plug-in registry."""
    node = tubeDeformer()
    return OpenMayaMPx.asMPxPtr(node)
# initializer
def nodeInitializer():
    """Create all tubeDeformer attributes and wire up their dependencies."""
    # --- generic attributes: profile (mesh or curve) and path curve ---
    gAttr = OpenMaya.MFnGenericAttribute()
    tubeDeformer.profile = gAttr.create( "profile", "prof")
    gAttr.addDataAccept( OpenMaya.MFnData.kMesh )
    gAttr.addDataAccept( OpenMaya.MFnData.kNurbsCurve )
    gAttr.setHidden(False)
    tubeDeformer.curve = gAttr.create( "curve", "curve")
    gAttr.addDataAccept( OpenMaya.MFnData.kNurbsCurve )
    gAttr.setHidden(False)
    # --- numeric attributes (keyable, storable, with soft/hard ranges) ---
    nAttr = OpenMaya.MFnNumericAttribute()
    tubeDeformer.profileScale = nAttr.create( "profileScale", "psc", OpenMaya.MFnNumericData.kDouble, 1.0)
    nAttr.setKeyable(True)
    nAttr.setStorable(True)
    nAttr.setSoftMin(0)
    nAttr.setSoftMax(4)
    tubeDeformer.profileRotate = nAttr.create( "profileRotate", "prot", OpenMaya.MFnNumericData.kDouble, 0.0)
    nAttr.setKeyable(True)
    nAttr.setStorable(True)
    nAttr.setSoftMin(0)
    nAttr.setSoftMax(360)
    tubeDeformer.twist = nAttr.create( "profileTwist", "twist", OpenMaya.MFnNumericData.kDouble, 0.0)
    nAttr.setKeyable(True)
    nAttr.setStorable(True)
    nAttr.setSoftMin(0)
    nAttr.setSoftMax(360)
    tubeDeformer.taper = nAttr.create( "profileTaper", "taper", OpenMaya.MFnNumericData.kDouble, 0.0)
    nAttr.setKeyable(True)
    nAttr.setStorable(True)
    nAttr.setSoftMin(0)
    nAttr.setSoftMax(1)
    tubeDeformer.growth = nAttr.create( "growth", "g", OpenMaya.MFnNumericData.kDouble, 1.0)
    nAttr.setKeyable(True)
    nAttr.setStorable(True)
    nAttr.setMin(0)
    nAttr.setMax(1)
    tubeDeformer.segments = nAttr.create( "segments", "segs", OpenMaya.MFnNumericData.kInt, 2)
    nAttr.setKeyable(True)
    nAttr.setStorable(True)
    nAttr.setMin(3)
    nAttr.setSoftMax(10)
    tubeDeformer.circleSegments = nAttr.create( "circleSegments", "circlesegs", OpenMaya.MFnNumericData.kInt, 8)
    nAttr.setKeyable(True)
    nAttr.setStorable(True)
    nAttr.setMin(1)
    nAttr.setSoftMax(16)
    tubeDeformer.fixType = nAttr.create( "fixType", "fix", OpenMaya.MFnNumericData.kInt, 0)
    nAttr.setMin(0)
    nAttr.setMax(3)
    #mAttr = OpenMaya.MFnMatrixAttribute()
    #tubeDeformer.profileMatrix = mAttr.create("profileMatrix", "profmatr", OpenMaya.MFnNumericData.kFloat )
    #mAttr.setHidden(True)
    # --- ramp attribute (created but not yet used by compute()) ---
    rAttr = OpenMaya.MRampAttribute()
    tubeDeformer.profileRamp = rAttr.createCurveRamp("profileRamp", "pr")
    # --- on/off enum attributes ---
    eAttr = OpenMaya.MFnEnumAttribute()
    tubeDeformer.cap = eAttr.create("cap", "cap", 0)
    eAttr.addField("off", 0);
    eAttr.addField("on", 1);
    eAttr.setHidden(False);
    eAttr.setStorable(True);
    tubeDeformer.scaleCorners = eAttr.create("scaleCorners", "sc", 0)
    eAttr.addField("off", 0);
    eAttr.addField("on", 1);
    eAttr.setHidden(False);
    eAttr.setStorable(True);
    tubeDeformer.evenDistribute = eAttr.create("even", "ev", 0)
    eAttr.addField("off", 0);
    eAttr.addField("on", 1);
    eAttr.setHidden(False);
    eAttr.setStorable(True);
    tubeDeformer.reverse = eAttr.create("reverse", "rv", 0)
    eAttr.addField("off", 0);
    eAttr.addField("on", 1);
    eAttr.setHidden(False);
    eAttr.setStorable(True);
    # --- output mesh ---
    typedAttr = OpenMaya.MFnTypedAttribute()
    tubeDeformer.outputMesh = typedAttr.create("outputMesh", "out", OpenMaya.MFnData.kMesh)
    # add attributes to the node
    tubeDeformer.addAttribute( tubeDeformer.profile )
    tubeDeformer.addAttribute( tubeDeformer.curve )
    tubeDeformer.addAttribute( tubeDeformer.profileScale )
    tubeDeformer.addAttribute( tubeDeformer.profileRotate )
    tubeDeformer.addAttribute( tubeDeformer.taper )
    tubeDeformer.addAttribute( tubeDeformer.twist )
    tubeDeformer.addAttribute( tubeDeformer.growth )
    tubeDeformer.addAttribute( tubeDeformer.segments )
    tubeDeformer.addAttribute( tubeDeformer.fixType )
    #tubeDeformer.addAttribute( tubeDeformer.profileMatrix )
    tubeDeformer.addAttribute( tubeDeformer.profileRamp )
    tubeDeformer.addAttribute( tubeDeformer.cap )
    tubeDeformer.addAttribute( tubeDeformer.scaleCorners )
    tubeDeformer.addAttribute( tubeDeformer.evenDistribute )
    tubeDeformer.addAttribute( tubeDeformer.reverse )
    tubeDeformer.addAttribute( tubeDeformer.circleSegments )
    tubeDeformer.addAttribute( tubeDeformer.outputMesh )
    # every input dirties the output mesh
    tubeDeformer.attributeAffects( tubeDeformer.curve, tubeDeformer.outputMesh )
    tubeDeformer.attributeAffects( tubeDeformer.profile, tubeDeformer.outputMesh )
    tubeDeformer.attributeAffects( tubeDeformer.profileScale, tubeDeformer.outputMesh )
    tubeDeformer.attributeAffects( tubeDeformer.profileRotate, tubeDeformer.outputMesh )
    tubeDeformer.attributeAffects( tubeDeformer.taper, tubeDeformer.outputMesh )
    tubeDeformer.attributeAffects( tubeDeformer.twist, tubeDeformer.outputMesh )
    tubeDeformer.attributeAffects( tubeDeformer.growth, tubeDeformer.outputMesh )
    tubeDeformer.attributeAffects( tubeDeformer.segments, tubeDeformer.outputMesh )
    tubeDeformer.attributeAffects( tubeDeformer.circleSegments, tubeDeformer.outputMesh )
    tubeDeformer.attributeAffects( tubeDeformer.fixType, tubeDeformer.outputMesh )
    tubeDeformer.attributeAffects( tubeDeformer.cap, tubeDeformer.outputMesh )
    tubeDeformer.attributeAffects( tubeDeformer.scaleCorners, tubeDeformer.outputMesh )
    tubeDeformer.attributeAffects( tubeDeformer.evenDistribute, tubeDeformer.outputMesh )
    tubeDeformer.attributeAffects( tubeDeformer.reverse, tubeDeformer.outputMesh )
# initialize the script plug-in
def initializePlugin(mobject):
    """Register the tubeDeformer node when Maya loads the plug-in."""
    mplugin = OpenMayaMPx.MFnPlugin(mobject, "Zhekichan", "0.0.1", "Any")
    try:
        mplugin.registerNode( kPluginNodeTypeName, tubeDeformerId, nodeCreator, nodeInitializer )
    except Exception:
        # Was a bare `except:` which would also swallow KeyboardInterrupt
        # and SystemExit; keep the plug-in load non-fatal but report it.
        sys.stderr.write( "Failed to register node: %s\n" % kPluginNodeTypeName )
# uninitialize the script plug-in
def uninitializePlugin(mobject):
    """Deregister the tubeDeformer node when Maya unloads the plug-in."""
    mplugin = OpenMayaMPx.MFnPlugin(mobject)
    try:
        mplugin.deregisterNode( tubeDeformerId )
    except Exception:
        # Narrowed from a bare `except:` (which also caught SystemExit etc.).
        sys.stderr.write( "Failed to unregister node: %s\n" % kPluginNodeTypeName )
mel = '''
global proc tubeDeformer()
{
string $sel[] = `ls -sl -tr`;
if (size($sel)==2)
{
string $curve = $sel[0];
string $profile = $sel[1];
string $curveshape[] = `listRelatives -s $curve`;
string $profileshape[] = `listRelatives -s $profile`;
//string $tubedeformer[] = `deformer -typ "tubeDeformer" -n "tubeDeformer" $profile`;
string $tubedeformer = `createNode "tubeDeformer"`;
connectAttr -f ($curveshape[0]+".worldSpace[0]") ($tubedeformer+".curve");
string $get = `objectType $profileshape[0]`;
if($get=="nurbsCurve")
{
connectAttr -f ($profileshape[0]+".worldSpace[0]") ($tubedeformer+".profile");
}
if($get=="mesh")
{
connectAttr -f ($profileshape[0]+".worldMesh[0]") ($tubedeformer+".profile");
}
string $outMesh = `createNode mesh`;
connectAttr -f ($tubedeformer+".outputMesh") ($outMesh+".inMesh");
sets -e -fe "initialShadingGroup" $outMesh;
}
else
{
//error "please select curve and profile: first curve then the mesh that should be deformed.";
string $curve = $sel[0];
string $curveshape[] = `listRelatives -s $curve`;
string $tubedeformer = `createNode "tubeDeformer"`;
connectAttr -f ($curveshape[0]+".worldSpace[0]") ($tubedeformer+".curve");
string $outMesh = `createNode mesh`;
connectAttr -f ($tubedeformer+".outputMesh") ($outMesh+".inMesh");
setAttr ($tubedeformer+".circleSegments") 8;
sets -e -fe "initialShadingGroup" $outMesh;
}
}
global proc AEtubeDeformerNew( string $attributeName1, string $attributeName2, string $attributeName3, string $attributeName4) {
checkBoxGrp -numberOfCheckBoxes 1 -label "Cap holes" capctr;
checkBoxGrp -numberOfCheckBoxes 1 -label "Don't scale corners" zupactr;
checkBoxGrp -numberOfCheckBoxes 1 -label "Even distribution" evenctr;
checkBoxGrp -numberOfCheckBoxes 1 -label "Reverse" reversectr;
connectControl -index 2 capctr ($attributeName1);
connectControl -index 2 zupactr ($attributeName2);
connectControl -index 2 evenctr ($attributeName3);
connectControl -index 2 reversectr ($attributeName4);
}
global proc AEtubeDeformerReplace( string $attributeName1, string $attributeName2, string $attributeName3, string $attributeName4) {
connectControl -index 2 capctr ($attributeName1);
connectControl -index 2 zupactr ($attributeName2);
connectControl -index 2 evenctr ($attributeName3);
connectControl -index 2 reversectr ($attributeName4);
}
global proc AEtubeDeformerTemplate( string $nodeName )
{
// the following controls will be in a scrollable layout
editorTemplate -beginScrollLayout;
// add a bunch of common properties
editorTemplate -beginLayout "Tube Deformer Attributes" -collapse 0;
editorTemplate -callCustom "AEtubeDeformerNew" "AEtubeDeformerReplace" "cap" "scaleCorners" "even" "reverse";
editorTemplate -addSeparator;
editorTemplate -addControl "profileScale" ;
editorTemplate -addControl "profileRotate" ;
editorTemplate -addControl "twist" ;
editorTemplate -addControl "taper" ;
editorTemplate -addControl "segments" ;
editorTemplate -addControl "fixType" ;
editorTemplate -addControl "circleSegments" ;
editorTemplate -addControl "growth" ;
//AEaddRampControl "profileRamp" ;
editorTemplate -endLayout;
// include/call base class/node attributes
AEdependNodeTemplate $nodeName;
// add any extra attributes that have been added
editorTemplate -addExtraControls;
editorTemplate -endScrollLayout;
}
'''
meval( mel ) | ainaerco/meshTools | python/tubeDeformer.py | Python | gpl-2.0 | 13,150 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import posixpath
import sys
import warnings
from pyarrow.util import implements, _DEPR_MSG
from pyarrow.filesystem import FileSystem
import pyarrow.lib as lib
class HadoopFileSystem(lib.HadoopFileSystem, FileSystem):
    """
    FileSystem interface for HDFS cluster.

    See pyarrow.hdfs.connect for full connection details

    .. deprecated:: 2.0.0
        Use pyarrow.fs.HadoopFileSystem instead.
    """

    def __init__(self, host="default", port=0, user=None, kerb_ticket=None,
                 driver='libhdfs', extra_conf=None):
        # Emit the deprecation warning at the caller's frame.
        warnings.warn(
            _DEPR_MSG.format(
                "hdfs.HadoopFileSystem", "2.0.0", "fs.HadoopFileSystem"),
            DeprecationWarning, stacklevel=2)
        if driver == 'libhdfs':
            # ensure the JNI driver can find the Hadoop jars
            _maybe_set_hadoop_classpath()

        self._connect(host, port, user, kerb_ticket, extra_conf)

    def __reduce__(self):
        # Pickle support: reconnect with the same parameters on unpickle.
        return (HadoopFileSystem, (self.host, self.port, self.user,
                                   self.kerb_ticket, self.extra_conf))

    def _isfilestore(self):
        """
        Return True if this is a Unix-style file store with directories.
        """
        return True

    @implements(FileSystem.isdir)
    def isdir(self, path):
        return super().isdir(path)

    @implements(FileSystem.isfile)
    def isfile(self, path):
        return super().isfile(path)

    @implements(FileSystem.delete)
    def delete(self, path, recursive=False):
        return super().delete(path, recursive)

    def mkdir(self, path, **kwargs):
        """
        Create directory in HDFS.

        Parameters
        ----------
        path : str
            Directory path to create, including any parent directories.

        Notes
        -----
        libhdfs does not support create_parents=False, so we ignore this here
        """
        return super().mkdir(path)

    @implements(FileSystem.rename)
    def rename(self, path, new_path):
        return super().rename(path, new_path)

    @implements(FileSystem.exists)
    def exists(self, path):
        return super().exists(path)

    def ls(self, path, detail=False):
        """
        Retrieve directory contents and metadata, if requested.

        Parameters
        ----------
        path : str
            HDFS path to retrieve contents of.
        detail : bool, default False
            If False, only return list of paths.

        Returns
        -------
        result : list of dicts (detail=True) or strings (detail=False)
        """
        return super().ls(path, detail)

    def walk(self, top_path):
        """
        Directory tree generator for HDFS, like os.walk.

        Parameters
        ----------
        top_path : str
            Root directory for tree traversal.

        Returns
        -------
        Generator yielding 3-tuple (dirpath, dirnames, filename)
        """
        contents = self.ls(top_path, detail=True)
        # split one listing into (subdirectory names, file names)
        directories, files = _libhdfs_walk_files_dirs(top_path, contents)
        yield top_path, directories, files
        # recurse depth-first into each subdirectory
        for dirname in directories:
            yield from self.walk(self._path_join(top_path, dirname))
def _maybe_set_hadoop_classpath():
    """
    Ensure the Hadoop jars are present on the CLASSPATH for libhdfs.

    If CLASSPATH already contains a ``hadoop-common*.jar`` entry, assume
    the environment is configured and do nothing.  Otherwise derive the
    classpath from HADOOP_HOME (walking the directory tree on POSIX, or
    invoking ``hadoop classpath --glob`` on Windows), falling back to
    whatever ``hadoop`` executable is on PATH.
    """
    import re
    # The '.' before 'jar' is escaped so we match a literal file
    # extension rather than any character (the unescaped pattern also
    # matched names like 'hadoop-commonXjar').
    if re.search(r'hadoop-common[^/]+\.jar', os.environ.get('CLASSPATH', '')):
        return
    if 'HADOOP_HOME' in os.environ:
        if sys.platform != 'win32':
            # Fast path on POSIX: find the jars ourselves, avoiding a
            # JVM startup just to compute the classpath.
            classpath = _derive_hadoop_classpath()
        else:
            hadoop_bin = '{}/bin/hadoop'.format(os.environ['HADOOP_HOME'])
            classpath = _hadoop_classpath_glob(hadoop_bin)
    else:
        classpath = _hadoop_classpath_glob('hadoop')
    # Helpers return bytes (raw subprocess output); decode before export.
    os.environ['CLASSPATH'] = classpath.decode('utf-8')
def _derive_hadoop_classpath():
    """
    Build a libhdfs CLASSPATH (as bytes) from the HADOOP_HOME tree.

    Equivalent to the shell pipeline
    ``find -L $HADOOP_HOME -name '*.jar' | xargs echo | tr ...`` and
    prefixes the Hadoop configuration directory.
    """
    import subprocess
    # -L follows symlinks; some distributions symlink the jar directories.
    find_args = ('find', '-L', os.environ['HADOOP_HOME'], '-name', '*.jar')
    find = subprocess.Popen(find_args, stdout=subprocess.PIPE)
    # 'xargs echo' collapses the newline-separated jar list onto one line.
    xargs_echo = subprocess.Popen(('xargs', 'echo'),
                                  stdin=find.stdout,
                                  stdout=subprocess.PIPE)
    # NOTE(review): the tr arguments are the three-character strings
    # "' '" and "':'" (quotes included); tr treats them as character
    # sets, so spaces and quotes both map to ':'. Presumably intentional
    # shell-quoting carry-over — confirm before changing.
    jars = subprocess.check_output(('tr', "' '", "':'"),
                                   stdin=xargs_echo.stdout)
    # Prefer an explicit HADOOP_CONF_DIR; default to the conventional
    # $HADOOP_HOME/etc/hadoop location otherwise.
    hadoop_conf = os.environ["HADOOP_CONF_DIR"] \
        if "HADOOP_CONF_DIR" in os.environ \
        else os.environ["HADOOP_HOME"] + "/etc/hadoop"
    # Return bytes: caller decodes once when exporting to the environment.
    return (hadoop_conf + ":").encode("utf-8") + jars
def _hadoop_classpath_glob(hadoop_bin):
    """Return the raw output of ``<hadoop_bin> classpath --glob`` (bytes)."""
    import subprocess
    command = (hadoop_bin, 'classpath', '--glob')
    return subprocess.check_output(command)
def _libhdfs_walk_files_dirs(top_path, contents):
files = []
directories = []
for c in contents:
scrubbed_name = posixpath.split(c['name'])[1]
if c['kind'] == 'file':
files.append(scrubbed_name)
else:
directories.append(scrubbed_name)
return directories, files
def connect(host="default", port=0, user=None, kerb_ticket=None,
            extra_conf=None):
    """
    DEPRECATED: Connect to an HDFS cluster.

    All parameters are optional and should only be set if the defaults
    need to be overridden.  Authentication should be automatic if the
    HDFS cluster uses Kerberos.  However, if a username is specified,
    then the ticket cache will likely be required.

    Parameters
    ----------
    host : NameNode. Set to "default" for fs.defaultFS from core-site.xml.
    port : NameNode's port. Set to 0 for default or logical (HA) nodes.
    user : Username when connecting to HDFS; None implies login user.
    kerb_ticket : Path to Kerberos ticket cache.
    extra_conf : dict, default None
        extra Key/Value pairs for config; Will override any
        hdfs-site.xml properties

    Notes
    -----
    The first time you call this method, it will take longer than usual due
    to JNI spin-up time.

    Returns
    -------
    filesystem : HadoopFileSystem
    """
    # Announce the deprecation at the caller's call site, then delegate
    # to the internal constructor which suppresses the duplicate warning.
    message = _DEPR_MSG.format("hdfs.connect", "2.0.0", "fs.HadoopFileSystem")
    warnings.warn(message, DeprecationWarning, stacklevel=2)
    return _connect(host=host, port=port, user=user,
                    kerb_ticket=kerb_ticket, extra_conf=extra_conf)
def _connect(host="default", port=0, user=None, kerb_ticket=None,
             extra_conf=None):
    """
    Instantiate HadoopFileSystem while silencing its own deprecation
    warning, so callers of connect() see exactly one warning.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        return HadoopFileSystem(host=host, port=port, user=user,
                                kerb_ticket=kerb_ticket,
                                extra_conf=extra_conf)
| xhochy/arrow | python/pyarrow/hdfs.py | Python | apache-2.0 | 7,221 |
import time
from amberclient.common import amber_client
from amberclient.common.listener import Listener
from amberclient.dummy import dummy
__author__ = 'paoolo'
class DummyListener(Listener):
    # Minimal Listener implementation: print every pushed update received
    # from the subscribed dummy proxy.  (Python 2 print statement.)
    def handle(self, response):
        print str(response)
if __name__ == '__main__':
    # Ask for the amber server address; empty input defaults to localhost.
    ip = raw_input('IP (default: 127.0.0.1): ')
    ip = '127.0.0.1' if ip is None or len(ip) == 0 else ip
    # One client connection; the dummy proxy addresses device number 0.
    client = amber_client.AmberClient(ip)
    proxy = dummy.DummyProxy(client, 0)
    # Synchronous query: wait_available() blocks until the reply arrives.
    status = proxy.get_status()
    status.wait_available()
    print 'enable: %s, message: %s' % (status.get_enable(), status.get_message())
    # Mutate device state, then read it back to confirm the change.
    proxy.set_enable(True)
    proxy.set_message('Hello')
    status = proxy.get_status()
    status.wait_available()
    print 'enable: %s, message: %s' % (status.get_enable(), status.get_message())
    time.sleep(1)
    # Asynchronous interface: subscribe a listener for pushed updates,
    # let it run for a second, then unsubscribe.
    proxy.subscribe(DummyListener())
    time.sleep(1)
    proxy.unsubscribe()
    # Tear down the proxy before the client connection.
    proxy.terminate_proxy()
    client.terminate_client()
#!/usr/bin/env python
# --!-- coding: utf8 --!--
import os
from collections import OrderedDict
from PyQt5.QtCore import QSize
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QWidget, QListWidgetItem, QFileDialog
from manuskript import exporter
from manuskript.ui.exporters.exportersManager_ui import Ui_ExportersManager
from manuskript.ui import style as S
class exportersManager(QWidget, Ui_ExportersManager):
    """
    Widget listing the available exporters, their output formats,
    install status and executable path, and letting the user override
    that path.
    """
    # Emitted whenever an exporter's custom path changes, so other parts
    # of the UI can refresh their exporter lists.
    exportersMightHaveChanged = pyqtSignal()
    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.setupUi(self)
        self.lblExporterName.setStyleSheet(S.titleLabelSS())
        # Var
        self.currentExporter = None
        # Populates list of exporters
        self.lstExporters.clear()
        for E in exporter.exporters:
            item = QListWidgetItem(QIcon(E.icon), E.name)
            self.lstExporters.addItem(item)
        # UI: fixed-height, centered entries in a narrow side list
        for i in range(self.lstExporters.count()):
            item = self.lstExporters.item(i)
            item.setSizeHint(QSize(item.sizeHint().width(), 42))
            item.setTextAlignment(Qt.AlignCenter)
        self.lstExporters.setMaximumWidth(150)
        self.lstExporters.setMinimumWidth(150)
        # Selecting an exporter refreshes the whole pane; selecting a
        # format refreshes only its description.
        self.lstExporters.currentTextChanged.connect(self.updateUi)
        self.lstExportTo.currentTextChanged.connect(self.updateFormatDescription)
        self.lstExporters.setCurrentRow(0)
        self.btnSetPath.clicked.connect(self.setAppPath)
        self.txtPath.editingFinished.connect(self.updateAppPath)
    def updateUi(self, name):
        """Refresh the whole pane for the exporter called `name`."""
        E = exporter.getExporterByName(name)
        self.currentExporter = E
        if not E:
            # Unknown exporter: grey out everything.
            self.stack.setEnabled(False)
            return
        self.stack.setEnabled(True)
        # Updates name and description
        self.lblExporterName.setText(E.name)
        self.lblExporterDescription.setText(E.description)
        # Updates formats
        self.lstExportTo.clear()
        for f in E.exportTo:
            item = QListWidgetItem(QIcon.fromTheme(f.icon), f.name)
            self.lstExportTo.addItem(item)
        self.grpExportTo.layout().setStretch(0, 4)
        self.grpExportTo.layout().setStretch(1, 6)
        self.lstExportTo.setCurrentRow(0)
        # Updates path & version
        self.grpPath.setVisible(E.name != "Manuskript")  # We hide if exporter is manuskript
        # isValid() is tri-state: 2 = found on system, 1 = found at a
        # user-supplied custom path, 0 = not found at all.
        if E.isValid() == 2:
            self.lblStatus.setText(self.tr("Installed"))
            self.lblStatus.setStyleSheet("color: darkGreen;")
            self.lblHelpText.setVisible(False)
            self.lblVersion.setVisible(True)
            self.lblVersionName.setVisible(True)
        elif E.isValid() == 1:
            self.lblStatus.setText(self.tr("Custom"))
            self.lblStatus.setStyleSheet("color: darkOrange;")
            self.lblHelpText.setVisible(False)
            self.lblVersion.setVisible(True)
            self.lblVersionName.setVisible(True)
        else:
            self.lblStatus.setText(self.tr("Not found"))
            self.lblStatus.setStyleSheet("color: red;")
            self.lblHelpText.setVisible(True)
            self.lblHelpText.setText(self.tr("{} not found. Install it, or set path manually.").format(name))
            self.lblVersion.setVisible(False)
            self.lblVersionName.setVisible(False)
        # Version
        self.lblVersion.setText(E.version())
        # Path: prefer the detected system path over the custom one
        if E.path():
            self.txtPath.setText(E.path())
        else:
            self.txtPath.setText(E.customPath)
    def updateFormatDescription(self, name):
        """Show the description (and install status) of format `name`."""
        if self.currentExporter:
            f = self.currentExporter.getFormatByName(name)
            if not f:
                self.lblExportToDescription.setText("")
            else:
                desc = "<b>{}:</b> {}".format(
                    name,
                    f.description)
                if not f.isValid():
                    # Explain what is missing for this format to work.
                    desc += "<br><br>" + \
                            self.tr("<b>Status:</b> uninstalled.") + \
                            "<br><br>" + \
                            self.tr("<b>Requires:</b> ") + f.InvalidBecause
                self.lblExportToDescription.setText(desc)
    def setAppPath(self):
        """Let the user pick the exporter's executable via a file dialog."""
        if self.currentExporter:
            E = self.currentExporter
            fn = QFileDialog.getOpenFileName(self,
                caption=self.tr("Set {} executable path.").format(E.cmd),
                directory=E.customPath)
            # getOpenFileName returns (path, filter); empty path = cancelled.
            if fn[0]:
                self.updateAppPath(fn[0])
    def updateAppPath(self, path=""):
        """
        Store `path` as the exporter's custom executable path and
        refresh the pane.  Called with no argument when the user edits
        the path line edit directly.
        """
        if not path:
            path = self.txtPath.text()
        if self.currentExporter:
            E = self.currentExporter
            E.setCustomPath(path)
            self.txtPath.setText(E.customPath)
            self.updateUi(E.name)
            # Other views may cache exporter availability; tell them.
            self.exportersMightHaveChanged.emit()
| gedakc/manuskript | manuskript/ui/exporters/exportersManager.py | Python | gpl-3.0 | 4,984 |
#
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.mlnxos import mlnxos_l3_interface
from units.modules.utils import set_module_args
from .mlnxos_module import TestMlnxosModule, load_fixture
class TestMlnxosL3InterfaceModule(TestMlnxosModule):
    """Unit tests for the mlnxos_l3_interface Ansible module."""
    module = mlnxos_l3_interface
    def setUp(self):
        # Patch out the two device touch-points: reading the current
        # interface config and pushing commands to the switch.
        super(TestMlnxosL3InterfaceModule, self).setUp()
        self.mock_get_config = patch.object(
            mlnxos_l3_interface.MlnxosL3InterfaceModule,
            "_get_interfaces_config")
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch(
            'ansible.module_utils.network.mlnxos.mlnxos.load_config')
        self.load_config = self.mock_load_config.start()
    def tearDown(self):
        super(TestMlnxosL3InterfaceModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()
    def _execute_module(self, failed=False, changed=False, commands=None, sort=True):
        """
        Run the module and assert on the outcome.

        failed/changed assert the module's reported status; `commands`,
        if given, must match the generated CLI commands (order-insensitive
        unless sort=False).
        """
        if failed:
            result = self.failed()
            self.assertTrue(result['failed'], result)
        else:
            result = self.changed(changed)
            self.assertEqual(result['changed'], changed, result)
        if commands is not None:
            commands_res = result.get('commands')
            if sort:
                self.assertEqual(sorted(commands), sorted(commands_res), commands_res)
            else:
                self.assertEqual(commands, commands_res, commands_res)
        return result
    def load_fixture(self, config_file):
        # get_config returns canned device output; load_config is a no-op.
        self.get_config.return_value = load_fixture(config_file)
        self.load_config.return_value = None
    def load_eth_ifc_fixture(self):
        config_file = 'mlnxos_l3_interface_show.cfg'
        self.load_fixture(config_file)
    def load_vlan_ifc_fixture(self):
        config_file = 'mlnxos_l3_vlan_interface_show.cfg'
        self.load_fixture(config_file)
    def test_vlan_ifc_no_change(self):
        # Desired state already matches the fixture: no commands expected.
        set_module_args(dict(name='Vlan 1002', state='present',
                             ipv4='172.3.12.4/24'))
        self.load_vlan_ifc_fixture()
        self._execute_module(changed=False)
    def test_vlan_ifc_remove(self):
        set_module_args(dict(name='Vlan 1002', state='absent'))
        commands = ['interface vlan 1002 no ip address']
        self.load_vlan_ifc_fixture()
        self._execute_module(changed=True, commands=commands)
    def test_vlan_ifc_update(self):
        set_module_args(dict(name='Vlan 1002', state='present',
                             ipv4='172.3.13.4/24'))
        commands = ['interface vlan 1002 ip address 172.3.13.4/24']
        self.load_vlan_ifc_fixture()
        self._execute_module(changed=True, commands=commands)
    def test_eth_ifc_no_change(self):
        set_module_args(dict(name='Eth1/5', state='present',
                             ipv4='172.3.12.4/24'))
        self.load_eth_ifc_fixture()
        self._execute_module(changed=False)
    def test_eth_ifc_remove(self):
        set_module_args(dict(name='Eth1/5', state='absent'))
        commands = ['interface ethernet 1/5 no ip address']
        self.load_eth_ifc_fixture()
        self._execute_module(changed=True, commands=commands)
    def test_eth_ifc_update(self):
        set_module_args(dict(name='Eth1/5', state='present',
                             ipv4='172.3.13.4/24'))
        commands = ['interface ethernet 1/5 ip address 172.3.13.4/24']
        self.load_eth_ifc_fixture()
        self._execute_module(changed=True, commands=commands)
    def test_eth_ifc_add_ip(self):
        # Port is in switchport mode in the fixture, so it must first be
        # forced to routed mode before an IP can be assigned.
        set_module_args(dict(name='Eth1/6', state='present',
                             ipv4='172.3.14.4/24'))
        commands = ['interface ethernet 1/6 no switchport force',
                    'interface ethernet 1/6 ip address 172.3.14.4/24']
        self.load_eth_ifc_fixture()
        self._execute_module(changed=True, commands=commands)
| le9i0nx/ansible | test/units/modules/network/mlnxos/test_mlnxos_l3_interface.py | Python | gpl-3.0 | 4,186 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import test_run
from pylib.base import test_collection
class LocalDeviceTestRun(test_run.TestRun):
  """Base class for running a set of tests across locally attached devices.

  Subclasses implement the abstract hooks below (_GetTests, _RunTest,
  _CreateShards, _ShouldShard); this class provides the retry /
  result-aggregation loop.
  """
  def __init__(self, env, test_instance):
    super(LocalDeviceTestRun, self).__init__(env, test_instance)
    # Per-device tool (e.g. valgrind wrapper) cache, keyed by str(device).
    self._tools = {}
  #override
  def RunTests(self):
    tests = self._GetTests()
    def run_tests_on_device(dev, tests):
      # Runs each test on one device, collecting results. `tests` may be
      # a shared TestCollection (when sharding) or a plain list.
      r = base_test_result.TestRunResults()
      for test in tests:
        result = self._RunTest(dev, test)
        if isinstance(result, base_test_result.BaseTestResult):
          r.AddResult(result)
        elif isinstance(result, list):
          r.AddResults(result)
        else:
          raise Exception('Unexpected result type: %s' % type(result).__name__)
        if isinstance(tests, test_collection.TestCollection):
          # Mark the item done so the shared collection can hand out more.
          tests.test_completed()
      return r
    tries = 0
    results = base_test_result.TestRunResults()
    all_fail_results = {}
    # Retry loop: rerun only the tests that have not yet passed/skipped,
    # up to max_tries attempts.
    while tries < self._env.max_tries and tests:
      logging.debug('try %d, will run %d tests:', tries, len(tests))
      for t in tests:
        logging.debug(' %s', t)
      if self._ShouldShard():
        # Shared work queue: devices pull shards as they become free.
        tc = test_collection.TestCollection(self._CreateShards(tests))
        try_results = self._env.parallel_devices.pMap(
            run_tests_on_device, tc).pGet(None)
      else:
        # Every device runs the full test list.
        try_results = self._env.parallel_devices.pMap(
            run_tests_on_device, tests).pGet(None)
      for try_result in try_results:
        for result in try_result.GetAll():
          if result.GetType() in (base_test_result.ResultType.PASS,
                                  base_test_result.ResultType.SKIP):
            results.AddResult(result)
          else:
            # Keep only the latest failure per test name.
            all_fail_results[result.GetName()] = result
      # Drop tests that now have a pass/skip result; the rest get retried.
      results_names = set(r.GetName() for r in results.GetAll())
      tests = [t for t in tests if self._GetTestName(t) not in results_names]
      tries += 1
    # Whatever is left never passed: report recorded failures, and mark
    # tests with no recorded result at all as UNKNOWN.
    all_unknown_test_names = set(self._GetTestName(t) for t in tests)
    all_failed_test_names = set(all_fail_results.iterkeys())
    unknown_tests = all_unknown_test_names.difference(all_failed_test_names)
    failed_tests = all_failed_test_names.intersection(all_unknown_test_names)
    if unknown_tests:
      results.AddResults(
          base_test_result.BaseTestResult(
              u, base_test_result.ResultType.UNKNOWN)
          for u in unknown_tests)
    if failed_tests:
      results.AddResults(all_fail_results[f] for f in failed_tests)
    return results
  def GetTool(self, device):
    # Lazily create and cache the tool wrapper for this device.
    if not str(device) in self._tools:
      self._tools[str(device)] = valgrind_tools.CreateTool(
          self._env.tool, device)
    return self._tools[str(device)]
  def _CreateShards(self, tests):
    # Subclass hook: partition `tests` into shards for a TestCollection.
    raise NotImplementedError
  def _GetTestName(self, test):
    # Subclass hook: map a test object to its stable name (default: itself).
    return test
  def _GetTests(self):
    # Subclass hook: return the full list of tests to run.
    raise NotImplementedError
  def _RunTest(self, device, test):
    # Subclass hook: run one test on one device, returning a result or list.
    raise NotImplementedError
  def _ShouldShard(self):
    # Subclass hook: whether tests should be distributed across devices.
    raise NotImplementedError
| guorendong/iridium-browser-ubuntu | build/android/pylib/local/device/local_device_test_run.py | Python | bsd-3-clause | 3,224 |
# The code for changing pages was derived from: http://stackoverflow.com/questions/7546050/switch-between-two-frames-in-tkinter
# License: http://creativecommons.org/licenses/by-sa/3.0/
import matplotlib
matplotlib.use("TkAgg")
#from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import tkinter as tk
from tkinter import ttk
LARGE_FONT= ("Verdana", 12)
class SeaofBTCapp(tk.Tk):
    """Root window holding a stack of page frames that can be raised."""

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        #tk.Tk.iconbitmap(self, default="clienticon.ico")
        tk.Tk.wm_title(self, "Sea of BTC client")
        # One container frame fills the window; every page is gridded
        # into the same cell so tkraise() switches between them.
        root_frame = tk.Frame(self)
        root_frame.pack(side="top", fill="both", expand=True)
        root_frame.grid_rowconfigure(0, weight=1)
        root_frame.grid_columnconfigure(0, weight=1)
        self.frames = {}
        start_page = StartPage(root_frame, self)
        self.frames[StartPage] = start_page
        start_page.grid(row=0, column=0, sticky="nsew")
        self.show_frame(StartPage)

    def show_frame(self, cont):
        """Raise the page frame registered under the class key `cont`."""
        self.frames[cont].tkraise()
class StartPage(tk.Frame):
    """Landing page with a header label and three action buttons."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        header = tk.Label(self, text="Start Page", font=LARGE_FONT)
        header.pack(pady=10, padx=10)
        # (caption, message printed when clicked)
        actions = (
            ("Take Picture", "picture taken"),
            ("Turn Right", "turn right"),
            ("Turn Left", "turn left"),
        )
        for caption, message in actions:
            # Bind `message` as a default argument so each button keeps
            # its own value rather than the loop's last one.
            button = ttk.Button(self, text=caption,
                                command=lambda msg=message: print(msg))
            button.pack()
# Build the application window and enter the Tk event loop.
app = SeaofBTCapp()
app.mainloop()
| AIAA-BOR-2017/ground-station | tkinter_gui_example.py | Python | mit | 1,877 |
"""
Copyright (C) 2013-2018 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
"""
import ruamel.yaml
from calliope.core.util.logging import log_time
from calliope import exceptions
from calliope.backend import checks
import numpy as np
import xarray as xr
import calliope.backend.pyomo.model as run_pyomo
import calliope.backend.pyomo.interface as pyomo_interface
def run(model_data, timings, build_only=False):
    """
    Dispatch a model run to the configured backend in the configured mode.

    Parameters
    ----------
    model_data : xarray.Dataset
        Pre-processed dataset of Calliope model data.
    timings : dict
        Stores timings of various stages of model processing.
    build_only : bool, optional
        If True, the backend only constructs its in-memory representation
        of the problem rather than solving it. Used for debugging and
        testing.

    Returns
    -------
    (results, backend_model, interface_methods) triple for the chosen
    backend.
    """
    backends = {
        'pyomo': run_pyomo
    }
    interfaces = {
        'pyomo': pyomo_interface
    }

    backend_name = model_data.attrs['run.backend']
    mode = model_data.attrs['run.mode']

    if mode == 'plan':
        results, backend = run_plan(
            model_data, timings,
            backend=backends[backend_name], build_only=build_only
        )
    elif mode == 'operate':
        results, backend = run_operate(
            model_data, timings,
            backend=backends[backend_name], build_only=build_only
        )

    return results, backend, interfaces[backend_name].BackendInterfaceMethods
def run_plan(model_data, timings, backend, build_only, backend_rerun=False):
    """
    Build (and optionally solve) the model in 'plan' mode.

    If `backend_rerun` is given, it is an already-built backend model to
    reuse instead of generating a fresh one.  When `build_only` is True,
    an empty Dataset is returned in place of results.
    """
    log_time(timings, 'run_start', comment='Backend: starting model run')
    if not backend_rerun:
        backend_model = backend.generate_model(model_data)
        log_time(
            timings, 'run_backend_model_generated', time_since_start=True,
            comment='Backend: model generated'
        )
    else:
        backend_model = backend_rerun
    # Solver configuration comes from flattened 'run.*' dataset attributes;
    # solver_options strips the 'run.solver_options.' prefix from each key.
    solver = model_data.attrs['run.solver']
    solver_io = model_data.attrs.get('run.solver_io', None)
    solver_options = {
        k.split('.')[-1]: v
        for k, v in model_data.attrs.items() if '.solver_options.' in k
    }
    save_logs = model_data.attrs.get('run.save_logs', None)
    if build_only:
        results = xr.Dataset()
    else:
        log_time(
            timings, 'run_solver_start',
            comment='Backend: sending model to solver'
        )
        results = backend.solve_model(
            backend_model, solver=solver,
            solver_io=solver_io, solver_options=solver_options, save_logs=save_logs
        )
        log_time(
            timings, 'run_solver_exit', time_since_start=True,
            comment='Backend: solver finished running'
        )
        # Load solver values back into the backend model, then extract
        # the xarray result Dataset from it.
        termination = backend.load_results(backend_model, results)
        log_time(
            timings, 'run_results_loaded',
            comment='Backend: loaded results'
        )
        results = backend.get_result_array(backend_model, model_data)
        results.attrs['termination_condition'] = termination
        log_time(
            timings, 'run_solution_returned', time_since_start=True,
            comment='Backend: generated solution array'
        )
    return results, backend_model
def run_operate(model_data, timings, backend, build_only):
    """
    For use when mode is 'operate', to allow the model to be built, edited, and
    iteratively run within Pyomo.

    The timeseries is split into fixed-length windows; each iteration
    optimises over a longer horizon but only the window portion of the
    solution is kept, with storage levels and operated units carried over
    between iterations.
    """
    log_time(timings, 'run_start',
             comment='Backend: starting model run in operational mode')
    defaults = ruamel.yaml.load(model_data.attrs['defaults'], Loader=ruamel.yaml.Loader)
    # Parameters that, in operate mode, are fixed inputs rather than
    # decision variables (capacities etc., derived from '*_max' defaults).
    operate_params = ['purchased'] + [
        i.replace('_max', '') for i in defaults if i[-4:] == '_max'
    ]
    # Capacity results (from plan mode) can be used as the input to operate mode
    if (any(model_data.filter_by_attrs(is_result=1).data_vars) and
            model_data.attrs.get('run.operation.use_cap_results', False)):
        # Anything with is_result = 1 will be ignored in the Pyomo model
        for varname, varvals in model_data.data_vars.items():
            if varname in operate_params:
                varvals.attrs['is_result'] = 1
                varvals.attrs['operate_param'] = 1
    else:
        # No plan-mode results available: fix capacities from the '_max'
        # constraints, overridden by any '_equals' constraints.
        cap_max = xr.merge([
            v.rename(k.replace('_max', ''))
            for k, v in model_data.data_vars.items() if '_max' in k
        ])
        cap_equals = xr.merge([
            v.rename(k.replace('_equals', ''))
            for k, v in model_data.data_vars.items() if '_equals' in k
        ])
        caps = cap_max.update(cap_equals)
        for cap in caps.data_vars.values():
            cap.attrs['is_result'] = 1
            cap.attrs['operate_param'] = 1
        model_data.update(caps)
    # Storage initial is carried over between iterations, so must be defined along with storage
    if ('loc_techs_store' in model_data.dims.keys() and
            'storage_initial' not in model_data.data_vars.keys()):
        model_data['storage_initial'] = (
            xr.DataArray([0 for loc_tech in model_data.loc_techs_store.values],
                         dims='loc_techs_store')
        )
        model_data['storage_initial'].attrs['is_result'] = 0
        # NOTE(review): this ModelWarning is instantiated but never
        # emitted (not raised or passed to warnings.warn) — presumably a
        # warning was intended here; confirm against calliope.exceptions.
        exceptions.ModelWarning(
            'Initial stored energy not defined, set to zero for all '
            'loc::techs in loc_techs_store, for use in iterative optimisation'
        )
    # Operated units is carried over between iterations, so must be defined in a milp model
    if ('loc_techs_milp' in model_data.dims.keys() and
            'operated_units' not in model_data.data_vars.keys()):
        model_data['operated_units'] = (
            xr.DataArray([0 for loc_tech in model_data.loc_techs_milp.values],
                         dims='loc_techs_milp')
        )
        model_data['operated_units'].attrs['is_result'] = 1
        model_data['operated_units'].attrs['operate_param'] = 1
        # NOTE(review): same as above — this warning object is never emitted.
        exceptions.ModelWarning(
            'daily operated units not defined, set to zero for all '
            'loc::techs in loc_techs_milp, for use in iterative optimisation'
        )
    comments, warnings, errors = checks.check_operate_params(model_data)
    exceptions.print_warnings_and_raise_errors(warnings=warnings, errors=errors)
    # Initialize our variables
    solver = model_data.attrs['run.solver']
    solver_io = model_data.attrs.get('run.solver_io', None)
    solver_options = model_data.attrs.get('run.solver_options', None)
    save_logs = model_data.attrs.get('run.save_logs', None)
    window = model_data.attrs['run.operation.window']
    horizon = model_data.attrs['run.operation.horizon']
    window_to_horizon = horizon - window
    # get the cumulative sum of timestep resolution, to find where we hit our window and horizon
    timestep_cumsum = model_data.timestep_resolution.cumsum('timesteps').to_pandas()
    # get the timesteps at which we start and end our windows
    window_ends = timestep_cumsum.where(
        (timestep_cumsum % window == 0) | (timestep_cumsum == timestep_cumsum[-1])
    )
    window_starts = timestep_cumsum.where(
        (~np.isnan(window_ends.shift(1))) | (timestep_cumsum == timestep_cumsum[0])
    ).dropna()
    window_ends = window_ends.dropna()
    horizon_ends = timestep_cumsum[timestep_cumsum.isin(window_ends.values + window_to_horizon)]
    if not any(window_starts):
        raise exceptions.ModelError(
            'Not enough timesteps or incorrect timestep resolution to run in '
            'operational mode with an optimisation window of {}'.format(window)
        )
    # We will only update timseries parameters
    timeseries_data_vars = [
        k for k, v in model_data.data_vars.items() if 'timesteps' in v.dims
        and v.attrs['is_result'] == 0
    ]
    # Loop through each window, solve over the horizon length, and add result to
    # result_array we only go as far as the end of the last horizon, which may
    # clip the last bit of data
    result_array = []
    # track whether each iteration finds an optimal solution or not
    terminations = []
    if build_only:
        iterations = [0]
    else:
        iterations = range(len(window_starts))
    for i in iterations:
        start_timestep = window_starts.index[i]
        # Build full model in first instance
        if i == 0:
            warmstart = False
            end_timestep = horizon_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]
            log_time(
                timings, 'model_gen_1',
                comment='Backend: generating initial model'
            )
            backend_model = backend.generate_model(window_model_data)
        # Build the full model in the last instance(s),
        # where number of timesteps is less than the horizon length
        elif i > len(horizon_ends) - 1:
            warmstart = False
            end_timestep = window_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]
            log_time(
                timings, 'model_gen_{}'.format(i + 1),
                comment=(
                    'Backend: iteration {}: generating new model for '
                    'end of timeseries, with horizon = {} timesteps'
                    .format(i + 1, window_ends[i] - window_starts[i])
                )
            )
            backend_model = backend.generate_model(window_model_data)
        # Update relevent Pyomo Params in intermediate instances
        else:
            warmstart = True
            end_timestep = horizon_ends.index[i]
            timesteps = slice(start_timestep, end_timestep)
            window_model_data = model_data.loc[dict(timesteps=timesteps)]
            log_time(
                timings, 'model_gen_{}'.format(i + 1),
                comment='Backend: iteration {}: updating model parameters'.format(i + 1)
            )
            # Pyomo model sees the same timestamps each time, we just change the
            # values associated with those timestamps
            for var in timeseries_data_vars:
                # New values
                var_series = window_model_data[var].to_series().dropna().replace('inf', np.inf)
                # Same timestamps
                var_series.index = backend_model.__calliope_model_data__['data'][var].keys()
                var_dict = var_series.to_dict()
                # Update pyomo Param with new dictionary
                for k, v in getattr(backend_model, var).items():
                    if k in var_dict:
                        v.set_value(var_dict[k])
        if not build_only:
            log_time(
                timings, 'model_run_{}'.format(i + 1), time_since_start=True,
                comment='Backend: iteration {}: sending model to solver'.format(i + 1)
            )
            # After iteration 1, warmstart = True, which should speed up the process
            # Note: Warmstart isn't possible with GLPK (dealt with later on)
            _results = backend.solve_model(
                backend_model, solver=solver, solver_io=solver_io,
                solver_options=solver_options, save_logs=save_logs, warmstart=warmstart,
            )
            log_time(
                timings, 'run_solver_exit_{}'.format(i + 1), time_since_start=True,
                comment='Backend: iteration {}: solver finished running'.format(i + 1)
            )
            # xarray dataset is built for each iteration
            _termination = backend.load_results(backend_model, _results)
            terminations.append(_termination)
            _results = backend.get_result_array(backend_model, model_data)
            # We give back the actual timesteps for this iteration and take a slice
            # equal to the window length
            _results['timesteps'] = window_model_data.timesteps.copy()
            # We always save the window data. Until the last window(s) this will crop
            # the window_to_horizon timesteps. In the last window(s), optimistion will
            # only be occurring over a window length anyway
            _results = _results.loc[dict(timesteps=slice(None, window_ends.index[i]))]
            result_array.append(_results)
            # Set up initial storage for the next iteration
            if 'loc_techs_store' in model_data.dims.keys():
                storage_initial = _results.storage.loc[{'timesteps': window_ends.index[i]}].drop('timesteps')
                model_data['storage_initial'].loc[storage_initial.coords] = storage_initial.values
                for k, v in backend_model.storage_initial.items():
                    v.set_value(storage_initial.to_series().dropna().to_dict()[k])
            # Set up total operated units for the next iteration
            if 'loc_techs_milp' in model_data.dims.keys():
                operated_units = _results.operating_units.sum('timesteps').astype(np.int)
                # NOTE(review): '.loc[{}]' selects the whole array — looks
                # like an in-place accumulate over all loc_techs; confirm.
                model_data['operated_units'].loc[{}] += operated_units.values
                for k, v in backend_model.operated_units.items():
                    v.set_value(operated_units.to_series().dropna().to_dict()[k])
            log_time(
                timings, 'run_solver_exit_{}'.format(i + 1), time_since_start=True,
                comment='Backend: iteration {}: generated solution array'.format(i + 1)
            )
    if build_only:
        results = xr.Dataset()
    else:
        # Concatenate results over the timestep dimension to get a single
        # xarray Dataset of interest
        results = xr.concat(result_array, dim='timesteps')
        if all(i == 'optimal' for i in terminations):
            results.attrs['termination_condition'] = 'optimal'
        else:
            results.attrs['termination_condition'] = ','.join(terminations)
        log_time(
            timings, 'run_solution_returned', time_since_start=True,
            comment='Backend: generated full solution array'
        )
    return results, backend_model
| brynpickering/calliope | calliope/backend/run.py | Python | apache-2.0 | 14,098 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""API for using the tf.data service.
This module contains:
1. tf.data server implementations for running the tf.data service.
2. APIs for registering datasets with the tf.data service and reading from
the registered datasets.
The tf.data service provides the following benefits:
- Horizontal scaling of tf.data input pipeline processing to solve input
bottlenecks.
- Data coordination for distributed training. Coordinated reads
enable all replicas to train on similar-length examples across each global
training step, improving step times in synchronous training.
- Dynamic balancing of data across training replicas.
>>> dispatcher = tf.data.experimental.service.DispatchServer()
>>> dispatcher_address = dispatcher.target.split("://")[1]
>>> worker = tf.data.experimental.service.WorkerServer(
... tf.data.experimental.service.WorkerConfig(
... dispatcher_address=dispatcher_address))
>>> dataset = tf.data.Dataset.range(10)
>>> dataset = dataset.apply(tf.data.experimental.service.distribute(
... processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
... service=dispatcher.target))
>>> print(list(dataset.as_numpy_iterator()))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
## Setup
This section goes over how to set up the tf.data service.
### Run tf.data servers
The tf.data service consists of one dispatch server and `n` worker servers.
tf.data servers should be brought up alongside your training jobs, then brought
down when the jobs are finished.
Use `tf.data.experimental.service.DispatchServer` to start a dispatch server,
and `tf.data.experimental.service.WorkerServer` to start worker servers. Servers
can be run in the same process for testing purposes, or scaled up on separate
machines.
See https://github.com/tensorflow/ecosystem/tree/master/data_service for an
example of using Google Kubernetes Engine (GKE) to manage the tf.data service.
Note that the server implementation in
[tf_std_data_server.py](https://github.com/tensorflow/ecosystem/blob/master/data_service/tf_std_data_server.py)
is not GKE-specific, and can be used to run the tf.data service in other
contexts.
### Custom ops
If your dataset uses custom ops, these ops need to be made available to tf.data
servers by calling
[load_op_library](https://www.tensorflow.org/api_docs/python/tf/load_op_library)
from the dispatcher and worker processes at startup.
## Usage
Users interact with tf.data service by programmatically registering their
datasets with tf.data service, then creating datasets that read from the
registered datasets. The
[register_dataset](https://www.tensorflow.org/api_docs/python/tf/data/experimental/service/register_dataset)
function registers a dataset, then the
[from_dataset_id](https://www.tensorflow.org/api_docs/python/tf/data/experimental/service/from_dataset_id)
function creates a new dataset which reads from the registered dataset.
The
[distribute](https://www.tensorflow.org/api_docs/python/tf/data/experimental/service/distribute)
function wraps `register_dataset` and `from_dataset_id` into a single convenient
transformation which registers its input dataset and then reads from it.
`distribute` enables tf.data service to be used with a one-line code change.
However, it assumes that the dataset is created and consumed by the same entity
and this assumption might not always be valid or desirable. In particular, in
certain scenarios, such as distributed training, it might be desirable to
decouple the creation and consumption of the dataset (via `register_dataset`
and `from_dataset_id` respectively) to avoid having to create the dataset on
each of the training workers.
### Example
#### `distribute`
To use the `distribute` transformation, apply the transformation after the
prefix of your input pipeline that you would like to be executed using tf.data
service (typically at the end).
```
dataset = ... # Define your dataset here.
# Move dataset processing from the local machine to the tf.data service
dataset = dataset.apply(
tf.data.experimental.service.distribute(
processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
service=FLAGS.tf_data_service_address,
job_name="shared_job"))
# Any transformations added after `distribute` will be run on the local machine.
dataset = dataset.prefetch(1)
```
The above code will create a tf.data service "job", which iterates through the
dataset to generate data. To share the data from a job across multiple clients
(e.g. when using TPUStrategy or MultiWorkerMirroredStrategy), set a common
`job_name` across all clients.
#### `register_dataset` and `from_dataset_id`
`register_dataset` registers a dataset with the tf.data service, returning a
dataset id for the registered dataset. `from_dataset_id` creates a dataset that
reads from the registered dataset. These APIs can be used to reduce dataset
building time for distributed training. Instead of building the dataset on all
training workers, we can build the dataset just once and then register the
dataset using `register_dataset`. Then all workers can call `from_dataset_id`
without needing to build the dataset themselves.
```
dataset = ... # Define your dataset here.
dataset_id = tf.data.experimental.service.register_dataset(
service=FLAGS.tf_data_service_address,
dataset=dataset)
# Use `from_dataset_id` to create per-worker datasets.
per_worker_datasets = {}
for worker in workers:
per_worker_datasets[worker] = tf.data.experimental.service.from_dataset_id(
processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
service=FLAGS.tf_data_service_address,
dataset_id=dataset_id,
job_name="shared_job")
```
### Processing Modes
`processing_mode` specifies how to shard a dataset among tf.data service
workers. tf.data service supports `OFF`, `DYNAMIC`, `FILE`, `DATA`,
`FILE_OR_DATA`, `HINT` sharding policies.
OFF: No sharding will be performed. The entire input dataset will be processed
independently by each of the tf.data service workers. For this reason, it is
important to shuffle data (e.g. filenames) non-deterministically, so that each
worker will process the elements of the dataset in a different order. This mode
can be used to distribute datasets that aren't splittable.
If a worker is added or restarted during ShardingPolicy.OFF processing, the
worker will instantiate a new copy of the dataset and begin producing data from
the beginning.
#### Dynamic Sharding
DYNAMIC: In this mode, tf.data service divides the dataset into two components:
a source component that generates "splits" such as filenames, and a processing
component that takes splits and outputs dataset elements. The source component
is executed in a centralized fashion by the tf.data service dispatcher, which
generates different splits of input data. The processing component is executed
in a parallel fashion by the tf.data service workers, each operating on a
different set of input data splits.
For example, consider the following dataset:
```
dataset = tf.data.Dataset.from_tensor_slices(filenames)
dataset = dataset.interleave(TFRecordDataset)
dataset = dataset.map(preprocess_fn)
dataset = dataset.batch(batch_size)
dataset = dataset.apply(
tf.data.experimental.service.distribute(
processing_mode=tf.data.experimental.service.ShardingPolicy.DYNAMIC,
...))
```
The `from_tensor_slices` will be run on the dispatcher, while the `interleave`,
`map`, and `batch` will be run on tf.data service workers. The workers will pull
filenames from the dispatcher for processing. To process a dataset with
dynamic sharding, the dataset must have a splittable source, and all of
its transformations must be compatible with splitting. While most sources and
transformations support splitting, there are exceptions, such as custom datasets
which may not implement the splitting API. Please file a Github issue if you
would like to use distributed epoch processing for a currently unsupported
dataset source or transformation.
If no workers are restarted during training, dynamic sharding mode will visit
every example exactly once. If workers are restarted during training, the splits
they were processing will not be fully visited. The dispatcher maintains a
cursor through the dataset's splits. Assuming fault tolerance is enabled (See
"Fault Tolerance" below), the dispatcher will store cursor state in write-ahead
logs so that the cursor can be restored in case the dispatcher is restarted
mid-training. This provides an at-most-once visitation guarantee in the presence
of server restarts.
#### Static Sharding
The following are static sharding policies. The semantics are similar to
`tf.data.experimental.AutoShardPolicy`. These policies require:
* The tf.data service cluster is configured with a fixed list of workers
in DispatcherConfig.
* Each client only reads from the local tf.data service worker.
If a worker is restarted while performing static sharding, the worker will
begin processing its shard again from the beginning.
FILE: Shards by input files (i.e. each worker will get a fixed set of files to
process). When this option is selected, make sure that there are at least as
many files as workers. If there are fewer input files than workers, a runtime
error will be raised.
DATA: Shards by elements produced by the dataset. Each worker will process the
whole dataset and discard the portion that is not for itself. Note that for
this mode to correctly partition the dataset elements, the dataset needs to
produce elements in a deterministic order.
FILE_OR_DATA: Attempts FILE-based sharding, falling back to DATA-based
sharding on failure.
HINT: Looks for the presence of `shard(SHARD_HINT, ...)` which is treated as a
placeholder to replace with `shard(num_workers, worker_index)`.
For backwards compatibility, `processing_mode` may also be set to the strings
`"parallel_epochs"` or `"distributed_epoch"`, which are respectively equivalent
to `ShardingPolicy.OFF` and `ShardingPolicy.DYNAMIC`.
### Jobs
A tf.data service "job" refers to the process of reading from a dataset managed
by the tf.data service, using one or more data consumers. Jobs are created when
iterating over datasets that read from tf.data service. The data produced by a
job is determined by (1) dataset associated with the job and (2) the job's
processing mode. For example, if a job is created for the dataset
`Dataset.range(5)`, and the processing mode is `ShardingPolicy.OFF`, each
tf.data worker will produce the elements `{0, 1, 2, 3, 4}` for the job,
resulting in the
job producing `5 * num_workers` elements. If the processing mode is
`ShardingPolicy.DYNAMIC`, the job will only produce `5` elements.
One or more consumers can consume data from a job. By default, jobs are
"anonymous", meaning that only the consumer which created the job can read from
it. To share the output of a job across multiple consumers, you can set a common
`job_name`.
### Fault Tolerance
By default, the tf.data dispatch server stores its state in-memory, making it a
single point of failure during training. To avoid this, pass
`fault_tolerant_mode=True` when creating your `DispatchServer`. Dispatcher
fault tolerance requires `work_dir` to be configured and accessible from the
dispatcher both before and after restart (e.g. a GCS path). With fault tolerant
mode enabled, the dispatcher will journal its state to the work directory so
that no state is lost when the dispatcher is restarted.
WorkerServers may be freely restarted, added, or removed during training. At
startup, workers will register with the dispatcher and begin processing all
outstanding jobs from the beginning.
### Usage with tf.distribute
tf.distribute is the TensorFlow API for distributed training. There are
several ways to use tf.data with tf.distribute:
`strategy.experimental_distribute_dataset`,
`strategy.distribute_datasets_from_function`, and (for PSStrategy)
`coordinator.create_per_worker_dataset`. The following sections give code
examples for each.
In general we recommend using
`tf.data.experimental.service.{register_dataset,from_dataset_id}` over
`tf.data.experimental.service.distribute` for two reasons:
- The dataset only needs to be constructed and optimized once, instead of once
per worker. This can significantly reduce startup time, because the current
`experimental_distribute_dataset` and `distribute_datasets_from_function`
implementations create and optimize worker datasets sequentially.
- If a dataset depends on lookup tables or variables that are only present on
one host, the dataset needs to be registered from that host. Typically this
only happens when resources are placed on the chief or worker 0. Registering
the dataset from the chief will avoid issues with depending on remote
resources.
#### strategy.experimental_distribute_dataset
Nothing special is required when using
`strategy.experimental_distribute_dataset`, just apply `register_dataset` and
`from_dataset_id` as above, making sure to specify a `job_name` so that all
workers consume from the same tf.data service job.
```
dataset = ... # Define your dataset here.
dataset_id = tf.data.experimental.service.register_dataset(
service=FLAGS.tf_data_service_address,
dataset=dataset)
dataset = tf.data.experimental.service.from_dataset_id(
processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
service=FLAGS.tf_data_service_address,
dataset_id=dataset_id,
job_name="shared_job")
dataset = strategy.experimental_distribute_dataset(dataset)
```
#### strategy.distribute_datasets_from_function
First, make sure the dataset produced by the `dataset_fn` does not depend on the
`input_context` for the training worker on which it is run. Instead of each
worker building its own (sharded) dataset, one worker should register an
unsharded dataset, and the remaining workers should consume data from that
dataset.
```
dataset = dataset_fn()
dataset_id = tf.data.experimental.service.register_dataset(
service=FLAGS.tf_data_service_address,
dataset=dataset)
def new_dataset_fn(input_context):
del input_context
return tf.data.experimental.service.from_dataset_id(
processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
service=FLAGS.tf_data_service_address,
dataset_id=dataset_id,
job_name="shared_job")
dataset = strategy.distribute_datasets_from_function(new_dataset_fn)
```
#### coordinator.create_per_worker_dataset
`create_per_worker_dataset` works the same as
`distribute_datasets_from_function`.
```
dataset = dataset_fn()
dataset_id = tf.data.experimental.service.register_dataset(
service=FLAGS.tf_data_service_address,
dataset=dataset)
def new_dataset_fn(input_context):
del input_context
return tf.data.experimental.service.from_dataset_id(
processing_mode=tf.data.experimental.service.ShardingPolicy.OFF,
service=FLAGS.tf_data_service_address,
dataset_id=dataset_id,
job_name="shared_job")
dataset = coordinator.create_per_worker_dataset(new_dataset_fn)
```
## Limitations
- Python-based data processing: Datasets which use Python-based data processing
(e.g. `tf.py_function`, `tf.numpy_function`, or
`tf.data.Dataset.from_generator`) are currently not supported.
- Non-Serializable Resources: Datasets may only depend on TF resources that
support serialization. Serialization is currently supported for lookup
tables and variables. If your dataset depends on a TF resource that cannot be
serialized, please file a Github issue.
- Remote Resources: If a dataset depends on a resource, the dataset must be
registered from the same process that created the resource (e.g. the "chief"
job of ParameterServerStrategy).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops.data_service_ops import distribute
from tensorflow.python.data.experimental.ops.data_service_ops import from_dataset_id
from tensorflow.python.data.experimental.ops.data_service_ops import register_dataset
from tensorflow.python.data.experimental.ops.data_service_ops import ShardingPolicy
from tensorflow.python.data.experimental.service.server_lib import DispatcherConfig
from tensorflow.python.data.experimental.service.server_lib import DispatchServer
from tensorflow.python.data.experimental.service.server_lib import WorkerConfig
from tensorflow.python.data.experimental.service.server_lib import WorkerServer
| frreiss/tensorflow-fred | tensorflow/python/data/experimental/service/__init__.py | Python | apache-2.0 | 17,192 |
from thread import start_new_thread
from pyaudio import PyAudio
from pyspeech import best_speech_result, put_audio_data_in_queue
from time import sleep
import Queue
def background_stt(queue, profile, stt_type='google'):
    """
    Start the speech-to-text pipeline on a background thread.

    Recognized phrases are pushed onto *queue* as they are transcribed.

    :param queue: Queue.Queue that receives recognized text
    :param profile: user profile handed through to the STT backend
    :param stt_type: which STT backend to use (defaults to 'google')
    :return: None
    """
    start_new_thread(_spawn_listeners, (queue, profile, stt_type))
def _spawn_listeners(queue, profile, stt_type):
    """
    Worker entry point for the background STT thread.

    Spawns a second thread that pumps raw microphone audio into an internal
    queue, then loops forever transcribing each captured chunk.
    """
    audio = PyAudio()
    chunks = Queue.Queue()
    start_new_thread(put_audio_data_in_queue, (audio, chunks))
    while True:
        _listen(audio, queue, chunks, profile, stt_type)
def _listen(pyaudio, queue_out, queue_in, profile, stt_type):
    """
    Transcribe one chunk of audio from *queue_in* and, when the backend
    returns non-empty text, push it onto *queue_out*.
    """
    phrase = best_speech_result(pyaudio, queue_in.get(), profile, stt_type)
    if phrase != "":
        queue_out.put(phrase)
if __name__ == "__main__":
    import yaml
    # safe_load avoids constructing arbitrary Python objects from the YAML
    # file (yaml.load on untrusted input is a code-execution risk), and the
    # `with` block closes the file handle the old code leaked.
    with open("profile.yml") as profile_file:
        profile = yaml.safe_load(profile_file)
    q = Queue.Queue()
    background_stt(q, profile, 'att')
    # Echo recognized phrases forever; q.get() blocks until one arrives.
    while True:
        print(q.get())
| MattWis/constant_listener | constant_listener/constant_listener.py | Python | mit | 895 |
# ----------------------------------------------------------------------------
# Copyright (c) 2014--, biocore development team
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from cogent import DNA as DNA_cogent, LoadSeqs
from cogent.align.align import make_dna_scoring_dict, local_pairwise
def pair_hmm_align_unaligned_seqs(seqs, moltype=DNA_cogent, params=None):
    """
    Checks parameters for pairwise alignment, returns alignment.
    Code from Greg Caporaso.

    :param seqs: exactly two unaligned sequences (any form LoadSeqs accepts)
    :param moltype: molecule type, default cogent DNA
    :param params: optional dict with keys 'gap_open' (default 5),
        'gap_extend' (default 2) and 'score_matrix' (default: DNA scoring
        dict with match=1, transition=-1, transversion=-1)
    :raises ValueError: if seqs does not contain exactly two sequences
    """
    # `params=None` replaces the mutable-default-argument anti-pattern
    # (`params={}` shares one dict across all calls).
    if params is None:
        params = {}
    seqs = LoadSeqs(data=seqs, moltype=moltype, aligned=False)
    try:
        s1, s2 = seqs.values()
    except ValueError:
        raise ValueError(
            "Pairwise aligning of seqs requires exactly two seqs.")
    # dict.get with a default replaces the three try/except KeyError blocks.
    gap_open = params.get('gap_open', 5)
    gap_extend = params.get('gap_extend', 2)
    if 'score_matrix' in params:
        score_matrix = params['score_matrix']
    else:
        score_matrix = make_dna_scoring_dict(
            match=1, transition=-1, transversion=-1)
    return local_pairwise(s1, s2, score_matrix, gap_open, gap_extend)
| ekopylova/burrito-fillings | bfillings/align.py | Python | bsd-3-clause | 1,310 |
#!/usr/bin/env python3
from nltk.corpus import wordnet as wn
import sys, argparse, inflect
def explode_hyponyms(ss):
    """
    Recursively collect the leaf hyponym synsets beneath *ss*.

    Returns [ss] itself when the synset has no hyponyms, otherwise the
    concatenated leaves of every child, in child order.
    """
    children = ss.hyponyms()
    if not children:
        return [ss]
    leaves = []
    for child in children:
        leaves.extend(explode_hyponyms(child))
    return leaves
def wordlist(synsets, plurals=False):
    """
    Return the de-duplicated display words for *synsets*.

    Takes the first lemma of each synset, optionally pluralizes it, and
    replaces underscores with spaces. Order of the result is unspecified
    (duplicates are removed via a set, as before).

    :param synsets: iterable of wordnet synsets
    :param plurals: when True, pluralize each word with inflect
    """
    # Only construct the inflect engine when it is actually needed; the old
    # code built it unconditionally even for the common plurals=False path.
    pl = inflect.engine() if plurals else None
    names = []
    for s in synsets:
        name = s.lemmas()[0].name()
        if plurals:
            name = pl.plural(name)
        names.append(name.replace('_', ' '))
    return list(set(names))
# --- command-line interface -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--synset", type=str, default="animal", help="Synset to dump hyponyms of")
parser.add_argument("-l", "--list", action='store_true', help="List synsets")
parser.add_argument("-p", "--plurals", action='store_true', help="Generate plurals", default=False)
args = parser.parse_args()
# Resolve the requested synsets: a dotted name (e.g. "dog.n.01") is treated
# as an exact synset ID; anything else is looked up as a lemma.
synsets = []
if '.' in args.synset:
    synset = wn.synset(args.synset)
    # NOTE(review): wn.synset() raises on an unknown ID rather than returning
    # None, so this guard is likely dead code -- confirm against nltk docs.
    if not synset:
        print("Couldn't find synset with ID {}".format(args.synset));
        sys.exit(-1)
    synsets = [ synset ]
else:
    synsets = wn.synsets(args.synset)
    if not synsets:
        print("Couldn't find synsets matching {}".format(args.synset));
        sys.exit(-1)
# Redundant safety net: both branches above already exit when nothing matched.
if not synsets:
    print("No synsets found for {}".format(args.synset))
    sys.exit(-1)
# -l: just show the matched synsets and stop.
if args.list:
    print(synsets)
    sys.exit(0)
# Expand every matched synset down to its leaf hyponyms, then print one
# (de-duplicated) word per line.
sets = []
for ss in synsets:
    sets += explode_hyponyms(ss)
words = wordlist(sets, args.plurals)
for word in words:
    print(word)
| spikelynch/bots | amightyhost/hyponyms.py | Python | gpl-2.0 | 1,589 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django entry point: point Django at this project's settings
    # module, then hand the CLI arguments to the management framework
    # (runserver, migrate, etc.).
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ava.settings")
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| benhoff/ava | src/manage.py | Python | gpl-3.0 | 246 |
#!/usr/bin/env python
"""
Module containing all the player classes for the TicTacToe game. These should not be define directley and instead
only defined by the game classes
"""
import datetime
import logging
import socket
import pygame
import ai
class TTTPlayer(object):
    """
    Base ttt player. All subclasses must have the game_piece as the first argument, in
    order to preserve compatibility with the way that the TTTGame class is set up.
    """

    def __init__(self, game_piece):
        """
        Create the player.
        :param game_piece: 'x' or 'o'
        :raises ValueError: on any other game piece
        :return: None
        """
        # Membership test replaces the chained != comparisons.
        if game_piece not in ('x', 'o'):
            raise ValueError("Invalid game piece: {}. Game piece should be either 'x' or 'o'".format(game_piece))
        self.gp = game_piece
        self.game = None     # set later via setGame()
        self.curCol = None   # cursor color, derived from the board in setCursorCol()

    def setCursorCol(self):
        """
        (Re)sets the cursor color to the board's color for this classes game piece.
        :return: The color the cursor was set to
        """
        # __init__ guarantees gp is 'x' or 'o', so a plain else suffices.
        if self.gp == 'x':
            self.curCol = self.game.board.xCol
        else:
            self.curCol = self.game.board.oCol
        return self.curCol

    def setGame(self, game):
        """
        Attach this player to *game* and refresh the cursor color from its board.
        """
        self.game = game
        self.setCursorCol()

    def playPiece(self, pos, draw=True, color=None):
        """
        Tells the board to put a piece down in pos.
        :param pos: (col, row)
        :param draw: if False, only the string representation of the board is updated
        :param color: custom color to use for the piece
        :return: None
        """
        # The old version duplicated this branch per piece; setPiece already
        # takes the piece character, so only the draw path needs the split.
        if draw:
            if self.gp == 'x':
                self.game.board.drawX(pos, color)
            else:
                self.game.board.drawO(pos, color)
        else:
            self.game.board.setPiece(pos, self.gp)

    def getMove(self, timeout=60):
        """
        'Abstract' getMove function. Should be overwritten.
        :param timeout: how many seconds the player has to make a move.
        :return: (col, row) of player's chosen move
        """
        return "mymove"
class TTTHumanPlayer(TTTPlayer):
    """
    Human TTT Player.

    Reads keyboard input through pygame; the key bindings are selected by
    ``control_set`` ('wasd' or 'arrows').
    """

    def __init__(self, game_piece, control_set="wasd"):
        """
        Create the player.
        :param game_piece: 'x' or 'o'
        :param control_set: can be 'wasd' or 'arrows'
        :return: None
        """
        super(TTTHumanPlayer, self).__init__(game_piece)
        self.control_set = control_set
        # Populated by setControls() once a game (and therefore a board) exists.
        self.controls = []
        self.values = []
        self.keys = []

    def setGame(self, game):
        """
        Overwrites parent class' setGame method. Calls the parent's method and then self.setControls
        """
        super(TTTHumanPlayer, self).setGame(game)
        self.setControls()

    def setControls(self):
        """
        Sets up the controls for the class. setGame must be called first in order for this to run correctly.
        """
        # Action order matches key order: confirm, up, down, left, right.
        self.values = [self.game.board.callable, self.game.board.mvCurUp, self.game.board.mvCurDown,
                       self.game.board.mvCurLeft, self.game.board.mvCurRight]
        if self.control_set == "arrows":
            self.keys = [pygame.K_RETURN, pygame.K_UP, pygame.K_DOWN, pygame.K_LEFT, pygame.K_RIGHT]
        else:
            self.keys = [pygame.K_SPACE, pygame.K_w, pygame.K_s, pygame.K_a, pygame.K_d]
        # Pair each key with its action directly; replaces the index-based
        # loop over range(len(self.keys)).
        self.controls = [[key, action] for key, action in zip(self.keys, self.values)]

    def getMove(self, timeout=60):
        """
        Gets a move from the player and returns the player's chosen position.
        :param timeout: how many seconds the player has to make a move
        :return: (col, row) of player's chosen move
        """
        # need to track position, add a conditional in while loop to check for enter key
        self.game.board.cursorShow = True
        self.game.board.updateCursor(color=self.curCol)
        start = datetime.datetime.now()
        # this will run for sixty seconds, when the key found at self.controls[0][0] is not pressed and will only exit
        # when the marker found at the cursor position is empty and when self.game.exit is True
        while (datetime.datetime.now() - start).seconds < timeout and not \
                (pygame.key.get_pressed()[self.controls[0][0]] and self.game.board.getPiece(
                    self.game.board.cursorPos) == " ") and self.game.exit is False:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.game.exit = True
                elif event.type == pygame.KEYDOWN:
                    pressed = pygame.key.get_pressed()
                    for button, action in self.controls:
                        if pressed[button]:
                            action()
                    self.game.board.updateCursor(self.curCol)
            pygame.display.flip()
            self.game.clock.tick(self.game.fps)
        else:
            # while/else: runs when the loop ends without `break`, i.e. on
            # confirm-key, timeout, or exit -- the cursor square is the move.
            move = self.game.board.cursorPos
            self.playPiece(move)
        # if showing the cursor is not disabled then it will appear inbetween turns and after the game has ended
        self.game.board.cursorShow = False
        self.game.board.cursorPos = self.game.board.getEmptySpace()
        if self.game.board.cursorPos is not None:
            self.game.board.updateCursor()
        pygame.display.flip()
        self.game.clock.tick(self.game.fps)
        return move
class TTTAiPlayer(TTTPlayer):
    """
    A.I. TTT player backed by an exported neural net.
    """

    def __init__(self, game_piece, neural_net, default=False):
        """
        Create the ai player.
        :param game_piece: 'x' or 'o'
        :param neural_net: path to an exported neural net to use as the A.I.'s brain
        :param default: load the bundled default net (data/ai_default.txt) and
            ignore *neural_net*
        """
        super(TTTAiPlayer, self).__init__(game_piece)
        net_path = ai.DEFAULT_AI_PATH if default else neural_net
        self.neuralNet = ai.TTTNeuralNet.load(net_path)

    def getMove(self, timeout=60):
        """
        Ask the neural net for a move given the current board, play it, and
        tidy up the cursor/display state.
        :param timeout: unused; kept to match the parent signature
        :return: (col, row) of the chosen move
        """
        board = self.game.board
        move = board.translateNumToPos(self.neuralNet.getMove(self.game.turn, board.sBoard))
        self.playPiece(move)
        board.cursorPos = board.getEmptySpace()
        if board.cursorPos is not None:
            board.updateCursor()
        pygame.display.flip()
        self.game.clock.tick(self.game.fps)
        # posting an event keeps the game loop going
        pygame.event.post(pygame.event.Event(pygame.USEREVENT, {}))
        return move
# this is here for when the multiplayer will be properly added
class _TTTLanHumanPlayer(TTTHumanPlayer):
"""
LAN human player for playing online with other people. Works by creating a TCP stream connection in a socket on port
54541. During the player's turn, this class will send cursor and move information to the other instance and
vice-versa. This is still a work in progress
"""
def __init__(self, game_piece, turn, remote_plr_addr, control_set="wasd", port=54541):
"""
Create the network player.
:param game_piece: 'x' or 'o'
:param turn: True if this instance goes first, false if the remote player goes first
:param remote_plr_addr: the remote player's address
:param control_set: 'wasd' or 'arrows'
:param port: default is 54541, but a custom one can be used
:return: None
"""
self.game = TTTGame()
self.game.player1 = None
self.game.player2 = None
super(TTTLanHumanPlayer, self).__init__(game_piece, self.game, control_set=control_set)
self.curCol = self.game.board.curCol
self.turn = turn
self.myPiece = game_piece
if self.myPiece == 'x':
self.oppPiece = 'o'
else:
self.oppPiece = 'x'
self.values = [self.game.board.callable, self.netMvCurUp, self.netMvCurDown, self.netMvCurLeft,
self.netMvCurRight]
for i in range(len(self.controls))[1:]:
self.controls[i][1] = self.values[i]
self.COMMLENGTH = 2
self.TURNOVER = "oo"
self.GAMEOVER = "1" # result of game (t, x, o)
self.MVCURRIGHT = "02"
self.MVCURLEFT = "03"
self.MVCURUP = "04"
self.MVCURDOWN = "05"
self.UPDATECUR = "07"
self.PLAYMOVE = "8" # + position of move
self.EXIT = "09"
self.commandKey = {}
self.port = port
self.myAddress = socket.gethostname()
self.socket = None
self.genSock()
self.success = "Success!"
self.remotePlayer = (remote_plr_addr, self.port)
self.remotePlayerConn = None # this will be used to send the remote player commands
# self.connectToRemotePlayer()
    def main(self):
        """
        Begin the game: initialize the board UI, then alternate between taking
        our own turn (sending commands) and replaying the remote player's turn
        (listening for commands) until the game exits.
        :return: None
        """
        logging.info("Starting game")
        self.game.board.initUI()
        self.game.board.cursorPos = self.game.board.getEmptySpace()
        self.game.board.cursorShow = True
        self.game.board.updateCursor(self.curCol)
        pygame.display.flip()
        self.game.clock.tick(self.game.fps)
        # NOTE(review): `win` is never reassigned anywhere in this method, so
        # the game result is always returned as None -- WIP, confirm intent.
        win = None
        while self.game.exit is False:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.game.exit = True
            if self.turn is True:
                # Our turn: act as the local human player and broadcast moves.
                self.gp = self.myPiece
                logging.info("It is my turn. Sending commands.")
                self.getMove()
            else:
                # Remote turn: replay the opponent's commands; listenForCommands
                # flips self.turn back when it receives TURNOVER.
                self.gp = self.oppPiece
                logging.info("It is the remote player's turn. Listening for commands.")
                self.listenForCommands()
        return win
    def netMvCurUp(self):
        """
        Moves the cursor up both on the remote player and locally.
        :return: None
        """
        # Send first so the remote board replays the motion in the same order
        # it happened here.
        self.sendCommand(self.MVCURUP)
        self.game.board.mvCurUp()
    def netMvCurDown(self):
        """
        Moves the cursor down on both the remote player and locally.
        :return: None
        """
        # Send first so the remote board replays the motion in the same order
        # it happened here.
        self.sendCommand(self.MVCURDOWN)
        self.game.board.mvCurDown()
    def netMvCurLeft(self):
        """
        Moves the cursor left on both the remote player and locally.
        :return: None
        """
        # Send first so the remote board replays the motion in the same order
        # it happened here.
        self.sendCommand(self.MVCURLEFT)
        self.game.board.mvCurLeft()
    def netMvCurRight(self):
        """
        Moves the cursor right on both the remote player and locally.
        :return: None
        """
        # Send first so the remote board replays the motion in the same order
        # it happened here.
        self.sendCommand(self.MVCURRIGHT)
        self.game.board.mvCurRight()
def executeCommand(self, command):
"""
Interprets the command sent from a remote player and executes the corresponding function.
:param command: command to interpret
:return: None
"""
if command == self.MVCURUP:
self.game.board.mvCurUp()
self.game.board.updateCursor()
elif command == self.MVCURDOWN:
self.game.board.mvCurDown()
self.game.board.updateCursor()
elif command == self.MVCURRIGHT:
self.game.board.mvCurRight()
self.game.board.updateCursor()
elif command == self.MVCURLEFT:
self.game.board.mvCurLeft()
self.game.board.updateCursor()
elif command == self.UPDATECUR:
self.game.board.updateCursor()
elif command[0] == self.PLAYMOVE:
move = self.intToCoord(command[1])
self.playPiece(move, draw=False)
self.game.board.cursorShow = False
self.game.board.cursorPos = self.game.board.getEmptySpace()
if self.game.board.cursorPos is not None:
self.game.board.updateCursor(color=self.curCol)
self.playPiece(move)
else:
self.playPiece(move)
elif command == self.EXIT:
self.game.exit = True
elif command[0] == self.GAMEOVER:
logging.info("The game has been reported over. The winner is {}".format(self.GAMEOVER[1]))
self.game.exit = True
self.disconnectRemotePlayer()
elif command == self.TURNOVER:
self.turn = True
pygame.display.flip()
self.game.clock.tick(self.game.fps)
    def connectToRemotePlayer(self):
        """
        Creates a connection with the remote player. How this is done is based on whether this player goes first or not.
        If this player goes first, it will try to initiate a connection with the other player who is going to act as
        a server. If the other player goes first, then this player will create a server and wait for a connection.
        :return: None
        """
        try:
            if self.turn is True:
                logging.info("It is this player's turn, therefore a connection with the other player will be "
                             "sought out.")
                self._connectRemotePlayer()
                # We dialed out, so our own socket doubles as the command channel.
                self.remotePlayerConn = self.socket
            else:
                logging.info("It is the other player's turn, therefore a server will be set up to wait for "
                             "his/her connection.")
                self._listenForRemotePlayer()  # this will set self.remotePlayerConn automatically
        # NOTE(review): Python 2-only `except ... , e` syntax -- this file targets Python 2.
        except Exception, e:
            logging.error("An error occurred while connecting to remote player: '{}'. Turn: '{}'".format(e, self.turn))
def genSock(self):
"""
Creates a new socket object sets it to self.socket
"""
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
pass
except Exception, e:
logging.error("Failure to bind socket to address '{}' on port {}: {}".format(self.myAddress, self.port, e),
exc_info=True)
    def listenForCommands(self):
        """
        Creates a server that listens for the other player to send commands.
        Blocks, executing each received command, until the remote player
        signals the end of its turn (TURNOVER).
        :return: None
        """
        logging.info("Starting to listen for commands")
        self.game.board.cursorPos = self.game.board.getEmptySpace()
        self.game.board.cursorShow = True
        self.game.board.updateCursor(color=self.curCol)
        while True:
            # Commands are fixed-width (COMMLENGTH bytes), so one recv reads
            # exactly one command.
            data = self.remotePlayerConn.recv(self.COMMLENGTH)
            logging.info("Received {}".format(data))
            if data:
                self.executeCommand(data)
            if data == self.TURNOVER:
                break
    def _listenForRemotePlayer(self):
        """
        Listens for a remote player connection and then sets self.remotePlayerConn to the returned connection
        object. Performs a simple handshake: send the success token, then wait
        up to 60 seconds to receive the same token back.
        :return: the success token on a completed handshake
        :raises Exception: if the handshake token is not received in time
        """
        logging.info("Listening for player now")
        self.socket.bind((self.myAddress, self.port))
        self.socket.listen(1)
        while True:
            logging.info("Waiting for a connection from the remote player")
            connection, client_address = self.socket.accept()
            logging.info("Connection from: {}. Sending success message".format(client_address))
            connection.sendall(self.success)
            logging.info("Waiting 60 seconds for success message in return.")
            # Accumulate recv()s until the full token has arrived or 60s pass.
            amount_received = 0
            amount_expected = len(self.success)
            response_message = ""
            start = datetime.datetime.now()
            while amount_received < amount_expected and (datetime.datetime.now() - start).seconds <= 60:
                data = connection.recv(len(self.success))
                amount_received += len(data)
                response_message += data
            if response_message == self.success:
                logging.info("Received success message from other player. Connection successful.")
                self.remotePlayerConn = connection
                return self.success
            else:
                raise Exception("Did not receive a success message in sixty seconds, instead got: '{}'".format(
                    response_message))
            # NOTE, the return and raise statements will break the while loop
    def _connectRemotePlayer(self):
        """
        Creates a connection to the remote player to send commands: the client
        side of the handshake in _listenForRemotePlayer (receive the success
        token, then echo it back).
        :return: the success token on a completed handshake
        :raises Exception: if the handshake token is not received within 60 seconds
        """
        logging.info("Creating connection with remote player.")
        self.socket.connect(self.remotePlayer)
        logging.info("Waiting 60 seconds for a response")
        start = datetime.datetime.now()
        # Accumulate recv()s until the full token has arrived or 60s pass.
        amount_received = 0
        amount_expected = len(self.success)
        response_message = ""
        while amount_received < amount_expected and (datetime.datetime.now() - start).seconds <= 60:
            data = self.socket.recv(len(self.success))
            amount_received += len(data)
            response_message += data
        if response_message == self.success:
            logging.info("Received '{}' in response to request for connection, sending "
                         "success message back".format(response_message))
            self.socket.sendall(self.success)
            return self.success
        else:
            raise Exception("Did not receive a success message in sixty seconds, instead got: '{}'".format(
                response_message))
def disconnectRemotePlayer(self):
"""
Closes the connection to the remote player that was used to send commands.
:return: None
"""
logging.info("Closing command connection with remote player")
try:
self.socket.close()
self.socket.shutdown()
except Exception, e:
logging.warning("Failure to shut down socket: '{}'".format(e), exc_info=True)
def sendCommand(self, command):
"""
Creates a client that sends commands to the other player.
:return: None
"""
try:
logging.info("Sending command {}".format(command))
self.remotePlayerConn.sendall(command)
except Exception, e:
logging.warning("Failure to send command '{}': {}".format(command, e), exc_info=True)
@staticmethod
def coordToInt(coordinates):
"""
Converts the tuple of coordinates of the game board to their integer representation
:param coordinates: tuple of coordinates (col, row)
:return: int value representing position on board
"""
return coordinates[0] + (coordinates[1] - 1) * 3
@staticmethod
def intToCoord(intCoord):
"""
Converts the int representation of coordinates to the tuple representation
:param int: int coordinate to convert
:return: tuple coordinate
"""
intCoord = int(intCoord)
if intCoord > 6:
return intCoord - 6, 3
elif intCoord > 3:
return intCoord - 3, 2
else:
return intCoord, 1
    def getMove(self, timeout=60):
        """
        Gets a move from this player and sends all move information (such as cursor location) to the remote player.
        :param timeout: Max seconds to wait for the player to confirm a move.
        :return: None
        """
        move = None  # need to set to global status
        try:
            # Place the cursor on some empty space and draw it.
            self.game.board.cursorPos = self.game.board.getEmptySpace()
            self.game.board.cursorShow = True
            self.game.board.updateCursor(color=self.curCol)
            start = datetime.datetime.now()
            done = False
            # Pump events until: timeout expires, or the player confirmed a
            # move on an empty square, or the game is exiting.
            while (datetime.datetime.now() - start).seconds < timeout and not (done and self.game.board.getPiece(
                    self.game.board.cursorPos) == " ") and self.game.exit is False:
                for event in pygame.event.get():
                    if event.type == pygame.QUIT:
                        self.game.exit = True
                        self.sendCommand(self.EXIT)
                    elif event.type == pygame.KEYDOWN:
                        pressed = pygame.key.get_pressed()
                        # controls[0][0] is presumably the 'confirm move' key -- TODO confirm.
                        if pressed[self.controls[0][0]]:
                            done = True
                        else:
                            # Remaining controls map keys to cursor-movement actions.
                            for button, action in self.controls:
                                if pressed[button]:
                                    action()
                self.game.board.updateCursor(self.curCol)
                pygame.display.flip()
                self.game.clock.tick(self.game.fps)
            else:
                # while/else: runs when the loop ends without a break; there is
                # no break above, so this always runs -- NOTE(review): on a
                # plain timeout the current cursor position is still sent as
                # the move even though the player never confirmed it.
                move = self.game.board.cursorPos
                self.sendCommand(self.PLAYMOVE + str(self.coordToInt(move)))
                self.playPiece(move, draw=False)
                self.game.board.cursorShow = False
                self.game.board.cursorPos = self.game.board.getEmptySpace()
                if self.game.board.cursorPos is not None:
                    self.game.board.updateCursor(color=self.curCol)
                    self.playPiece(move)
                else:
                    self.playPiece(move)
                pygame.display.flip()
                self.game.clock.tick(self.game.fps)
        finally:
            logging.info("Move received and sent to remote player. Checking for a win.")
            win = self.game.board.checkForWin(move)[0]
            if win is not None:
                logging.info("Game is over. Result: {}. Sending game over message to other player".format(win))
                self.sendCommand(self.GAMEOVER + win)
                logging.info("Message sent, closing connection and exiting")
                self.disconnectRemotePlayer()
                self.game.exit = True
            else:
                logging.info("The game is not over yet. Ending turn now.")
                self.sendCommand(self.TURNOVER)
                self.turn = False
| DevelopForLizardz/TicTacTio | tttio/players.py | Python | mit | 22,587 |
# Copyright 2015, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for the swift service."""
import os
from django import forms
from django.http import StreamingHttpResponse
from django.utils.http import urlunquote
from django.views.decorators.csrf import csrf_exempt
from django.views import generic
import six
from horizon import exceptions
from openstack_dashboard import api
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
from openstack_dashboard.api import swift
@urls.register
class Info(generic.View):
    """REST endpoint exposing the capabilities of the Swift installation."""

    url_regex = r'swift/info/$'

    @rest_utils.ajax()
    def get(self, request):
        """Return the Swift capability information as ``{'info': ...}``."""
        return {'info': api.swift.swift_get_capabilities(request)}
@urls.register
class Containers(generic.View):
    """REST endpoint listing the Swift containers of an account."""

    url_regex = r'swift/containers/$'

    @rest_utils.ajax()
    def get(self, request):
        """List this account's containers.

        :param prefix: optional container name prefix; when given, only
            containers whose names begin with this value are returned.
        TODO(neillc): Add pagination
        """
        lookup_kwargs = {}
        prefix = request.GET.get('prefix', None)
        if prefix:
            lookup_kwargs['prefix'] = prefix
        containers, has_more = api.swift.swift_get_containers(
            request, **lookup_kwargs)
        items = [container.to_dict() for container in containers]
        return {'items': items, 'has_more': has_more}
@urls.register
class Container(generic.View):
    """API for swift container level information"""
    url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/$'
    @rest_utils.ajax()
    def get(self, request, container):
        """Get the container details"""
        return api.swift.swift_get_container(request, container).to_dict()
    @rest_utils.ajax()
    def post(self, request, container):
        """Create the named container, optionally flagged public via DATA."""
        metadata = {}
        if 'is_public' in request.DATA:
            metadata['is_public'] = request.DATA['is_public']
        # This will raise an exception if the container already exists
        try:
            api.swift.swift_create_container(request, container,
                                             metadata=metadata)
        except exceptions.AlreadyExists as e:
            # 409 Conflict
            return rest_utils.JSONResponse(str(e), 409)
        return rest_utils.CreatedResponse(
            u'/api/swift/containers/%s' % container,
        )
    @rest_utils.ajax()
    def delete(self, request, container):
        """Delete the named container; responds 409 if it is not empty."""
        try:
            api.swift.swift_delete_container(request, container)
        except exceptions.Conflict as e:
            # It cannot be deleted if it's not empty.
            return rest_utils.JSONResponse(str(e), 409)
    @rest_utils.ajax(data_required=True)
    def put(self, request, container):
        """Update the container's public/private metadata flag."""
        metadata = {'is_public': request.DATA['is_public']}
        api.swift.swift_update_container(request, container, metadata=metadata)
@urls.register
class Objects(generic.View):
    """API for a list of swift objects"""
    url_regex = r'swift/containers/(?P<container>[^/]+)/objects/$'
    @rest_utils.ajax()
    def get(self, request, container):
        """List the objects of a container.

        :param path: optional URL-encoded pseudo-folder path; when given,
            only objects under that prefix are listed and the folder entry
            itself is excluded from the result.
        """
        path = request.GET.get('path')
        if path is not None:
            path = urlunquote(path)
        objects = api.swift.swift_get_objects(
            request,
            container,
            prefix=path
        )
        # filter out the folder from the listing if we're filtering for
        # contents of a (pseudo) folder
        contents = [{
            'path': o.subdir if isinstance(o, swift.PseudoFolder) else o.name,
            'name': o.name.split('/')[-1],
            'bytes': o.bytes,
            'is_subdir': isinstance(o, swift.PseudoFolder),
            'is_object': not isinstance(o, swift.PseudoFolder),
            'content_type': getattr(o, 'content_type', None)
        } for o in objects[0] if o.name != path]
        return {'items': contents}
class UploadObjectForm(forms.Form):
    """Form used to validate the (optional) file payload of object uploads."""
    file = forms.FileField(required=False)
@urls.register
class Object(generic.View):
    """API for a single swift object or pseudo-folder"""
    url_regex = r'swift/containers/(?P<container>[^/]+)/object/' \
                '(?P<object_name>.+)$'
    # note: not an AJAX request - the body will be raw file content
    @csrf_exempt
    def post(self, request, container, object_name):
        """Create or replace an object or pseudo-folder.

        If the object_name (ie. POST path) ends in a '/' then a folder is
        created, rather than an object. Any file content passed along with
        the request will be ignored in that case.
        POST parameter:
        :param file: the file data for the upload.
        """
        form = UploadObjectForm(request.POST, request.FILES)
        if not form.is_valid():
            raise rest_utils.AjaxError(500, 'Invalid request')
        data = form.clean()
        # A trailing slash denotes a pseudo-folder rather than an object.
        if object_name[-1] == '/':
            result = api.swift.swift_create_pseudo_folder(
                request,
                container,
                object_name
            )
        else:
            result = api.swift.swift_upload_object(
                request,
                container,
                object_name,
                data['file']
            )
        return rest_utils.CreatedResponse(
            u'/api/swift/containers/%s/object/%s' % (container, result.name)
        )
    @rest_utils.ajax()
    def delete(self, request, container, object_name):
        """Delete the object, or the pseudo-folder if the name ends in '/'."""
        if object_name[-1] == '/':
            try:
                api.swift.swift_delete_folder(request, container, object_name)
            except exceptions.Conflict as e:
                # In case the given object is pseudo folder
                # It cannot be deleted if it's not empty.
                return rest_utils.JSONResponse(str(e), 409)
        else:
            api.swift.swift_delete_object(request, container, object_name)
    def get(self, request, container, object_name):
        """Get the object contents."""
        obj = api.swift.swift_get_object(
            request,
            container,
            object_name
        )
        # Add the original file extension back on if it wasn't preserved in the
        # name given to the object.
        filename = object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1]
        if not os.path.splitext(obj.name)[1] and obj.orig_name:
            name, ext = os.path.splitext(obj.orig_name)
            filename = "%s%s" % (filename, ext)
        response = StreamingHttpResponse(obj.data)
        # Commas are stripped because they act as separators inside the
        # Content-Disposition header value.
        safe = filename.replace(",", "")
        if six.PY2:
            safe = safe.encode('utf-8')
        response['Content-Disposition'] = 'attachment; filename="%s"' % safe
        response['Content-Type'] = 'application/octet-stream'
        response['Content-Length'] = obj.bytes
        return response
@urls.register
class ObjectMetadata(generic.View):
    """REST endpoint returning the metadata of a single swift object."""

    url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/' \
                '(?P<object_name>.+)$'

    @rest_utils.ajax()
    def get(self, request, container, object_name):
        """Fetch the object's metadata (no payload) as a dict."""
        metadata = api.swift.swift_get_object(
            request,
            container_name=container,
            object_name=object_name,
            with_data=False
        )
        return metadata.to_dict()
@urls.register
class ObjectCopy(generic.View):
    """REST endpoint that copies a swift object to a new location."""

    url_regex = r'swift/containers/(?P<container>[^/]+)/copy/' \
                '(?P<object_name>.+)$'

    @rest_utils.ajax()
    def post(self, request, container, object_name):
        """Copy ``object_name`` to the destination given in the request body.

        Responds 409 Conflict when the destination object already exists.
        """
        dest_container = request.DATA['dest_container']
        dest_name = request.DATA['dest_name']
        try:
            result = api.swift.swift_copy_object(
                request, container, object_name, dest_container, dest_name)
        except exceptions.AlreadyExists as e:
            return rest_utils.JSONResponse(str(e), 409)
        return rest_utils.CreatedResponse(
            u'/api/swift/containers/%s/object/%s' % (dest_container,
                                                     result.name)
        )
| BiznetGIO/horizon | openstack_dashboard/api/rest/swift.py | Python | apache-2.0 | 9,164 |
import typing
from typing import Any, Callable, List, Optional, Sequence
import jax
import jax.nn as jnn
import jax.random as jrandom
from ..custom_types import Array
from ..module import Module, static_field
from .linear import Linear
def _identity(x):
    """Identity function; the default ``final_activation`` for ``MLP``."""
    return x
# When generating documentation, swap jax.nn.relu for a stub simply named
# ``relu`` so the rendered default for ``activation`` reads nicely.
if getattr(typing, "GENERATING_DOCUMENTATION", False):
    def relu(_):
        pass
    jnn.relu = relu
_identity.__qualname__ = "identity"  # Renamed for nicer documentation.
class MLP(Module):
    """Standard Multi-Layer Perceptron; also known as a feed-forward network."""
    layers: List[Linear]
    activation: Callable
    final_activation: Callable
    # Hyperparameters are stored as static fields so they are part of the
    # PyTree structure rather than trainable leaves.
    in_size: int = static_field()
    out_size: int = static_field()
    width_size: int = static_field()
    depth: int = static_field()
    def __init__(
        self,
        in_size: int,
        out_size: int,
        width_size: int,
        depth: int,
        activation: Callable = jnn.relu,
        final_activation: Callable = _identity,
        *,
        key: "jax.random.PRNGKey",
        **kwargs
    ):
        """**Arguments**:
        - `in_size`: The size of the input layer.
        - `out_size`: The size of the output layer.
        - `width_size`: The size of each hidden layer.
        - `depth`: The number of hidden layers.
        - `activation`: The activation function after each hidden layer. Defaults to
            ReLU.
        - `final_activation`: The activation function after the output layer. Defaults
            to the identity.
        - `key`: A `jax.random.PRNGKey` used to provide randomness for parameter
            initialisation. (Keyword only argument.)
        """
        super().__init__(**kwargs)
        # One key per linear layer: `depth` hidden layers plus the output layer.
        keys = jrandom.split(key, depth + 1)
        layers = []
        if depth == 0:
            # No hidden layers: a single affine map from input to output.
            layers.append(Linear(in_size, out_size, key=keys[0]))
        else:
            layers.append(Linear(in_size, width_size, key=keys[0]))
            for i in range(depth - 1):
                layers.append(Linear(width_size, width_size, key=keys[i + 1]))
            layers.append(Linear(width_size, out_size, key=keys[-1]))
        self.layers = layers
        self.in_size = in_size
        self.out_size = out_size
        self.width_size = width_size
        self.depth = depth
        self.activation = activation
        self.final_activation = final_activation
    def __call__(
        self, x: Array, *, key: Optional["jax.random.PRNGKey"] = None
    ) -> Array:
        """**Arguments:**
        - `x`: A JAX array with shape `(in_size,)`.
        - `key`: Ignored; provided for compatibility with the rest of the Equinox API.
            (Keyword only argument.)
        **Returns:**
        A JAX array with shape `(out_size,)`.
        """
        # Every layer but the last is followed by the hidden activation...
        for layer in self.layers[:-1]:
            x = layer(x)
            x = self.activation(x)
        # ...then the output layer is followed by the final activation.
        x = self.layers[-1](x)
        x = self.final_activation(x)
        return x
class Sequential(Module):
    """A sequence of [`equinox.Module`][]s applied in order."""
    layers: Sequence[Module]
    def __call__(self, x: Any, *, key: Optional["jax.random.PRNGKey"] = None) -> Any:
        """**Arguments:**
        - `x`: Argument passed to the first member of the sequence.
        - `key`: A `jax.random.PRNGKey`, which will be split and passed to every layer
            to provide any desired randomness. (Optional. Keyword only argument.)
        **Returns:**
        The output of the last member of the sequence.
        """
        # Without a key every layer receives key=None; with one, each layer
        # gets its own independent subkey.
        if key is None:
            keys = [None] * len(self.layers)
        else:
            keys = jrandom.split(key, len(self.layers))
        for layer, key in zip(self.layers, keys):
            x = layer(x, key=key)
        return x
# Attach an explicit constructor docstring describing Sequential's one field
# (the dataclass-generated __init__ has none of its own).
Sequential.__init__.__doc__ = """**Arguments:**
- `layers`: A sequence of [`equinox.Module`][]s.
"""
| patrick-kidger/equinox | equinox/nn/composed.py | Python | apache-2.0 | 3,841 |
# -*- coding: utf-8 -*-
# Copyright 2005 Michael Urman
# Copyright 2016 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import re
import struct
from mutagen._tags import Tags
from mutagen._util import DictProxy, convert_error, read_full
from mutagen._compat import PY3, itervalues, izip_longest
from ._util import (
BitPaddedInt,
unsynch,
ID3JunkFrameError,
ID3EncryptionUnsupportedError,
is_valid_frame_id,
error,
ID3NoHeaderError,
ID3UnsupportedVersionError,
ID3SaveConfig,
)
from ._frames import (
TDRC,
APIC,
TDOR,
TIME,
TIPL,
TORY,
TDAT,
Frames_2_2,
TextFrame,
TYER,
Frame,
IPLS,
Frames,
)
class ID3Header(object):
    """The header of an ID3v2 tag, parsed from the first 10 bytes of a file."""
    _V24 = (2, 4, 0)
    _V23 = (2, 3, 0)
    _V22 = (2, 2, 0)
    _V11 = (1, 1)
    # Flag bits taken from the header's flags byte.
    f_unsynch = property(lambda s: bool(s._flags & 0x80))
    f_extended = property(lambda s: bool(s._flags & 0x40))
    f_experimental = property(lambda s: bool(s._flags & 0x20))
    f_footer = property(lambda s: bool(s._flags & 0x10))
    _known_frames = None
    @property
    def known_frames(self):
        """The frame-name -> frame-class mapping for this tag's version."""
        if self._known_frames is not None:
            return self._known_frames
        elif self.version >= ID3Header._V23:
            return Frames
        elif self.version >= ID3Header._V22:
            return Frames_2_2
    @convert_error(IOError, error)
    def __init__(self, fileobj=None):
        """Raises ID3NoHeaderError, ID3UnsupportedVersionError or error"""
        if fileobj is None:
            # for testing
            self._flags = 0
            return
        fn = getattr(fileobj, "name", "<unknown>")
        data = fileobj.read(10)
        if len(data) != 10:
            raise ID3NoHeaderError("%s: too small" % fn)
        id3, vmaj, vrev, flags, size = struct.unpack(">3sBBB4s", data)
        self._flags = flags
        # The stored size excludes the 10-byte header itself.
        self.size = BitPaddedInt(size) + 10
        self.version = (2, vmaj, vrev)
        if id3 != b"ID3":
            raise ID3NoHeaderError("%r doesn't start with an ID3 tag" % fn)
        if vmaj not in [2, 3, 4]:
            raise ID3UnsupportedVersionError("%r ID3v2.%d not supported" % (fn, vmaj))
        if not BitPaddedInt.has_valid_padding(size):
            raise error("Header size not synchsafe")
        # Each version defines which flag bits are valid; reject the rest.
        if (self.version >= self._V24) and (flags & 0x0F):
            raise error("%r has invalid flags %#02x" % (fn, flags))
        elif (self._V23 <= self.version < self._V24) and (flags & 0x1F):
            raise error("%r has invalid flags %#02x" % (fn, flags))
        if self.f_extended:
            extsize_data = read_full(fileobj, 4)
            if PY3:
                frame_id = extsize_data.decode("ascii", "replace")
            else:
                frame_id = extsize_data
            if frame_id in Frames:
                # Some tagger sets the extended header flag but
                # doesn't write an extended header; in this case, the
                # ID3 data follows immediately. Since no extended
                # header is going to be long enough to actually match
                # a frame, and if it's *not* a frame we're going to be
                # completely lost anyway, this seems to be the most
                # correct check.
                # https://github.com/quodlibet/quodlibet/issues/126
                self._flags ^= 0x40
                extsize = 0
                fileobj.seek(-4, 1)
            elif self.version >= self._V24:
                # "Where the 'Extended header size' is the size of the whole
                # extended header, stored as a 32 bit synchsafe integer."
                extsize = BitPaddedInt(extsize_data) - 4
                if not BitPaddedInt.has_valid_padding(extsize_data):
                    raise error("Extended header size not synchsafe")
            else:
                # "Where the 'Extended header size', currently 6 or 10 bytes,
                # excludes itself."
                extsize = struct.unpack(">L", extsize_data)[0]
            self._extdata = read_full(fileobj, extsize)
def determine_bpi(data, frames, EMPTY=b"\x00" * 10):
    """Takes id3v2.4 frame data and determines if ints or bitpaddedints
    should be used for parsing. Needed because iTunes used to write
    normal ints for frame sizes.
    """
    # count number of tags found as BitPaddedInt and how far past
    o = 0
    asbpi = 0
    while o < len(data) - 10:
        part = data[o : o + 10]
        if part == EMPTY:
            # Hit the padding area; record how far we overshot the data.
            bpioff = -((len(data) - o) % 10)
            break
        name, size, flags = struct.unpack(">4sLH", part)
        size = BitPaddedInt(size)
        o += 10 + size
        if PY3:
            try:
                name = name.decode("ascii")
            except UnicodeDecodeError:
                continue
        if name in frames:
            asbpi += 1
    else:
        # while/else: the loop ran off the end without hitting padding.
        bpioff = o - len(data)
    # count number of tags found as int and how far past
    o = 0
    asint = 0
    while o < len(data) - 10:
        part = data[o : o + 10]
        if part == EMPTY:
            intoff = -((len(data) - o) % 10)
            break
        name, size, flags = struct.unpack(">4sLH", part)
        o += 10 + size
        if PY3:
            try:
                name = name.decode("ascii")
            except UnicodeDecodeError:
                continue
        if name in frames:
            asint += 1
    else:
        intoff = o - len(data)
    # if more tags as int, or equal and bpi is past and int is not
    if asint > asbpi or (asint == asbpi and (bpioff >= 1 and intoff <= 1)):
        return int
    return BitPaddedInt
class ID3Tags(DictProxy, Tags):
    """A dict-like collection of ID3 frames, keyed by each frame's HashKey."""
    __module__ = "mutagen.id3"
    def __init__(self, *args, **kwargs):
        # Raw frame payloads that could not be parsed; preserved so they can
        # be written back unchanged when saving with the same tag version.
        self.unknown_frames = []
        self._unknown_v2_version = 4
        super(ID3Tags, self).__init__(*args, **kwargs)
    def _read(self, header, data):
        # Parse frames from raw tag data and record them; returns whatever
        # trailing data (usually padding) was left unparsed.
        frames, unknown_frames, data = read_frames(header, data, header.known_frames)
        for frame in frames:
            self._add(frame, False)
        self.unknown_frames = unknown_frames
        self._unknown_v2_version = header.version[1]
        return data
    def _write(self, config):
        # Sort frames by 'importance', then reverse frame size and then frame
        # hash to get a stable result
        order = ["TIT2", "TPE1", "TRCK", "TALB", "TPOS", "TDRC", "TCON"]
        framedata = [(f, save_frame(f, config=config)) for f in itervalues(self)]
        def get_prio(frame):
            # Frames not in `order` sort after all of the well-known ones.
            try:
                return order.index(frame.FrameID)
            except ValueError:
                return len(order)
        def sort_key(items):
            frame, data = items
            return (get_prio(frame), len(data), frame.HashKey)
        framedata = [d for (f, d) in sorted(framedata, key=sort_key)]
        # only write unknown frames if they were loaded from the version
        # we are saving with. Theoretically we could upgrade frames
        # but some frames can be nested like CHAP, so there is a chance
        # we create a mixed frame mess.
        if self._unknown_v2_version == config.v2_version:
            framedata.extend(data for data in self.unknown_frames if len(data) > 10)
        return bytearray().join(framedata)
    def getall(self, key):
        """Return all frames with a given name (the list may be empty).
        Args:
            key (text): key for frames to get
        This is best explained by examples::
            id3.getall('TIT2') == [id3['TIT2']]
            id3.getall('TTTT') == []
            id3.getall('TXXX') == [TXXX(desc='woo', text='bar'),
                                   TXXX(desc='baz', text='quuuux'), ...]
        Since this is based on the frame's HashKey, which is
        colon-separated, you can use it to do things like
        ``getall('COMM:MusicMatch')`` or ``getall('TXXX:QuodLibet:')``.
        """
        if key in self:
            return [self[key]]
        else:
            # Fall back to a prefix match on the colon-separated HashKey.
            key = key + ":"
            return [v for s, v in self.items() if s.startswith(key)]
    def setall(self, key, values):
        """Delete frames of the given type and add frames in 'values'.
        Args:
            key (text): key for frames to delete
            values (list[Frame]): frames to add
        """
        self.delall(key)
        for tag in values:
            self[tag.HashKey] = tag
    def delall(self, key):
        """Delete all tags of a given kind; see getall.
        Args:
            key (text): key for frames to delete
        """
        if key in self:
            del self[key]
        else:
            key = key + ":"
            for k in list(self.keys()):
                if k.startswith(key):
                    del self[k]
    def pprint(self):
        """
        Returns:
            text: tags in a human-readable format.
        "Human-readable" is used loosely here. The format is intended
        to mirror that used for Vorbis or APEv2 output, e.g.
            ``TIT2=My Title``
        However, ID3 frames can have multiple keys:
            ``POPM=user@example.org=3 128/255``
        """
        frames = sorted(Frame.pprint(s) for s in self.values())
        return "\n".join(frames)
    def _add(self, frame, strict):
        """Add a frame.
        Args:
            frame (Frame): the frame to add
            strict (bool): if this should raise in case it can't be added
                and frames shouldn't be merged.
        """
        if not isinstance(frame, Frame):
            raise TypeError("%r not a Frame instance" % frame)
        orig_frame = frame
        frame = frame._upgrade_frame()
        if frame is None:
            if not strict:
                return
            raise TypeError("Can't upgrade %r frame" % type(orig_frame).__name__)
        hash_key = frame.HashKey
        if strict or hash_key not in self:
            self[hash_key] = frame
            return
        # Try to merge frames, or change the new one. Since changing
        # the new one can lead to new conflicts, try until everything is
        # either merged or added.
        while True:
            old_frame = self[hash_key]
            new_frame = old_frame._merge_frame(frame)
            new_hash = new_frame.HashKey
            if new_hash == hash_key:
                self[hash_key] = new_frame
                break
            else:
                assert new_frame is frame
                if new_hash not in self:
                    self[new_hash] = new_frame
                    break
                hash_key = new_hash
    def loaded_frame(self, tag):
        """Deprecated; use the add method."""
        self._add(tag, True)
    def add(self, frame):
        """Add a frame to the tag."""
        # add = loaded_frame (and vice versa) break applications that
        # expect to be able to override loaded_frame (e.g. Quod Libet),
        # as does making loaded_frame call add.
        self.loaded_frame(frame)
    def __setitem__(self, key, tag):
        # Only Frame instances may be stored in the mapping.
        if not isinstance(tag, Frame):
            raise TypeError("%r not a Frame instance" % tag)
        super(ID3Tags, self).__setitem__(key, tag)
    def __update_common(self):
        """Updates done by both v23 and v24 update"""
        if "TCON" in self:
            # Get rid of "(xx)Foobr" format.
            self["TCON"].genres = self["TCON"].genres
        # Normalize legacy three-letter picture MIME shorthands.
        mimes = {"PNG": "image/png", "JPG": "image/jpeg"}
        for pic in self.getall("APIC"):
            if pic.mime in mimes:
                newpic = APIC(
                    encoding=pic.encoding,
                    mime=mimes[pic.mime],
                    type=pic.type,
                    desc=pic.desc,
                    data=pic.data,
                )
                self.add(newpic)
    def update_to_v24(self):
        """Convert older tags into an ID3v2.4 tag.
        This updates old ID3v2 frames to ID3v2.4 ones (e.g. TYER to
        TDRC). If you intend to save tags, you must call this function
        at some point; it is called by default when loading the tag.
        """
        self.__update_common()
        # TDAT, TYER, and TIME have been turned into TDRC.
        timestamps = []
        old_frames = [self.pop(n, []) for n in ["TYER", "TDAT", "TIME"]]
        for y, d, t in izip_longest(*old_frames, fillvalue=u""):
            ym = re.match(r"([0-9]+)\Z", y)
            dm = re.match(r"([0-9]{2})([0-9]{2})\Z", d)
            tm = re.match(r"([0-9]{2})([0-9]{2})\Z", t)
            timestamp = ""
            if ym:
                timestamp += u"%s" % ym.groups()
            if dm:
                # TDAT stores DDMM; ISO order is month first.
                timestamp += u"-%s-%s" % dm.groups()[::-1]
            if tm:
                timestamp += u"T%s:%s:00" % tm.groups()
            if timestamp:
                timestamps.append(timestamp)
        if timestamps and "TDRC" not in self:
            self.add(TDRC(encoding=0, text=timestamps))
        # TORY can be the first part of a TDOR.
        if "TORY" in self:
            f = self.pop("TORY")
            if "TDOR" not in self:
                try:
                    self.add(TDOR(encoding=0, text=str(f)))
                except UnicodeDecodeError:
                    pass
        # IPLS is now TIPL.
        if "IPLS" in self:
            f = self.pop("IPLS")
            if "TIPL" not in self:
                self.add(TIPL(encoding=f.encoding, people=f.people))
        # These can't be trivially translated to any ID3v2.4 tags, or
        # should have been removed already.
        for key in ["RVAD", "EQUA", "TRDA", "TSIZ", "TDAT", "TIME"]:
            if key in self:
                del self[key]
        # Recurse into chapters
        for f in self.getall("CHAP"):
            f.sub_frames.update_to_v24()
        for f in self.getall("CTOC"):
            f.sub_frames.update_to_v24()
    def update_to_v23(self):
        """Convert older (and newer) tags into an ID3v2.3 tag.
        This updates incompatible ID3v2 frames to ID3v2.3 ones. If you
        intend to save tags as ID3v2.3, you must call this function
        at some point.
        If you want to to go off spec and include some v2.4 frames
        in v2.3, remove them before calling this and add them back afterwards.
        """
        self.__update_common()
        # TMCL, TIPL -> TIPL
        if "TIPL" in self or "TMCL" in self:
            people = []
            if "TIPL" in self:
                f = self.pop("TIPL")
                people.extend(f.people)
            if "TMCL" in self:
                f = self.pop("TMCL")
                people.extend(f.people)
            if "IPLS" not in self:
                self.add(IPLS(encoding=f.encoding, people=people))
        # TDOR -> TORY
        if "TDOR" in self:
            f = self.pop("TDOR")
            if f.text:
                d = f.text[0]
                if d.year and "TORY" not in self:
                    self.add(TORY(encoding=f.encoding, text="%04d" % d.year))
        # TDRC -> TYER, TDAT, TIME
        if "TDRC" in self:
            f = self.pop("TDRC")
            if f.text:
                d = f.text[0]
                if d.year and "TYER" not in self:
                    self.add(TYER(encoding=f.encoding, text="%04d" % d.year))
                if d.month and d.day and "TDAT" not in self:
                    self.add(
                        TDAT(encoding=f.encoding, text="%02d%02d" % (d.day, d.month))
                    )
                if d.hour and d.minute and "TIME" not in self:
                    self.add(
                        TIME(encoding=f.encoding, text="%02d%02d" % (d.hour, d.minute))
                    )
        # New frames added in v2.4
        v24_frames = [
            "ASPI",
            "EQU2",
            "RVA2",
            "SEEK",
            "SIGN",
            "TDEN",
            "TDOR",
            "TDRC",
            "TDRL",
            "TDTG",
            "TIPL",
            "TMCL",
            "TMOO",
            "TPRO",
            "TSOA",
            "TSOP",
            "TSOT",
            "TSST",
        ]
        for key in v24_frames:
            if key in self:
                del self[key]
        # Recurse into chapters
        for f in self.getall("CHAP"):
            f.sub_frames.update_to_v23()
        for f in self.getall("CTOC"):
            f.sub_frames.update_to_v23()
    def _copy(self):
        """Creates a shallow copy of all tags"""
        items = self.items()
        subs = {}
        # Chapter frames nest their own ID3Tags; copy those recursively.
        for f in self.getall("CHAP") + self.getall("CTOC"):
            subs[f.HashKey] = f.sub_frames._copy()
        return (items, subs)
    def _restore(self, value):
        """Restores the state copied with _copy()"""
        items, subs = value
        self.clear()
        for key, value in items:
            self[key] = value
            if key in subs:
                value.sub_frames._restore(subs[key])
def save_frame(frame, name=None, config=None):
    """Serialize a single frame (header + payload) for the configured version.

    Empty text frames serialize to b"" and are thereby dropped entirely.
    """
    if config is None:
        config = ID3SaveConfig()
    flags = 0
    if isinstance(frame, TextFrame):
        if len(str(frame)) == 0:
            return b""
    framedata = frame._writeData(config)
    usize = len(framedata)
    if usize > 2048:
        # Disabled as this causes iTunes and other programs
        # to fail to find these frames, which usually includes
        # e.g. APIC.
        # framedata = BitPaddedInt.to_str(usize) + framedata.encode('zlib')
        # flags |= Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN
        pass
    if config.v2_version == 4:
        # v2.4 sizes are synchsafe (7 bits per byte); v2.3 uses plain ints.
        bits = 7
    elif config.v2_version == 3:
        bits = 8
    else:
        raise ValueError
    datasize = BitPaddedInt.to_str(len(framedata), width=4, bits=bits)
    if name is not None:
        assert isinstance(name, bytes)
        frame_name = name
    else:
        # The frame ID is the Frame subclass's name.
        frame_name = type(frame).__name__
        if PY3:
            frame_name = frame_name.encode("ascii")
    header = struct.pack(">4s4sH", frame_name, datasize, flags)
    return header + framedata
def read_frames(id3, data, frames):
    """Parse raw tag data into frame objects.

    Returns (frames, unsupported_frame_bytes, leftover_data). Parsing
    problems are swallowed rather than raised ("does not error out").
    """
    assert id3.version >= ID3Header._V22
    result = []
    unsupported_frames = []
    # Before v2.4 unsynchronisation applies to the whole tag at once.
    if id3.version < ID3Header._V24 and id3.f_unsynch:
        try:
            data = unsynch.decode(data)
        except ValueError:
            pass
    if id3.version >= ID3Header._V23:
        if id3.version < ID3Header._V24:
            bpi = int
        else:
            # v2.4 frame sizes should be synchsafe, but iTunes wrote plain
            # ints; determine_bpi() guesses which interpretation fits.
            bpi = determine_bpi(data, frames)
        while data:
            header = data[:10]
            try:
                name, size, flags = struct.unpack(">4sLH", header)
            except struct.error:
                break  # not enough header
            if name.strip(b"\x00") == b"":
                # Reached the zero padding at the end of the tag.
                break
            size = bpi(size)
            framedata = data[10 : 10 + size]
            data = data[10 + size :]
            if size == 0:
                continue  # drop empty frames
            if PY3:
                try:
                    name = name.decode("ascii")
                except UnicodeDecodeError:
                    continue
            try:
                # someone writes 2.3 frames with 2.2 names
                if name[-1] == "\x00":
                    tag = Frames_2_2[name[:-1]]
                    name = tag.__base__.__name__
                tag = frames[name]
            except KeyError:
                if is_valid_frame_id(name):
                    unsupported_frames.append(header + framedata)
            else:
                # try/else: only parse when the frame class was found.
                try:
                    result.append(tag._fromData(id3, flags, framedata))
                except NotImplementedError:
                    unsupported_frames.append(header + framedata)
                except ID3JunkFrameError:
                    pass
    elif id3.version >= ID3Header._V22:
        # v2.2: 3-byte frame IDs and 3-byte big-endian sizes.
        while data:
            header = data[0:6]
            try:
                name, size = struct.unpack(">3s3s", header)
            except struct.error:
                break  # not enough header
            size, = struct.unpack(">L", b"\x00" + size)
            if name.strip(b"\x00") == b"":
                break
            framedata = data[6 : 6 + size]
            data = data[6 + size :]
            if size == 0:
                continue  # drop empty frames
            if PY3:
                try:
                    name = name.decode("ascii")
                except UnicodeDecodeError:
                    continue
            try:
                tag = frames[name]
            except KeyError:
                if is_valid_frame_id(name):
                    unsupported_frames.append(header + framedata)
            else:
                try:
                    result.append(tag._fromData(id3, 0, framedata))
                except (ID3EncryptionUnsupportedError, NotImplementedError):
                    unsupported_frames.append(header + framedata)
                except ID3JunkFrameError:
                    pass
    return result, unsupported_frames, data
| hzlf/openbroadcast.org | website/tools/mutagen/id3/_tags.py | Python | gpl-3.0 | 21,234 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygtk
pygtk.require('2.0')
import gtk
import json
from collections import namedtuple
from array import *
import pango
import random
from gettext import gettext as _
import copy
''' Scales '''
# Pixel sizes (width, height) used when scaling exercise media.
IMAGES_SCALE = [100, 100]
LETTERS_SCALE = [100, 100]
'''Color Selection association
Reference of colours codes :http://www.rapidtables.com/web/color/RGB_Color.htm
'''
# Pool of colours used to mark matched option/correspondence pairs; the
# "available" flag tracks whether a colour is still free to assign.
COLOURS_ASSOCIATION = []
COLOURS_ASSOCIATION.append({"colour":"#0074DF", "available":True})
COLOURS_ASSOCIATION.append({"colour":"#FF1F68", "available":True})
COLOURS_ASSOCIATION.append({"colour":"#D9E021", "available":True})
COLOURS_ASSOCIATION.append({"colour":"#6FC72B", "available":True})
COLOURS_ASSOCIATION.append({"colour":"#F1C001", "available":True})
EVENTBOX_SCALE = [100,100]
'''Curren item selection association'''
# Highlight colour (yellow) for the currently selected item.
SELECTED_COLOUR = gtk.gdk.Color("#FFFF00")
FONT_DESCRIPTION_BIG = 'DejaVu Bold 30'
FONT_DESCRIPTION_MEDIUM = 'DejaVu Bold 15'
class SimpleAssociation():
    """Simple association exercise widget (PyGTK).

    Shows two columns of clickable items: *options* on the left and
    *correspondences* on the right.  The user pairs one item from each
    column; each confirmed pair is painted with a colour taken from
    COLOURS_ASSOCIATION.

    Selection state lives in two parallel lists (one dict per row) with keys:
      'selected' -- index of the row paired on the other side (-1 if none)
      'pair'     -- index of the correct partner row
      'colour'   -- colour dict used to paint the association (or None)
    """

    def saveExerciseState(self):
        """Return a JSON-serializable dict capturing the whole exercise
        state, suitable for restoring later via getWindow(stateJson=...)."""
        self.mainWindows.getLogger().debug("Inside to saveExerciseState")
        stateJson = {}
        stateJson['optionsSelectionState'] = self.optionsSelectionState
        stateJson['correspondencesSelectionState'] = self.correspondencesSelectionState
        stateJson['currentOptionSelected'] = self.currentOptionSelected
        stateJson['lastOptionSelected'] = self.lastOptionSelected
        stateJson['currentCorrespondenceSelected'] = self.currentCorrespondenceSelected
        stateJson['lastCorrespondenceSelected'] = self.lastCorrespondenceSelected
        stateJson['optionsList'] = self.optionsList
        stateJson['correspondencesList'] = self.correspondencesList
        stateJson['COLOURS_ASSOCIATION'] = self.COLOURS_ASSOCIATION
        stateJson['exerciseCompleted'] = self.exerciseCompleted
        return stateJson

    def disconnectEventBoxs(self):
        """Disconnect every button-press handler registered in getWindow."""
        for index, eventBox in enumerate(self.allEventBoxs):
            eventBox.disconnect(self.idHandlers[index])

    def getWindow(self, exercise, mainWindows, stateJson):
        """Build and return the gtk.ScrolledWindow holding the exercise.

        exercise    -- object exposing .name and .items (option/correspondence pairs)
        mainWindows -- host application; provides getLogger() and
                       exerciseCompletedCallBack()
        stateJson   -- dict previously produced by saveExerciseState(),
                       or None to start a fresh exercise
        """
        self.mainWindows = mainWindows
        windowSimpleAssociation = gtk.ScrolledWindow()
        windowSimpleAssociation.exerciseInstance = self
        # NOTE(review): this label is created but never packed into the window.
        label = gtk.Label(exercise.name)
        label.modify_font(pango.FontDescription("Sans 10"))
        vBoxWindows = gtk.VBox(False, 5)
        hBoxExercises = gtk.HBox(False, 5)
        self.vBoxOptions = gtk.VBox(False, 5)
        self.vBoxOptions.set_border_width(10)
        self.vBoxCorrespondences = gtk.VBox(False, 5)
        self.vBoxCorrespondences.set_border_width(10)
        frameExercises = gtk.Frame()
        frameExercises.add(hBoxExercises)
        self.idHandlers = []
        self.allEventBoxs = []
        self.exerciseCompleted = False
        if stateJson is None:
            # Fresh exercise: empty selection state, shuffled correspondences.
            self.optionsSelectionState = []
            self.correspondencesSelectionState = []
            self.currentOptionSelected = -1
            self.lastOptionSelected = -1
            self.currentCorrespondenceSelected = -1
            self.lastCorrespondenceSelected = -1
            self.optionsList, self.correspondencesList = self.disorderCorrespondences(exercise.items)
            self.COLOURS_ASSOCIATION = COLOURS_ASSOCIATION
        else:
            # Resume from a previously saved state.
            self.optionsSelectionState = stateJson['optionsSelectionState']
            self.correspondencesSelectionState = stateJson['correspondencesSelectionState']
            self.currentOptionSelected = stateJson['currentOptionSelected']
            self.lastOptionSelected = stateJson['lastOptionSelected']
            self.currentCorrespondenceSelected = stateJson['currentCorrespondenceSelected']
            self.lastCorrespondenceSelected = stateJson['lastCorrespondenceSelected']
            self.optionsList = stateJson['optionsList']
            self.correspondencesList = stateJson['correspondencesList']
            self.COLOURS_ASSOCIATION = stateJson['COLOURS_ASSOCIATION']
            self.exerciseCompleted = stateJson['exerciseCompleted']
        self.mainWindows.getLogger().debug( self.COLOURS_ASSOCIATION )
        firstOptionEventBox = None
        frameVBoxOptions = gtk.Frame()
        frameVBoxOptions.set_border_width(10)
        #dark orange
        frameVBoxOptions.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color("#FF8C00"))
        frameVBoxCorrespondences = gtk.Frame()
        frameVBoxCorrespondences.set_border_width(10)
        #dark slate blue
        frameVBoxCorrespondences.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color('#483D8B'))
        for index, option in enumerate(self.optionsList):
            '''Options'''
            self.mainWindows.getLogger().debug(option)
            eventBoxOption = self.createEventBox(option['option']['value'], option['option']['type'])
            if not self.exerciseCompleted:
                # Completed exercises are rendered read-only (no handlers).
                idHandler = eventBoxOption.connect("button-press-event", self.imageSelectedCallBack, self.vBoxCorrespondences)
                self.allEventBoxs.append(eventBoxOption)
                self.idHandlers.append(idHandler)
            self.addEventBoxToVBox(eventBoxOption, self.vBoxOptions)
            if index == 0:
                firstOptionEventBox = eventBoxOption
            if stateJson is None:
                self.optionsSelectionState.append ( {"selected": -1, "pair": option['indexPair'], "colour": None} )
            '''Correspondences'''
            eventBoxCorrespondence = ( self.createEventBox(self.correspondencesList[index]['correspondence']['value'],
                self.correspondencesList[index]['correspondence']['type']) )
            if not self.exerciseCompleted:
                idHandler = eventBoxCorrespondence.connect("button_press_event", self.pairSelectedCallBack, self.vBoxOptions)
                self.allEventBoxs.append(eventBoxCorrespondence)
                self.idHandlers.append(idHandler)
            self.addEventBoxToVBox(eventBoxCorrespondence, self.vBoxCorrespondences)
            if stateJson is None:
                ( self.correspondencesSelectionState.append( {"selected": -1,
                    "pair":self.correspondencesList[index]['indexPair'], "colour": None} ) )
        frameVBoxOptions.add(self.vBoxOptions)
        frameVBoxCorrespondences.add(self.vBoxCorrespondences)
        hBoxExercises.pack_start(frameVBoxOptions, True,True,5)
        hBoxExercises.pack_start(frameVBoxCorrespondences, True,True,50)
        vBoxWindows.pack_start(frameExercises, True,True,0)
        windowSimpleAssociation.add_with_viewport(vBoxWindows)
        if stateJson is not None:
            self.repaintResumeItems()
        else:
            self.setAllAvailableSelectionColour()
            self.selectFirtImage(firstOptionEventBox)
        return windowSimpleAssociation

    def repaintResumeItems(self):
        """Repaint the association colours after restoring a saved state and
        re-apply the selection highlight to the current option."""
        for index, value in enumerate(self.optionsSelectionState):
            eventBoxOption = self.vBoxOptions.get_children()[index].get_children()[0]
            eventBoxCorrespondence = self.vBoxCorrespondences.get_children()[index].get_children()[0]
            if value['colour'] is not None:
                self.mainWindows.getLogger().debug(value)
                self.changeBackgroundColour(eventBoxOption,str(value['colour']['colour']))
            valueCorresondence = self.correspondencesSelectionState[index]
            self.mainWindows.getLogger().debug(valueCorresondence)
            if valueCorresondence['colour'] is not None:
                self.changeBackgroundColour(eventBoxCorrespondence, str(valueCorresondence['colour']['colour']))
        firstFrameOption = self.vBoxOptions.get_children()[self.currentOptionSelected]
        self.fakeSelection(firstFrameOption)

    def addEventBoxToVBox(self, eventBox, vBox):
        """Wrap eventBox in a white 'frame' event box and pack it into vBox.

        The outer event box acts as a border whose colour is changed by
        fakeSelection/fakeUnselection.
        """
        frameEventBox = gtk.EventBox()
        frameEventBox.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color("white"))
        eventBox.set_border_width(5)
        frameEventBox.add(eventBox)
        vBox.pack_start(frameEventBox, False,False,0)

    def createEventBox(self, payload, typePayload):
        """Create the clickable event box for one item.

        payload     -- image file path (typePayload == "image") or the text
                       to display (typePayload == "letter")
        typePayload -- "image" or "letter"
        """
        eventBox = gtk.EventBox()
        eventBox.set_size_request(EVENTBOX_SCALE[0], EVENTBOX_SCALE[1])
        if typePayload == "image":
            imageContainer = gtk.Image()
            pixbuf = gtk.gdk.pixbuf_new_from_file(payload).scale_simple(IMAGES_SCALE[0], IMAGES_SCALE[1], 2)
            imageContainer.set_from_pixbuf(pixbuf)
            eventBox.add(imageContainer)
            eventBox.modify_bg(gtk.STATE_NORMAL, eventBox.get_colormap().alloc_color('white'))
        if typePayload == "letter":
            letterLabel = gtk.Label(payload)
            # Short labels get the big font, longer ones the medium font.
            if len(payload) <= 8:
                letterLabel.modify_font(pango.FontDescription(FONT_DESCRIPTION_BIG))
            else:
                letterLabel.modify_font(pango.FontDescription(FONT_DESCRIPTION_MEDIUM))
            eventBox.add(letterLabel)
            eventBox.modify_bg(gtk.STATE_NORMAL, eventBox.get_colormap().alloc_color('white'))
        return eventBox

    def selectFirtImage(self, firstEvenBox):
        """Pre-select the first option so the user can start by clicking a
        correspondence right away.  (Name keeps the historical typo.)"""
        availableColour = self.getAvailableSelectionColour()
        self.changeBackgroundColour(firstEvenBox, availableColour['colour'])
        self.setSelectionStateColour(self.optionsSelectionState, 0, availableColour)
        self.currentOptionSelected = 0
        frameImageSelected = firstEvenBox.get_parent()
        self.fakeSelection(frameImageSelected)

    def disorderCorrespondences(self, items):
        """Build the options list (original order) and a shuffled
        correspondences list; reshuffle until the order actually changes
        (when more than one item exists).

        Returns (optionsList, correspondencesList) where each entry records
        its partner's index under 'indexPair'.
        NOTE: relies on Python 2 range() returning a list (random.shuffle
        needs a mutable sequence).
        """
        self.mainWindows.getLogger().debug("Inside to disorderCorrespondences")
        optionsList = [None]*len(items)
        correspondencesList = [None]*len(items)
        indexsList = range(len(items))
        originalList = copy.copy(indexsList)
        self.mainWindows.getLogger().debug(originalList)
        self.mainWindows.getLogger().debug(indexsList)
        random.shuffle(indexsList)
        while( originalList == indexsList and len(items) > 1):
            self.mainWindows.getLogger().debug("Inside to while...")
            random.shuffle(indexsList)
            self.mainWindows.getLogger().debug(originalList)
            self.mainWindows.getLogger().debug(indexsList)
        for index, item in enumerate(items):
            optionsList[index] = {"option":{"type":item.option.type, "value":item.option.value}, \
                "indexPair": indexsList[index]}
            correspondencesList[indexsList[index]] = ( {"correspondence":{"type":item.correspondence.type,
                "value":item.correspondence.value}, "indexPair": index} )
        return (optionsList, correspondencesList)

    def checkCompletedExercise(self):
        """If every row is associated with its correct partner, mark the
        exercise complete and notify the host application."""
        result = True
        for index,imageSelectionState in enumerate( self.optionsSelectionState ):
            if (imageSelectionState['selected'] != imageSelectionState['pair']) or \
                (self.correspondencesSelectionState[index]['selected'] != self.correspondencesSelectionState[index]['pair']) :
                result = False
                break
        if result:
            self.exerciseCompleted = True
            self.mainWindows.exerciseCompletedCallBack()

    def setAllAvailableSelectionColour(self):
        """Reset the colour pool: mark every association colour available."""
        for colour in self.COLOURS_ASSOCIATION:
            colour['available'] = True

    def getAvailableSelectionColour(self):
        """Return the first free colour dict, or None if all are taken."""
        response = None
        for colour in self.COLOURS_ASSOCIATION:
            if colour['available']:
                response = colour
                break
        return response

    def setAvailableColour(self, colour):
        """Return *colour* to the pool (mark it available again)."""
        for currentColour in self.COLOURS_ASSOCIATION:
            if currentColour['colour'] == colour['colour']:
                currentColour['available'] = True
                break

    def setUnavailableColour(self, colour):
        """Remove *colour* from the pool (mark it in use)."""
        for currentColour in self.COLOURS_ASSOCIATION:
            if currentColour['colour'] == colour['colour']:
                currentColour['available'] = False
                break

    def imageSelectedCallBack(self, imageEventBox, *args):
        """button-press handler for an option (left column) item.

        args is (gtk event, vBoxCorrespondences) as passed by connect().
        """
        frameImageSelected = imageEventBox.get_parent()
        vBoxImages = imageEventBox.get_parent().get_parent()
        allImagesFrames = vBoxImages.get_children()
        indexImageSelected = vBoxImages.child_get_property(frameImageSelected, "position")
        self.lastOptionSelected = self.currentOptionSelected
        self.currentOptionSelected = indexImageSelected
        vBoxPairs = args[1]
        '''Deselect the previously selected pair'''
        if self.currentCorrespondenceSelected != -1:
            framePairSelected = vBoxPairs.get_children()[self.currentCorrespondenceSelected]
            self.fakeUnselection(framePairSelected)
        # Check whether the last selected image was left without an association
        if self.lastOptionSelected != -1 and self.optionsSelectionState[self.lastOptionSelected]['selected'] == -1:
            # Nothing was associated; reset its background colour to white
            lastImageEvenBoxSelected = allImagesFrames[self.lastOptionSelected].get_children()[0]
            self.changeBackgroundColour(lastImageEvenBoxSelected, "white")
            self.setSelectionStateColour(self.optionsSelectionState, self.lastOptionSelected, None)
        # Check whether an association already exists
        if self.optionsSelectionState[indexImageSelected]['selected'] == -1:
            # No association yet: paint with the next free colour
            colorAvailable = self.getAvailableSelectionColour()
            self.changeBackgroundColour(imageEventBox, colorAvailable['colour'])
            self.setSelectionStateColour(self.optionsSelectionState, indexImageSelected, colorAvailable)
        # Change the border (frame) colours to signal the selection
        lastFrameImageSelected = allImagesFrames[self.lastOptionSelected]
        self.fakeUnselection(lastFrameImageSelected)
        self.fakeSelection(frameImageSelected)
        # Check whether the exercise has been completed
        self.checkCompletedExercise()

    def pairSelectedCallBack(self, pairEventBox, *args):
        """button-press handler for a correspondence (right column) item.

        Creates/updates the association with the currently selected option,
        releasing any previous association on either side.  args is
        (gtk event, vBoxOptions) as passed by connect().
        """
        vBoxImages = args[1]
        allFramesImages = vBoxImages.get_children()
        framePairSelected = pairEventBox.get_parent()
        vBoxPairs = framePairSelected.get_parent()
        allPairFrames = vBoxPairs.get_children()
        indexPairSelected = vBoxPairs.child_get_property(framePairSelected, "position")
        self.lastCorrespondenceSelected = self.currentCorrespondenceSelected
        self.currentCorrespondenceSelected = indexPairSelected
        lastPairSelectionState = None
        self.mainWindows.getLogger().debug( self.correspondencesSelectionState )
        if self.lastCorrespondenceSelected != -1:
            lastPairSelectionState = self.correspondencesSelectionState[self.lastCorrespondenceSelected]
        pairIndexCurrentImageSelected = -1
        imageEventBoxCurremtSelected = None
        if self.currentOptionSelected != -1:
            pairIndexCurrentImageSelected = self.optionsSelectionState[self.currentOptionSelected]['selected']
            imageEventBoxCurremtSelected = self.optionsSelectionState[self.currentOptionSelected]['colour']
        pairEventBoxCurrentImageSelected = None
        if self.currentOptionSelected != -1 and pairIndexCurrentImageSelected != -1:
            pairEventBoxCurrentImageSelected = allPairFrames[self.optionsSelectionState[self.currentOptionSelected]['selected']].get_children()[0]
        # Make sure the last selected pair was left without an association
        if self.lastCorrespondenceSelected != -1 and lastPairSelectionState['selected'] == -1:
            lastPairEventBoxSelected = allPairFrames[self.lastCorrespondenceSelected].get_children()[0]
            self.changeBackgroundColour(lastPairEventBoxSelected, "white")
            self.mainWindows.getLogger().debug(lastPairSelectionState)
            self.setAvailableColour(lastPairSelectionState['colour'])
            self.setSelectionStateColour(self.correspondencesSelectionState, self.lastCorrespondenceSelected, None)
        # Check whether any image (option) is currently selected
        if self.currentOptionSelected != -1:
            # use the colour of the selected image as this pair's background
            colourImageSelected = self.optionsSelectionState[self.currentOptionSelected]['colour']
            self.changeBackgroundColour(pairEventBox, colourImageSelected['colour'])
            self.setSelectionStateColour(self.correspondencesSelectionState, indexPairSelected, colourImageSelected )
            # The selected image previously had a different pair associated
            if pairIndexCurrentImageSelected != -1 and pairIndexCurrentImageSelected != self.currentCorrespondenceSelected:
                # unlink the previous pair
                currentPairEventBoxSelected = allPairFrames[pairIndexCurrentImageSelected].get_children()[0]
                self.changeBackgroundColour(currentPairEventBoxSelected, "white")
                self.setAvailableColour(self.correspondencesSelectionState[pairIndexCurrentImageSelected]['colour'])
                self.setSelectionStateColour(self.correspondencesSelectionState, pairIndexCurrentImageSelected, None )
                self.correspondencesSelectionState[pairIndexCurrentImageSelected]['selected'] = -1
            # The selected pair was already associated with another image,
            # which is not the currently selected one
            if ( self.correspondencesSelectionState[indexPairSelected]['selected'] != -1
                and self.correspondencesSelectionState[indexPairSelected]['selected'] != self.currentOptionSelected):
                # unlink the image previously associated with this pair
                imagePairSelectedEventBox= allFramesImages[self.correspondencesSelectionState[indexPairSelected]['selected']].get_children()[0]
                self.changeBackgroundColour(imagePairSelectedEventBox,"white")
                self.setAvailableColour(self.optionsSelectionState[self.correspondencesSelectionState[indexPairSelected]['selected']]['colour'])
                self.setSelectionStateColour(self.optionsSelectionState, self.correspondencesSelectionState[indexPairSelected]['selected'], None )
                self.optionsSelectionState[self.correspondencesSelectionState[indexPairSelected]['selected']]['selected'] = -1
            # record the current association on both sides
            self.correspondencesSelectionState[indexPairSelected]['selected'] = self.currentOptionSelected
            self.optionsSelectionState[self.currentOptionSelected]['selected'] = indexPairSelected
            self.setUnavailableColour(colourImageSelected)
        # change the border (frame) colours to signal the selection
        self.fakeSelection(framePairSelected)
        lastFramePairSelected = allPairFrames[self.lastCorrespondenceSelected]
        self.mainWindows.getLogger().debug(self.lastCorrespondenceSelected)
        self.mainWindows.getLogger().debug(self.currentCorrespondenceSelected)
        if (self.lastCorrespondenceSelected != self.currentCorrespondenceSelected) and self.lastCorrespondenceSelected != -1:
            self.fakeUnselection(lastFramePairSelected)
        # Check whether the exercise has been completed
        self.checkCompletedExercise()

    def fakeSelection(self, frame):
        """Highlight *frame* (the wrapper event box) with SELECTED_COLOUR."""
        self.mainWindows.getLogger().debug(frame)
        frame.modify_bg(gtk.STATE_NORMAL, SELECTED_COLOUR)
        #self.mainWindows.getLogger().debug("get_style() : ")
        #self.mainWindows.getLogger().debug(frame.get_style() )

    def fakeUnselection(self, frame):
        """Remove the selection highlight (restore white border)."""
        frame.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color("white"))

    def changeBackgroundColour(self, eventBox, colour):
        """Paint eventBox's background with the colour spec string."""
        eventBox.modify_bg(gtk.STATE_NORMAL, gtk.gdk.Color(colour))

    def setSelectionStateColour(self,selectionState, index, colour):
        """Store *colour* (a colour dict or None) for row *index*."""
        selectionState[index]['colour'] = colour
| ggimenez/HomeworkDesigner.activity | template.activity/simpleassociation.py | Python | gpl-2.0 | 17,950 |
from __future__ import absolute_import
from plotly import optional_imports
# Require that numpy exists for figure_factory
np = optional_imports.get_module("numpy")
if np is None:
    raise ImportError(
        """\
The figure factory module requires the numpy package"""
    )

# Re-export every factory so users can write plotly.figure_factory.create_*.
from plotly.figure_factory._2d_density import create_2d_density
from plotly.figure_factory._annotated_heatmap import create_annotated_heatmap
from plotly.figure_factory._bullet import create_bullet
from plotly.figure_factory._candlestick import create_candlestick
from plotly.figure_factory._dendrogram import create_dendrogram
from plotly.figure_factory._distplot import create_distplot
from plotly.figure_factory._facet_grid import create_facet_grid
from plotly.figure_factory._gantt import create_gantt
from plotly.figure_factory._ohlc import create_ohlc
from plotly.figure_factory._quiver import create_quiver
from plotly.figure_factory._scatterplot import create_scatterplotmatrix
from plotly.figure_factory._streamline import create_streamline
from plotly.figure_factory._table import create_table
from plotly.figure_factory._trisurf import create_trisurf
from plotly.figure_factory._violin import create_violin

# Factories with optional dependencies: when the dependency is missing,
# install a stub with the same name that fails loudly only when called.
if optional_imports.get_module("pandas") is not None:
    from plotly.figure_factory._county_choropleth import create_choropleth
    from plotly.figure_factory._hexbin_mapbox import create_hexbin_mapbox
else:

    def create_choropleth(*args, **kwargs):
        raise ImportError("Please install pandas to use `create_choropleth`")

    def create_hexbin_mapbox(*args, **kwargs):
        raise ImportError("Please install pandas to use `create_hexbin_mapbox`")


if optional_imports.get_module("skimage") is not None:
    from plotly.figure_factory._ternary_contour import create_ternary_contour
else:

    def create_ternary_contour(*args, **kwargs):
        raise ImportError("Please install scikit-image to use `create_ternary_contour`")


# Public API of this package.
__all__ = [
    "create_2d_density",
    "create_annotated_heatmap",
    "create_bullet",
    "create_candlestick",
    "create_choropleth",
    "create_dendrogram",
    "create_distplot",
    "create_facet_grid",
    "create_gantt",
    "create_hexbin_mapbox",
    "create_ohlc",
    "create_quiver",
    "create_scatterplotmatrix",
    "create_streamline",
    "create_table",
    "create_ternary_contour",
    "create_trisurf",
    "create_violin",
]
| plotly/plotly.py | packages/python/plotly/plotly/figure_factory/__init__.py | Python | mit | 2,397 |
# Copyright 2019 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
{
    "name": "Switzerland Account Tags",
    "category": "Localisation",
    "summary": "",
    # Odoo series 14.0, module revision 1.0.0.
    "version": "14.0.1.0.0",
    "author": "Camptocamp SA, Odoo Community Association (OCA)",
    "website": "https://github.com/OCA/l10n-switzerland",
    "license": "AGPL-3",
    # Builds on the official Swiss localization module.
    "depends": ["l10n_ch"],
    # CSV data files: new account tags and templates, plus updates
    # applied to existing account templates.
    "data": [
        "data/new/account.account.tag.csv",
        "data/new/account.account.template.csv",
        "data/update/account.account.template.csv",
    ],
}
| OCA/l10n-switzerland | l10n_ch_account_tags/__manifest__.py | Python | agpl-3.0 | 561 |
"""
WSGI config for pycontw2016 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/dev/howto/deployment/wsgi/
"""
import os
from django.conf import settings
from django.core.wsgi import get_wsgi_application
# Default to production settings unless the environment already names a module.
os.environ.setdefault(
    'DJANGO_SETTINGS_MODULE',
    'pycontw2016.settings.production.pycontw2016',
)

application = get_wsgi_application()

if settings.DEBUG:
    # Wrap werkzeug debugger.
    if settings.WERKZEUG_DEBUG:
        try:
            import django.views.debug
            import six
            from werkzeug.debug import DebuggedApplication
        except ImportError:
            # werkzeug/six not installed; serve without the debugger.
            pass
        else:
            def null_response(request, exc_type, exc_value, tb):
                # Re-raise so the exception reaches werkzeug's interactive
                # debugger instead of Django's technical 500 page.
                six.reraise(exc_type, exc_value, tb)
            django.views.debug.technical_500_response = null_response
            application = DebuggedApplication(application, evalex=True)
else:
    # Wrap Sentry.
    try:
        from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
    except ImportError:
        # raven not installed; skip Sentry error reporting.
        pass
    else:
        application = Sentry(application)
| pycontw/pycontw2016 | src/pycontw2016/wsgi.py | Python | mit | 1,200 |
from __future__ import absolute_import
from .arrow import (ArrowArchivedMixin, ArrowCreatedMixin,
ArrowCreatedModifiedMixin)
from .base import ArchivedMixin, CreatedMixin, CreatedModifiedMixin
from .pendulum import (PendulumArchivedMixin, PendulumCreatedMixin,
PendulumCreatedModifiedMixin)
| croscon/fleaker | fleaker/peewee/mixins/time/__init__.py | Python | bsd-3-clause | 335 |
from mumax import *
from math import *
# material
msat(800e3)     # saturation magnetization
aexch(1.3e-11)  # exchange constant
alpha(0.02)     # damping parameter

# geometry
nx = 512
ny = 512
gridsize(nx, ny, 1)
partsize(1500e-9, 1500e-9, 3e-9)

# initial magnetization
uniform(1, 1, 0)
alpha(2)     # raise damping so the state relaxes quickly
run(5e-9)    # relax
alpha(0.01)  # restore low damping for the dynamic run
save("m", "text")

# run
autosave("m", "omf", 10e-12)
autosave('table', 'ascii', 5e-12)
fieldmask("mask2.omf")  # spatial mask applied to the excitation field
f = 5e9                 # excitation frequency
omega = 2*pi*f

def myfield(t):
    """Rotating in-plane excitation field at time t (seconds), with the
    amplitude ramped linearly from 0 to 1 over the first 2 ns."""
    a = t/2e-9
    if a>1:
        a=1
    return a*sin(omega*t), a*cos(omega*t), 0

# Sample myfield every 10 ps for a 10 ns window, then run the simulation.
applyfunction('field', myfield, 10e-9, 10e-12)
run(10e-9)
# End: Data Text
# End: Segment
| mumax/1 | test/fieldmask/spinwaves.py | Python | gpl-3.0 | 582 |
import sys
import urlparse
import requests
from itertools import chain
from motherbrain.helpers.tlds import tlds
# IANA's authoritative list of top-level domains (one per line, uppercase).
TLD_URL = 'http://data.iana.org/TLD/tlds-alpha-by-domain.txt'
def domain_by_netloc(netloc):
    """Return the registrable domain for *netloc*: the right-most label
    that is not a known TLD, joined with all TLD labels (in their original
    order).  Returns None when every label is a recognized TLD."""
    labels = netloc.split('.')
    tld_labels = [label for label in labels if label.upper() in tlds]
    non_tld_labels = [label for label in labels if label.upper() not in tlds]
    if not non_tld_labels:
        return None
    # The right-most non-TLD label is the second-level domain.
    return '.'.join([non_tld_labels[-1]] + tld_labels)
def domain_by_url(url):
    """Extract the registrable domain from *url*.

    Returns 'n/a' for an empty/None url, and None when the parse result
    unexpectedly lacks a netloc attribute.
    """
    if not url:
        return 'n/a'
    parsed = urlparse.urlparse(url)
    if not hasattr(parsed, 'netloc'):
        return None
    return domain_by_netloc(parsed.netloc)
# CLI usage (Python 2): python fetch.py <url> -- prints the extracted domain.
if __name__ == '__main__':
    url = sys.argv[1]
    print domain_by_url(url.lstrip().rstrip())
| urlist/urlist | motherbrain/helpers/fetch.py | Python | gpl-3.0 | 834 |
import unittest
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'datastructure'))
import binary_heap
class BinaryHeapFindMinTest(unittest.TestCase):
    """findMinimum() must return the node with the smallest key."""

    def test_return_minimum_item_from_heap(self):
        heap = binary_heap.BinaryHeap()
        # Insert out of order so the minimum is not the first or last key.
        for key in (5, 3, 1, 4, 2):
            heap.insert(key)
        self.assertEqual(heap.findMinimum().key, 1)
| gwtw/py-data-structures | test/binary_heap_find_min_test.py | Python | mit | 418 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
import collections
import inspect
import types
from .base import (
MachError,
MethodHandler
)
from .config import ConfigProvider
from .registrar import Registrar
def CommandProvider(cls):
    """Class decorator to denote that it provides subcommands for Mach.

    When this decorator is present, mach looks for commands being defined by
    methods inside the class.

    Raises MachError when the class __init__ takes more than one extra
    argument or when a command declares malformed conditions.
    """
    # The implementation of this decorator relies on the parse-time behavior of
    # decorators. When the module is imported, the method decorators (like
    # @Command and @CommandArgument) are called *before* this class decorator.
    # The side-effect of the method decorators is to store specifically-named
    # attributes on the function types. We just scan over all functions in the
    # class looking for the side-effects of the method decorators.

    # Tell mach driver whether to pass context argument to __init__.
    # NOTE(review): inspect.ismethod on an unbound class function is
    # Python 2 behavior; under Python 3 this branch would not trigger.
    pass_context = False
    if inspect.ismethod(cls.__init__):
        spec = inspect.getargspec(cls.__init__)
        if len(spec.args) > 2:
            msg = 'Mach @CommandProvider class %s implemented incorrectly. ' + \
                '__init__() must take 1 or 2 arguments. From %s'
            msg = msg % (cls.__name__, inspect.getsourcefile(cls))
            raise MachError(msg)
        if len(spec.args) == 2:
            pass_context = True

    # We scan __dict__ because we only care about the classes own attributes,
    # not inherited ones. If we did inherited attributes, we could potentially
    # define commands multiple times. We also sort keys so commands defined in
    # the same class are grouped in a sane order.
    for attr in sorted(cls.__dict__.keys()):
        value = cls.__dict__[attr]
        if not isinstance(value, types.FunctionType):
            continue
        # Metadata attached by the @Command decorator (or all-None default).
        command_name, category, description, allow_all, conditions, parser = getattr(
            value, '_mach_command', (None, None, None, None, None, None))
        if command_name is None:
            continue
        # Commands without conditions are skipped when conditions are required.
        if conditions is None and Registrar.require_conditions:
            continue
        msg = 'Mach command \'%s\' implemented incorrectly. ' + \
            'Conditions argument must take a list ' + \
            'of functions. Found %s instead.'
        conditions = conditions or []
        if not isinstance(conditions, collections.Iterable):
            msg = msg % (command_name, type(conditions))
            raise MachError(msg)
        for c in conditions:
            if not hasattr(c, '__call__'):
                msg = msg % (command_name, type(c))
                raise MachError(msg)
        # Arguments attached by stacked @CommandArgument decorators.
        arguments = getattr(value, '_mach_command_args', None)
        handler = MethodHandler(cls, attr, command_name, category=category,
            description=description, allow_all_arguments=allow_all,
            conditions=conditions, parser=parser, arguments=arguments,
            pass_context=pass_context)
        Registrar.register_command_handler(handler)

    return cls
class Command(object):
    """Decorator that marks a function or method as a mach subcommand.

    Recognized keyword arguments:

    category -- String category the command belongs to; mach's help
        groups commands by category.
    description -- Brief description of what the command does.
    allow_all_args -- Whether unknown arguments should be passed through
        to the command.
    conditions -- Optional list of callables gating the command.
    parser -- Optional argparse.ArgumentParser instance used as the basis
        for the command arguments.

    Example:

        @Command('foo', category='misc', description='Run the foo action')
        def foo(self):
            pass
    """

    def __init__(self, name, category=None, description=None,
                 allow_all_args=False, conditions=None, parser=None):
        # Stored in the exact order @CommandProvider unpacks it.
        self._spec = (name, category, description, allow_all_args,
                      conditions, parser)

    def __call__(self, func):
        # Attach command metadata for @CommandProvider to discover later.
        func._mach_command = self._spec
        return func
class CommandArgument(object):
    """Decorator adding an argument to a mach subcommand.

    All positional and keyword arguments are forwarded verbatim to
    ArgumentParser.add_argument().

    Example:

        @Command('foo', help='Run the foo action')
        @CommandArgument('-b', '--bar', action='store_true', default=False,
            help='Enable bar mode.')
        def foo(self):
            pass
    """

    def __init__(self, *args, **kwargs):
        self._command_args = (args, kwargs)

    def __call__(self, func):
        # Decorators apply bottom-up, so prepend to preserve source order.
        existing = getattr(func, '_mach_command_args', [])
        func._mach_command_args = [self._command_args] + existing
        return func
def SettingsProvider(cls):
    """Class decorator to denote that this class provides Mach settings.

    When this decorator is encountered, the underlying class will automatically
    be registered with the Mach registrar and will (likely) be hooked up to the
    mach driver.

    This decorator is only allowed on mach.config.ConfigProvider classes;
    anything else raises MachError.
    """
    if not issubclass(cls, ConfigProvider):
        raise MachError('@SettingsProvider encountered on class that does ' +
            'not derived from mach.config.ConfigProvider.')

    Registrar.register_settings_provider(cls)
    return cls
| michath/ConMonkey | python/mach/mach/decorators.py | Python | mpl-2.0 | 6,038 |
import json
import os
import unittest
import uuid
import pytest
from six.moves.urllib.error import HTTPError
wptserve = pytest.importorskip("wptserve")
from .base import TestUsingServer, doc_root
class TestFileHandler(TestUsingServer):
    """Integration tests for wptserve's static file handler, exercised
    against files under doc_root via a live test server."""

    def test_GET(self):
        # A plain GET returns the file bytes with a text/plain content type.
        resp = self.request("/document.txt")
        self.assertEqual(200, resp.getcode())
        self.assertEqual("text/plain", resp.info()["Content-Type"])
        self.assertEqual(open(os.path.join(doc_root, "document.txt"), 'rb').read(), resp.read())

    def test_headers(self):
        # Headers come from the side-car .headers file, including
        # substitution pipes (uuid values, repeated values).
        resp = self.request("/with_headers.txt")
        self.assertEqual(200, resp.getcode())
        self.assertEqual("text/html", resp.info()["Content-Type"])
        self.assertEqual("PASS", resp.info()["Custom-Header"])
        # This will fail if it isn't a valid uuid
        uuid.UUID(resp.info()["Another-Header"])
        self.assertEqual(resp.info()["Same-Value-Header"], resp.info()["Another-Header"])
        self.assertEqual(resp.info()["Double-Header"], "PA, SS")

    def test_range(self):
        # A bounded byte range returns 206 with the matching slice.
        resp = self.request("/document.txt", headers={"Range":"bytes=10-19"})
        self.assertEqual(206, resp.getcode())
        data = resp.read()
        expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
        self.assertEqual(10, len(data))
        self.assertEqual("bytes 10-19/%i" % len(expected), resp.info()['Content-Range'])
        self.assertEqual("10", resp.info()['Content-Length'])
        self.assertEqual(expected[10:20], data)

    def test_range_no_end(self):
        # Open-ended range (bytes=10-) returns everything from the offset.
        resp = self.request("/document.txt", headers={"Range":"bytes=10-"})
        self.assertEqual(206, resp.getcode())
        data = resp.read()
        expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
        self.assertEqual(len(expected) - 10, len(data))
        self.assertEqual("bytes 10-%i/%i" % (len(expected) - 1, len(expected)), resp.info()['Content-Range'])
        self.assertEqual(expected[10:], data)

    def test_range_no_start(self):
        # Suffix range (bytes=-10) returns the last 10 bytes.
        resp = self.request("/document.txt", headers={"Range":"bytes=-10"})
        self.assertEqual(206, resp.getcode())
        data = resp.read()
        expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
        self.assertEqual(10, len(data))
        self.assertEqual("bytes %i-%i/%i" % (len(expected) - 10, len(expected) - 1, len(expected)),
                         resp.info()['Content-Range'])
        self.assertEqual(expected[-10:], data)

    def test_multiple_ranges(self):
        # Overlapping ranges are coalesced (5-7 and 6-10 become 5-10) and
        # the body is a multipart/byteranges payload.
        resp = self.request("/document.txt", headers={"Range":"bytes=1-2,5-7,6-10"})
        self.assertEqual(206, resp.getcode())
        data = resp.read()
        expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
        self.assertTrue(resp.info()["Content-Type"].startswith("multipart/byteranges; boundary="))
        boundary = resp.info()["Content-Type"].split("boundary=")[1]
        parts = data.split("--" + boundary)
        self.assertEqual("\r\n", parts[0])
        self.assertEqual("--", parts[-1])
        expected_parts = [("1-2", expected[1:3]), ("5-10", expected[5:11])]
        for expected_part, part in zip(expected_parts, parts[1:-1]):
            header_string, body = part.split("\r\n\r\n")
            headers = dict(item.split(": ", 1) for item in header_string.split("\r\n") if item.strip())
            self.assertEqual(headers["Content-Type"], "text/plain")
            self.assertEqual(headers["Content-Range"], "bytes %s/%i" % (expected_part[0], len(expected)))
            self.assertEqual(expected_part[1] + "\r\n", body)

    def test_range_invalid(self):
        # Unsatisfiable ranges (start > end, or past EOF) return 416.
        with self.assertRaises(HTTPError) as cm:
            self.request("/document.txt", headers={"Range":"bytes=11-10"})
        self.assertEqual(cm.exception.code, 416)

        expected = open(os.path.join(doc_root, "document.txt"), 'rb').read()
        with self.assertRaises(HTTPError) as cm:
            self.request("/document.txt", headers={"Range":"bytes=%i-%i" % (len(expected), len(expected) + 10)})
        self.assertEqual(cm.exception.code, 416)

    def test_sub_config(self):
        # .sub.txt files substitute server config (host/port) placeholders.
        resp = self.request("/sub.sub.txt")
        expected = b"localhost localhost %i" % self.server.port
        assert resp.read().rstrip() == expected

    def test_sub_headers(self):
        # Substitution can read request headers.
        resp = self.request("/sub_headers.sub.txt", headers={"X-Test": "PASS"})
        expected = b"PASS"
        assert resp.read().rstrip() == expected

    def test_sub_params(self):
        # Substitution can read query-string parameters.
        resp = self.request("/sub_params.sub.txt", query="test=PASS")
        expected = b"PASS"
        assert resp.read().rstrip() == expected
class TestFunctionHandler(TestUsingServer):
    """Tests for @wptserve.handlers.handler return-value handling: a
    handler may return a body; (headers, body); (status, headers, body);
    or ((status, reason), headers, body).  Other shapes are a 500."""

    def test_string_rv(self):
        # Bare string return becomes the body; Content-Length is computed.
        @wptserve.handlers.handler
        def handler(request, response):
            return "test data"

        route = ("GET", "/test/test_string_rv", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        self.assertEqual(200, resp.getcode())
        self.assertEqual("9", resp.info()["Content-Length"])
        self.assertEqual("test data", resp.read())

    def test_tuple_1_rv(self):
        # A 1-tuple (or empty tuple) is malformed and yields a 500.
        @wptserve.handlers.handler
        def handler(request, response):
            return ()

        route = ("GET", "/test/test_tuple_1_rv", handler)
        self.server.router.register(*route)

        with pytest.raises(HTTPError) as cm:
            self.request(route[1])

        assert cm.value.code == 500

    def test_tuple_2_rv(self):
        # (headers, body): explicit Content-Length truncates the body.
        @wptserve.handlers.handler
        def handler(request, response):
            return [("Content-Length", 4), ("test-header", "test-value")], "test data"

        route = ("GET", "/test/test_tuple_2_rv", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        self.assertEqual(200, resp.getcode())
        self.assertEqual("4", resp.info()["Content-Length"])
        self.assertEqual("test-value", resp.info()["test-header"])
        self.assertEqual("test", resp.read())

    def test_tuple_3_rv(self):
        # (status, headers, body) sets the response code.
        @wptserve.handlers.handler
        def handler(request, response):
            return 202, [("test-header", "test-value")], "test data"

        route = ("GET", "/test/test_tuple_3_rv", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        self.assertEqual(202, resp.getcode())
        self.assertEqual("test-value", resp.info()["test-header"])
        self.assertEqual("test data", resp.read())

    def test_tuple_3_rv_1(self):
        # ((status, reason), headers, body) also sets the reason phrase.
        @wptserve.handlers.handler
        def handler(request, response):
            return (202, "Some Status"), [("test-header", "test-value")], "test data"

        route = ("GET", "/test/test_tuple_3_rv_1", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        self.assertEqual(202, resp.getcode())
        self.assertEqual("Some Status", resp.msg)
        self.assertEqual("test-value", resp.info()["test-header"])
        self.assertEqual("test data", resp.read())

    def test_tuple_4_rv(self):
        # A 4-tuple is malformed and yields a 500.
        @wptserve.handlers.handler
        def handler(request, response):
            return 202, [("test-header", "test-value")], "test data", "garbage"

        route = ("GET", "/test/test_tuple_1_rv", handler)
        self.server.router.register(*route)

        with pytest.raises(HTTPError) as cm:
            self.request(route[1])

        assert cm.value.code == 500

    def test_none_rv(self):
        # Returning None produces an empty 200 with no Content-Length.
        @wptserve.handlers.handler
        def handler(request, response):
            return None

        route = ("GET", "/test/test_none_rv", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        assert resp.getcode() == 200
        assert "Content-Length" not in resp.info()
        assert resp.read() == b""
class TestJSONHandler(TestUsingServer):
    """Tests for callables wrapped by wptserve.handlers.json_handler."""

    def test_json_0(self):
        # A plain dict is serialized as the JSON body.
        @wptserve.handlers.json_handler
        def handler(request, response):
            return {"data": "test data"}

        route = ("GET", "/test/test_json_0", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        self.assertEqual(200, resp.getcode())
        self.assertEqual({"data": "test data"}, json.load(resp))

    def test_json_tuple_2(self):
        # (headers, dict) return value.
        @wptserve.handlers.json_handler
        def handler(request, response):
            return [("Test-Header", "test-value")], {"data": "test data"}

        route = ("GET", "/test/test_json_tuple_2", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        self.assertEqual(200, resp.getcode())
        self.assertEqual("test-value", resp.info()["test-header"])
        self.assertEqual({"data": "test data"}, json.load(resp))

    def test_json_tuple_3(self):
        # ((status, reason), headers, dict) return value.
        @wptserve.handlers.json_handler
        def handler(request, response):
            return (202, "Giraffe"), [("Test-Header", "test-value")], {"data": "test data"}

        # Bug fix: this route previously reused "/test/test_json_tuple_2"
        # (copy-paste), clashing with test_json_tuple_2's registration.
        route = ("GET", "/test/test_json_tuple_3", handler)
        self.server.router.register(*route)
        resp = self.request(route[1])
        self.assertEqual(202, resp.getcode())
        self.assertEqual("Giraffe", resp.msg)
        self.assertEqual("test-value", resp.info()["test-header"])
        self.assertEqual({"data": "test data"}, json.load(resp))
class TestPythonHandler(TestUsingServer):
    """Tests for the python-script (.py) handler."""

    def test_string(self):
        """A script returning a string gives a 200 text/plain response."""
        resp = self.request("/test_string.py")
        info = resp.info()
        self.assertEqual(200, resp.getcode())
        self.assertEqual("text/plain", info["Content-Type"])
        self.assertEqual("PASS", resp.read())

    def test_tuple_2(self):
        """A script returning (headers, body)."""
        resp = self.request("/test_tuple_2.py")
        info = resp.info()
        self.assertEqual(200, resp.getcode())
        self.assertEqual("text/html", info["Content-Type"])
        self.assertEqual("PASS", info["X-Test"])
        self.assertEqual("PASS", resp.read())

    def test_tuple_3(self):
        """A script returning ((status, reason), headers, body)."""
        resp = self.request("/test_tuple_3.py")
        info = resp.info()
        self.assertEqual(202, resp.getcode())
        self.assertEqual("Giraffe", resp.msg)
        self.assertEqual("text/html", info["Content-Type"])
        self.assertEqual("PASS", info["X-Test"])
        self.assertEqual("PASS", resp.read())

    def test_no_main(self):
        """A script without a main entry point is a server error."""
        with pytest.raises(HTTPError) as cm:
            self.request("/no_main.py")
        assert cm.value.code == 500

    def test_invalid(self):
        """A script that fails to load is a server error."""
        with pytest.raises(HTTPError) as cm:
            self.request("/invalid.py")
        assert cm.value.code == 500

    def test_missing(self):
        """A nonexistent script gives a 404."""
        with pytest.raises(HTTPError) as cm:
            self.request("/missing.py")
        assert cm.value.code == 404
class TestDirectoryHandler(TestUsingServer):
    """Tests for directory index responses."""

    def test_directory(self):
        """The root directory serves an HTML index."""
        resp = self.request("/")
        self.assertEqual(200, resp.getcode())
        self.assertEqual("text/html", resp.info()["Content-Type"])
        # TODO: add a check that the response body is actually sane

    def test_subdirectory_trailing_slash(self):
        """A subdirectory with a trailing slash also serves an index."""
        resp = self.request("/subdir/")
        assert resp.getcode() == 200
        assert resp.info()["Content-Type"] == "text/html"

    def test_subdirectory_no_trailing_slash(self):
        """Without the trailing slash the path is not found."""
        with pytest.raises(HTTPError) as cm:
            self.request("/subdir")
        assert cm.value.code == 404
class TestAsIsHandler(TestUsingServer):
    """Tests for the .asis (raw, pre-baked response) handler."""

    def test_as_is(self):
        """An .asis file is replayed verbatim: status, reason, headers, body."""
        resp = self.request("/test.asis")
        info = resp.info()
        self.assertEqual(202, resp.getcode())
        self.assertEqual("Giraffe", resp.msg)
        self.assertEqual("PASS", info["X-Test"])
        self.assertEqual("Content", resp.read())
        # TODO: add a check that the response is actually sane
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| youtube/cobalt | third_party/web_platform_tests/tools/wptserve/tests/functional/test_handlers.py | Python | bsd-3-clause | 11,798 |
import os
import pytest
from YSOVAR import plot
from . import outroot
@pytest.mark.usefixtures("data")
class Test_plots():
    """Smoke tests: each YSOVAR plotting routine runs without raising."""

    def test_lc_plots(self, data):
        """Light-curve plots, with a twin x axis."""
        plot.make_lc_plots(data, outroot, twinx=True)

    def test_cmd_plots(self, data):
        """Colour-magnitude diagram plots."""
        plot.make_cmd_plots(data, outroot)

    def test_ls_plots(self, data):
        """Lomb-Scargle periodogram plots."""
        plot.make_ls_plots(data, outroot, 100, 4, 1)

    def test_phased_plots(self, data):
        """Phased light-curve / CMD plots."""
        plot.make_phased_lc_cmd_plots(data, outroot)

    def test_latex(self, data):
        """LaTeX atlas generation with a custom column -> caption map."""
        output_cols = {
            'YSOVAR2_id': 'ID in YSOVAR 2 database',
            'median_36': 'median [3.6]',
            'mad_36': 'medium abs dev [3.6]',
            'stddev_36': 'stddev [3.6]',
            'median_45': 'median [4.5]',
            'mad_45': 'medium abs dev [4.5]',
            'stddev_45': 'stddev [4.5]',
            'stetson_36_45': 'Stetson [3.6] vs. [4.5]',
        }
        plot.make_latexfile(data, outroot, 'testatlas', output_cols=output_cols)
| YSOVAR/YSOVAR | test/test_plot.py | Python | gpl-3.0 | 1,134 |
# coding: utf-8
#
# xiaoyu <xiaokong1937@gmail.com>
#
# 2014/12/24 Merry Christmas
#
"""
Tests for xlink SDK.
"""
import unittest
from xlink import XlinkClient
class XlinkTestCase(unittest.TestCase):
    """Exercise XlinkClient against the xlink API."""

    def setUp(self):
        # NOTE(review): credential hard-coded in source control; consider
        # loading it from an environment variable instead.
        APIKEY = '727c554409d5fa166860008db6385987782d5728'
        APIUSER = 'apiuser'
        self.client = XlinkClient(APIKEY, APIUSER)

    def test_get_cmd(self):
        # Assumes sensor id=4 exists remotely and its first command is
        # 'on' -- TODO confirm against the live service.
        self.assertEqual(self.client.sensor.get(id=4).commands[0].cmd, u'on')
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| xkong/xlinkwot | xlink_open_wrt/xlink_sdk/tests.py | Python | bsd-3-clause | 519 |
import stripe
from stripe.test.helper import (
StripeResourceTest, DUMMY_DISPUTE, NOW
)
class DisputeTest(StripeResourceTest):
    """Tests for the /v1/disputes API bindings.

    Each test triggers an API call and verifies the exact request the
    mocked requestor received.
    """

    def test_list_all_disputes(self):
        stripe.Dispute.list(created={'lt': NOW})
        expected_params = {'created': {'lt': NOW}}
        self.requestor_mock.request.assert_called_with(
            'get', '/v1/disputes', expected_params)

    def test_create_dispute(self):
        stripe.Dispute.create(idempotency_key='foo', **DUMMY_DISPUTE)
        self.requestor_mock.request.assert_called_with(
            'post', '/v1/disputes', DUMMY_DISPUTE,
            {'Idempotency-Key': 'foo'})

    def test_retrieve_dispute(self):
        stripe.Dispute.retrieve('dp_test_id')
        self.requestor_mock.request.assert_called_with(
            'get', '/v1/disputes/dp_test_id', {}, None)

    def test_update_dispute(self):
        dispute = stripe.Dispute.construct_from({
            'id': 'dp_update_id',
            'evidence': {'product_description': 'description'},
        }, 'api_key')
        dispute.evidence['customer_name'] = 'customer'
        dispute.evidence['uncategorized_text'] = 'text'
        dispute.save()
        # Only the modified evidence sub-fields are sent on save().
        expected_payload = {'evidence': {'customer_name': 'customer',
                                         'uncategorized_text': 'text'}}
        self.requestor_mock.request.assert_called_with(
            'post', '/v1/disputes/dp_update_id', expected_payload, None)

    def test_close_dispute(self):
        dispute = stripe.Dispute(id='dp_close_id')
        dispute.close(idempotency_key='foo')
        self.requestor_mock.request.assert_called_with(
            'post', '/v1/disputes/dp_close_id/close', {},
            {'Idempotency-Key': 'foo'})
| colehertz/Stripe-Tester | venv/lib/python3.5/site-packages/stripe/test/resources/test_disputes.py | Python | mit | 1,903 |
# Reverse digits of an integer.
# Example1: x = 123, return 321
# Example2: x = -123, return -321
# If the input number is big enough, the number may overflow in C or Java. However, the int type
# number in Python doesn't overflow beyond 32 bits (4 bytes). So the code below works for Python without
# handling overflow, but may not work for C or Java.
class Solution:
    """Reverse the decimal digits of a 32-bit signed integer."""

    def reverse(self, x):
        """Return x with its digits reversed, preserving sign.

        Returns 0 if the reversed value would overflow a 32-bit signed
        int (mimicking C/Java overflow behaviour for LeetCode).
        """
        sign = -1 if x < 0 else 1
        x = abs(x)
        y = 0
        while x:
            y = y * 10 + x % 10
            # Bug fix: use floor division; with '/' this loop only worked
            # under Python 2 (true division broke it on Python 3).
            x //= 10
        y *= sign
        # Python ints never overflow, so fake 32-bit signed wraparound.
        if y > 0x7FFFFFFF or y < -0x7FFFFFFF - 1:
            return 0
        return y
| lijunxyz/leetcode_practice | reverse_integer_easy/Solution2.py | Python | mit | 777 |
#!/usr/bin/env python
# Entry point so the tokenizer can be run as ``python -m polyglot_tokenizer``.
import sys
import polyglot_tokenizer as tok
if __package__ is None and not hasattr(sys, "frozen"):
    # direct call of __main__.py: make the package importable by adding
    # the directory two levels above this file to sys.path.
    import os.path
    path = os.path.realpath(os.path.abspath(__file__))
    sys.path.append(os.path.dirname(os.path.dirname(path)))
if __name__ == '__main__':
    tok.main()
| irshadbhat/indic-tokenizer | polyglot_tokenizer/__main__.py | Python | mit | 335 |
"""
ThrottleObject: Base class for throttleable objects.
"""
class ThrottleObject(object):
    """Base class for throttleable objects.

    Holds a host address, a metric callable, and a threshold.  When the
    metric result for the host meets or exceeds the threshold, the system
    is considered to be in "throttle mode" and child objects should wait
    rather than act.
    """

    def __init__(self, host, method, threshold):
        """Initialize the throttle object.

        :param host: address of the host to check
        :param method: callable invoked as ``method(host, *args)`` to
            obtain the current metric value
        :param threshold: metric values at or above this mean "throttled"
        """
        self.host = host
        self.method = method
        self.threshold = threshold

    def okay(self, *args):
        """Return True when it is okay to proceed (metric below threshold).

        Extra positional arguments are forwarded to the metric callable.
        """
        metric = self.method(self.host, *args)
        return metric < self.threshold
| birm/dbThrottle | dbThrottle/ThrottleObject.py | Python | gpl-3.0 | 1,141 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2016-10-21 12:55
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds the Lecture model, which links
    # a Course to the Students attending it.

    dependencies = [
        ('account', '0001_initial'),
        ('course', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Lecture',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='course.Course')),
                ('students', models.ManyToManyField(to='account.Student')),
            ],
        ),
    ]
| spyua/budda_scanner | budda/course/migrations/0002_lecture.py | Python | gpl-3.0 | 766 |
#!/usr/bin/env python
"""
Copyright 2016 ARC Centre of Excellence for Climate Systems Science
author: Scott Wales <scott.wales@unimelb.edu.au>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from subprocess import Popen, PIPE
from textwrap import dedent
from os import environ, path
from distutils.util import strtobool
import ldap
import getpass
from . import auth, gpg
def colour(text, colour):
    """Wrap *text* in ANSI escape codes for the given colour.

    :param text: string to colourise
    :param colour: one of 'red', 'green' or 'blue'
    :returns: text wrapped in a colour-on sequence and a reset sequence
    :raises ValueError: for an unknown colour name (previously a bare
        ``Exception`` with no message; ValueError is a subclass of
        Exception so existing handlers still work)
    """
    codes = {
        'red': '\033[31;1m',
        'green': '\033[32m',
        'blue': '\033[93m',
    }
    if colour not in codes:
        raise ValueError('unknown colour: %s' % colour)
    reset = '\033[m'
    return codes[colour] + text + reset
def info(text):
    """Print *text* prefixed with a blue INFO tag."""
    print("{0}: {1}".format(colour('INFO', 'blue'), text))
def warning(text):
    """Print *text* prefixed with a red WARN tag."""
    print("{0}: {1}".format(colour('WARN', 'red'), text))
def todo(text):
    """Print *text* prefixed with a green TODO tag."""
    print("{0}: {1}".format(colour('TODO', 'green'), text))
class SetupError(Exception):
    """Raised when the user must take action before setup can complete."""
def userinfo():
    """
    Get current user's common name and email from LDAP

    Returns: Tuple of (name, email)
    """
    # Connect to the system-configured LDAP server (anonymous bind).
    l = ldap.initialize(ldap.get_option(ldap.OPT_URI))
    people = 'ou=People,dc=apac,dc=edu,dc=au'
    # Search for the entry whose uid matches the local login name.
    info = l.search_s(people, ldap.SCOPE_SUBTREE, '(uid=%s)'%getpass.getuser())
    # search_s returns [(dn, attrs)]; attribute values are lists, so take
    # the first cn/mail value.  Assumes exactly one match -- TODO confirm.
    return (info[0][1]['cn'][0],info[0][1]['mail'][0])
def prompt_bool(prompt):
    """
    Ask a yes/no question, re-prompting until the answer parses.

    Returns: true/false answer (strtobool's 0/1)
    """
    raw_value = raw_input(prompt + ' [yes/no] ')
    try:
        return strtobool(raw_value)
    except ValueError:
        # Unrecognised answer: ask again.  (Bug fix: this previously
        # called the undefined name 'ask_bool', raising NameError.)
        return prompt_bool(prompt)
def prompt_or_default(prompt, default):
    """
    Ask a question with a default answer

    Returns: answer or default
    """
    answer = raw_input('%s [%s]: ' % (prompt, default)).strip()
    # An empty reply means "accept the default".
    return answer if answer else default
def gpg_startup():
    """Install a gpg-agent bootstrap snippet into the user's shell profile.

    Appends a snippet that reuses a running agent recorded in
    ~/.gpg-agent-info, or starts a new agent with a 12 hour passphrase
    cache.  If the profile already mentions gpg-agent (but the agent is
    not running) a warning is printed instead of appending again.
    """
    agent = dedent("""
        [ -f ~/.gpg-agent-info ] && source ~/.gpg-agent-info
        if [ -S "${GPG_AGENT_INFO%%:*}" ]; then
            export GPG_AGENT_INFO
        else
            eval $( gpg-agent --daemon --allow-preset-passphrase --batch --max-cache-ttl 43200 --write-env-file ~/.gpg-agent-info )
        fi
        """)
    home = environ['HOME']
    # Try both common login-shell profiles; only existing files are touched.
    for f in ['.profile','.bash_profile']:
        p = path.join(home,f)
        if path.exists(p):
            # Check if gpg-agent is already referenced
            grep = Popen(['grep','gpg-agent',p],stdout=PIPE)
            grep.communicate()
            if grep.returncode == 0:
                warning('GPG Agent is referenced in ~/%s but is not currently running. '%f+
                    'Try relogging to start it again, if that doesn\'t work please contact the helpdesk')
                continue
            # Add script to file
            with open(p,'a') as profile:
                profile.write(agent)
    # NOTE(review): this message prints even when every profile already
    # referenced gpg-agent and nothing was appended -- confirm intended.
    todo('GPG Agent has been added to your startup scripts. '+
        'Please log out of Accessdev then back in again to make sure it has been activated\n')
def check_gpg_agent():
    """
    Make sure GPG-Agent is running

    If the agent cannot be reached, install the startup script into the
    user's profile and raise SetupError so the caller can ask the user
    to re-login.
    """
    try:
        # Probe the agent; gpg.send raises if no agent socket is reachable.
        gpg.send('GETINFO version')
        info('GPG Agent is running')
    except Exception:
        gpg_startup()
        raise SetupError
def register_mosrs_account():
    """Email the helpdesk asking for a MOSRS account for the current user.

    Prompts for name and email (pre-filled from LDAP) and sends the
    request via the local ``mail`` command.
    """
    name, email = userinfo()
    name = prompt_or_default('What is your name?',name)
    email = prompt_or_default('What is your work email address?',email)
    # Pipe the message body into `mail` on stdin.
    request = Popen(['mail', '-s','MOSRS account request for %s'%name, 'access_help@nf.nci.org.au'], stdin=PIPE)
    request.communicate(dedent("""
        ACCESS user %s (NCI id %s, email <%s>) would like to request an account on MOSRS.
        Can the sponsor for their institution please submit a request on their behalf at
        https://code.metoffice.gov.uk/trac/admin/newticket?type=account-request
        You can check if they have an existing account at
        https://code.metoffice.gov.uk/trac/home/wiki/UserList
        """%(name, environ['USER'], email)))
    print('\n')
    info('Submitting MOSRS account request for %s <%s> to access_help'%(name,email))
    info('Once your account has been activated (will take at least one UK business day) '+
        'you will receive an email detailing how to set up your password\n')
def setup_mosrs_account():
    """Ask whether the user has a MOSRS account and set up authentication.

    Requires a running GPG agent; loops until a yes/no style answer is
    given, then either caches MOSRS credentials or prints instructions
    for requesting an account.
    """
    check_gpg_agent()
    answer = None
    while answer not in ('yes', 'no', 'y', 'n'):
        answer = prompt_or_default("Do you have a MOSRS account", "yes").lower()
    if answer.startswith('y'):
        auth.check_or_update()
    else:
        print(dedent(
            """
            If you need to access new versions of the UM please send a
            request to 'cws_help@nci.org.au' saying that you'd like a MOSRS account
            Once you have an account run this script again
            """
        ))
    print('\n')
def check_raijin_ssh():
    """Historical stub kept so stray callers fail loudly.

    Raijin has been decommissioned; any remaining call site is a bug and
    gets an explanatory ValueError.
    """
    raise ValueError("raijin should no longer be used. Please contact CMS")
def check_gadi_ssh():
    """Verify that Rose/Cylc is reachable on Gadi over non-interactive ssh.

    Raises SetupError (after printing guidance) if the ssh probe fails,
    e.g. because no ssh agent is configured.
    """
    print('Testing Rose can be accessed on Gadi...')
    # BatchMode forbids password prompts, so this fails fast without an agent.
    ssh = Popen(['ssh', '-oBatchMode=yes', 'gadi',
                 '/projects/access/bin/cylc --version'])
    if ssh.wait() == 0:
        print('Successfully found Rose\n')
        return
    warning('Unable to connect to Gadi')
    warning('Follow the instructions at https://accessdev.nci.org.au/trac/wiki/Guides/SSH to set up a SSH agent\n')
    raise SetupError
def accesssvn_setup():
    """
    Setup GPG for access-svn access
    """
    try:
        check_gpg_agent()
        print('\n')
        print('To store your password for 12 hours run:')
        print('    access-auth\n')
    except SetupError:
        # Agent startup script was just installed; user must re-login first.
        todo('Once this has been done please run this setup script again\n')
def main():
    """Interactive accessdev setup: MOSRS authentication then Gadi checks."""
    print('\n')
    print('Welcome to Accessdev, the user interface and control server for the ACCESS model at NCI')
    print('This script will set up your account to use Rose and the UM\n')
    try:
        setup_mosrs_account()

        check_gadi_ssh()

        # Account successfully created
        print('You are now able to use Rose and the UM. To see a list of available experiments run:')
        print('    rosie go\n')
        print('Your password will be cached for a maximum of 12 hours. To store your password again run:')
        print('    mosrs-auth\n')
    except SetupError:
        # Some step needs user action (e.g. re-login); tell them to re-run.
        todo('Once this has been done please run this setup script again\n')
    finally:
        print('You can ask for help with the ACCESS systems by emailing "access_help@nf.nci.org.au"\n')
# Script entry point.
if __name__ == '__main__':
    main()
| ScottWales/mosrs-setup | mosrs/setup.py | Python | apache-2.0 | 7,573 |
# Python 2 script: reads per-iteration win counts ("my" ranker vs the site
# ranker) from data.csv and writes the cumulative win ratio to out.csv.
# NOTE(review): input/output paths are hard-coded absolute Windows paths.
thepath = 'C:\Users\Spyros\OneDrive\workspace\lerot\output_data\listwise_LL_evaluation_data\\Fold1\\data.csv'
data = []
with open(thepath, 'r') as f:
    datastring = f.read()
    data = datastring.strip().split('\n')
performance = []
my_wins = 0.0
site_wins = 0.0
for i in data:
    # Each row: "<my clicks>,<site clicks>"; ties count for neither side.
    current_run = i.split(',')
    if int(current_run[0]) > int(current_run[1]):
        my_wins += 1
    if int(current_run[0]) < int(current_run[1]):
        site_wins += 1
    # Guard against 0/0 before the first decisive comparison.
    if my_wins > 0:
        performance.append(my_wins / (my_wins + site_wins))
    else:
        performance.append(0)
# One cumulative ratio per input line.
outString = ''
for i in performance:
    outString += ''.join((str(i), '\n'))
print outString
with open('C:\Users\Spyros\OneDrive\workspace\lerot\output_data\listwise_LL_evaluation_data\\Fold1\\out.csv', 'w') as f:
    f.write(outString)
'''
thepath = 'C:\Users\Spyros\OneDrive\workspace\lerot\output_data\pairwise_local_evaluation_data\\'
data = []
for fold in range(1,6):
    for iteration in range(5):
        with open(thepath+'fold'+str(fold)+'_'+str(iteration), 'r') as f:
            string = f.read()
            data.append(string.split('\n'))
string = ''
for i in range(1000):
    for j in range(25):
        string += data[j][i]+','
    string = string[:-1]
    string += '\n'
with open(thepath+'finalOut.csv', 'w') as f:
    string = f.write(string)'''
| m0re4u/LeRoT-SCLP | scripts/makeTsv.py | Python | gpl-3.0 | 1,359 |
import requests
from requests.adapters import HTTPAdapter
from requests.exceptions import RetryError
from requests.packages.urllib3.util.retry import Retry
import urllib.parse
import os
from . import _agent
from . import errors
class Client():
    """A base class to define clients for the ols servers.

    This is a simple wrapper around requests.Session so we inherit all good
    bits while providing a simple point for tests to override when needed.
    """

    def __init__(self, conf, root_url):
        """Initialize Client object

        :param config conf: Configuration details for the client
        :param str root_url: Root url for all requests.

        :type config: snapcraft.config.Config
        """
        self.conf = conf
        self.root_url = root_url
        self.session = requests.Session()
        # Setup max retries for all store URLs and the CDN.  Retry count
        # and backoff are tunable via environment variables.
        retries = Retry(total=int(os.environ.get('STORE_RETRIES', 5)),
                        backoff_factor=int(os.environ.get('STORE_BACKOFF', 2)),
                        status_forcelist=[104, 500, 502, 503, 504])
        self.session.mount('http://', HTTPAdapter(max_retries=retries))
        self.session.mount('https://', HTTPAdapter(max_retries=retries))

        self._snapcraft_headers = {
            'User-Agent': _agent.get_user_agent(),
        }

    def request(self, method, url, params=None, headers=None, **kwargs):
        """Send a request to url relative to the root url.

        :param str method: Method used for the request.
        :param str url: Appended with the root url first.
        :param list params: Query parameters to be sent along with the request.
        :param list headers: Headers to be sent along with the request.

        :return Response of the request.
        """
        # Bug fix: merge into a fresh dict so the caller's 'headers'
        # mapping is not mutated (it was previously updated in place).
        # The snapcraft headers still take precedence, as before.
        final_headers = dict(headers) if headers else {}
        final_headers.update(self._snapcraft_headers)

        # Note that url may be absolute in which case 'root_url' is ignored by
        # urljoin.
        final_url = urllib.parse.urljoin(self.root_url, url)
        try:
            response = self.session.request(
                method, final_url, headers=final_headers,
                params=params, **kwargs)
        except RetryError as e:
            raise errors.StoreRetryError(e) from e
        return response

    def get(self, url, **kwargs):
        """Perform a GET request with the given arguments.

        The arguments are the same as for the request function,
        namely params and headers.

        :param str url: url to send the request.

        :return Response of the request.
        """
        return self.request('GET', url, **kwargs)

    def post(self, url, **kwargs):
        """Perform a POST request with the given arguments.

        The arguments are the same as for the request function,
        namely params and headers.

        :param str url: url to send the request.

        :return Response of the request.
        """
        return self.request('POST', url, **kwargs)

    def put(self, url, **kwargs):
        """Perform a PUT request with the given arguments.

        The arguments are the same as for the request function,
        namely params and headers.

        :param str url: url to send the request.

        :return Response of the request.
        """
        return self.request('PUT', url, **kwargs)
| elopio/snapcraft | snapcraft/storeapi/_client.py | Python | gpl-3.0 | 3,393 |
# -*- coding: utf-8 -*-
import operator
import os
import re
import subprocess
import time
import urllib
from xml.dom.minidom import parseString as parse_xml
from module.network.CookieJar import CookieJar
from module.network.HTTPRequest import HTTPRequest
from ..internal.Hoster import Hoster
from ..internal.misc import exists, isexecutable, json, reduce, renice, replace_patterns, which
from ..internal.Plugin import Abort, Skip
class BIGHTTPRequest(HTTPRequest):
    """
    HTTPRequest variant with a configurable load() size limit, allowing
    very big web pages to be fetched by overriding write().
    """
    # @TODO: Add 'limit' parameter to HTTPRequest in v0.4.10

    def __init__(self, cookies=None, options=None, limit=1000000):
        self.limit = limit
        HTTPRequest.__init__(self, cookies=cookies, options=options)

    def write(self, buf):
        """Accumulate a response chunk, enforcing abort and size limit."""
        over_limit = self.limit and self.rep.tell() > self.limit
        if over_limit or self.abort:
            rep = self.getResponse()
            if self.abort:
                raise Abort()
            # Keep the oversized payload on disk for debugging.
            with open("response.dump", "wb") as f:
                f.write(rep)
            raise Exception("Loaded Url exceeded limit")
        self.rep.write(buf)
class Ffmpeg(object):
    """Wrapper around the ffmpeg executable.

    Muxes previously downloaded audio/video/subtitle streams into one
    output file, reporting progress back to the owning plugin by parsing
    ffmpeg's stderr output.
    """
    # Patterns matched against ffmpeg's stderr.
    _RE_DURATION = re.compile(r'Duration: (\d{2}):(\d{2}):(\d{2})\.(\d{2}),')
    _RE_TIME = re.compile(r'time=(\d{2}):(\d{2}):(\d{2})\.(\d{2})')
    _RE_VERSION = re.compile((r'ffmpeg version (.+?) '))

    CMD = None  # full path of the ffmpeg binary, located once per class
    priority = 0  # nice level applied to the spawned process
    streams = []
    start_time = (0, 0)  # (minutes, seconds) offset -- TODO confirm units
    output_filename = None
    error_message = ""

    def __init__(self, priority, plugin=None):
        self.plugin = plugin
        self.priority = priority

        self.streams = []
        self.start_time = (0, 0)
        self.output_filename = None
        self.error_message = ""

        self.find()

    @classmethod
    def find(cls):
        """
        Check for ffmpeg; caches the binary path and version on the class.
        """
        if cls.CMD is not None:
            return True

        try:
            if os.name == "nt":
                # Prefer a bundled ffmpeg.exe under pypath, else rely on PATH.
                ffmpeg = os.path.join(pypath, "ffmpeg.exe") if isexecutable(os.path.join(pypath, "ffmpeg.exe")) \
                    else "ffmpeg.exe"
            else:
                ffmpeg = "ffmpeg"

            cmd = which(ffmpeg) or ffmpeg

            p = subprocess.Popen([cmd, "-version"],
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            out, err = (_r.strip() if _r else "" for _r in p.communicate())
        except OSError:
            return False

        m = cls._RE_VERSION.search(out)
        if m is not None:
            cls.VERSION = m.group(1)

        cls.CMD = cmd

        return True

    @property
    def found(self):
        # Whether an ffmpeg binary has been located.
        return self.CMD is not None

    def add_stream(self, streams):
        """Queue one stream tuple, or a list of them, for muxing."""
        if isinstance(streams, list):
            self.streams.extend(streams)
        else:
            self.streams.append(streams)

    def set_start_time(self, start_time):
        # start_time: (minutes, seconds) tuple used for the -ss argument.
        self.start_time = start_time

    def set_output_filename(self, output_filename):
        self.output_filename = output_filename

    def run(self):
        """Execute ffmpeg over the queued streams.

        Returns True on success; on failure returns False and leaves the
        last stderr line in self.error_message.
        """
        if self.CMD is None or self.output_filename is None:
            return False

        maps = []
        args = []
        meta = []
        # stream tuples: (type-char, input-path[, language]) where type is
        # 'v'/'a'/'s'; subtitles additionally carry a language tag.
        for i, stream in enumerate(self.streams):
            args.extend(["-i", stream[1]])
            maps.extend(["-map", "%s:%s:0" % (i, stream[0])])
            if stream[0] == 's':
                meta.extend(["-metadata:s:s:0:%s" % i, "language=%s" % stream[2]])

        args.extend(maps)
        args.extend(meta)
        # Copy all codecs (no re-encode), seek to start_time, force UTF-8
        # subtitle decoding.
        args.extend(["-y",
                     "-vcodec", "copy",
                     "-acodec", "copy",
                     "-scodec", "copy",
                     "-ss", "00:%s:%s.00" % (self.start_time[0], self.start_time[1]),
                     "-sub_charenc", "utf8"])

        call = [self.CMD] + args + [self.output_filename]
        p = subprocess.Popen(
            call,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)

        renice(p.pid, self.priority)

        duration = self._find_duration(p)

        if duration:
            last_line = self._progress(p, duration)
        else:
            last_line = ""

        out, err = (_r.strip() if _r else "" for _r in p.communicate())
        if err or p.returncode:
            self.error_message = last_line
            return False

        else:
            self.error_message = ""
            return True

    def _find_duration(self, process):
        """Scan stderr until ffmpeg prints the input Duration.

        Returns the duration in centiseconds, or 0 if not found.
        """
        duration = 0
        while True:
            line = process.stderr.readline()  #: ffmpeg writes to stderr

            #: Quit loop on eof
            if not line:
                break

            m = self._RE_DURATION.search(line)
            if m is not None:
                # hh:mm:ss.cc -> centiseconds
                duration = sum(int(v) * [60 * 60 * 100, 60 * 100, 100, 1][i]
                               for i, v in enumerate(m.groups()))
                break

        return duration

    def _progress(self, process, duration):
        """Follow ffmpeg's carriage-return separated 'time=' updates.

        Updates the pyfile progress bar (percent of *duration*) and
        returns the last stderr line, which may hold an error message.
        """
        line = ""
        last_line = ""
        while True:
            c = process.stderr.read(1)  #: ffmpeg writes to stderr

            #: Quit loop on eof
            if not c:
                break

            elif c == "\r":
                last_line = line.strip('\r\n')
                line = ""
                m = self._RE_TIME.search(last_line)
                if m is not None:
                    current_time = sum(int(v) * [60 * 60 * 100, 60 * 100, 100, 1][i]
                                       for i, v in enumerate(m.groups()))
                    if self.plugin:
                        progress = current_time * 100 / duration
                        self.plugin.pyfile.setProgress(progress)

            else:
                line += c
                continue

        return last_line  #: Last line may contain error message
class YoutubeCom(Hoster):
    __name__ = "YoutubeCom"
    __type__ = "hoster"
    __version__ = "0.67"
    __status__ = "testing"

    __pattern__ = r'https?://(?:[^/]*\.)?(?:youtu\.be/|youtube\.com/watch\?(?:.*&)?v=)[\w\-]+'
    __config__ = [("activated", "bool", "Activated", True),
                  ("quality", "sd;hd;fullhd;240p;360p;480p;720p;1080p;1440p;2160p;3072p;4320p", "Quality Setting", "hd"),
                  ("vfmt", "int", "Video FMT/ITAG Number (0 for auto)", 0),
                  ("afmt", "int", "Audio FMT/ITAG Number (0 for auto)", 0),
                  (".mp4", "bool", "Allow .mp4", True),
                  (".flv", "bool", "Allow .flv", True),
                  (".webm", "bool", "Allow .webm", True),
                  (".mkv", "bool", "Allow .mkv", True),
                  (".3gp", "bool", "Allow .3gp", False),
                  ("aac", "bool", "Allow aac audio (DASH video only)", True),
                  ("vorbis", "bool", "Allow vorbis audio (DASH video only)", True),
                  ("opus", "bool", "Allow opus audio (DASH video only)", True),
                  ("ac3", "bool", "Allow ac3 audio (DASH video only)", True),
                  ("dts", "bool", "Allow dts audio (DASH video only)", True),
                  ("3d", "bool", "Prefer 3D", False),
                  ("subs_dl", "off;all_specified;first_available", "Download subtitles", "off"),
                  ("subs_dl_langs", "str", "Subtitle language codes (ISO639-1) to download (comma separated)", ""),
                  ("subs_embed", "bool", "Embed subtitles inside the output file (.mp4 and .mkv only)", False),
                  ("priority", "int", "ffmpeg process priority", 0)]

    __description__ = """Youtube.com hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("spoob", "spoob@pyload.org"),
                   ("zoidberg", "zoidberg@mujmail.cz"),
                   ("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]

    # Normalize short youtu.be links to the canonical watch URL form.
    URL_REPLACEMENTS = [(r'youtu\.be/', 'youtube.com/watch?v=')]

    #: Invalid characters that must be removed from the file name
    invalid_chars = u'\u2605:?><"|\\'

    #: name, width, height, quality ranking, 3D, type
    #: Keyed by YouTube ITAG number; 'type' is "av" (muxed), "v" (video
    #: only, DASH) or "a" (audio only, DASH).
    formats = {
        # 3gp
        17: {'ext': ".3gp", 'width': 176, 'height': 144, 'qi': 0, '3d': False, 'type': "av"},
        36: {'ext': ".3gp", 'width': 400, 'height': 240, 'qi': 1, '3d': False, 'type': "av"},
        # flv
        5: {'ext': ".flv", 'width': 400, 'height': 240, 'qi': 1, '3d': False, 'type': "av"},
        6: {'ext': ".flv", 'width': 640, 'height': 400, 'qi': 4, '3d': False, 'type': "av"},
        34: {'ext': ".flv", 'width': 640, 'height': 360, 'qi': 4, '3d': False, 'type': "av"},
        35: {'ext': ".flv", 'width': 854, 'height': 480, 'qi': 6, '3d': False, 'type': "av"},
        # mp4
        83: {'ext': ".mp4", 'width': 400, 'height': 240, 'qi': 1, '3d': True, 'type': "av"},
        18: {'ext': ".mp4", 'width': 480, 'height': 360, 'qi': 2, '3d': False, 'type': "av"},
        82: {'ext': ".mp4", 'width': 640, 'height': 360, 'qi': 3, '3d': True, 'type': "av"},
        22: {'ext': ".mp4", 'width': 1280, 'height': 720, 'qi': 8, '3d': False, 'type': "av"},
        136: {'ext': ".mp4", 'width': 1280, 'height': 720, 'qi': 8, '3d': False, 'type': "v"},
        84: {'ext': ".mp4", 'width': 1280, 'height': 720, 'qi': 8, '3d': True, 'type': "av"},
        37: {'ext': ".mp4", 'width': 1920, 'height': 1080, 'qi': 9, '3d': False, 'type': "av"},
        137: {'ext': ".mp4", 'width': 1920, 'height': 1080, 'qi': 9, '3d': False, 'type': "v"},
        85: {'ext': ".mp4", 'width': 1920, 'height': 1080, 'qi': 9, '3d': True, 'type': "av"},
        264: {'ext': ".mp4", 'width': 2560, 'height': 1440, 'qi': 10, '3d': False, 'type': "v"},
        266: {'ext': ".mp4", 'width': 3840, 'height': 2160, 'qi': 11, '3d': False, 'type': "v"},
        38: {'ext': ".mp4", 'width': 4096, 'height': 3072, 'qi': 12 , '3d': False, 'type': "av"},
        # webm
        43: {'ext': ".webm", 'width': 640, 'height': 360, 'qi': 3, '3d': False, 'type': "av"},
        100: {'ext': ".webm", 'width': 640, 'height': 360, 'qi': 3, '3d': True, 'type': "av"},
        101: {'ext': ".webm", 'width': 640, 'height': 360, 'qi': 4, '3d': True, 'type': "av"},
        44: {'ext': ".webm", 'width': 854, 'height': 480, 'qi': 5, '3d': False, 'type': "av"},
        45: {'ext': ".webm", 'width': 1280, 'height': 720, 'qi': 7, '3d': False, 'type': "av"},
        247: {'ext': ".webm", 'width': 1280, 'height': 720, 'qi': 7, '3d': False, 'type': "v"},
        102: {'ext': ".webm", 'width': 1280, 'height': 720, 'qi': 8, '3d': True, 'type': "av"},
        46: {'ext': ".webm", 'width': 1920, 'height': 1080, 'qi': 9, '3d': False, 'type': "av"},
        248: {'ext': ".webm", 'width': 1920, 'height': 1080, 'qi': 9, '3d': False, 'type': "v"},
        271: {'ext': ".webm", 'width': 2560, 'height': 1440, 'qi': 10, '3d': False, 'type': "v"},
        313: {'ext': ".webm", 'width': 3840, 'height': 2160, 'qi': 11, '3d': False, 'type': "v"},
        272: {'ext': ".webm", 'width': 7680, 'height': 4320, 'qi': 13, '3d': False, 'type': "v"},
        # audio
        139: {'ext': ".mp4", 'qi': 1, 'acodec': "aac", 'type': "a"},
        140: {'ext': ".mp4", 'qi': 2, 'acodec': "aac", 'type': "a"},
        141: {'ext': ".mp4", 'qi': 3, 'acodec': "aac", 'type': "a"},
        256: {'ext': ".mp4", 'qi': 4, 'acodec': "aac", 'type': "a"},
        258: {'ext': ".mp4", 'qi': 5, 'acodec': "aac", 'type': "a"},
        325: {'ext': ".mp4", 'qi': 6, 'acodec': "dts", 'type': "a"},
        328: {'ext': ".mp4", 'qi': 7, 'acodec': "ac3", 'type': "a"},
        171: {'ext': ".webm", 'qi': 1, 'acodec': "vorbis", 'type': 'a'},
        172: {'ext': ".webm", 'qi': 2, 'acodec': "vorbis", 'type': 'a'},
        249: {'ext': ".webm", 'qi': 3, 'acodec': "opus", 'type': 'a'},
        250: {'ext': ".webm", 'qi': 4, 'acodec': "opus", 'type': 'a'},
        251: {'ext': ".webm", 'qi': 5, 'acodec': "opus", 'type': 'a'}
    }
    def _decrypt_signature(self, encrypted_sig):
        """Turn the encrypted 's' field into a working signature.

        Downloads YouTube's player JS, extracts the scrambling function
        with JSInterpreter, and caches the resulting character permutation
        per player URL for 24 hours (the scramble only reorders bytes).
        """
        # try:
        #    player_url = json.loads(re.search(r'"assets":.+?"js":\s*("[^"]+")',self.data).group(1))
        # except (AttributeError, IndexError):
        #    self.fail(_("Player URL not found"))
        player_url = self.player_config['assets']['js']

        if player_url.startswith("//"):
            player_url = 'https:' + player_url

        if not player_url.endswith(".js"):
            self.fail(_("Unsupported player type %s") % player_url)

        cache_info = self.db.retrieve("cache")
        cache_dirty = False

        # Invalidate the whole cache when the plugin version changes.
        if cache_info is None or 'version' not in cache_info or cache_info[
                'version'] != self.__version__:
            cache_info = {'version': self.__version__,
                          'cache': {}}
            cache_dirty = True

        if player_url in cache_info['cache'] and time.time() < cache_info['cache'][player_url]['time'] + 24 * 60 * 60:
            self.log_debug("Using cached decode function to decrypt the URL")
            decrypt_func = lambda s: ''.join(s[_i] for _i in cache_info['cache'][player_url]['decrypt_map'])
            decrypted_sig = decrypt_func(encrypted_sig)

        else:
            player_data = self.load(self.fixurl(player_url))

            # Locate the name of the signature-scrambling function.
            # NOTE(review): JSInterpreter/JSInterpreterError are expected to
            # be imported elsewhere in this module -- confirm.
            m = re.search(r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(', player_data) or \
                re.search(r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(', player_data)
            try:
                function_name = m.group('sig')

            except (AttributeError, IndexError):
                self.fail(_("Signature decode function name not found"))

            try:
                jsi = JSInterpreter(player_data)
                decrypt_func = lambda s: jsi.extract_function(function_name)([s])

                #: Since Youtube just scrambles the order of the characters in the signature
                #: and does not change any byte value, we can store just a transformation map as a cached function
                decrypt_map = [ord(c) for c in decrypt_func(''.join(map(unichr, range(len(encrypted_sig)))))]
                cache_info['cache'][player_url] = {'decrypt_map': decrypt_map,
                                                   'time': time.time()}
                cache_dirty = True

                decrypted_sig = decrypt_func(encrypted_sig)

            except (JSInterpreterError, AssertionError), e:
                self.log_error(_("Signature decode failed"), e)
                self.fail(e.message)

        #: Remove old records from cache
        for _k in list(cache_info['cache'].keys()):
            if time.time() >= cache_info['cache'][_k]['time'] + 24 * 60 * 60:
                cache_info['cache'].pop(_k, None)
                cache_dirty = True

        if cache_dirty:
            self.db.store("cache", cache_info)

        return decrypted_sig
    def _handle_video(self):
        """
        Select and download the video stream that best matches the user's
        quality/3D preferences.

        Returns:
            tuple: (path of the downloaded file, chosen itag number).
        """
        use3d = self.config.get('3d')
        # Map the named quality settings to Youtube itag numbers
        # (3D streams use a different itag set than 2D ones).
        if use3d:
            quality = {'sd': 82, 'hd': 84, 'fullhd': 85, '240p': 83, '360p': 82, '480p': 82, '720p': 84,
                       '1080p': 85, '1440p': 85, '2160p': 85, '3072p': 85, '4320p': 85}
        else:
            quality = {'sd': 18, 'hd': 22, 'fullhd': 37, '240p': 5, '360p': 18, '480p': 35, '720p': 22,
                       '1080p': 37, '1440p': 264, '2160p': 266, '3072p': 38, '4320p': 272}
        # An explicit itag ('vfmt') from the config wins over the named quality.
        desired_fmt = self.config.get('vfmt') or quality.get(self.config.get('quality'), 0)
        is_video = lambda x: 'v' in self.formats[x]['type']
        if desired_fmt not in self.formats or not is_video(desired_fmt):
            self.log_warning(_("VIDEO ITAG %d unknown, using default") % desired_fmt)
            desired_fmt = 22
        #: Build dictionary of supported itags (3D/2D)
        allowed_suffix = lambda x: self.config.get(self.formats[x]['ext'])
        video_streams = dict([(_s[0], _s[1:]) for _s in self.streams
                              if _s[0] in self.formats and allowed_suffix(_s[0]) and
                              is_video(_s[0]) and self.formats[_s[0]]['3d'] == use3d])
        if not video_streams:
            self.fail(_("No available video stream meets your preferences"))
        self.log_debug("DESIRED VIDEO STREAM: ITAG:%d (%s %dx%d Q:%d 3D:%s) %sfound, %sallowed" %
                       (desired_fmt, self.formats[desired_fmt]['ext'], self.formats[desired_fmt]['width'],
                        self.formats[desired_fmt]['height'], self.formats[desired_fmt]['qi'],
                        self.formats[desired_fmt]['3d'], "" if desired_fmt in video_streams else "NOT ",
                        "" if allowed_suffix(desired_fmt) else "NOT "))
        #: Return fmt nearest to quality index
        if desired_fmt in video_streams and allowed_suffix(desired_fmt):
            chosen_fmt = desired_fmt
        else:
            quality_index = lambda x: self.formats[x]['qi'] #: Select quality index
            quality_distance = lambda x, y: abs(quality_index(x) - quality_index(y))
            self.log_debug("Choosing nearest stream: %s" % [(_s, allowed_suffix(_s), quality_distance(_s, desired_fmt))
                                                            for _s in video_streams.keys()])
            # Pick the available stream whose quality index is closest to the
            # desired one, preferring the higher quality index on ties.
            chosen_fmt = reduce(lambda x, y: x if quality_distance(x, desired_fmt) <= quality_distance(y, desired_fmt)
                                and quality_index(x) > quality_index(y) else y, video_streams.keys())
        self.log_debug("CHOSEN VIDEO STREAM: ITAG:%d (%s %dx%d Q:%d 3D:%s)" %
                       (chosen_fmt, self.formats[chosen_fmt]['ext'], self.formats[chosen_fmt]['width'],
                        self.formats[chosen_fmt]['height'], self.formats[chosen_fmt]['qi'],
                        self.formats[chosen_fmt]['3d']))
        # Stream tuple layout after the itag key: (url, signature, sig_is_encrypted)
        url = video_streams[chosen_fmt][0]
        if video_streams[chosen_fmt][1]:
            if video_streams[chosen_fmt][2]:
                signature = self._decrypt_signature(video_streams[chosen_fmt][1])
            else:
                signature = video_streams[chosen_fmt][1]
            url += "&signature=" + signature
        if "&ratebypass=" not in url:
            url += "&ratebypass=yes"
        file_suffix = self.formats[chosen_fmt]['ext'] if chosen_fmt in self.formats else ".flv"
        # Video-only streams get an extra ".video" marker so the merge step
        # in _postprocess() can distinguish them.
        if 'a' not in self.formats[chosen_fmt]['type']:
            file_suffix = ".video" + file_suffix
        self.pyfile.name = self.file_name + file_suffix
        try:
            filename = self.download(url, disposition=False)
        except Skip, e:
            # The file already exists: reconstruct its expected path.
            filename = os.path.join(self.pyload.config.get("general", "download_folder"),
                                    self.pyfile.package().folder,
                                    self.pyfile.name)
            self.log_info(_("Download skipped: %s due to %s") % (self.pyfile.name, e.message))
        return filename, chosen_fmt
    def _handle_audio(self, video_fmt):
        """
        Select and download an audio stream to accompany a video-only stream.

        Args:
            video_fmt: itag of the chosen video stream; used to match the
                container extension when ".mkv" merging is not enabled.

        Returns:
            tuple: (path of the downloaded file, chosen itag number).
        """
        desired_fmt = self.config.get('afmt') or 141
        is_audio = lambda x: self.formats[x]['type'] == "a"
        if desired_fmt not in self.formats or not is_audio(desired_fmt):
            self.log_warning(_("AUDIO ITAG %d unknown, using default") % desired_fmt)
            desired_fmt = 141
        #: Build dictionary of supported audio itags
        allowed_codec = lambda x: self.config.get(self.formats[x]['acodec'])
        # Any container is acceptable when merging to mkv is allowed;
        # otherwise the audio extension must match the video extension.
        allowed_suffix = lambda x: self.config.get(".mkv") or \
                                   self.config.get(self.formats[x]['ext']) and \
                                   self.formats[x]['ext'] == self.formats[video_fmt]['ext']
        audio_streams = dict([(_s[0], _s[1:]) for _s in self.streams
                              if _s[0] in self.formats and is_audio(_s[0]) and
                              allowed_codec(_s[0]) and allowed_suffix(_s[0])])
        if not audio_streams:
            self.fail(_("No available audio stream meets your preferences"))
        if desired_fmt in audio_streams and allowed_suffix(desired_fmt):
            chosen_fmt = desired_fmt
        else:
            quality_index = lambda x: self.formats[x]['qi'] #: Select quality index
            quality_distance = lambda x, y: abs(quality_index(x) - quality_index(y))
            self.log_debug("Choosing nearest stream: %s" % [(_s, allowed_suffix(_s), quality_distance(_s, desired_fmt))
                                                            for _s in audio_streams.keys()])
            # Closest quality index wins; ties prefer the higher quality index.
            chosen_fmt = reduce(lambda x, y: x if quality_distance(x, desired_fmt) <= quality_distance(y, desired_fmt)
                                and quality_index(x) > quality_index(y) else y, audio_streams.keys())
        self.log_debug("CHOSEN AUDIO STREAM: ITAG:%d (%s %s Q:%d)" %
                       (chosen_fmt, self.formats[chosen_fmt]['ext'], self.formats[chosen_fmt]['acodec'],
                        self.formats[chosen_fmt]['qi']))
        # Stream tuple layout after the itag key: (url, signature, sig_is_encrypted)
        url = audio_streams[chosen_fmt][0]
        if audio_streams[chosen_fmt][1]:
            if audio_streams[chosen_fmt][2]:
                signature = self._decrypt_signature(audio_streams[chosen_fmt][1])
            else:
                signature = audio_streams[chosen_fmt][1]
            url += "&signature=" + signature
        if "&ratebypass=" not in url:
            url += "&ratebypass=yes"
        file_suffix = ".audio" + self.formats[chosen_fmt]['ext'] if chosen_fmt in self.formats else ".m4a"
        self.pyfile.name = self.file_name + file_suffix
        try:
            filename = self.download(url, disposition=False)
        except Skip, e:
            # The file already exists: reconstruct its expected path.
            filename = os.path.join(self.pyload.config.get("general", "download_folder"),
                                    self.pyfile.package().folder,
                                    self.pyfile.name)
            self.log_info(_("Download skipped: %s due to %s") % (self.pyfile.name, e.message))
        return filename, chosen_fmt
def _handle_subtitles(self):
def timedtext_to_srt(timedtext):
def _format_srt_time(millisec):
sec, milli = divmod(millisec, 1000)
m, s = divmod(int(sec), 60)
h, m = divmod(m, 60)
return "%02d:%02d:%02d,%s" % (h, m, s, milli)
i = 1
srt = ""
dom = parse_xml(timedtext)
body = dom.getElementsByTagName("body")[0]
paras = body.getElementsByTagName("p")
for para in paras:
srt += str(i) + "\n"
srt += _format_srt_time(int(para.attributes['t'].value)) + ' --> ' + \
_format_srt_time(int(para.attributes['t'].value) + int(para.attributes['d'].value)) + "\n"
for child in para.childNodes:
if child.nodeName == 'br':
srt += "\n"
elif child.nodeName == '#text':
srt += unicode(child.data)
srt += "\n\n"
i += 1
return srt
srt_files =[]
try:
subs = json.loads(self.player_config['args']['player_response'])['captions']['playerCaptionsTracklistRenderer']['captionTracks']
subtitles_urls = dict([(_subtitle['languageCode'],
urllib.unquote(_subtitle['baseUrl']).decode('unicode-escape') + "&fmt=3")
for _subtitle in subs])
self.log_debug("AVAILABLE SUBTITLES: %s" % subtitles_urls.keys() or "None")
except KeyError:
self.log_debug("AVAILABLE SUBTITLES: None")
return srt_files
subs_dl = self.config.get('subs_dl')
if subs_dl != "off":
subs_dl_langs = [_x.strip() for _x in self.config.get('subs_dl_langs', "").split(',') if _x.strip()]
if subs_dl_langs:
# Download only listed subtitles (`subs_dl_langs` config gives the priority)
for _lang in subs_dl_langs:
if _lang in subtitles_urls:
srt_filename = os.path.join(self.pyload.config.get("general", "download_folder"),
self.pyfile.package().folder,
os.path.splitext(self.file_name)[0] + "." + _lang + ".srt")
if self.pyload.config.get('download', 'skip_existing') and \
exists(srt_filename) and os.stat(srt_filename).st_size != 0:
self.log_info("Download skipped: %s due to File exists" % os.path.basename(srt_filename))
srt_files.append((srt_filename, _lang))
continue
timed_text = self.load(subtitles_urls[_lang], decode=False)
srt = timedtext_to_srt(timed_text)
with open(srt_filename, "w") as f:
f.write(srt.encode('utf-8'))
self.set_permissions(srt_filename)
self.log_debug("Saved subtitle: %s" % os.path.basename(srt_filename))
srt_files.append((srt_filename, _lang))
if subs_dl == "first_available":
break
else:
# Download any available subtitle
for _subtitle in subtitles_urls.items():
srt_filename = os.path.join(self.pyload.config.get("general", "download_folder"),
self.pyfile.package().folder,
os.path.splitext(self.file_name)[0] + "." + _subtitle[0] + ".srt")
if self.pyload.config.get('download', 'skip_existing') and \
exists(srt_filename) and os.stat(srt_filename).st_size != 0:
self.log_info("Download skipped: %s due to File exists" % os.path.basename(srt_filename))
srt_files.append((srt_filename, _subtitle[0]))
continue
timed_text = self.load(_subtitle[1], decode=False)
srt = timedtext_to_srt(timed_text)
with open(srt_filename, "w") as f:
f.write(srt.encode('utf-8'))
self.set_permissions(srt_filename)
self.log_debug("Saved subtitle: %s" % os.path.basename(srt_filename))
srt_files.append((srt_filename, _lang))
if subs_dl == "first_available":
break
return srt_files
def _postprocess(self, video_filename, audio_filename, subtitles_files):
final_filename = video_filename
subs_embed = self.config.get("subs_embed")
self.pyfile.setCustomStatus("postprocessing")
self.pyfile.setProgress(0)
if self.ffmpeg.found:
if audio_filename is not None:
video_suffix = os.path.splitext(video_filename)[1]
final_filename = os.path.join(os.path.dirname(video_filename),
self.file_name +
(video_suffix if video_suffix == os.path.splitext(audio_filename)[1]
else ".mkv"))
self.ffmpeg.add_stream(('v', video_filename))
self.ffmpeg.add_stream(('a', audio_filename))
if subtitles_files and subs_embed:
for subtitle in subtitles_files:
self.ffmpeg.add_stream(('s',) + subtitle)
self.ffmpeg.set_start_time(self.start_time)
self.ffmpeg.set_output_filename(final_filename)
self.pyfile.name = os.path.basename(final_filename)
self.pyfile.size = os.path.getsize(video_filename) + \
os.path.getsize(audio_filename) #: Just an estimate
if self.ffmpeg.run():
self.remove(video_filename, trash=False)
self.remove(audio_filename, trash=False)
if subtitles_files and subs_embed:
for subtitle in subtitles_files:
self.remove(subtitle[0])
else:
self.log_warning(_("ffmpeg error"), self.ffmpeg.error_message)
final_filename = video_filename
elif self.start_time[0] != 0 or self.start_time[1] != 0 or subtitles_files and subs_embed:
inputfile = video_filename + "_"
final_filename = video_filename
os.rename(video_filename, inputfile)
self.ffmpeg.add_stream(('v', video_filename))
self.ffmpeg.set_start_time(self.start_time)
if subtitles_files and subs_embed:
for subtitle in subtitles_files:
self.ffmpeg.add_stream(('s', subtitle))
self.pyfile.name = os.path.basename(final_filename)
self.pyfile.size = os.path.getsize(inputfile) #: Just an estimate
if self.ffmpeg.run():
self.remove(inputfile, trash=False)
if subtitles_files and subs_embed:
for subtitle in subtitles_files:
self.remove(subtitle[0])
else:
self.log_warning(_("ffmpeg error"), self.ffmpeg.error_message)
else:
if audio_filename is not None:
self.log_warning("ffmpeg is not installed, video and audio files will not be merged")
if subtitles_files and self.config.get("subs_embed"):
self.log_warning("ffmpeg is not installed, subtitles files will not be embedded")
self.pyfile.setProgress(100)
self.set_permissions(final_filename)
return final_filename
def setup(self):
self.resume_download = True
self.multiDL = True
try:
self.req.http.close()
except Exception:
pass
self.req.http = BIGHTTPRequest(
cookies=CookieJar(None),
options=self.pyload.requestFactory.getOptions(),
limit=2000000)
    def process(self, pyfile):
        """
        Plugin entry point: fetch the watch page, parse the player config,
        download the selected streams and post-process them into one file.
        """
        pyfile.url = replace_patterns(pyfile.url, self.URL_REPLACEMENTS)
        self.data = self.load(pyfile.url)
        # Bail out early if the video is unavailable or we are rate limited.
        if re.search(r'<div id="player-unavailable" class="\s*player-width player-height\s*(?:player-unavailable\s*)?">',
                     self.data) or '"playabilityStatus":{"status":"ERROR"' in self.data:
            self.offline()
        if "We have been receiving a large volume of requests from your network." in self.data:
            self.temp_offline()
        m = re.search(r'ytplayer.config = ({.+?});', self.data)
        if m is None:
            self.fail(_("Player config pattern not found"))
        self.player_config = json.loads(m.group(1))
        self.ffmpeg = Ffmpeg(self.config.get('priority') ,self)
        #: Set file name
        self.file_name = self.player_config['args']['title']
        #: Check for start time
        self.start_time = (0, 0)
        m = re.search(r't=(?:(\d+)m)?(\d+)s', pyfile.url)
        if self.ffmpeg and m:
            # (minutes, seconds) parsed from a "t=XmYs" URL fragment.
            self.start_time = tuple(map(lambda _x: 0 if _x is None else int(_x), m.groups()))
            self.file_name += " (starting at %sm%ss)" % (self.start_time[0], self.start_time[1])
        #: Cleaning invalid characters from the file name
        self.file_name = self.file_name.encode('ascii', 'replace')
        for c in self.invalid_chars:
            self.file_name = self.file_name.replace(c, '_')
        #: Parse available streams
        streams_keys = ['url_encoded_fmt_stream_map']
        if 'adaptive_fmts' in self.player_config['args']:
            streams_keys.append('adaptive_fmts')
        self.streams = []
        for streams_key in streams_keys:
            streams = self.player_config['args'][streams_key]
            streams = [_s.split('&') for _s in streams.split(',')]
            streams = [dict((_x.split('=', 1)) for _x in _s) for _s in streams]
            # Stream tuple layout: (itag, url, signature, sig_is_encrypted);
            # an 's' key marks an encrypted signature, 'sig' a plain one.
            streams = [(int(_s['itag']),
                        urllib.unquote(_s['url']),
                        _s.get('s', _s.get('sig', None)),
                        True if 's' in _s else False)
                       for _s in streams]
            self.streams += streams
        self.log_debug("AVAILABLE STREAMS: %s" % [_s[0] for _s in self.streams])
        video_filename, video_itag = self._handle_video()
        has_audio = 'a' in self.formats[video_itag]['type']
        if not has_audio:
            # Video-only stream: fetch a separate audio stream to merge later.
            audio_filename, audio_itag = self._handle_audio(video_itag)
        else:
            audio_filename = None
        subtitles_files = self._handle_subtitles()
        final_filename = self._postprocess(video_filename,
                                           audio_filename,
                                           subtitles_files)
        #: Everything is finished and final name can be set
        pyfile.name = os.path.basename(final_filename)
        pyfile.size = os.path.getsize(final_filename)
        self.last_download = final_filename
"""Credit to this awesome piece of code below goes to the 'youtube_dl' project, kudos!"""
class JSInterpreterError(Exception):
    """Raised when the mini JS interpreter meets unsupported syntax or fails at runtime."""
    pass
class JSInterpreter(object):
    """Minimal JavaScript interpreter, just capable enough to execute
    Youtube's signature scrambling functions (adapted from youtube_dl)."""

    def __init__(self, code, objects=None):
        """
        Args:
            code: JS source text to interpret.
            objects: Optional pre-populated cache of extracted JS objects.
        """
        # Binary operators; expressions are split at the first operator
        # found in this order.
        self._OPERATORS = [
            ('|', operator.or_),
            ('^', operator.xor),
            ('&', operator.and_),
            ('>>', operator.rshift),
            ('<<', operator.lshift),
            ('-', operator.sub),
            ('+', operator.add),
            ('%', operator.mod),
            ('/', operator.truediv),
            ('*', operator.mul),
        ]
        # Compound assignments ("|=", "+=", ...) plus plain "=".
        self._ASSIGN_OPERATORS = [(op + '=', opfunc)
                                  for op, opfunc in self._OPERATORS]
        self._ASSIGN_OPERATORS.append(('=', lambda cur, right: right))
        self._VARNAME_PATTERN = r'[a-zA-Z_$][a-zA-Z_$0-9]*'
        if objects is None:
            objects = {}
        self.code = code
        self._functions = {}  # cache: function name -> compiled callable
        self._objects = objects  # cache: object name -> {member: callable}

    def interpret_statement(self, stmt, local_vars, allow_recursion=100):
        """Interpret one statement.

        Returns (value, should_abort) where should_abort is True when the
        statement was a ``return``.
        """
        if allow_recursion < 0:
            raise JSInterpreterError('Recursion limit reached')
        should_abort = False
        stmt = stmt.lstrip()
        stmt_m = re.match(r'var\s', stmt)
        if stmt_m:
            # "var x = ..." -- strip the declaration keyword.
            expr = stmt[len(stmt_m.group(0)):]
        else:
            return_m = re.match(r'return(?:\s+|$)', stmt)
            if return_m:
                expr = stmt[len(return_m.group(0)):]
                should_abort = True
            else:
                # Try interpreting it as an expression
                expr = stmt
        v = self.interpret_expression(expr, local_vars, allow_recursion)
        return v, should_abort

    def interpret_expression(self, expr, local_vars, allow_recursion):
        """Evaluate a single JS expression and return its Python value."""
        expr = expr.strip()
        if expr == '': # Empty expression
            return None
        # Parenthesised sub-expression: evaluate it, then substitute the
        # (JSON-encoded) result back into the remaining expression.
        if expr.startswith('('):
            parens_count = 0
            for m in re.finditer(r'[()]', expr):
                if m.group(0) == '(':
                    parens_count += 1
                else:
                    parens_count -= 1
                    if parens_count == 0:
                        sub_expr = expr[1:m.start()]
                        sub_result = self.interpret_expression(sub_expr, local_vars, allow_recursion)
                        remaining_expr = expr[m.end():].strip()
                        if not remaining_expr:
                            return sub_result
                        else:
                            expr = json.dumps(sub_result) + remaining_expr
                        break
            else:
                raise JSInterpreterError('Premature end of parens in %r' % expr)
        # Assignment (plain or compound, optionally to an indexed element).
        for op, opfunc in self._ASSIGN_OPERATORS:
            m = re.match(r'(?x)(?P<out>%s)(?:\[(?P<index>[^\]]+?)\])?\s*%s(?P<expr>.*)$' %
                         (self._VARNAME_PATTERN, re.escape(op)), expr)
            if m is None:
                continue
            right_val = self.interpret_expression(m.group('expr'), local_vars, allow_recursion - 1)
            if m.groupdict().get('index'):
                # Indexed assignment: target is an element of a local array.
                lvar = local_vars[m.group('out')]
                idx = self.interpret_expression(m.group('index'), local_vars, allow_recursion)
                assert isinstance(idx, int)
                cur = lvar[idx]
                val = opfunc(cur, right_val)
                lvar[idx] = val
                return val
            else:
                cur = local_vars.get(m.group('out'))
                val = opfunc(cur, right_val)
                local_vars[m.group('out')] = val
                return val
        # Integer literal.
        if expr.isdigit():
            return int(expr)
        # Plain variable reference (a few keywords excluded).
        var_m = re.match(r'(?!if|return|true|false)(?P<name>%s)$' % self._VARNAME_PATTERN, expr)
        if var_m:
            return local_vars[var_m.group('name')]
        # JSON literal (string, array, ...).
        try:
            return json.loads(expr)
        except ValueError:
            pass
        # Member access or method call: "obj.member" / "obj.member(args)".
        m = re.match(r'(?P<var>%s)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$' % self._VARNAME_PATTERN, expr)
        if m is not None:
            variable = m.group('var')
            member = m.group('member')
            arg_str = m.group('args')
            if variable in local_vars:
                obj = local_vars[variable]
            else:
                # Lazily extract and cache top-level objects from the source.
                if variable not in self._objects:
                    self._objects[variable] = self.extract_object(variable)
                obj = self._objects[variable]
            if arg_str is None:
                # Member access
                if member == 'length':
                    return len(obj)
                return obj[member]
            assert expr.endswith(')')
            # Function call
            if arg_str == '':
                argvals = tuple()
            else:
                argvals = tuple(self.interpret_expression(v, local_vars, allow_recursion) for v in arg_str.split(','))
            # Emulate the handful of Array/String methods the scramblers use.
            if member == 'split':
                assert argvals == ('',)
                return list(obj)
            if member == 'join':
                assert len(argvals) == 1
                return argvals[0].join(obj)
            if member == 'reverse':
                assert len(argvals) == 0
                obj.reverse()
                return obj
            if member == 'slice':
                assert len(argvals) == 1
                return obj[argvals[0]:]
            if member == 'splice':
                assert isinstance(obj, list)
                index, howMany = argvals
                res = []
                for i in range(index, min(index + howMany, len(obj))):
                    res.append(obj.pop(index))
                return res
            # Fall back to a function stored on an extracted object.
            return obj[member](argvals)
        # Array indexing: "name[idx]".
        m = re.match(r'(?P<in>%s)\[(?P<idx>.+)\]$' % self._VARNAME_PATTERN, expr)
        if m is not None:
            val = local_vars[m.group('in')]
            idx = self.interpret_expression(m.group('idx'), local_vars, allow_recursion - 1)
            return val[idx]
        # Binary operators, tried in declaration order.
        for op, opfunc in self._OPERATORS:
            m = re.match(r'(?P<x>.+?)%s(?P<y>.+)' % re.escape(op), expr)
            if m is None:
                continue
            x, abort = self.interpret_statement(m.group('x'), local_vars, allow_recursion - 1)
            if abort:
                raise JSInterpreterError('Premature left-side return of %s in %r' % (op, expr))
            y, abort = self.interpret_statement(m.group('y'), local_vars, allow_recursion - 1)
            if abort:
                raise JSInterpreterError('Premature right-side return of %s in %r' % (op, expr))
            return opfunc(x, y)
        # Call of a named top-level function with literal/variable args.
        m = re.match(r'^(?P<func>%s)\((?P<args>[a-zA-Z0-9_$,]+)\)$' % self._VARNAME_PATTERN, expr)
        if m is not None:
            fname = m.group('func')
            argvals = tuple(int(v) if v.isdigit() else local_vars[v]
                            for v in m.group('args').split(','))
            if fname not in self._functions:
                self._functions[fname] = self.extract_function(fname)
            return self._functions[fname](argvals)
        raise JSInterpreterError('Unsupported JS expression %r' % expr)

    def extract_object(self, objname):
        """Extract a JS object literal by name; only function-valued fields
        are supported."""
        obj = {}
        obj_m = re.search(r'(?:var\s+)?%s\s*=\s*\{\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\}(?:,\s*)?)*)\}\s*;'
                          % re.escape(objname), self.code)
        fields = obj_m.group('fields')
        # Currently, it only supports function definitions
        fields_m = re.finditer(r'(?P<key>[a-zA-Z$0-9]+)\s*:\s*function\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}', fields)
        for f in fields_m:
            argnames = f.group('args').split(',')
            obj[f.group('key')] = self.build_function(argnames, f.group('code'))
        return obj

    def extract_function(self, function_name):
        """Locate a JS function definition by name and compile it into a
        Python callable."""
        func_m = re.search(r'(?x)(?:function\s+%s|[{;,]\s*%s\s*=\s*function|var\s+%s\s*=\s*function)\s*\((?P<args>[^)]*)\)\s*\{(?P<code>[^}]+)\}'
                           % (re.escape(function_name), re.escape(function_name), re.escape(function_name)), self.code)
        if func_m is None:
            raise JSInterpreterError('Could not find JS function %r' % function_name)
        argnames = func_m.group('args').split(',')
        return self.build_function(argnames, func_m.group('code'))

    def call_function(self, function_name, *args):
        """Convenience wrapper: extract *function_name* and call it with *args*."""
        f = self.extract_function(function_name)
        return f(args)

    def build_function(self, argnames, code):
        """Wrap a JS function body into a Python callable that takes a tuple
        of argument values."""
        def resf(argvals):
            local_vars = dict(zip(argnames, argvals))
            for stmt in code.split(';'):
                res, abort = self.interpret_statement(stmt, local_vars)
                if abort:
                    # A `return` statement was executed.
                    break
            return res
        return resf
| synopat/pyload | module/plugins/hoster/YoutubeCom.py | Python | gpl-3.0 | 42,175 |
# -*- encoding: utf-8 -*-
# pilas engine: un motor para hacer videojuegos
#
# Copyright 2010-2014 - Hugo Ruscitti
# License: LGPLv3 (see http://www.gnu.org/licenses/lgpl.html)
#
# Website - http://www.pilas-engine.com.ar
from pilasengine import colores
from pilasengine.fondos.fondo import Fondo
class Fondos(object):
    """Represents the ``pilas.fondos`` property.

    This object makes the creation of backgrounds
    for scenes accessible.
    """
    def __init__(self, pilas):
        # Reference to the running pilas engine instance; every factory
        # method below passes it to the background it creates.
        self.pilas = pilas
    def Plano(self):
        """Create and return a plain (flat) background."""
        import plano
        nuevo_fondo = plano.Plano(self.pilas)
        # Important: when the actor is initialized, the __init__ method
        # makes a call to pilas.actores.agregar_actor to attach the actor
        # to the scene.
        return nuevo_fondo
    def Galaxia(self, dx=0, dy=-1):
        """Create a galaxy background; dx/dy are stored on it as displacement attributes."""
        import galaxia
        nuevo_fondo = galaxia.Galaxia(self.pilas)
        nuevo_fondo.dx = dx
        nuevo_fondo.dy = dy
        return nuevo_fondo
    def Tarde(self):
        """Create an 'afternoon' background."""
        import tarde
        return tarde.Tarde(self.pilas)
    def Selva(self):
        """Create a 'jungle' background."""
        import selva
        return selva.Selva(self.pilas)
    def Noche(self):
        """Create a 'night' background."""
        import noche
        return noche.Noche(self.pilas)
    def Espacio(self):
        """Create a 'space' background."""
        import espacio
        return espacio.Espacio(self.pilas)
    def Nubes(self):
        """Create a 'clouds' background."""
        import nubes
        return nubes.Nubes(self.pilas)
    def Pasto(self):
        """Create a 'grass' background."""
        import pasto
        return pasto.Pasto(self.pilas)
    def Volley(self):
        """Create the volley-court background."""
        import volley
        return volley.Volley(self.pilas)
    def Color(self, _color=colores.blanco):
        """Create a solid-color background (white by default)."""
        import color
        return color.Color(self.pilas, _color)
    def Blanco(self):
        """Create a plain white background."""
        import blanco
        return blanco.Blanco(self.pilas)
    def Fondo(self, imagen=None):
        """Create a background from an (optional) image."""
        import fondo
        return fondo.Fondo(self.pilas, imagen)
    def FondoMozaico(self, imagen=None):
        """Create a tiled (mosaic) background from an (optional) image."""
        import fondo_mozaico
        return fondo_mozaico.FondoMozaico(self.pilas, imagen)
    def Cesped(self):
        """Create a 'lawn' background."""
        import cesped
        return cesped.Cesped(self.pilas)
    def DesplazamientoHorizontal(self):
        """Create a horizontally scrolling (parallax) background."""
        import desplazamiento_horizontal
        return desplazamiento_horizontal.DesplazamientoHorizontal(self.pilas)
| hgdeoro/pilas | pilasengine/fondos/__init__.py | Python | lgpl-3.0 | 2,302 |
from django.conf import settings
from site_news.models import SiteNewsItem
def site_news(request):
    """
    Inserts the currently active news items into the template context.
    This ignores MAX_SITE_NEWS_ITEMS.
    """
    # All active items in the proper date/time range.
    return {'site_news_items': SiteNewsItem.current_and_active.all()}
| glesica/django-site-news | site_news/context_processors.py | Python | bsd-3-clause | 379 |
from tests.helpers import create_ctfd, register_user, login_as_user
from CTFd.models import Teams
def test_admin_panel():
    """Does the admin panel return a 200 by default"""
    app = create_ctfd()
    with app.app_context():
        admin = login_as_user(app, name="admin", password="password")
        # The bare /admin URL redirects to the graphs page.
        assert admin.get('/admin').status_code == 302
        assert admin.get('/admin/graphs').status_code == 200
def test_admin_pages():
    """Does admin pages return a 200 by default"""
    app = create_ctfd()
    with app.app_context():
        admin = login_as_user(app, name="admin", password="password")
        assert admin.get('/admin/pages').status_code == 200
def test_admin_teams():
    """Does admin teams return a 200 by default"""
    app = create_ctfd()
    with app.app_context():
        admin = login_as_user(app, name="admin", password="password")
        assert admin.get('/admin/teams').status_code == 200
def test_admin_scoreboard():
    """Does admin scoreboard return a 200 by default"""
    app = create_ctfd()
    with app.app_context():
        admin = login_as_user(app, name="admin", password="password")
        assert admin.get('/admin/scoreboard').status_code == 200
def test_admin_containers():
    """Does admin containers return a 200 by default"""
    app = create_ctfd()
    with app.app_context():
        admin = login_as_user(app, name="admin", password="password")
        assert admin.get('/admin/containers').status_code == 200
def test_admin_chals():
    """Does admin chals return a 200 by default"""
    app = create_ctfd()
    with app.app_context():
        admin = login_as_user(app, name="admin", password="password")
        assert admin.get('/admin/chals').status_code == 200
def test_admin_statistics():
    """Does admin statistics return a 200 by default"""
    app = create_ctfd()
    with app.app_context():
        admin = login_as_user(app, name="admin", password="password")
        assert admin.get('/admin/statistics').status_code == 200
def test_admin_config():
    """Does admin config return a 200 by default"""
    app = create_ctfd()
    with app.app_context():
        admin = login_as_user(app, name="admin", password="password")
        assert admin.get('/admin/config').status_code == 200
| liam-middlebrook/CTFd | tests/test_admin_facing.py | Python | apache-2.0 | 2,420 |
"""Loss-function package init: prefer the compiled implementations, falling
back to the pure-Python log loss when they are not built."""
from .log_loss import *
from .log_loss_weighted import *

try:
    from .fast_log_loss import *
except ImportError:
    print("warning: could not import fast log loss")
    print("warning: returning handle to standard loss functions")
    # todo replace with warning object
    # Fixed: `import log_loss as fast_log_loss` is a Python-2 implicit
    # relative import and fails inside a package on Python 3.
    from . import log_loss as fast_log_loss

try:
    from .lookup_log_loss import *
except ImportError:
    print("warning: could not import lookup log loss")
    print("warning: returning handle to standard loss functions")
    # todo replace with warning object
    from . import log_loss as lookup_log_loss
| ustunb/risk-slim | riskslim/loss_functions/__init__.py | Python | bsd-3-clause | 572 |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Compute Engine authentication."""
from google.auth.compute_engine.credentials import Credentials
__all__ = [
'Credentials'
]
| catapult-project/catapult | third_party/gae_ts_mon/gae_ts_mon/protobuf/google/auth/compute_engine/__init__.py | Python | bsd-3-clause | 719 |
'''
Accepts csv number, and fractions as arguments and writes random subsamples out.
'''
import argparse
import json
import random
import pandas as pd
def random_df_sample(df, fraction):
    """Return a random subset of *df* with ``int(fraction * len(df))`` rows."""
    row_labels = list(df.index)
    picked = random.sample(row_labels, int(fraction * len(row_labels)))
    return df.loc[picked]
def main():
    """Parse CLI arguments and write *file_number* random subsamples of the
    input CSV.

    Output files are named ``<output without .csv>1.csv`` ... ``N.csv``.
    Returns the process exit status (0 on success).
    """
    #################################################################
    # setup parser for accepting arguments from the bash shell
    parser = argparse.ArgumentParser(
        description='Subsampler')
    parser.add_argument('-i', '--input',
                        help='Input file name as .csv. Compounds in columns,'
                             'mutants in rows.')
    parser.add_argument('-o', '--output',
                        help='Output file name as .csv')
    parser.add_argument('-f', '--fraction',
                        help='Fraction of subsample.', default=.2, type=float)
    parser.add_argument('-n', '--file_number',
                        help='Number of files to output.',
                        default=5, type=int)
    args = vars(parser.parse_args())
    #################################################################
    df = pd.read_csv(args["input"], index_col=0)
    sub_dfs = [random_df_sample(df, args["fraction"]) for _ in range(args["file_number"])]
    for i, sub_df in enumerate(sub_dfs):
        # NOTE(review): assumes the output name ends in ".csv" -- confirm.
        sub_df.to_csv(args["output"][:-4] + str(i + 1) + '.csv')
    # Fixed: return 0 on success. The original returned 1, which exit()
    # reports to the shell as a failure status.
    return 0

if __name__ == '__main__':
    exit(main())
| c-bun/CrossCompare | subsample.py | Python | apache-2.0 | 1,477 |
'''
Author: Alex Walter
Date: 5/13/2013
For computing and comparing QE measurements
(Originally for comparing QE measurements with different polarizations of light.
If you just want to look at a single QE measurement, choose a dummy polarization angle = 0.)
Usage:
$ python QECalibration.py
Then click the buttons
Advanced Usage:
Save the QEData.npy as something specific. ie QEData_*date*.npy
Copy the initializeWithAug22Data() function but fill in the relevant info
Call your new initialze function in the GUI __init__().
The way it works:
1. All the photon data is taken in a single long exposure.
We must first determine which section of the exposure corresponds to which wavelength of light.
This is saved as *QEfile*_param.txt
2. Calculate the QE for each pixel. This is saved as "QEData.npy" (which can be reloaded if you restart the GUI)
3. Some pixels may be bad and skew the average, we'll flag them and ignore them.
Then we plot the average, median, mode QE across the array
4. We can compare one QE measurement to another.
For example, with different optimal filters loaded, different polarizations of light, different cool downs, etc..
Important Files:
1. obsFile.h5 - This contains the raw photon data from the QE measurement
2. QEfile.txt - This contains the output from the QE labview code with the calibrated photodiode intensity data
3. QEfile_param.txt - This file is created with this GUI. It contains the timestamps during the obsFile.h5 during which each wavelength was shown on the MKID array
4. QEData.npy - This is a temporary file created by this GUI containing the calculated pixel QE data. I save this because it can take a while to do the calculation and I don't want to redo it every time I reopen the GUI
'''
import sys, os, time, struct, math
from os.path import isfile
from numpy import *
from tables import *
from PyQt4 import QtCore
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from mpl_pyqt4_widget import MPL_Widget
from matplotlib import pyplot as plt
import QEfile
from QEfile import *
class QECalibration(QMainWindow):
    def __init__(self, parent=None):
        """Build the main window, initialize default state and hook up the
        mouse handler used to select a pixel on the array image."""
        QMainWindow.__init__(self, parent)
        #print '1'
        self.good_pixel = []  # per-pixel good/bad flags used when averaging
        self.goodPixelQELimit = 0.1  # pixels with QE below this are flagged bad
        self.selectedPixel = None  # currently selected pixel (none yet)
        self.obsFileList = []  # one obs file handle per QE measurement
        self.numObservations = 0
        self.bmapList = [] #different observations have different unix times
        self.QEFileList = []  # photodiode/QE text files matching obsFileList
        self.currentObs=-1  # index of the active observation (-1 = none)
        self.setWindowTitle('QE Calibration')
        self.imagex = 308  # array-image widget size in pixels
        self.imagey = 322
        self.obsTime = 2700  # length of a QE exposure in seconds
        self.create_main_frame()
        self.position = self.tv_image.pos()
        #self.addtomainframe()
        self.create_status_bar()
        #use mouse to select pixel from tv_image, also triggers a new spectrum to be displayed
        self.tv_image.mousePressEvent = self.start_pixel_select
        #self.beammapfile = os.environ['BEAMMAP_PATH']#"beamimage.h5" #Palomar 2012 device
        #self.beammapfile='sorted_beamimage46x44.h5'
        #load beam map from default beammap directory
        #self.loadbeammap()
        # Uncomment one of these to preload a specific measurement set:
        #self.initializeWithNov3Data()
        #self.initializeWithMay12Data()
        #self.initializeWithNov14Data()
        #self.initializeWithNov17Data()
        #self.initializeWithNov23Data()
        #self.initializeWithNov25Data()
        #self.initializeWithAug22Data()
def initializeWithAug22Data(self):
#Sci 6
obsfiles = ['obs_20140822-184134.h5']
QEfiles = ['QE_20140822-184134.txt']
path = '/Scratch/QEData/20140822/'
os.system("cp QEData_obs_20140822-184134.npy QEData.npy")
angleList = [0]
for i in range(len(obsfiles)):
self.addMeasurementFile()
#print str(path + obsfiles[i])
self.initializeObsFile(str(path + obsfiles[i]))
self.initializeQEFile(str(path + QEfiles[i]),int(angleList[i]))
self.doneCal()
def initializeWithNov25Data(self):
#Sci 5 - optimal filters
obsfiles = ['obs_20131125-202654.h5']
QEfiles = ['QE_20131125-202654.txt']
# path = '/home/abwalter/ARCONS-pipeline/QECalibration/'
path = '/Scratch/QEData/20131125/'
os.system("cp QEData_obs_20131125-202654.npy QEData.npy")
angleList = [0]
for i in range(len(obsfiles)):
self.addMeasurementFile()
#print str(path + obsfiles[i])
self.initializeObsFile(str(path + obsfiles[i]))
self.initializeQEFile(str(path + QEfiles[i]),int(angleList[i]))
self.doneCal()
def initializeWithNov23Data(self):
#Sci 5
obsfiles = ['obs_20131124-022103.h5']
QEfiles = ['QE_20131124-022103.txt']
# path = '/home/abwalter/ARCONS-pipeline/QECalibration/'
path = '/Scratch/QEData/20131123/'
os.system("cp QEData_obs_20131124-022103.npy QEData.npy")
angleList = [0]
for i in range(len(obsfiles)):
self.addMeasurementFile()
#print str(path + obsfiles[i])
self.initializeObsFile(str(path + obsfiles[i]))
self.initializeQEFile(str(path + QEfiles[i]),int(angleList[i]))
self.doneCal()
def initializeWithNov17Data(self):
#Sci 4
obsfiles = ['obs_20131118-025832.h5']
QEfiles = ['obs_20131118-025832.txt']
# path = '/home/abwalter/ARCONS-pipeline/QECalibration/'
path = '/Scratch/QEData/20131117/'
os.system("cp QEData_obs_20131118-025832.npy QEData.npy")
angleList = [0]
for i in range(len(obsfiles)):
self.addMeasurementFile()
#print str(path + obsfiles[i])
self.initializeObsFile(str(path + obsfiles[i]))
self.initializeQEFile(str(path + QEfiles[i]),int(angleList[i]))
self.doneCal()
def initializeWithNov14Data(self):
#Sci 5 (bad coax on feedline 2)
obsfiles = ['obs_20131115-023325.h5']
QEfiles = ['obs_20131115-023325.txt']
# path = '/home/abwalter/ARCONS-pipeline/QECalibration/'
path = '/Scratch/QEData/20131114/'
os.system("cp QEData_obs_20131115-023325.npy QEData.npy")
angleList = [0]
for i in range(len(obsfiles)):
self.addMeasurementFile()
#print str(path + obsfiles[i])
self.initializeObsFile(str(path + obsfiles[i]))
self.initializeQEFile(str(path + QEfiles[i]),int(angleList[i]))
self.doneCal()
def initializeWithNov3Data(self):
obsfiles = ['obs_20131104-030007.h5']
QEfiles = ['QE_20131103.txt']
# path = '/home/abwalter/ARCONS-pipeline/QECalibration/'
path = '/Scratch/QEData/20131103/'
angleList = [0]
for i in range(len(obsfiles)):
self.addMeasurementFile()
#print str(path + obsfiles[i])
self.initializeObsFile(str(path + obsfiles[i]))
self.initializeQEFile(str(path + QEfiles[i]),int(angleList[i]))
self.doneCal()
def initializeWithMay12Data(self):
obsfiles = ['obs_20130512-215105.h5', 'obs_20130512-224055.h5','obs_20130512-232518.h5','obs_20130513-000311.h5','obs_20130513-013920.h5', 'obs_20130513-021837.h5','obs_20130513-025834.h5','obs_20130513-033611.h5','obs_20130513-041526.h5']
QEfiles = ['20130512POL1.txt', '20130512POL2.txt', '20130512POL3.txt', '20130512POL4.txt', '20130512POL5.txt', '20130512POL6.txt', '20130512POL7.txt', '20130512POL8.txt', '20130512POL9.txt']
# path = '/home/abwalter/ARCONS-pipeline/QECalibration/'
path = '/Scratch/PolarizationData/'
angleList = [90, 90, 45, 45, 0, 0, -45, -45, -90]
for i in range(len(obsfiles)):
self.addMeasurementFile()
#print str(path + obsfiles[i])
self.initializeObsFile(str(path + obsfiles[i]))
self.initializeQEFile(str(path + QEfiles[i]),int(angleList[i]))
self.doneCal()
def toggleGoodPixel(self):
if (self.selectedPixel != None) & (self.currentObs != -1):
x = self.selectedPixel%(self.nxpix)
y = self.selectedPixel/self.nxpix
try:
self.good_pixel.remove(self.selectedPixel)
print 'Removed '+str(self.bmapList[self.currentObs][y][x])+' from list'
self.scene.addRect(self.scalex*(x),self.scaley*((self.nypix -1)-y),(self.scalex),(self.scaley), Qt.blue)
except ValueError:
self.good_pixel.append(self.selectedPixel)
print 'Added '+str(self.bmapList[self.currentObs][y][x])+' to list!'
self.scene.addRect(self.scalex*(x),self.scaley*((self.nypix -1)-y),(self.scalex),(self.scaley),Qt.darkCyan)
    def getAvgQE(self):
        """Plot mode, median and mean per-pixel QE vs wavelength for the current obs.

        Loads (or computes) self.QEData, then for each wavelength summarises
        the pixel-QE map of self.currentObs three ways and plots all three
        curves against wavelength.
        """
        # get QE of all pixels at all wavelength for current obs
        # set bad wavelengths to -1
        # plot median
        self.getQEData()
        medianArr=[]
        avgArr=[]
        modeArr=[]
        # Wavelength axis comes from column 0 of the first QE file's table.
        wavelengthArr = (self.QEFileList[0]).data[:,0]
        r=0.2  # upper edge of the QE histogram range used for the mode estimate
        numBins=50
        for i in range(len(wavelengthArr)): #number of wavelengths
            data=np.copy(self.QEData[i,self.currentObs]) #44x46 array of QE's for pixels at wavelength i for current obs
            #remove roach 4,5,6,7
            #for k in range(self.nxpix):
            #    for j in range(self.nypix):
            #        if self.bmap[j,k][2:3]=='4' or self.bmap[j,k][2:3]=='5' or self.bmap[j,k][2:3]=='6' or self.bmap[j,k][2:3]=='7':
            #            data[k,j]=-1
                    #ignore pixels with <0.1% QE
            #        if data[k,j]<0.001:
            #            data[k,j]=-1
            hist=np.histogram(data,bins = numBins,range = (0.0, r),density=False)
            # Zero the first bin so dead pixels (QE ~ 0) can't dominate the mode.
            hist[0][0]=0
            # Mode = centre of the most populated histogram bin.
            modeArr.append((np.argmax(hist[0])+0.5)*r/numBins)
            # Mask pixels below 0.1% QE so they don't bias median/average.
            data=np.ma.array(data,mask=data<0.001)
            medianQE=np.ma.median(data)
            medianArr.append(medianQE)
            averageQE=np.ma.average(data)
            avgArr.append(averageQE)
        plt.plot(wavelengthArr,modeArr,label='mode')
        plt.plot(wavelengthArr,medianArr,label='median')
        plt.plot(wavelengthArr,avgArr,label='avg')
        plt.legend()
        plt.xlabel('Wavelength (nm)')
        plt.ylabel('QE')
        plt.show()
        #np.savetxt('avgQE_20131125-202654.txt',np.transpose([wavelengthArr,medianArr]),fmt='%i\t%.4f')
    def compareQE(self):
        """Compare per-pixel QE of every observation against the current obs.

        For each (wavelength, obs) pair, forms the per-pixel ratio
        QEData[wavelength, obs] / QEData[wavelength, currentObs] where both
        values are positive (-1 elsewhere), histograms the ratios, then plots
        the median ratio versus polarizer angle for every wavelength.
        """
        # get QE of all pixels at all wavelengths with current obs
        # set bad wavelengths to -1
        # compare other observations to this one
        # make average of each wavelength over different pixels in different obs files compared to their pixel in current obs
        # plot for different wavelengths
        # average wavelengths
        self.getQEData()
        numRows = len(self.QEData) #number of wavelengths
        numCol = len(self.QEData[0]) #number of observation files
        plt.figure()
        # QEFileObject = self.QEFileList[self.currentObs]
        medianQEArr = np.zeros((numRows,numCol))
        angleArr = []
        wavelengthArr = (self.QEFileList[0]).data[:,0]
        for QEFileObject in self.QEFileList:
            angleArr.append(QEFileObject.angle)
        print "Making histogram plots..."
        for i in range(numRows):
            for j in range(numCol):
                # plt.subplot(numRows,numCol,i*numCol+j+1)
                plt.subplot(1,numCol,0*numCol+j+1)
                # x=self.QEData[i,self.currentObs]
                # y=self.QEData[i,j]
                # Ratio of obs j to the reference obs, only where both QEs > 0.
                divided = np.select([((self.QEData[i,self.currentObs]>0) & (self.QEData[i,j]>0))],[1.0*self.QEData[i,j]/self.QEData[i,self.currentObs]],-1)
                weights = np.select([(divided>0)],[1],0)
                if np.sum(weights) != 0:
                    averageQE = np.average(divided,None,weights)
                    # Median over the positive ratios only (sorted, slice past the -1s).
                    medianQE = np.median(np.sort(divided.flatten())[np.where(np.sort(divided.flatten())>0)[0][0]:])
                else:
                    # No valid pixels at this (wavelength, obs) pair.
                    averageQE = -1
                    medianQE = -1
                #print '(obs/currentObs,wavelength): (' +str(j)+','+str(i)+') average relative QE: '+str(averageQE)
                #print '(obs/currentObs,wavelength): (' +str(j)+','+str(i)+') median relative QE: '+str(medianQE)
                medianQEArr[i,j] = medianQE
                plt.hist(divided.flatten(), bins = 100,range = (0.0, 2.0))
            print 'wavelength: ' + str(wavelengthArr[i])
            # Blocks once per wavelength until the histogram window is closed.
            plt.show()
            # plt.show()
        print "Making median plots..."
        for i in range(len(wavelengthArr)):
            # for i in range(len(wavelengthArr[0:13])):
            # for i in [6]:
            plt.plot(angleArr, medianQEArr[i,:],marker='o',label=str(wavelengthArr[i]))
        plt.legend()
        plt.show()
        # plt.legend()
        # plt.show()
        # wavelength = 700
        # print "wv: " +str(wavelength)
        # x = self.QEData[0,0]
        # print x
        # print x.shape
        # y = self.QEData[0,1]
        # print y
        # print y.shape
        # divided = np.select([((x>0) & (y>0))],[1.0*y/x],-1)
        # weights = np.select([(divided>0)],[1],0)
        # averageQE = np.average(divided,None,weights)
        # print averageQE
        #average(self.QEData[wavelength,obs 1]/self.QEData[wavelength, obs 2])
        # plt.hist(divided.flatten(), range = (0.0, 2.0), bins = 100)
        # plt.show()
    def histogramQE(self):
        """Histogram per-pixel QE of the current obs, one subplot per wavelength.

        Subplots are arranged in a near-square grid; each y-axis is clipped to
        one fifth of the live pixel count so tall zero-bins don't swamp the shape.
        """
        # get QE of all pixels at all wavelengths with current obs
        # set bad wavelengths to -1
        # histogram QE's for specified obs and wavelength
        self.getQEData()
        #numRows = len(self.QEData) #number of wavelengths
        #numCol = len(self.QEData[0]) #number of observation files
        plt.figure()
        wavelengthArr = (self.QEFileList[0]).data[:,0]
        print wavelengthArr
        print 'Making histogram plots...'
        #plt.subplot(numRows,1,1)
        for i in range(len(wavelengthArr)):
            # ceil(sqrt(N)) x ceil(sqrt(N)) grid of subplots.
            plt.subplot(np.ceil(np.sqrt(len(wavelengthArr))),np.ceil(np.sqrt(len(wavelengthArr))),i+1)
            #plt.hist(self.QEData[i,self.currentObs].flatten(),range=(0.0,0.2),bins=50)
            data=np.copy(self.QEData[i,self.currentObs])
            #remove roach 4,5,6,7
            #for k in range(self.nxpix):
            #    for j in range(self.nypix):
            #        if self.bmap[j,k][2:3]=='4' or self.bmap[j,k][2:3]=='5' or self.bmap[j,k][2:3]=='6' or self.bmap[j,k][2:3]=='7':
            #            data[k,j]=-1
                    #ignore pixels with 0 QE
            #        if data[k,j]<0.0001:
            #            data[k,j]=-1
            # Pixels with QE <= 0 are dead or failed the background check.
            numBad=np.sum(data<=0)
            print 'Wavelength: '+str(wavelengthArr[i])+' numBad: '+str(numBad)
            plt.hist(data.flatten(),range=(0.0,0.2),bins=50)
            ax=plt.gca()
            #ax.legend()
            ax.set_title(str(wavelengthArr[i])+' nm')
            # Clip y so the dead-pixel spike doesn't hide the distribution.
            ax.set_ylim((0.,(self.nxpix*self.nypix - numBad)/5.))
        plt.show()
    def getQEData(self):
        """Load self.QEData from the QEData.npy cache, or compute and cache it.

        When no cache file exists, builds a 4-D array indexed as
        [wavelength, obs, x, y] of per-pixel QE values (via isGoodQE) over
        every loaded observation, and saves it to QEData.npy.
        """
        if isfile('QEData.npy'):
            self.QEData = np.load('QEData.npy')
            print 'Loaded QEData from: QEData.npy'
            print "`-->Misses any recently added data!"
        else:
            if (self.currentObs>=0):
                try:
                    self.obsFileList[self.currentObs]
                    self.QEFileList[self.currentObs]
                except IndexError:
                    # NOTE(review): execution falls through after this message
                    # and will fail below -- confirm an early return is wanted.
                    print 'Missing observation or QE file'
                dataQE = []
                QEFileObject = self.QEFileList[self.currentObs]
                print "Populating QE Data"
                # Outer loop: one slab per wavelength row of the QE table.
                for wavelength in QEFileObject.data[:,0]:
                    # for wavelength in [700]:
                    waveArr = []
                    print "    making waveArr: " + str(wavelength)
                    for obsNum in range(len(self.obsFileList)):
                        # for obsNum in [0,2]:
                        pixelArr = np.zeros((self.nxpix, self.nypix))
                        print "        obs: " + str(obsNum)
                        for x in range(self.nxpix):
                            for y in range(self.nypix):
                                # Flat pixel index = nxpix*row + column.
                                pixelArr[x,y] = self.isGoodQE(obsNum, self.nxpix*y+x, wavelength)
                        waveArr.append(pixelArr)
                    dataQE.append(np.asarray(waveArr))
                print "DONE!"
                self.QEData = np.asarray(dataQE)
                # Cache so the (slow) recomputation can be skipped next time.
                np.save('QEData.npy',self.QEData)
        print " "
    def isGoodQE(self, obsNum, pixNum, wavelength,deadTime=100):
        """Return the dead-time-corrected QE of one pixel at one wavelength.

        Returns the QE when the background rates measured before and after
        the flux window agree within 3 sigma; otherwise returns the sentinel
        -0.01.  Returns None implicitly when no observation is loaded.
        Temporarily repoints self.currentObs / self.selectedPixel at
        (obsNum, pixNum) and restores them before returning.

        deadTime -- detector dead time in microseconds for the linearity
        correction; deadTime=0 applies no correction.
        """
        #deadTime in microseconds for linearity correction. deadTime=0 applies no correction
        if (self.currentObs>=0):
            try:
                self.obsFileList[self.currentObs]
                self.QEFileList[self.currentObs]
            except IndexError:
                # NOTE(review): execution continues after this message and
                # will likely fail below -- confirm an early return is wanted.
                print 'Missing observation or QE file'
            # Save state; getAverageTimeStream reads these attributes.
            currentPix = self.selectedPixel
            currentObsNum = self.currentObs
            self.currentObs = obsNum
            self.selectedPixel = pixNum
            #self.goodPixelQELimit = 0.1
            QEFileObject = self.QEFileList[self.currentObs]
            # Row of the QE table matching the requested wavelength.
            ind = where(QEFileObject.data[:,0] == wavelength)[0][0]
            waveArr = QEFileObject.data[ind]
            # print 'Obs: '+str(obsNum)+ ' pix: (' + str(self.selectedPixel/self.nxpix)+','+str(self.selectedPixel%self.nxpix)+')'+' wavelength: ' +str(wavelength)
            # Last six table columns are three (start, end) time windows:
            # background-before, flux, background-after.
            backGroundRate1, sb1 = self.getAverageTimeStream(self.currentObs,waveArr[-6],waveArr[-5])
            # print 'backGroundRate1: ' +str(backGroundRate1)
            backGroundRate2, sb2 = self.getAverageTimeStream(self.currentObs,waveArr[-2],waveArr[-1])
            # print 'backGroundRate2: ' +str(backGroundRate2)
            countRate, s = self.getAverageTimeStream(self.currentObs,waveArr[-4],waveArr[-3])
            # print 'countRate: ' +str(countRate)
            if deadTime>0:
                # Standard non-paralyzable dead-time (linearity) correction.
                backGroundRate1=backGroundRate1/(1.0-backGroundRate1*deadTime*10.0**-6.0)
                backGroundRate2=backGroundRate2/(1.0-backGroundRate2*deadTime*10.0**-6.0)
                countRate=countRate/(1.0-countRate*deadTime*10.0**-6.0)
            backGroundRate = (backGroundRate1+backGroundRate2)/2.0
            countRate-=backGroundRate
            QE=QEFileObject.findQE(countRate,waveArr[0])
            #if background doesn't change significantly before and after
            #if (QE>0.01) & (abs(backGroundRate2 - backGroundRate1)<3*math.sqrt(backGroundRate)) & (countRate>3*math.sqrt(backGroundRate)):
            #if (abs(backGroundRate2 - backGroundRate1)<3*math.sqrt(backGroundRate)) & (countRate>1.0/2.0*math.sqrt(backGroundRate)):
            err=math.sqrt(backGroundRate)
            if backGroundRate<1:
                # Floor the Poisson error for near-zero backgrounds.
                err=math.sqrt(5)
            if (abs(backGroundRate2 - backGroundRate1)<3*err):
                # print 'Obs: '+str(obsNum)+ ' pix: (' + str(self.selectedPixel/self.nxpix)+','+str(self.selectedPixel%self.nxpix)+')'+' wavelength: ' +str(wavelength)
                # print 'backGroundRate1: ' +str(backGroundRate1)
                # print 'backGroundRate2: ' +str(backGroundRate2)
                # print 'countRate: ' +str(countRate)
                self.selectedPixel = currentPix
                self.currentObs = currentObsNum
                return QE
            else:
                # Background drifted: flag the pixel with a small negative QE.
                self.selectedPixel = currentPix
                self.currentObs = currentObsNum
                return -0.01
        # if QE > self.goodPixelQELimit:
        # return QE
        # else:
        # return -1
def findGoodPixel(self):
if (self.currentObs>=0):
try:
self.obsFileList[self.currentObs]
self.QEFileList[self.currentObs]
except IndexError:
print 'Missing observation file'
currentPix = self.selectedPixel
goodPixels = []
#self.goodPixelQELimit = 0.1
QEFileObject = self.QEFileList[self.currentObs]
wavelength = float(self.wv_combobox.currentText())
print "Looking for good pixels in obs " +str(self.currentObs) +" at wavelength: " + str(wavelength)
ind = where(QEFileObject.data[:,0] == wavelength)[0][0]
waveArr = QEFileObject.data[ind] #timing data for wavelength selected and current obs
for x in range(self.nxpix):
for y in range(self.nypix):
if self.isGoodQE(self.currentObs, self.nxpix*y+x, wavelength)>self.goodPixelQELimit:
goodPixels.append(self.nxpix*y+x)
# self.selectedPixel = self.nxpix*x+y
# backGroundRate1 = self.getAverageTimeStream(self.currentObs,waveArr[-6],waveArr[-5])
# backGroundRate2 = self.getAverageTimeStream(self.currentObs,waveArr[-2],waveArr[-1])
# backGroundRate = (backGroundRate1+backGroundRate2)/2.0
# countRate = self.getAverageTimeStream(self.currentObs,waveArr[-4],waveArr[-3])
# countRate-=backGroundRate
# QE=QEFileObject.findQE(countRate,waveArr[0])
# if QE > self.goodPixelQELimit:
# goodPixels.append(self.selectedPixel)
self.selectedPixel = currentPix
self.good_pixel = goodPixels
self.display_image()
print "Found: " +str(self.pix_toTuple(self.good_pixel))
def pix_toTuple(self, pixelArr):
newArr = []
for pix in pixelArr:
newArr.append([pix%(self.nxpix),pix/self.nxpix])
return newArr
def start_pixel_select(self,event):
#Mouse press returns x,y position of first pixel to be used in spectra
startrawx,startrawy = event.pos().x(), event.pos().y()
if hasattr(self,'scalex'):
startpx = int(startrawx/self.scalex)
startpy = int((self.nypix) - startrawy/self.scaley)
startpix = self.nxpix*startpy+startpx
print 'Pixel: ('+ str(startpx) +', '+ str(startpy)+')'
self.pixel_label.setText('Pixel: ('+ str(startpx) +', '+ str(startpy)+')')
self.selectedPixel=startpix
self.display_image()
self.plot_timestream()
self.plot_QE()
    def getAverageTimeStream(self, obsNum, tstart, tend):
        """Return (median counts/sec, sample stddev) for the selected pixel.

        Counts photons per one-second bin over [tstart, tend) in observation
        obsNum for the pixel currently stored in self.selectedPixel.  Empty
        beammap entries and missing h5 nodes count as zero.
        """
        x=self.selectedPixel%(self.nxpix)
        y=self.selectedPixel/self.nxpix
        #QEFileObject = self.QEFileList[obsNum]
        bmap = self.bmapList[obsNum]
        ti=int(tstart)
        tf=int(tend)
        # print '(t1,t2): ('+str(ti)+','+str(tf)+')'
        counts = zeros(tf-ti)
        h5file = openFile(str(self.obsFileList[obsNum]), 'r')
        for t in range(tf-ti):
            try:
                # Unmapped pixel: leave this second at zero.
                if bmap[y][x] == '':
                    continue
                try:
                    # One h5 node per pixel; each row holds that second's photons.
                    counts[t] += len(h5file.root._f_getChild(bmap[y][x])[ti+t])
                    if counts[t]<0:
                        counts[t]=0
                except NoSuchNodeError:
                    counts[t]=0
            except IndexError:
                # Diagnostics for out-of-range pixel/beammap geometry.
                print '(x,y): ' + '(', str(x)+','+str(y)+')'
                print 'pixel: ' + str(self.selectedPixel)
                print 'nxpix: ' + str(self.nxpix)
                print 'nypix: ' +str(self.nypix)
                print 'len bmap: ' + str(len(bmap))
                print 'len bmap[0]: ' + str(len(bmap[0]))
                pass
        h5file.close()
        # if ti>1400:
        # print 'counts: ' +str(counts)
        #return np.mean(counts)
        # print 'std: ' + str(np.std(counts, ddof = 1))
        return np.median(counts), np.std(counts, ddof = 1)
    def plot_timestream(self):
        """Plot counts/sec and its first difference for the selected pixel of
        the current obs, then overlay the QE timing windows if available."""
        print 'current obs: '+str(self.currentObs)
        obsfilename = self.obsFileList[self.currentObs]
        # Column/row of the selected pixel (py2 integer division for the row).
        x=self.selectedPixel%(self.nxpix)
        y=self.selectedPixel/self.nxpix
        # i=self.startpy
        # j=self.startpx
        bmap = self.bmapList[self.currentObs]
        # self.ui.pixelpath.setText(str(bmap[i][j]))
        # print "plotting time stream for" + str(bmap[i][j])
        # self.pixel_coord.setText(str(bmap[i][j]))
        print "plotting time stream for" + str(bmap[y][x])
        self.pixel_coord.setText(str(bmap[y][x]))
        # nphot=0
        # ti=self.startTime
        ti=0
        tf=self.obsTime
        if ti>tf:
            # Defensive swap; with ti fixed at 0 this only fires for negative obsTime.
            copyti = ti
            ti=tf
            tf=copyti
            print "WARNING: selected ti > tf. They were switched for you."
        counts = zeros(tf-ti)
        # First difference of the count stream (one element shorter).
        dcounts = zeros(tf-ti-1)
        h5file = openFile(str(obsfilename), 'r')
        for t in range(tf-ti):
            if bmap[y][x] == '':
                continue
            try:
                counts[t] += len(h5file.root._f_getChild(bmap[y][x])[ti+t])
                if t <= (tf-ti-2):
                    dcounts[t]= len(h5file.root._f_getChild(bmap[y][x])[ti+t+1])-len(h5file.root._f_getChild(bmap[y][x])[ti+t])
                # if dcounts[t]<0:
                # dcounts[t]=0
                if counts[t]<0:
                    counts[t]=0
            except NoSuchNodeError:
                counts[t]=0
        timesteps = xrange(ti,tf)
        dtimesteps = xrange(ti,tf-1)
        #print "plotting timestream of ", tf-ti, " seconds"
        # self.ui.countlabel.setText(str(sum(counts)))
        self.timestream_plot.canvas.ax.clear()
        self.timestream_plot.canvas.ax.plot(timesteps,counts)
        self.Dtimestream_plot.canvas.ax.clear()
        self.Dtimestream_plot.canvas.ax.plot(dtimesteps,dcounts)
        self.timestream_plot.format_labels('Time Stream', 'time (s)', 'counts')
        self.Dtimestream_plot.format_labels('D Time Stream', 'time (s)', 'dcounts/dt')
        try:
            # Shade the QE timing windows; fails harmlessly when no QE file yet.
            self.plot_timeAreas()
        except IndexError:
            pass
        self.timestream_plot.canvas.draw()
        self.Dtimestream_plot.canvas.draw()
        h5file.close()
        print "done plotting timestream"
def addNewObsFile(self):
newdatafile = QFileDialog.getOpenFileName(parent=None, caption=QString(str("Choose Obs File")),directory = "/Scratch/QEData/",filter=QString(str("H5 (*.h5)")))
self.initializeObsFile(newdatafile)
    def initializeObsFile(self, fn):
        """Register observation file *fn* as the current measurement.

        Loads the beammap embedded in the obs file, updates the GUI labels,
        and either displays a cached preview image or renders a new one.
        Does nothing for an empty filename (cancelled file dialog).
        """
        newdatafile = fn
        if len(newdatafile)!=0:
            self.obsFileList.append(newdatafile)
            self.currentObs = self.numObservations
            # The obs file doubles as the beammap source.
            self.beammapfile = str(newdatafile)
            #self.beammapfile = '/Scratch/LABTEST/20140910/beammap_SCI6_B140731-Force_20140910.h5'
            self.loadbeammap()
            self.bmapList.append(self.bmap)
            # self.currentObs = len(self.obsFileList)-1
            # try:
            # self.QEFileList[self.currentObs]
            # except IndexError:
            # i=len(self.QEFileList)
            # while i <= self.currentObs:
            # self.QEFileList.append(None)
            # i+=1
            print 'current obs: '+str(self.currentObs)
            self.obsNum_label.setText('Measurement: ' + str(self.currentObs))
            # Show just the file name (no directory) in the label.
            splitname = newdatafile.split("/")
            justname = splitname[-1]
            self.obsfile_label.setText(str(justname))
            self.blankscene = QGraphicsScene()
            self.blankscene.clear()
            self.tv_image.setScene(self.blankscene)
            self.tv_image.show()
            # self.ui.histogram_plot.canvas.ax.clear()
            # self.ui.histogram_plot.canvas.format_labels()
            # self.ui.histogram_plot.canvas.draw()
            # Sync beammap timestamps with the file's unix time.
            self.get_ut()
            print 'Making image'
            fn = self.obsFileList[self.currentObs].split("/")[-1].split(".")[0]
            self.imagefile = "./TV_frame_"+str(fn)+".png"
            # Render only if no cached preview exists for this obs.
            if isfile(self.imagefile):
                self.display_image()
            else:
                self.make_image()
def addNewQEFile(self):
newQEfile=QFileDialog.getOpenFileName(parent=None, caption=QString(str("Choose Obs File")),directory = "/Scratch/QEData/",filter=QString(str("txt (*.txt)")))
angle = self.askForAngle()
if angle != None:
self.initializeQEFile(newQEfile,angle)
def askForAngle(self):
angle, ok = QInputDialog.getInt(self,'Input Dialog here', 'Enter angle to nearest degree:')
if ok:
return angle
else:
print 'Cancelled'
return None
def initializeQEFile(self, fn, angle):
newQEfile = fn
if len(newQEfile) != 0:
# angle, ok = QInputDialog.getInt(self,'Input Dialog here', 'Enter angle to nearest degree:')
# if ok:
# self.QEFileList.append(QEfile(newQEfile,angle))
# self.currentObs = self.numObservations
# self.currentObs = len(self.QEFileList)-1
# print len(self.QEFileList)
# print 'current obs: '+str(self.currentObs)
# self.obsNum_label.setText('Measurement: ' + str(self.currentObs))
# splitname = newQEfile.split("/")
# justname = splitname[-1]
# self.QEfile_label.setText(str(justname))
# self.addQETiming()
# self.populateTimeAreaData()
# else:
# print 'cancelled'
self.QEFileList.append(QEfile(newQEfile,angle))
self.currentObs = self.numObservations
# self.currentObs = len(self.QEFileList)-1
# print len(self.QEFileList)
print 'current obs: '+str(self.currentObs)
self.obsNum_label.setText('Measurement: ' + str(self.currentObs))
splitname = newQEfile.split("/")
justname = splitname[-1]
self.QEfile_label.setText(str(justname))
# self.addQETiming()
self.populateTimeAreaData()
# try:
# self.obsFileList[self.currentObs]
# except IndexError:
# i=len(self.obsFileList)
# while i <= self.currentObs:
# self.obsFileList.append(None)
# i+=1
def addQETiming(self):
if (self.currentObs >= 0) & (len(self.QEFileList) >= (self.currentObs+1)):
QEdata=self.QEFileList[self.currentObs].data
for ind in range(len(QEdata)):
self.addWaveToFrame(QEdata[ind],ind+1)
    def make_image(self):
        """Render a greyscale photon-count preview of the current obs to a PNG.

        Sums photons over the full exposure for every mapped pixel, stretches
        contrast so the brightest 2.5% of pixels saturate, saves the frame as
        TV_frame_<obsname>.png, and displays it.
        """
        obsfilename = self.obsFileList[self.currentObs]
        bmap = self.bmapList[self.currentObs]
        nphot=0
        # ti=self.startTime
        # tf=self.endTime
        # if ti>tf:
        # copyti = ti
        # ti=tf
        # tf=copyti
        # print "WARNING: selected ti > tf. They were switched for you."
        h5file = openFile(str(obsfilename), 'r')
        all_photons = []
        for j in range(self.nxpix*self.nypix):
            all_photons.append([])
        counts = zeros((self.nypix,self.nxpix))
        for i in xrange(self.nypix):
            for j in xrange(self.nxpix):
                # Unmapped pixel: no photons.
                if bmap[i][j] == '':
                    counts[i][j]=0
                    continue
                try:
                    # print '('+str(i)+', '+str(j)+')'
                    # Concatenate every one-second photon row for this pixel.
                    photons = concatenate(h5file.root._f_getChild(bmap[i][j])[0:self.obsTime])
                    # photons = concatenate(h5file.root._f_getChild(bmap[i][j])[0:3])
                    counts[i][j]=len(photons)
                    if counts[i][j]<0:
                        counts[i][j]=0
                except NoSuchNodeError:
                    counts[i][j]=0
                nphot += counts[i][j]
        photon_count = counts
        im = photon_count
        # Flip vertically: array row 0 is the bottom of the displayed image.
        photon_count = flipud(photon_count)
        # if self.ui.man_contrast.isChecked():
        # self.vmax = self.ui.vmax.value()
        # self.vmin = self.ui.vmin.value()
        # else:
        # indices = sort(reshape(photon_count,self.nxpix*self.nypix))
        # self.satpercent=2.5
        # brightest = int((self.satpercent/100.0)*(self.nxpix*self.nypix))
        # self.vmax = indices[(-1*brightest)]
        # self.vmin = 0
        # Contrast stretch: saturate the top satpercent of pixel values.
        indices = sort(reshape(photon_count,self.nxpix*self.nypix))
        self.satpercent=2.5
        brightest = int((self.satpercent/100.0)*(self.nxpix*self.nypix))
        self.vmax = indices[(-1*brightest)]
        self.vmin = 0
        fig = plt.figure(figsize=(0.01*self.nxpix,0.01*self.nypix), dpi=100, frameon=False)
        im = plt.figimage(photon_count, cmap='gray', vmin = self.vmin, vmax = self.vmax)
        fn = self.obsFileList[self.currentObs].split("/")[-1].split(".")[0]
        self.imfile = "TV_frame_"+str(fn)+".png"
        plt.savefig(self.imfile, pad_inches=0)
        print "done making image."
        self.display_image()
        h5file.close()
    def display_image(self):
        """Show the cached preview PNG of the current obs in the graphics view,
        highlighting the selected pixel (blue/dark cyan) and good pixels (green).

        Falls back to a blank scene when no preview image exists on disk.
        """
        #search directory for image
        fn = self.obsFileList[self.currentObs].split("/")[-1].split(".")[0]
        self.imagefile = "./TV_frame_"+str(fn)+".png"
        self.tv_image.setGeometry(self.position.x(), self.position.y()-8, self.scalex*(self.nxpix)+4, self.scaley*(self.nypix)+4)
        if isfile(self.imagefile):
            pix = self.makepixmap(self.imagefile, scalex=self.scalex, scaley=self.scaley)
            #display pixmap
            self.scene = QGraphicsScene()
            self.scene.addPixmap(pix)
            # if self.histogram_pixel != []:
            # for p in self.histogram_pixel:
            # x = p%(self.nxpix)
            # y = (self.nypix-1) - p/self.nxpix
            # self.scene.addRect(self.scalex*(x),self.scaley*(y),(self.scalex),(self.scaley), Qt.blue)
            if self.selectedPixel != None:
                # Display y is flipped relative to the array row index.
                x=self.selectedPixel%(self.nxpix)
                y = (self.nypix-1) - self.selectedPixel/self.nxpix
                self.scene.addRect(self.scalex*(x),self.scaley*(y),(self.scalex),(self.scaley), Qt.blue)
            for pix in self.good_pixel:
                x=pix%(self.nxpix)
                y = (self.nypix-1) - pix/self.nxpix
                self.scene.addRect(self.scalex*(x),self.scaley*(y),(self.scalex),(self.scaley), Qt.green)
                # A good pixel that is also selected is drawn dark cyan on top.
                if pix == self.selectedPixel:
                    self.scene.addRect(self.scalex*(x),self.scaley*(y),(self.scalex),(self.scaley), Qt.darkCyan)
            self.tv_image.setScene(self.scene)
            self.tv_image.show()
            #os.remove(str(imagefile)) #want to keep this around for saving purposes
        else:
            print 'No image found: ' +str(self.imagefile)
            self.blankscene = QGraphicsScene()
            self.blankscene.clear()
            self.tv_image.setScene(self.blankscene)
            self.tv_image.show()
    def get_ut(self):
        """Read exposure time and unix start time from the current obs header,
        then rewrite every beammap pixel address to carry that timestamp.

        Prefers the header's 'unixtime' field, falling back to the deprecated
        'ut' field; if either disagrees with the timestamp embedded in the h5
        pixel node names, the node-name time wins.
        """
        obsfilename = self.obsFileList[self.currentObs]
        h5file = openFile(str(obsfilename), mode = "r")
        htable = h5file.root.header.header.read()
        self.obsTime = int(htable["exptime"])
        # Timestamp embedded in the first pixel's node address ('...t<epoch>').
        h5address = h5file.root.beammap.beamimage.read()[0][0]
        h5time = int(h5address.split('t')[1])
        try:
            self.ut = int(htable["unixtime"])
        except ValueError:
            print "unixtime not found, checking for deprecated ut field"
            self.ut = int(htable["ut"])
        if self.ut != h5time:
            # Trust the node-name timestamp over the header.
            self.ut = h5time
        h5file.close()
        self.bmap = self.bmapList[self.currentObs]
        for i in xrange(self.nypix):
            for j in xrange(self.nxpix):
                head = str(self.bmap[i][j])
                # Replace (or append) the trailing 't<epoch>' segment.
                if head.split('t')[0] == '':
                    self.bmap[i][j] = head + 't' + str(self.ut)
                else:
                    self.bmap[i][j] = head.split('t')[0] + 't' + str(self.ut)
        print "Pixel addresses updated in beammap"
def makepixmap(self, imagefile, scalex=1, scaley=1):
'''Given an image file this function converts them to pixmaps to be displayed by QT gui'''
qtimage = QImage(imagefile)
width, height = qtimage.size().width(), qtimage.size().height()
qtimage = qtimage.scaled(width*scalex,height*scaley)
pix = QPixmap.fromImage(qtimage)
return pix
def loadbeammap(self):
bmfile = openFile(self.beammapfile, 'r')
#read beammap in to memory to create beam image
self.bmap = bmfile.root.beammap.beamimage.read()
self.nxpix = shape(self.bmap)[1]
self.nypix = shape(self.bmap)[0]
bmfile.close()
print "Beammap loaded from " +str(self.beammapfile)
self.scalex=float(self.imagex/float(self.nxpix))
self.scaley=float(self.imagey/float(self.nypix))
    def addtomainframe(self):
        """Add a placeholder button to the main frame's vertical layout and
        install the frame as the window's central widget."""
        self.button_0 = QPushButton("button 0")
        self.vbox.addWidget(self.button_0)
        self.main_frame.setLayout(self.vbox)
        self.setCentralWidget(self.main_frame)
def plot_timeAreas(self):
QEFileObject = self.QEFileList[self.currentObs]
for waveArr in QEFileObject.data:
# print waveArr
self.timestream_plot.canvas.ax.axvspan(waveArr[5], waveArr[6], facecolor='r', alpha=0.5)
self.timestream_plot.canvas.ax.axvspan(waveArr[7], waveArr[8], facecolor='g', alpha=0.5)
self.timestream_plot.canvas.ax.axvspan(waveArr[9], waveArr[10], facecolor='r', alpha=0.5)
self.Dtimestream_plot.canvas.ax.axvspan(waveArr[5], waveArr[6], facecolor='r', alpha=0.5)
self.Dtimestream_plot.canvas.ax.axvspan(waveArr[7], waveArr[8], facecolor='g', alpha=0.5)
self.Dtimestream_plot.canvas.ax.axvspan(waveArr[9], waveArr[10], facecolor='r', alpha=0.5)
self.timestream_plot.format_labels('Time Stream', 'time (s)', 'counts')
self.Dtimestream_plot.format_labels('D Time Stream', 'time (s)', 'dcounts/dt')
self.timestream_plot.canvas.draw()
self.Dtimestream_plot.canvas.draw()
def timeChanged(self):
source = self.sender()
value = int(source.value())
self.QEFileList[self.currentObs].data[int(source.y), int(source.x)]=value
self.plot_timestream()
self.plot_QE()
    def addWaveToFrame(self):
        """Build the calibration panel: obs-file and QE-file upload rows.

        NOTE(review): addQETiming() calls self.addWaveToFrame(row, index)
        with two arguments, but this definition takes none -- that call would
        raise TypeError.  Confirm whether an older addWaveToFrame was meant.
        """
        #QE calibration
        #self.test_button = QPushButton('test')
        #QObject.connect(self.test_button, SIGNAL('clicked()'),self.toggleEnable)
        obsfile_button = QPushButton("upload obs file")
        QObject.connect(obsfile_button, SIGNAL('clicked()'),self.addNewObsFile)
        #if obs file exists....
        # Label that will show the chosen obs file name.
        self.obsfile_label = QLabel("")
        self.obsfile_label.setFrameShape(QFrame.Box)
        self.obsfile_label.setMinimumWidth(300)
        self.obsfile_label.setMaximumHeight(25)
        QEfile_button = QPushButton("upload QE file")
        QObject.connect(QEfile_button, SIGNAL('clicked()'),self.addNewQEFile)
        #if obs file exists....
        # Label that will show the chosen QE file name.
        self.QEfile_label = QLabel("")
        self.QEfile_label.setMinimumWidth(300)
        self.QEfile_label.setMaximumHeight(25)
        self.QEfile_label.setFrameShape(QFrame.Box)
        #if obs file exists.... populateTimeAreaData()
        self.vbox_cal = QVBoxLayout()
        hbox_obs = QHBoxLayout()
        hbox_obs.addWidget(obsfile_button)
        hbox_obs.addWidget(self.obsfile_label)
        self.vbox_cal.addLayout(hbox_obs)
        hbox_QE = QHBoxLayout()
        hbox_QE.addWidget(QEfile_button)
        hbox_QE.addWidget(self.QEfile_label)
        self.vbox_cal.addLayout(hbox_QE)
        self.cal_QFrame.setLayout(self.vbox_cal)
def populateTimeAreaData(self):
wave_label = QLabel("wavelength")
t1_label = QLabel("Trough 1 Start")
t2_label = QLabel("Trough 1 End")
f1_label = QLabel("Flux Start")
f2_label = QLabel("Flux End")
t3_label = QLabel("Trough 2 Start")
t4_label = QLabel("Trough 2 End")
grid_wave = QGridLayout()
grid_wave.addWidget(wave_label,0,0)
grid_wave.addWidget(t1_label,0,1)
grid_wave.addWidget(t2_label,0,2)
grid_wave.addWidget(f1_label,0,3)
grid_wave.addWidget(f2_label,0,4)
grid_wave.addWidget(t3_label,0,5)
grid_wave.addWidget(t4_label,0,6)
QEFileObject = self.QEFileList[self.currentObs]
row=0
for waveArr in QEFileObject.data:
row+=1
wave_label = QLabel(str(waveArr[0]))
# t1_text = QLineEdit(str(waveArr[5]))
t1_spinbox = QSpinBox()
t1_spinbox.setMaximum(100000)
t1_spinbox.setValue(int(waveArr[5]))
t1_spinbox.x = 5
t1_spinbox.y = row-1
QObject.connect(t1_spinbox, SIGNAL('editingFinished()'),self.timeChanged)
t2_spinbox = QSpinBox()
t2_spinbox.setMaximum(100000)
t2_spinbox.setValue(int(waveArr[6]))
t2_spinbox.x = 6
t2_spinbox.y = row-1
QObject.connect(t2_spinbox, SIGNAL('editingFinished()'),self.timeChanged)
f1_spinbox = QSpinBox()
f1_spinbox.setMaximum(100000)
f1_spinbox.setValue(int(waveArr[7]))
f1_spinbox.x = 7
f1_spinbox.y = row-1
QObject.connect(f1_spinbox, SIGNAL('editingFinished()'),self.timeChanged)
f2_spinbox = QSpinBox()
f2_spinbox.setMaximum(100000)
f2_spinbox.setValue(int(waveArr[8]))
f2_spinbox.x = 8
f2_spinbox.y = row-1
QObject.connect(f2_spinbox, SIGNAL('editingFinished()'),self.timeChanged)
t3_spinbox = QSpinBox()
t3_spinbox.setMaximum(100000)
t3_spinbox.setValue(int(waveArr[9]))
t3_spinbox.x = 9
t3_spinbox.y = row-1
QObject.connect(t3_spinbox, SIGNAL('editingFinished()'),self.timeChanged)
t4_spinbox = QSpinBox()
t4_spinbox.setMaximum(100000)
t4_spinbox.setValue(int(waveArr[10]))
t4_spinbox.x = 10
t4_spinbox.y = row-1
QObject.connect(t4_spinbox, SIGNAL('editingFinished()'),self.timeChanged)
grid_wave.addWidget(wave_label,row,0)
grid_wave.addWidget(t1_spinbox,row,1)
grid_wave.addWidget(t2_spinbox,row,2)
grid_wave.addWidget(f1_spinbox,row,3)
grid_wave.addWidget(f2_spinbox,row,4)
grid_wave.addWidget(t3_spinbox,row,5)
grid_wave.addWidget(t4_spinbox,row,6)
angle = QEFileObject.angle
self.angle_label = QLabel('Angle: '+str(angle))
doneCal_button = QPushButton('Done Calibration')
QObject.connect(doneCal_button, SIGNAL('clicked()'),self.doneCal)
self.vbox_cal.addWidget(self.angle_label)
self.vbox_cal.addWidget(doneCal_button)
self.vbox_cal.addLayout(grid_wave)
def switchObs(self):
source = self.sender()
self.currentObs = int(source.num)
self.obsNum_label.setText('Measurement: ' + str(self.currentObs))
self.display_image()
if self.selectedPixel != None:
self.plot_timestream()
    def plot_QE(self):
        """Plot QE vs wavelength (one curve per obs) and QE vs polarizer angle
        (at the combobox-selected wavelength) for the selected pixel.

        Only runs once at least one complete obs/QE file pair exists and a
        pixel has been selected.
        """
        if (len(self.QEFileList) > 0) & (len(self.obsFileList) == len(self.QEFileList)) & (self.selectedPixel != None):
            self.wvQE_plot.canvas.ax.clear()
            self.angleQE_plot.canvas.ax.clear()
            wvQEArr = []
            angleQEArr = []
            for obsNum in range(len(self.obsFileList)):
                QEFileObject = self.QEFileList[obsNum]
                # One (wavelength, QE) row per wavelength for this obs.
                wvQEArr.append(zeros((len(QEFileObject.data),2)))
                for wvNum in range(len(QEFileObject.data)):
                    waveArr = QEFileObject.data[wvNum]
                    # Background windows before and after the flux window.
                    backGroundRate1, sb1 = self.getAverageTimeStream(obsNum,waveArr[-6],waveArr[-5])
                    backGroundRate2, sb2 = self.getAverageTimeStream(obsNum,waveArr[-2],waveArr[-1])
                    backGroundRate = (backGroundRate1+backGroundRate2)/2.0
                    countRate, s = self.getAverageTimeStream(obsNum,waveArr[-4],waveArr[-3])
                    countRate-=backGroundRate
                    #print "obsnum: " + str(obsNum)
                    #print "wavelength: " +str(waveArr[0])
                    #print "backGroundRate: " +str(backGroundRate)
                    #print "countRate: " +str(countRate)
                    QE=QEFileObject.findQE(countRate,waveArr[0])
                    #print "QE: "+str(QE)
                    # Clamp negative QE (background exceeded flux) to zero.
                    if QE < 0:
                        QE = 0
                    # print 'QE: ' + str(QE)+' wavelength: '+str(waveArr[0])
                    wvQEArr[obsNum][wvNum,:]=[waveArr[0], QE]
                    # Collect (angle, QE) points at the GUI-selected wavelength.
                    if int(waveArr[0]) == int(self.wv_combobox.currentText()):
                        angleQEArr.append([QEFileObject.angle, QE])
                self.wvQE_plot.canvas.ax.plot(wvQEArr[obsNum][:,0],wvQEArr[obsNum][:,1], label = str(obsNum))
            angleQEArr=np.reshape(angleQEArr,(len(angleQEArr),2))
            # self.angleQE_plot.canvas.ax.plot(angleQEArr[:,0], angleQEArr[:,1])
            self.angleQE_plot.canvas.ax.plot(angleQEArr[:,0], angleQEArr[:,1],'bo')
            # print 'AngleArr: ' + str(angleQEArr)
            # print 'x: ' +str(angleQEArr[:,0])
            # print 'y: ' +str(angleQEArr[:,1])
            # self.wvQE_plot.canvas.ax.legend(loc='upper right')
            # handles, labels = self.wvQE_plot.canvas.ax.get_legend_handles_labels()
            # self.wvQE_plot.canvas.ax.legend(handles, labels, loc='upper right')
            self.wvQE_plot.canvas.ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
            self.angleQE_plot.format_labels('Polarization', 'Angle (degrees)', 'QE')
            self.wvQE_plot.format_labels('QE', 'wavelength (nm)', 'QE')
            self.angleQE_plot.canvas.draw()
            self.wvQE_plot.canvas.draw()
    def doneCal(self):
        """Finish configuring the current measurement's obs/QE file pair.

        If only one of the two files exists, the orphan is discarded.  When
        both exist, the measurement counter is bumped, the QE parameters are
        saved, and a summary row (switch button, labels, edit button) is added
        to the file list; finally the calibration panel is swapped back for
        the file list panel.
        """
        QEFileExists = False
        obsFileExists = False
        try:
            QEFileExists = isfile(str(self.QEFileList[self.currentObs].filename))
        except IndexError:
            pass
        try:
            obsFileExists = isfile(str(self.obsFileList[self.currentObs]))
        except IndexError:
            pass
        if QEFileExists & (not obsFileExists):
            # Orphan QE file: discard it and step back to the last measurement.
            self.QEFileList.pop(self.currentObs)
            # self.currentObs-=1
            self.currentObs = self.numObservations-1
        elif obsFileExists & (not QEFileExists):
            # Orphan obs file: discard it likewise.
            self.obsFileList.pop(self.currentObs)
            # self.currentObs-=1
            self.currentObs = self.numObservations-1
        elif obsFileExists & QEFileExists:
            self.numObservations+=1
            self.QEFileList[self.currentObs].saveParam()
            # Button that re-selects this measurement via switchObs().
            obsNum_button = QPushButton('('+str(self.currentObs)+')')
            obsNum_button.num = self.currentObs
            QObject.connect(obsNum_button, SIGNAL('clicked()'),self.switchObs)
            editObs_button = QPushButton('Edit')
            editObs_button.num = self.currentObs
            angle_label_temp = QLabel(str(self.angle_label.text()))
            obsfile_label_temp = QLabel(str(self.obsfile_label.text()))
            QEfile_label_temp = QLabel(str(self.QEfile_label.text()))
            hbox0 = QHBoxLayout()
            vbox0 = QVBoxLayout()
            vbox0.addWidget(angle_label_temp)
            vbox0.addWidget(obsfile_label_temp)
            vbox0.addWidget(QEfile_label_temp)
            hbox0.addWidget(obsNum_button)
            hbox0.addLayout(vbox0)
            hbox0.addWidget(editObs_button)
            self.vbox_file.insertLayout(self.currentObs, hbox0)
        # Swap the calibration panel out for a fresh (hidden) frame and show
        # the file-list panel again.
        self.cal_QFrame.hide()
        self.vbox_QEcal.removeWidget(self.cal_QFrame)
        self.cal_QFrame = QFrame()
        self.cal_QFrame.hide()
        self.vbox_QEcal.addWidget(self.cal_QFrame)
        self.file_QFrame.show()
def toggleEnable(self):
    """Flip the visibility of the seven widgets in row 1 of the wavelength grid."""
    for col in range(7):
        cell = self.grid_wave.itemAtPosition(1, col).widget()
        if cell.isVisible():
            cell.hide()
        else:
            cell.show()
def addMeasurementFile(self):
    """Switch from the file-list panel to the calibration-entry panel.

    Order matters: hide the file frame, populate the calibration frame,
    then show it.
    """
    self.file_QFrame.hide()
    self.addWaveToFrame()
    self.cal_QFrame.show()
def create_main_frame(self):
    """Build the central widget: TV image + timestream plots, file/cal
    panels, and the QE plot column.

    Widget-creation and layout-insertion order determines on-screen
    placement, so the statement order here is significant.
    """
    self.main_frame = QWidget()
    # --- TV image view (fixed size, no scrollbars, mouse tracking on) ---
    self.tv_image = QGraphicsView()
    self.tv_image.setMinimumSize(self.imagex,self.imagey)
    self.tv_image.setMaximumSize(self.imagex,self.imagey)
    self.tv_image.setMouseTracking(True)
    self.tv_image.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
    self.tv_image.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
    self.tv_image.setAlignment(QtCore.Qt.AlignHCenter|QtCore.Qt.AlignTop)
    # --- Pixel info labels and buttons ---
    self.pixel_label = QLabel("pixel")
    self.obsNum_label = QLabel('Measurement: ' + str(self.currentObs))
    self.pixel_coord = QLabel('pixel coord')
    self.pixel_button = QPushButton('Toggle good pixel')
    QObject.connect(self.pixel_button, SIGNAL('clicked()'),self.toggleGoodPixel)
    self.goodpixel_button = QPushButton('Find good pixels')
    QObject.connect(self.goodpixel_button, SIGNAL('clicked()'),self.findGoodPixel)
    # --- Timestream plots ---
    self.timestream_plot = MPL_Widget()
    self.timestream_plot.format_labels('Time Stream', 'time (s)', 'counts')
    self.Dtimestream_plot = MPL_Widget()
    self.Dtimestream_plot.format_labels('D Time Stream', 'time (s)', 'dcounts/dt')
    # --- File-list and calibration frames (cal frame starts hidden) ---
    self.file_QFrame = QFrame()
    self.cal_QFrame = QFrame() #calibration box
    self.cal_QFrame.hide()
    addMeasurement_button = QPushButton('Add a new Measurement')
    QObject.connect(addMeasurement_button, SIGNAL('clicked()'),self.addMeasurementFile)
    # --- QE-vs-wavelength plot and its controls ---
    self.wvQE_plot = MPL_Widget()
    self.wvQE_plot.format_labels('QE', 'wavelength (nm)', 'QE')
    self.wv_combobox = QComboBox()
    self.wv_combobox.addItems(['400', '450', '500', '550', '600', '650', '700', '750', '800', '850', '900', '950', '1000', '1050', '1100', '1150', '1200', '1250', '1300'])
    QObject.connect(self.wv_combobox, SIGNAL('currentIndexChanged(QString)'),self.plot_QE)
    self.average_button = QPushButton('Find Obs Average')
    QObject.connect(self.average_button, SIGNAL('clicked()'),self.getAvgQE)
    self.compare_button = QPushButton('Compare Averages')
    QObject.connect(self.compare_button, SIGNAL('clicked()'),self.compareQE)
    self.histogram_button = QPushButton('Make Histograms')
    QObject.connect(self.histogram_button, SIGNAL('clicked()'),self.histogramQE)
    self.angleQE_plot = MPL_Widget()
    self.angleQE_plot.format_labels('Polarization', 'Angle (degrees)', 'QE')
    # --- Layout assembly ---
    hbox = QHBoxLayout() #main box
    vbox_plot = QVBoxLayout() #plot box
    # Left column: TV image + pixel labels above the two timestream plots.
    hbox_tv = QHBoxLayout()
    hbox_tv.addWidget(self.tv_image)
    vbox_pixellabels = QVBoxLayout()
    vbox_pixellabels.addWidget(self.pixel_label)
    vbox_pixellabels.addWidget(self.obsNum_label)
    vbox_pixellabels.addWidget(self.pixel_coord)
    vbox_pixellabels.addWidget(self.pixel_button)
    vbox_pixellabels.addWidget(self.goodpixel_button)
    hbox_tv.addLayout(vbox_pixellabels)
    vbox_plot.addLayout(hbox_tv)
    vbox_plot.addWidget(self.timestream_plot)
    vbox_plot.addWidget(self.Dtimestream_plot)
    # Middle column: file list stacked over the calibration frame.
    self.vbox_file = QVBoxLayout() #add New Files box
    self.vbox_file.addWidget(addMeasurement_button)
    self.file_QFrame.setLayout(self.vbox_file)
    self.vbox_QEcal = QVBoxLayout()
    self.vbox_QEcal.addWidget(self.file_QFrame)
    self.vbox_QEcal.addWidget(self.cal_QFrame)
    # Right column: wavelength-QE plot, control buttons, angle-QE plot.
    vbox_QEplot = QVBoxLayout()
    vbox_QEplot.addWidget(self.wvQE_plot)
    hbox_QEbuttons = QHBoxLayout()
    hbox_QEbuttons.addWidget(self.wv_combobox)
    hbox_QEbuttons.addWidget(self.average_button)
    hbox_QEbuttons.addWidget(self.compare_button)
    hbox_QEbuttons.addWidget(self.histogram_button)
    vbox_QEplot.addLayout(hbox_QEbuttons)
    vbox_QEplot.addWidget(self.angleQE_plot)
    hbox.addLayout(vbox_plot)
    hbox.addLayout(self.vbox_QEcal)
    hbox.addLayout(vbox_QEplot)
    self.main_frame.setLayout(hbox)
    self.setCentralWidget(self.main_frame)
def create_status_bar(self):
    """Install a status bar with a single persistent message label."""
    self.status_text = QLabel("Awaiting orders.")
    self.statusBar().addWidget(self.status_text, 1)
def on_about(self):
    """Display the About message box (placeholder text for now)."""
    msg = """ Message to user goes here.
    """
    QMessageBox.about(self, "MKID-ROACH software demo", msg.strip())
def closeEvent(self, event):
    """Handle the main-window close event.

    Pauses briefly, then asks Qt's event loop to quit.

    Bug fix: the original ended with
    ``QtCore.QCoreApplication.instance().quit`` — the bound method was
    looked up but never invoked, so ``quit()`` never actually ran.  The
    trailing ``()`` was added.
    """
    # (A disabled block that removed per-measurement TV_frame*.png files
    # used to live here; it was commented out and has been dropped.)
    time.sleep(.1)
    QtCore.QCoreApplication.instance().quit()
if __name__ == "__main__":
    # Standard Qt bootstrap: build the application, show the main window,
    # and hand control to the event loop.
    app = QApplication(sys.argv)
    myapp = QECalibration()
    myapp.show()
    app.exec_()
| bmazin/ARCONS-pipeline | QEcal/QECalibration.py | Python | gpl-2.0 | 54,351 |
"""Python CLI for Microsoft SQL."""
# Copyright (C) 2016 Russell Troxel
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__version__ = '1.0.4'
| rtrox/mssqlcli | mssqlcli/__init__.py | Python | gpl-3.0 | 728 |
from .emitter import Emitter
from .parser import Parser, Packet
from .transports.polling import Polling
import gevent
import gevent.event
import gevent.queue
import json
import logging
logger = logging.getLogger(__name__)
class Client(Emitter):
    """An engine.io client built on gevent.

    Manages the connection state machine (closed -> opening -> open ->
    closing -> closed), a single transport, a ping/pong keepalive loop,
    and a queued flush loop for outgoing packets.

    Fixes relative to the original:
      * ``handle_close`` captured "was this a local close?" *after*
        overwriting ``self.state`` with ``'closed'``, so the flag was
        always False and a locally-initiated close never sent the CLOSE
        packet to the transport.  The flag is now computed first.
      * The ``transports`` fallback is wrapped in ``list(...)`` so that
        ``self.transports[0]`` in :meth:`open` also works on Python 3,
        where ``dict.keys()`` returns a non-indexable view.
      * The mutable default argument ``transports=[]`` was replaced by
        ``None`` (same effective behavior).
    """

    # Registry of supported transport names -> transport classes.
    TRANSPORTS = {
        'polling': Polling
    }

    def __init__(self, scheme, hostname, port, path='/engine.io', transports=None, parser=None):
        super(Client, self).__init__()
        self.scheme = scheme
        self.hostname = hostname
        self.port = port
        self.path = path
        # Keep only the requested transports we actually support; if none
        # match (or none were requested), fall back to all known ones.
        requested = transports or []
        self.transports = [t for t in self.TRANSPORTS.keys() if t in requested] or list(self.TRANSPORTS.keys())
        self.parser = parser or Parser()
        self.state = 'closed'
        self.sid = None               # session id, assigned by the handshake
        self.ping_interval = None     # ms, from the handshake
        self.ping_timeout = None      # ms, from the handshake
        self.pong_event = gevent.event.Event()
        self.send_queue = gevent.queue.JoinableQueue()
        self.transport = None
        self.ping_pong_loop = None
        self.flush_loop = None

    def open(self):
        """Start connecting using the first configured transport."""
        self.state = 'opening'
        transport_name = self.transports[0]
        transport = self.create_transport(transport_name)
        transport.open()
        self.set_transport(transport)

    def close(self):
        """Gracefully close: drain the send queue, then tear down."""
        if self.state not in ['opening', 'open']:
            return
        self.state = 'closing'
        self.send_queue.join()
        self.handle_close()

    def send(self, message, binary=False):
        """Queue a MESSAGE packet for delivery."""
        self.send_packet(Packet(Packet.MESSAGE, message, binary))

    def create_transport(self, name):
        """Instantiate the transport class registered under *name*."""
        return self.TRANSPORTS[name](self, self.scheme, self.hostname, self.port, self.path, self.parser)

    def set_transport(self, transport):
        """Adopt *transport*, rebinding event handlers from any old one."""
        if self.transport:
            logger.debug("Clearing existing transport")
            self.transport.removeAllListeners()
        self.transport = transport
        self.transport.on('close', self.handle_close)
        self.transport.on('packet', self.handle_packet)
        self.transport.on('error', self.handle_error)

    def send_packet(self, packet):
        """Enqueue *packet* unless the client is (or is going) down."""
        if self.state in ['closing', 'closed']:
            logger.warning("Trying to send a packet while state is: %s", self.state)
            return
        self.send_queue.put(packet)

    def loop_flush(self):
        """Greenlet body: batch queued packets and hand them to the transport."""
        while self.state in ['open', 'closing']:
            logger.debug("Waiting packets")
            self.send_queue.peek()  # block until at least one packet exists
            logger.debug("Flushing packets")
            packets = []
            try:
                while True:
                    packet = self.send_queue.get_nowait()
                    packets.append(packet)
            except gevent.queue.Empty:
                pass
            self.transport.send(packets)
            # Mark items done only after a successful send, so close()'s
            # join() waits for actual delivery.
            for packet in packets:
                self.send_queue.task_done()

    def loop_ping_pong(self):
        """Greenlet body: send PING, await PONG, close on timeout."""
        while self.state in ['open', 'closing']:
            self.pong_event.clear()
            self.send_packet(Packet(Packet.PING))
            pong_received = self.pong_event.wait(timeout=self.ping_timeout/1000)
            if not pong_received:
                logger.warning("Pong timeout")
                self.handle_close()
                break
            gevent.sleep(self.ping_interval/1000)

    def start_loop(self, func, *args, **kwargs):
        """Spawn *func* as a greenlet; log when it exits."""
        def loop_stopped(g):
            logger.debug("Stop %s", func.__name__)
        g = gevent.spawn(func, *args, **kwargs)
        g.rawlink(loop_stopped)
        logger.debug("Start %s", func.__name__)
        return g

    def stop_loop(self, loop):
        """Kill a greenlet started by :meth:`start_loop` (no-op on None)."""
        if loop:
            loop.kill(block=False)

    def handle_open(self):
        """Transition to 'open' and notify listeners."""
        self.state = 'open'
        self.emit('open')

    def handle_close(self):
        """Tear down the connection (idempotent once closed)."""
        if self.state in ['opening', 'open', 'closing']:
            logger.debug("Closing client")
            # Capture whether we initiated the close *before* clobbering
            # the state; only then should the transport send CLOSE.
            closed_locally = (self.state == 'closing')
            self.state = 'closed'
            self.transport.close(send=closed_locally)
            self.sid = None
            self.stop_loop(self.ping_pong_loop)
            self.stop_loop(self.flush_loop)
            self.emit('close')

    def handle_handshake(self, handshake):
        """Apply the server handshake and start the background loops."""
        self.sid = handshake['sid']
        self.ping_interval = handshake['pingInterval']
        self.ping_timeout = handshake['pingTimeout']
        self.handle_open()
        self.ping_pong_loop = self.start_loop(self.loop_ping_pong)
        self.flush_loop = self.start_loop(self.loop_flush)

    def handle_packet(self, packet):
        """Dispatch an incoming packet according to its type."""
        if self.state not in ['open', 'opening']:
            logger.warning("Packet received while state is: %s", self.state)
            return
        if packet.type == Packet.OPEN:
            handshake = json.loads(packet.data)
            self.handle_handshake(handshake)
        elif packet.type == Packet.CLOSE:
            # Server-initiated close: don't echo a CLOSE packet back.
            self.transport.close(send=False)
        elif packet.type == Packet.PONG:
            self.pong_event.set()
        elif packet.type == Packet.MESSAGE:
            self.emit('message', packet.data)
        else:
            logger.warning("Invalid message type: %s", packet.type_string)

    def handle_error(self, error):
        """Report a transport error and shut the client down."""
        logger.warning("Error occured: %s", error)
        self.emit('error', error)
        self.handle_close()
| max00xam/service.maxxam.teamwatch | lib/engineio_client/client.py | Python | gpl-3.0 | 5,306 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------
# Filename : trello_search.py
# Description :
# Created By : Joe Pistone
# Date Created : 16-Mar-2017 10:07
# Date Modified :
#
# License : Development
#
# Description : Search for trello users
#
# (c) Copyright 2017, TheKillingTime all rights reserved.
#-----------------------------------------------------------
__author__ = "Joe Pistone"
__version__ = "0.1"
import sys
import json
import urllib2
class Search_Trello(object):
    """Search the public Trello member API for a term and print matches.

    Python 2 module (urllib2, print statements).
    """

    def __init__(self, search_term):
        # search_term is interpolated directly into the query string;
        # main() below only accepts alphanumeric terms, so no escaping
        # is done here.
        self.terms = search_term
        self.trello_url = "https://api.trello.com/1/search/members?query=%s" % self.terms

    def make_call(self):
        """
        Calls the Trello Api.

        On success stores the open response object in ``self.result``;
        on HTTP errors prints a message and leaves ``self.result`` unset
        (NOTE(review): parse_output would then raise AttributeError —
        confirm that is acceptable for this CLI).
        """
        try:
            request = urllib2.Request(self.trello_url)
            self.result = urllib2.urlopen(request)
        except urllib2.HTTPError, e:
            if e.code == 400:
                print "[!] Bad Request"
            elif e.code == 404:
                print "[!] Page Not found."
            else:
                print "[!] Error calling Api %s " % e

    def parse_output(self):
        """
        Parses the output of the result.

        Prints full name, username and initials for every returned member.
        """
        try:
            trello_output = json.load(self.result)
            for name in trello_output:
                print "-" * 25
                print "[+] Full Name: %s " % name['fullName']
                print "[+] Username: %s " % name['username']
                print "[+] Initials: %s " % name['initials']
        except UnicodeEncodeError:
            # Python 2 print can fail on non-ASCII member names.
            print "[!] UnicodeEncodeError."

    def main(self):
        """
        Start it up.
        """
        self.make_call()
        self.parse_output()
        print "-" * 25
if __name__ == '__main__':
if len(sys.argv) != 2:
print "[!] Usage %s <who to search for>"
sys.exit(1)
search_term = sys.argv[1]
if search_term.isalnum():
st = Search_Trello(search_term)
st.main()
else:
print "[!] Alpha numeric searches please."
| daguy666/scripts | trello_search.py | Python | unlicense | 2,154 |
#!/usr/bin/env python
# coding=utf-8
# Copyright (C) 2015 by Serge Poltavski #
# serge.poltavski@gmail.com #
# #
# This program is free software; you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation; either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/> #
__author__ = 'Serge Poltavski'
import re
from pddoc.pd import XLET_MESSAGE, XLET_SOUND
def expr_args_parse(args):
    """Return the inlet count for a Pd [expr] object.

    Scans the creation arguments for dollar-variables ($f1, $i2, $s3, ...)
    and returns the highest index found; defaults to 1 when there are no
    arguments or no variables.
    """
    if not args:
        return 1
    tokens = set(re.findall("(\$[fsi][0-9])", " ".join(args)))
    indices = [int(tok[2]) for tok in tokens]
    if not indices:
        return 1
    return max(indices)
def expr_tilde_args_parse(args):
    """Return the extra inlet xlet types for a Pd [expr~] object.

    Dollar-variables found in the arguments are ordered by index; $v1 is
    skipped (it is the object's implicit first signal inlet), $v* become
    signal inlets and $f/$s/$i become message inlets.
    """
    if not args:
        return []
    found = set(re.findall("(\$[fsiv][0-9])", " ".join(args)))
    if not found:
        return []
    xlets = []
    for tok in sorted(found, key=lambda t: int(t[2:])):
        if tok == "$v1":
            continue
        xlets.append(XLET_SOUND if tok[1] == 'v' else XLET_MESSAGE)
    return xlets
def func_list_in(args):
    """Inlet count for a Pd [list] object: 1 for trim/length, else 2."""
    return 1 if args and args[0] in ("trim", "length") else 2
def func_list_out(args):
    """Outlet count for a Pd [list] object: 3 for split, else 1."""
    return 3 if args and args[0] == "split" else 1
def fexpr_tilde_args_parse(args):
    """Return the extra inlet xlet types for a Pd [fexpr~] object.

    Same scheme as expr_tilde_args_parse, except [fexpr~] uses $x*/$y*
    for signal history; $x1 is the implicit first signal inlet and is
    skipped, other $x* become signal inlets, the rest message inlets.
    """
    if not args:
        return []
    found = set(re.findall("(\$[fsixy][0-9])", " ".join(args)))
    if not found:
        return []
    xlets = []
    for tok in sorted(found, key=lambda t: int(t[2:])):
        if tok == "$x1":
            continue
        xlets.append(XLET_SOUND if tok[1] == 'x' else XLET_MESSAGE)
    return xlets
# Matches "object" names that are bare integers — i.e. Pd number boxes
# such as [2] or [13].
_re_num = re.compile(r"^\d+$")

# Core Pd object registry: name -> (inlet_fn, outlet_fn).  Each function
# takes the object's creation arguments (list of strings) and returns the
# list of xlet types (XLET_MESSAGE / XLET_SOUND) for that side.
_objects = {
    "adc~": (
        lambda args: [XLET_MESSAGE],
        # One signal outlet per requested channel; stereo by default.
        lambda args: (2 if len(args) == 0 else len(args)) * [XLET_SOUND]
    ),
    "dac~": (
        lambda args: (2 if len(args) == 0 else len(args)) * [XLET_SOUND],
        lambda args: []
    ),
    "readsf~": (
        lambda args: [XLET_MESSAGE],
        # N signal outlets plus a bang outlet at end-of-file.
        lambda args: (1 if len(args) == 0 else int(args[0])) * [XLET_SOUND] + [XLET_MESSAGE]
    ),
    "writesf~": (
        lambda args: (1 if len(args) == 0 else int(args[0])) * [XLET_SOUND],
        lambda args: []
    ),
    "expr": (
        lambda args: expr_args_parse(args) * [XLET_MESSAGE],
        # One outlet per semicolon-separated expression.
        lambda args: len(list(filter(None, " ".join(args).split(";")))) * [XLET_MESSAGE]
    ),
    "expr~": (
        lambda args: [XLET_SOUND] + expr_tilde_args_parse(args),
        lambda args: [XLET_SOUND]
    ),
    "fexpr~": (
        lambda args: [XLET_SOUND] + fexpr_tilde_args_parse(args),
        lambda args: [XLET_SOUND]
    ),
    "sprintf": (
        # One inlet per % placeholder ("%%" is a literal percent).
        lambda args: (1 if not len(args) else (len(" ".join(args).replace("%%", "").split("%")) - 1)) * [
            XLET_MESSAGE],
        lambda args: (1 if " ".join(args).replace("%%", "").count("%") > 0 else 0) * [XLET_MESSAGE]
    ),
    "select": (
        lambda args: (2 if len(args) < 2 else 1) * [XLET_MESSAGE],
        # One outlet per tested value plus a rejection outlet.
        lambda args: (2 if len(args) == 0 else len(args) + 1) * [XLET_MESSAGE]
    ),
    "sel": (
        lambda args: (2 if len(args) < 2 else 1) * [XLET_MESSAGE],
        lambda args: (2 if len(args) == 0 else len(args) + 1) * [XLET_MESSAGE]
    ),
    "route": (
        lambda args: (2 if len(args) < 2 else 1) * [XLET_MESSAGE],
        lambda args: (2 if len(args) == 0 else len(args) + 1) * [XLET_MESSAGE]
    ),
    "send": (
        # A second inlet (to set the target) only when no name was given.
        lambda args: (2 if not args else 1) * [XLET_MESSAGE],
        lambda args: []
    ),
    "s": (
        lambda args: (2 if not args else 1) * [XLET_MESSAGE],
        lambda args: []
    ),
    "pointer": (
        lambda args: 2 * [XLET_MESSAGE],
        lambda args: (len(args) + 2) * [XLET_MESSAGE]
    ),
    "pack": (
        lambda args: (2 if len(args) == 0 else len(args)) * [XLET_MESSAGE],
        lambda args: [XLET_MESSAGE]
    ),
    "unpack": (
        lambda args: [XLET_MESSAGE],
        lambda args: (2 if len(args) == 0 else len(args)) * [XLET_MESSAGE]
    ),
    "trigger": (
        lambda args: [XLET_MESSAGE],
        lambda args: (2 if len(args) == 0 else len(args)) * [XLET_MESSAGE]
    ),
    "t": (
        lambda args: [XLET_MESSAGE],
        lambda args: (2 if len(args) == 0 else len(args)) * [XLET_MESSAGE]
    ),
    # MIDI input objects: an extra channel outlet when no channel is fixed.
    "notein": (
        lambda args: [],
        lambda args: (3 if len(args) == 0 else 2) * [XLET_MESSAGE]
    ),
    "ctlin": (
        lambda args: [],
        lambda args: (3 if len(args) == 0 else 2 if len(args) == 1 else 1) * [XLET_MESSAGE]
    ),
    "pgmin": (
        lambda args: [],
        lambda args: (2 if len(args) == 0 else 1) * [XLET_MESSAGE]
    ),
    "bendin": (
        lambda args: [],
        lambda args: (2 if len(args) == 0 else 1) * [XLET_MESSAGE]
    ),
    "touchin": (
        lambda args: [],
        lambda args: (2 if len(args) == 0 else 1) * [XLET_MESSAGE]
    ),
    "polytouchin": (
        lambda args: [],
        lambda args: (3 if len(args) == 0 else 2) * [XLET_MESSAGE]
    ),
    "list": (
        lambda args: func_list_in(args) * [XLET_MESSAGE],
        lambda args: func_list_out(args) * [XLET_MESSAGE]
    ),
    "array set": (
        lambda args: 3 * [XLET_MESSAGE],
        lambda args: []
    )
}
def is_snd_math(name):
    """True for the binary signal-math objects (two signal inlets)."""
    return name in {"+~", "*~", "-~", "/~", "max~", "min~"}
def is_digit_object(name):
    # Truthy (a Match object) when the "object" name is a bare integer,
    # i.e. a Pd number box like [2]; callers treat the result as a bool.
    return _re_num.search(name)
def inlets(name, args):
    """Return the inlet xlet types for a core Pd object, or [] if unknown."""
    # Bare-number boxes ([2], [3], ...) have a single message inlet.
    if is_digit_object(name):
        return [XLET_MESSAGE]
    if is_snd_math(name):
        # Without an argument the right inlet is a signal; with one it is
        # a message inlet that sets the operand.
        return [XLET_SOUND] * 2 if not args else [XLET_SOUND, XLET_MESSAGE]
    entry = _objects.get(name)
    return entry[0](args) if entry else []
def outlets(name, args):
    """Return the outlet xlet types for a core Pd object, or [] if unknown."""
    # Bare-number boxes have two message outlets.
    if is_digit_object(name):
        return [XLET_MESSAGE] * 2
    if is_snd_math(name):
        return [XLET_SOUND]
    entry = _objects.get(name)
    return entry[1](args) if entry else []
def has_object(name):
    """True when this module can describe the xlets of *name*."""
    return is_snd_math(name) or bool(is_digit_object(name)) or name in _objects
# coding=utf-8
# Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.util.contextutil import temporary_file
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class FindBugsTest(PantsRunIntegrationTest):
  """Integration tests for the FindBugs pants plugin.

  Each test compiles a fixture target and asserts on the bug summary
  lines FindBugs writes to stdout.
  """

  @classmethod
  def hermetic(cls):
    # Run with a clean environment so only our injected config applies.
    return True

  def run_pants(self, command, config=None, stdin_data=None, extra_env=None, **kwargs):
    """Run pants with the findbugs backend enabled, merging *config* in."""
    full_config = {
      'GLOBAL': {
        'pythonpath': ["%(buildroot)s/contrib/findbugs/src/python"],
        'backend_packages': ["pants.contrib.findbugs"]
      }
    }
    if config:
      # Merge per-scope settings on top of the defaults.
      for scope, scoped_cfgs in config.items():
        updated = full_config.get(scope, {})
        updated.update(scoped_cfgs)
        full_config[scope] = updated
    return super(FindBugsTest, self).run_pants(command, full_config, stdin_data, extra_env, **kwargs)

  def test_no_warnings(self):
    """A clean target reports no bugs and no errors."""
    cmd = ['compile', 'contrib/findbugs/tests/java/org/pantsbuild/contrib/findbugs:none']
    pants_run = self.run_pants(cmd)
    self.assert_success(pants_run)
    self.assertNotIn('Bug', pants_run.stdout_data)
    self.assertNotIn('Errors:', pants_run.stdout_data)

  def test_empty_source_file(self):
    """An empty source file produces no bugs and no errors."""
    cmd = ['compile', 'contrib/findbugs/tests/java/org/pantsbuild/contrib/findbugs:empty']
    pants_run = self.run_pants(cmd)
    self.assert_success(pants_run)
    self.assertNotIn('Bug', pants_run.stdout_data)
    self.assertNotIn('Errors:', pants_run.stdout_data)

  def test_low_warning(self):
    """A single low-priority bug is reported and counted."""
    cmd = ['compile', 'contrib/findbugs/tests/java/org/pantsbuild/contrib/findbugs:low']
    pants_run = self.run_pants(cmd)
    self.assert_success(pants_run)
    self.assertIn('Bug[low]: VA_FORMAT_STRING_USES_NEWLINE Format string', pants_run.stdout_data)
    self.assertNotIn('Errors:', pants_run.stdout_data)
    self.assertIn('Bugs: 1 (High: 0, Normal: 0, Low: 1)', pants_run.stdout_data)

  def test_all_warnings(self):
    """One bug of each priority is reported and the totals add up."""
    cmd = ['compile', 'contrib/findbugs/tests/java/org/pantsbuild/contrib/findbugs:all']
    pants_run = self.run_pants(cmd)
    self.assert_success(pants_run)
    self.assertIn('Bug[high]: EC_UNRELATED_TYPES', pants_run.stdout_data)
    self.assertIn('Bug[normal]: NP_ALWAYS_NULL', pants_run.stdout_data)
    self.assertIn('Bug[low]: VA_FORMAT_STRING_USES_NEWLINE', pants_run.stdout_data)
    self.assertNotIn('Errors:', pants_run.stdout_data)
    self.assertIn('Bugs: 3 (High: 1, Normal: 1, Low: 1)', pants_run.stdout_data)

  def test_max_rank_fail_on_error(self):
    """With max_rank=9 and fail_on_error, low bugs are filtered and the run fails."""
    cmd = ['compile', 'contrib/findbugs/tests/java/org/pantsbuild/contrib/findbugs:all']
    pants_ini_config = {'compile.findbugs': {'max_rank': 9, 'fail_on_error': True}}
    pants_run = self.run_pants(cmd, config=pants_ini_config)
    self.assert_failure(pants_run)
    self.assertIn('Bug[high]:', pants_run.stdout_data)
    self.assertIn('Bug[normal]:', pants_run.stdout_data)
    self.assertNotIn('Bug[low]:', pants_run.stdout_data)
    self.assertIn('FAILURE: failed with 2 bugs and 0 errors', pants_run.stdout_data)

  def test_exclude(self):
    """An exclude filter file suppresses the matched bug only."""
    cmd = ['compile', 'contrib/findbugs/tests/java/org/pantsbuild/contrib/findbugs:all']
    with temporary_file() as exclude_file:
      exclude_file.write(dedent("""\
        <?xml version="1.0" encoding="UTF-8"?>
        <FindBugsFilter>
          <Match>
            <Bug pattern="NP_ALWAYS_NULL" />
            <Class name="org.pantsbuild.contrib.findbugs.AllWarnings" />
            <Method name="main" />
          </Match>
        </FindBugsFilter>
        """))
      exclude_file.close()
      pants_ini_config = {'compile.findbugs': {'exclude_filter_file': exclude_file.name}}
      pants_run = self.run_pants(cmd, config=pants_ini_config)
    self.assert_success(pants_run)
    self.assertIn('Bug[high]:', pants_run.stdout_data)
    self.assertNotIn('Bug[normal]:', pants_run.stdout_data)
    self.assertIn('Bug[low]:', pants_run.stdout_data)
    self.assertNotIn('Errors:', pants_run.stdout_data)
    self.assertIn('Bugs: 2 (High: 1, Normal: 0, Low: 1)', pants_run.stdout_data)

  def test_error(self):
    """A malformed filter file is reported as an error but bugs still print."""
    cmd = ['compile', 'contrib/findbugs/tests/java/org/pantsbuild/contrib/findbugs:all']
    with temporary_file() as exclude_file:
      exclude_file.write(dedent("""\
        <?xml version="1.0" encoding="UTF-8"?>
        <FindBugsFilter>
          <Incomplete Tag
        </FindBugsFilter>
        """))
      exclude_file.close()
      pants_ini_config = {'compile.findbugs': {'exclude_filter_file': exclude_file.name}}
      pants_run = self.run_pants(cmd, config=pants_ini_config)
    self.assert_success(pants_run)
    self.assertIn('Bug[high]:', pants_run.stdout_data)
    self.assertIn('Bug[normal]:', pants_run.stdout_data)
    self.assertIn('Bug[low]:', pants_run.stdout_data)
    self.assertIn('Errors: 1', pants_run.stdout_data)
    self.assertIn('Unable to read filter:', pants_run.stdout_data)
    self.assertIn('Attribute name "Tag" associated with an element type', pants_run.stdout_data)
    self.assertIn('Bugs: 3 (High: 1, Normal: 1, Low: 1)', pants_run.stdout_data)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.