| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| catapult-project/catapult | third_party/gsutil/third_party/mock/mock/mock.py | Python | bsd-3-clause | 83,928 | 0.001513 |
# mock.py
# Test tools for mocking and patching.
# E-mail: fuzzyman AT voidspace DOT org DOT uk
#
# mock 1.0.1
# http://www.voidspace.org.uk/python/mock/
#
# Copyright (c) 2007-2013, Michael Foord & the mock team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
__all__ = (
'__version__',
'version_info',
'Mock',
'MagicMock',
'patch',
'sentinel',
'DEFAULT',
'ANY',
'call',
'create_autospec',
'FILTER_DIR',
'CallableMixin',
'NonCallableMock',
'NonCallableMagicMock',
'mock_open',
'PropertyMock',
)
from functools import partial
import inspect
import pprint
import sys
try:
import builtins
except ImportError:
import __builtin__ as builtins
from types import ModuleType
import six
from six import wraps
# TODO(houglum): Adjust this section if we use a later version of mock.
# Manually specify version so that we don't need to rely on pbr (which works
# best when packages are installed via pip rather than direct download).
# This allows us to include mock in other projects which can be installed
# via methods other than pip (downloading a git repo, tarball, etc.).
__version__ = '2.0.0'
version_info = (2, 0, 0, 'final', 0)
import mock
try:
inspectsignature = inspect.signature
except AttributeError:
import funcsigs
inspectsignature = funcsigs.signature
# TODO: use six.
try:
unicode
except NameError:
# Python 3
basestring = unicode = str
try:
long
except NameError:
# Python 3
long = int
try:
BaseException
except NameError:
# Python 2.4 compatibility
BaseException = Exception
if six.PY2:
# Python 2's next() can't handle a non-iterator with a __next__ method.
_next = next
def next(obj, _next=_next):
if getattr(obj, '__next__', None):
return obj.__next__()
return _next(obj)
del _next
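# Illustrative note, not from the original file: the shim above lets
# next() work on mocks, which define __next__ without being true
# iterators under Python 2. A hypothetical example:
#   m = MagicMock()
#   m.__next__.return_value = 1
#   next(m)  # -> 1, dispatched through the __next__ branch above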
_builtins = set(name for name in dir(builtins) if not name.startswith('_'))
BaseExceptions = (BaseException,)
if 'java' in sys.platform:
# jython
import java
BaseExceptions = (BaseException, java.lang.Throwable)
try:
_isidentifier = str.isidentifier
except AttributeError:
# Python 2.X
import keyword
import re
regex = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
def _isidentifier(string):
if string in keyword.kwlist:
return False
return regex.match(string)
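# Hedged illustration of the Python 2 fallback above (these example
# calls are not in the original source):
#   _isidentifier('spam')   # truthy: a regex match object
#   _isidentifier('class')  # False: 'class' is a keyword
#   _isidentifier('2fast')  # None: does not match the regex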
self = 'im_self'
builtin = '__builtin__'
if six.PY3:
self = '__self__'
builtin = 'builtins'
# NOTE: This FILTER_DIR is not used. The binding in mock.FILTER_DIR is.
FILTER_DIR = True
# Workaround for Python issue #12370
# Without this, the __class__ properties wouldn't be set correctly
_safe_super = super
def _is_instance_mock(obj):
# can't use isinstance on Mock objects because they override __class__
# The base class for all mocks is NonCallableMock
return issubclass(type(obj), NonCallableMock)
def _is_exception(obj):
return (
isinstance(obj, BaseExceptions) or
isinstance(obj, ClassTypes) and issubclass(obj, BaseExceptions)
)
class _slotted(object):
__slots__ = ['a']
DescriptorTypes = (
type(_slotted.a),
property,
)
def _get_signature_object(func, as_instance, eat_self):
"""
Given an arbitrary, possibly callable object, try to create a suitable
signature object.
Return a (reduced func, signature) tuple, or None.
"""
if isinstance(func, ClassTypes) and not as_instance:
# If it's a type and should be modelled as a type, use __init__.
try:
func = func.__init__
except AttributeError:
return None
# Skip the `self` argument in __init__
eat_self = True
elif not isinstance(func, FunctionTypes):
# If we really want to model an instance of the passed type,
# __call__ should be looked up, not __init__.
try:
func = func.__call__
except AttributeError:
return None
if eat_self:
sig_func = partial(func, None)
else:
sig_func = func
try:
return func, inspectsignature(sig_func)
except ValueError:
# Certain callable types are not supported by inspect.signature()
return None
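# A small usage sketch, not part of the original module: for a class,
# the signature comes from __init__ with `self` eaten via partial().
#   class C(object):
#       def __init__(self, a, b=1): pass
#   func, sig = _get_signature_object(C, as_instance=False, eat_self=False)
#   sig.bind(2)   # binds a=2, b defaults to 1
#   sig.bind()    # raises TypeError: 'a' is missing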
def _check_signature(func, mock, skipfirst, instance=False):
sig = _get_signature_object(func, instance, skipfirst)
if sig is None:
return
func, sig = sig
def checksig(_mock_self, *args, **kwargs):
sig.bind(*args, **kwargs)
_copy_func_details(func, checksig)
type(mock)._mock_check_sig = checksig
def _copy_func_details(func, funcopy):
funcopy.__name__ = func.__name__
funcopy.__doc__ = func.__doc__
try:
funcopy.__text_signature__ = func.__text_signature__
except AttributeError:
pass
# we explicitly don't copy func.__dict__ into this copy as it would
# expose original attributes that should be mocked
try:
funcopy.__module__ = func.__module__
except AttributeError:
pass
try:
funcopy.__defaults__ = func.__defaults__
except AttributeError:
pass
try:
funcopy.__kwdefaults__ = func.__kwdefaults__
except AttributeError:
pass
if six.PY2:
funcopy.func_defaults = func.func_defaults
return
def _callable(obj):
if isinstance(obj, ClassTypes):
return True
if getattr(obj, '__call__', None) is not None:
return True
return False
def _is_list(obj):
# checks for list or tuples
# XXXX badly named!
return type(obj) in (list, tuple)
def _instance_callable(obj):
"""Given an object, return True if the object is callable.
For classes, return True if instances would be callable."""
if not isinstance(obj, ClassTypes):
# already an instance
return getattr(obj, '__call__', None) is not None
if six.PY3:
# *could* be broken by a class overriding __mro__ or __dict__ via
# a metaclass
for base in (obj,) + obj.__mro__:
if base.__dict__.get('__call__') is not None:
return True
else:
klass = obj
# uses __bases__ instead of __mro__ so that we work with old style classes
if klass.__dict__.get('__call__') is not None:
return True
for base in klass.__bases__:
if _instance_callable(base):
return True
return False
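# Illustration only (not in the original file): callability is decided
# on the class, never on instance attributes.
#   class A(object):
#       def __call__(self): return 42
#   _instance_callable(A)         # True: instances of A would be callable
#   _instance_callable(A())       # True: an instance whose class has __call__
#   _instance_callable(object())  # False: no __call__ anywhere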
def _set_signature(mock, original, instance=False):
# creates a function with signature (*args, **kwargs) that delegates to a
# mock. It still does signature checking by calling a lambda with the same
# signature as the original.
if not _callable(original):
return
skipfirst = isinstance(original, ClassTypes)
result = _get_signature_object(original, instance, skipfirst)
if resu
| dbbhattacharya/kitsune | kitsune/wiki/facets.py | Python | bsd-3-clause | 5,435 | 0 |
import hashlib
from django.conf import settings
from django.core.cache import cache
from django.db.models import Count
from elasticsearch.exceptions import TransportError
from statsd import statsd
from kitsune.products.models import Topic
from kitsune.wiki.models import Document, DocumentMappingType
def topics_for(products, parent=False):
"""Returns a list of topics that apply to passed in products and topics.
:arg products: a list of Product instances
:arg parent: (optional) limit to topics with the given parent
"""
statsd.incr('wiki.facets.topics_for.db')
docs = Document.objects.filter(
locale=settings.WIKI_DEFAULT_LANGUAGE,
is_archived=False,
current_revision__isnull=False,
category__in=settings.IA_DEFAULT_CATEGORIES)
for product in products:
docs = docs.filter(products=product)
for product in products:
qs = Topic.objects.filter(product=product)
qs = (qs.filter(visible=True, document__in=docs)
.annotate(num_docs=Count('document'))
.distinct())
if parent or parent is None:
qs = qs.filter(parent=parent)
return qs
def documents_for(locale, topics=None, products=None):
"""Returns a tuple of lists of articles that apply to topics and products.
The first item in the tuple is the list of articles for the locale
specified. The second item is the list of fallback articles in en-US
that aren't localized to the specified locale. If the specified locale
is en-US, the second item will be None.
:arg locale: the locale
:arg topics: (optional) a list of Topic instances
:arg products: (optional) a list of Product instances
The articles are returned as a list of dicts with the following keys:
id
document_title
url
document_parent_id
"""
documents = _documents_for(locale, topics, products)
# For locales that aren't en-US, get the en-US documents
# to fill in for untranslated articles.
if locale != settings.WIKI_DEFAULT_LANGUAGE:
l10n_document_ids = [d['document_parent_id'] for d in documents if
'document_parent_id' in d]
en_documents = _documents_for(
locale=settings.WIKI_DEFAULT_LANGUAGE,
products=products,
topics=topics)
fallback_documents = [d for d in en_documents if
d['id'] not in l10n_document_ids]
else:
fallback_documents = None
return documents, fallback_documents
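# Hypothetical usage sketch (locale and instances are assumptions, not
# taken from this file):
#   docs, fallbacks = documents_for('fr', topics=[topic], products=[product])
#   # docs: French articles; fallbacks: en-US articles not yet localized
#   docs, fallbacks = documents_for('en-US')  # here fallbacks is None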
def _documents_for(locale, topics=None, products=None):
"""Returns a list of articles that apply to passed in topics and products.
"""
# First try to get the results from the cache
documents = cache.get(_documents_for_cache_key(
locale, topics, products))
if documents:
statsd.incr('wiki.facets.documents_for.cache')
return documents
try:
# Then try ES
documents = _es_documents_for(locale, topics, products)
cache.add(
_documents_for_cache_key(locale, topics, products),
documents)
statsd.incr('wiki.facets.documents_for.es')
except TransportError:
# Finally, hit the database (through cache machine)
# NOTE: The documents will be the same ones returned by ES
# but they won't be in the correct sort (by votes in the last
# 30 days). It is better to return them in the wrong order
# than not to return them at all.
documents = _db_documents_for(locale, topics, products)
statsd.incr('wiki.facets.documents_for.db')
return documents
def _es_documents_for(locale, topics=None, products=None):
"""ES implementation of documents_for."""
s = (DocumentMappingType.search()
.values_dict('id', 'document_title', 'url', 'document_parent_id',
'document_summary')
.filter(document_locale=locale, document_is_archived=False,
document_category__in=settings.IA_DEFAULT_CATEGORIES))
for topic in topics or []:
s = s.filter(topic=topic.slug)
for product in products or []:
s = s.filter(product=product.slug)
return list(s.order_by('-document_recent_helpful_votes')[:100])
def _db_documents_for(locale, topics=None, products=None):
"""DB implementation of documents_for."""
qs = Document.objects.filter(
locale=locale,
is_archived=False,
current_revision__isnull=False,
category__in=settings.IA_DEFAULT_CATEGORIES)
for topic in topics or []:
qs = qs.filter(topics=topic)
for product in products or []:
qs = qs.filter(products=product)
# Convert the results to dicts to look like the ES results.
doc_dicts = []
for d in qs.distinct():
doc_dicts.append(dict(
id=d.id,
document_title=d.title,
url=d.get_absolute_url(),
document_parent_id=d.parent_id,
document_summary=d.current_revision.summary))
return doc_dicts
def _documents_for_cache_key(locale, topics, products):
m = hashlib.md5()
key = '{locale}:{topics}:{products}:new'.format(
locale=locale,
topics=','.join(sorted([t.slug for t in topics or []])),
products=','.join(sorted([p.slug for p in products or []])))
m.update(key)
return 'documents_for:%s' % m.hexdigest()
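# Illustration with assumed slugs (not from this file): before hashing,
# the key reads 'en-US:video,websites:firefox:new'; the returned cache
# key is 'documents_for:' plus the md5 hexdigest of that string.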
| EuroPython/djep | pyconde/proposals/cms_app.py | Python | bsd-3-clause | 327 | 0.003058 |
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
from .menu import ProposalsMenu
class ProposalsApp(CMSApp):
name = _("Proposals app")
urls = ["pyconde.proposals.urls"]
menus = [ProposalsMenu]
apphook_pool.register(ProposalsApp)
| fifengine/fifengine | engine/python/fife/extensions/pychan/widgets/slider.py | Python | lgpl-2.1 | 7,472 | 0.054738 |
# -*- coding: utf-8 -*-
# ####################################################################
# Copyright (C) 2005-2019 by the FIFE team
# http://www.fifengine.net
# This file is part of FIFE.
#
# FIFE is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# ####################################################################
from __future__ import absolute_import
from fife import fifechan
from fife.extensions.pychan.attrs import IntAttr, FloatAttr
from .widget import Widget
class Slider(Widget):
""" A slider widget
Use a callback to read out the slider value every time the marker
is moved.
New Attributes
==============
- orientation: 1 = horizontal, 0 = vertical
- scale_start: float: default 0.0
- scale_end: float: default 1.0
- step_length: float: default scale_end/10
- marker_length: int: default 10
FIXME:
- update docstrings
"""
HORIZONTAL = fifechan.Slider.Horizontal
VERTICAL = fifechan.Slider.Vertical
ATTRIBUTES = Widget.ATTRIBUTES + [ IntAttr('orientation'),
FloatAttr('scale_start'),
FloatAttr('scale_end'),
FloatAttr('step_length'),
IntAttr('marker_length')
]
DEFAULT_HEXPAND = True
DEFAULT_VEXPAND = False
DEFAULT_SIZE = 10,10
DEFAULT_MIN_SIZE = 10,10
DEFAULT_SCALE_START = 0.0
DEFAULT_SCALE_END = 1.0
DEFAULT_STEP_LENGTH = 0.1
DEFAULT_MARKER_LENGTH = 10
DEFAULT_ORIENTATION = HORIZONTAL
def __init__(self,
parent = None,
name = None,
size = None,
min_size = None,
max_size = None,
fixed_size = None,
margins = None,
padding = None,
helptext = None,
position = None,
style = None,
hexpand = None,
vexpand = None,
font = None,
base_color = None,
background_color = None,
foreground_color = None,
selection_color = None,
border_color = None,
outline_color = None,
border_size = None,
outline_size = None,
position_technique = None,
is_focusable = None,
comment = None,
scale_start = None,
scale_end = None,
step_length = None,
marker_length = None,
orientation = None):
self.real_widget = fifechan.Slider(scale_start or self.DEFAULT_SCALE_START, scale_end or self.DEFAULT_SCALE_END)
self.orientation = self.DEFAULT_ORIENTATION
self.step_length = self.DEFAULT_STEP_LENGTH
self.marker_length = self.DEFAULT_MARKER_LENGTH
super(Slider, self).__init__(parent=parent,
name=name,
size=size,
min_size=min_size,
max_size=max_size,
fixed_size=fixed_size,
margins=margins,
padding=padding,
helptext=helptext,
position=position,
style=style,
hexpand=hexpand,
vexpand=vexpand,
font=font,
base_color=base_color,
background_color=background_color,
foreground_color=foreground_color,
selection_color=selection_color,
border_color=border_color,
outline_color=outline_color,
border_size=border_size,
outline_size=outline_size,
position_technique=position_technique,
is_focusable=is_focusable,
comment=comment)
if orientation is not None: self.orientation = orientation
if scale_start is not None: self.scale_start = scale_start
if scale_end is not None: self.scale_end = scale_end
if step_length is not None: self.step_length = step_length
if marker_length is not None: self.marker_length = marker_length
self.accepts_data = True
self._realSetData = self._setValue
self._realGetData = self._getValue
def clone(self, prefix):
sliderClone = Slider(None,
self._createNameWithPrefix(prefix),
self.size,
self.min_size,
self.max_size,
self.fixed_size,
self.margins,
self.padding,
self.helptext,
self.position,
self.style,
self.hexpand,
self.vexpand,
self.font,
self.base_color,
self.background_color,
self.foreground_color,
self.selection_color,
self.border_color,
self.outline_color,
self.border_size,
self.outline_size,
self.position_technique,
self.is_focusable,
self.comment,
self.scale_start,
self.scale_end,
self.step_length,
self.marker_length,
self.orientation)
return sliderClone
def _setScale(self, start, end):
"""setScale(self, double scaleStart, double scaleEnd)"""
if type(start) != float:
raise RuntimeError("Slider expects float for start scale")
if type(end) != float:
raise RuntimeError("Slider expects float for end scale")
self.real_widget.setScale(start, end)
def _getScaleStart(self):
"""getScaleStart(self) -> double"""
return self.real_widget.getScaleStart()
def _setScaleStart(self, start):
"""setScaleStart(self, double scaleStart)"""
if type(start) != float:
raise RuntimeError("Slider expects float for start scale")
self.real_widget.setScaleStart(start)
scale_start = property(_getScaleStart, _setScaleStart)
def _getScaleEnd(self):
"""getScaleEnd(self) -> double"""
return self.real_widget.getScaleEnd()
def _setScaleEnd(self, end):
"""setScaleEnd(self, double scaleEnd)"""
if type(end) != float:
raise RuntimeError("Slider expects float for end scale")
self.real_widget.setScaleEnd(end)
scale_end = property(_getScaleEnd, _setScaleEnd)
def _getValue(self):
"""getValue(self) -> double"""
return self.real_widget.getValue()
def _setValue(self, value):
"""setValue(self, double value)"""
if type(value) != float:
raise RuntimeError("Slider only accepts float values")
self.real_widget.setValue(value)
value = property(_getValue, _setValue)
def _setMarkerLength(self, length):
"""setMarkerLength(self, int length)"""
if type(length) != int:
raise RuntimeError("Slider only accepts int for Marker length")
self.real_widget.setMarkerLength(length)
def _getMarkerLength(self):
"""getMarkerLength(self) -> int"""
return self.real_widget.getMarkerLength()
marker_length = property(_getMarkerLength, _setMarkerLength)
def _setOrientation(self, orientation):
"""setOrientation(self, Orientation orientation)"""
self.real_widget.setOrientation(orientation)
def _getOrientation(self):
"""getOrientation(self) -> int"""
return self.real_widget.getOrientation()
orientation = property(_getOrientation, _setOrientation)
def _setStepLength(self, length):
"""setStepLength(self, double length)"""
if type(length) != float:
raise RuntimeError("Slider only accepts floats for step length")
self.real_widget.setStepLength(length)
def _getStepLength(self):
"""getStepLength(self) -> double"""
return self.real_widget.getStepLength()
step_length = property(_getStepLength, _setStepLength)
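# A minimal usage sketch under an assumed, initialized pychan setup; not
# part of the original file, and Widget.capture() is assumed API here.
#   slider = Slider(scale_start=0.0, scale_end=100.0, step_length=1.0)
#   def on_marker_moved():
#       print(slider.value)
#   slider.capture(on_marker_moved)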
| renatorangel/scheduler | src/structures/scheduler.py | Python | apache-2.0 | 566 | 0 |
class Scheduler(object):
"""Define a domain."""
def __init__(self, matches, problem):
""".
PARAMETERS TYPE Potential Arguments
-----------------------------------------------
"""
self.matches = matches
schedule = []
self.allSchedules = []
for result in problem.getSolutions():
for k in result.keys():
course = k
local = result[k]
schedule.append((course, local))
self.allSchedules.append(schedule.pop())
| cupertinomiranda/binutils | gdb/testsuite/gdb.perf/lib/perftest/testresult.py | Python | gpl-2.0 | 2,216 | 0.002708 |
# Copyright (C) 2013-2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
class TestResult(object):
"""Base class to record and report test results.
The record method records the results of a test case; the report
method reports the recorded results through a given reporter.
"""
def record(self, parameter, result):
raise NotImplementedError("Abstract Method:record.")
def report(self, reporter, name):
"""Report the test results by reporter."""
raise NotImplementedError("Abstract Method:report.")
class SingleStatisticTestResult(TestResult):
"""Test results for the test case with a single statistic."""
def __init__(self):
super (SingleStatisticTestResult, self).__init__ ()
self.results = dict ()
def record(self, parameter, result):
if parameter in self.results:
self.results[parameter].append(result)
else:
self.results[parameter] = [result]
def report(self, reporter, name):
reporter.start()
for key in sorted(self.results.iterkeys()):
reporter.report(name, key, self.results[key])
reporter.end()
class ResultFactory(object):
"""A factory to create an instance of TestResult."""
def create_result(self):
"""Create an instance of TestResult."""
raise NotImplementedError("Abstract Method:create_result.")
class SingleStatisticResultFactory(ResultFactory):
"""A factory to create an instance of SingleStatisticTestResult."""
def create_result(self):
return SingleStatisticTestResult()
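# Hedged usage sketch, not part of the original file; assumes Python 2
# to match the iterkeys() call above, and the reporter type here is a
# hypothetical stand-in for a real perftest reporter.
import sys

class _EchoReporter(object):
    """Minimal reporter for illustration only."""
    def start(self):
        sys.stdout.write('begin\n')
    def report(self, name, key, results):
        sys.stdout.write('%s %s %s\n' % (name, key, results))
    def end(self):
        sys.stdout.write('end\n')

_result = SingleStatisticResultFactory().create_result()
_result.record('cpu_time', 1.2)
_result.record('cpu_time', 1.3)
_result.report(_EchoReporter(), 'perf-demo')  # both samples under one key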
| RJT1990/pyflux | pyflux/setup.py | Python | bsd-3-clause | 792 | 0.002525 |
import os
PACKAGE_NAME = 'pyflux'
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(PACKAGE_NAME, parent_package, top_path)
config.add_subpackage('__check_build')
config.add_subpackage('arma')
config.add_subpackage('ensembles')
config.add_subpackage('families')
config.add_subpackage('garch')
config.add_subpackage('gas')
config.add_subpackage('gpnarx')
config.add_subpackage('inference')
config.add_subpackage('output')
config.add_subpackage('ssm')
config.add_subpackage('tests')
config.add_subpackage('var')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| ESOedX/edx-platform | openedx/core/djangoapps/catalog/tests/test_utils.py | Python | agpl-3.0 | 34,566 | 0.00243 |
"""Tests covering utilities for integrating with the catalog service."""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from collections import defaultdict
from datetime import timedelta
import mock
import six
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.utils.timezone import now
from opaque_keys.edx.keys import CourseKey
from course_modes.helpers import CourseMode
from course_modes.tests.factories import CourseModeFactory
from entitlements.tests.factories import CourseEntitlementFactory
from openedx.core.constants import COURSE_UNPUBLISHED
from openedx.core.djangoapps.catalog.cache import (
COURSE_PROGRAMS_CACHE_KEY_TPL,
PATHWAY_CACHE_KEY_TPL,
PROGRAM_CACHE_KEY_TPL,
PROGRAMS_BY_TYPE_CACHE_KEY_TPL,
SITE_PATHWAY_IDS_CACHE_KEY_TPL,
SITE_PROGRAM_UUIDS_CACHE_KEY_TPL
)
from openedx.core.djangoapps.catalog.models import CatalogIntegration
from openedx.core.djangoapps.catalog.tests.factories import (
CourseFactory,
CourseRunFactory,
PathwayFactory,
ProgramFactory,
ProgramTypeFactory
)
from openedx.core.djangoapps.catalog.tests.mixins import CatalogIntegrationMixin
from openedx.core.djangoapps.catalog.utils import (
child_programs,
course_run_keys_for_program,
is_course_run_in_program,
get_course_run_details,
get_course_runs,
get_course_runs_for_course,
get_currency_data,
get_localized_price_text,
get_owners_for_course,
get_pathways,
get_program_types,
get_programs,
get_programs_by_type,
get_visible_sessions_for_entitlement,
normalize_program_type,
)
from openedx.core.djangoapps.content.course_overviews.tests.factories import CourseOverviewFactory
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
from student.tests.factories import CourseEnrollmentFactory, UserFactory
UTILS_MODULE = 'openedx.core.djangoapps.catalog.utils'
User = get_user_model() # pylint: disable=invalid-name
@skip_unless_lms
@mock.patch(UTILS_MODULE + '.logger.info')
@mock.patch(UTILS_MODULE + '.logger.warning')
class TestGetPrograms(CacheIsolationTestCase):
ENABLED_CACHES = ['default']
def setUp(self):
super(TestGetPrograms, self).setUp()
self.site = SiteFactory()
def test_get_many(self, mock_warning, mock_info):
programs = ProgramFactory.create_batch(3)
# Cache details for 2 of 3 programs.
partial_programs = {
PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid']): program for program in programs[:2]
}
cache.set_many(partial_programs, None)
# When called before UUIDs are cached, the function should return an
# empty list and log a warning.
self.assertEqual(get_programs(site=self.site), [])
mock_warning.assert_called_once_with(
u'Failed to get program UUIDs from the cache for site {}.'.format(self.site.domain)
)
mock_warning.reset_mock()
# Cache UUIDs for all 3 programs.
cache.set(
SITE_PROGRAM_UUIDS_CACHE_KEY_TPL.format(domain=self.site.domain),
[program['uuid'] for program in programs],
None
)
actual_programs = get_programs(site=self.site)
# The 2 cached programs should be returned while info and warning
# messages should be logged for the missing one.
self.assertEqual(
set(program['uuid'] for program in actual_programs),
set(program['uuid'] for program in partial_programs.values())
)
mock_info.assert_called_with('Failed to get details for 1 programs. Retrying.')
mock_warning.assert_called_with(
u'Failed to get details for program {uuid} from the cache.'.format(uuid=programs[2]['uuid'])
)
mock_warning.reset_mock()
# We can't use a set comparison here because these values are dictionaries
# and aren't hashable. We've already verified that all programs came out
# of the cache above, so all we need to do here is verify the accuracy of
# the data itself.
for program in actual_programs:
key = PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid'])
self.assertEqual(program, partial_programs[key])
# Cache details for all 3 programs.
all_programs = {
PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid']): program for program in programs
}
cache.set_many(all_programs, None)
actual_programs = get_programs(site=self.site)
# All 3 programs should be returned.
self.assertEqual(
set(program['uuid'] for program in actual_programs),
set(program['uuid'] for program in all_programs.values())
)
self.assertFalse(mock_warning.called)
for program in actual_programs:
key = PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid'])
self.assertEqual(program, all_programs[key])
@mock.patch(UTILS_MODULE + '.cache')
def test_get_many_with_missing(self, mock_cache, mock_warning, mock_info):
programs = ProgramFactory.create_batch(3)
all_programs = {
PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid']): program for program in programs
}
partial_programs = {
PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid']): program for program in programs[:2]
}
def fake_get_many(keys):
if len(keys) == 1:
return {PROGRAM_CACHE_KEY_TPL.format(uuid=programs[-1]['uuid']): programs[-1]}
else:
return partial_programs
mock_cache.get.return_value = [program['uuid'] for program in programs]
mock_cache.get_many.side_effect = fake_get_many
actual_programs = get_programs(site=self.site)
# All 3 cached programs should be returned. An info message should be
# logged about the one that was initially missing, but the code should
# be able to stitch together all the details.
self.assertEqual(
set(program['uuid'] for program in actual_programs),
set(program['uuid'] for program in all_programs.values())
)
self.assertFalse(mock_warning.called)
mock_info.assert_called_with('Failed to get details for 1 programs. Retrying.')
for program in actual_programs:
key = PROGRAM_CACHE_KEY_TPL.format(uuid=program['uuid'])
self.assertEqual(program, all_programs[key])
def test_get_one(self, mock_warning, _mock_info):
expected_program = ProgramFactory()
expected_uuid = expected_program['uuid']
self.assertEqual(get_programs(uuid=expected_uuid), None)
mock_warning.assert_called_once_with(
u'Failed to get details for program {uuid} from the cache.'.format(uuid=expected_uuid)
)
mock_warning.reset_mock()
cache.set(
PROGRAM_CACHE_KEY_TPL.format(uuid=expected_uuid),
expected_program,
None
)
actual_program = get_programs(uuid=expected_uuid)
self.assertEqual(actual_program, expected_program)
self.assertFalse(mock_warning.called)
def test_get_from_course(self, mock_warning, _mock_info):
expected_program = ProgramFactory()
expected_course = expected_program['courses'][0]['course_runs'][0]['key']
self.assertEqual(get_programs(course=expected_course), [])
cache.set(
COURSE_PROGRAMS_CACHE_KEY_TPL.format(course_run_id=expected_course),
[expected_program['uuid']],
None
)
cache.set(
PROGRAM_CACHE_KEY_TPL.format(uuid=expected_program['uuid']),
expected_program,
None
)
actual_program = get_programs(course=expected_course)
self.assertEqual(actual_program, [expected_program])
self.assertFalse(mock_
| Etxea/gestioneide | gestioneide/migrations/0050_auto_20180915_1745.py | Python | gpl-3.0 | 721 | 0.001387 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2018-09-15 15:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gestioneide', '0049_auto_20180516_2118'),
]
operations = [
migrations.AddField(
model_name='empresa',
name='cif',
field=models.CharField(default=b'B12345678', max_length=9),
),
migrations.AlterField(
model_name='empresa',
name='razon_social',
field=models.CharField(default=b'ESCUELAS INTERNACIONALES E.I.D.E. S.L.', max_length=255, verbose_name=b'Raz\xc3\xb3n Social'),
),
]
| satendrapandeymp/Facebook_message_download | Message_windows.py | Python | mit | 13,157 | 0.031618 |
from fbchat import Client, log
from getpass import getpass
from datetime import datetime
import sys, os, urllib, time, socket, shutil, requests
from glob import glob
from zipfile import ZipFile
socket.setdefaulttimeout(60)
reload(sys)
sys.setdefaultencoding("utf-8")
ending = '</div></div>'
username = str(raw_input("Username: "))
password = getpass()
client = Client(username, password)
zipping = str(raw_input("Want to save your data as a .Zip file y/n?: "))
uid = client.uid
USER = client.fetchUserInfo(client.uid)[client.uid]
self = USER.name
ID = []
NAME = []
docs = ['docx', 'doc', 'pdf', 'pptx', 'txt', 'xlsx']
media = ['mp3', 'mp4', 'aac', 'webm', 'avi', '3gp']
gen = ['jpg', 'png']
def download_file(add, name):
request = requests.get(add, timeout=60, stream=True)
#Open the output file and make sure we write in binary mode
flag = 0
with open(name, 'wb') as fh:
# Walk through the request response in chunks of 1024 * 1024 bytes, so 1MiB
for chunk in request.iter_content(1024 * 1024):
# Write the chunk to the file
flag += 1
if flag > 10:
Log_file.write("This file is bigger than 10MB so download it if you want-- " + add + '\n\n')
break
fh.write(chunk)
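# Illustrative call with an assumed URL (not in the original script):
# streams at most ~10 MiB to disk; larger files are skipped with a note
# written to the enclosing Log_file handle opened inside do_rest() below.
#   download_file('https://example.com/photo.jpg', 'Data\\photo.jpg')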
def make_zip():
file = open('instruction.txt', 'w')
file.write("Use your facebook password to decrypt Fb_Data.zip file")
file.close()
files = glob("Data\\*\\*\\*")
files += glob("Data\\*\\*")
files += glob("Data\\*")
zipfile = ZipFile("Fb_Data.zip", 'w')
for file in files:
if os.path.isfile(file):
zipfile.write(file)
zipfile.close()
shutil.rmtree("Data")
def do_rest(thread):
check = 0
data = str(thread).split(" ")
id = data[len(data)-1].split('(')[1].split(')')[0]
other = data[1]
name = str(data[1])
if len(data) == 4:
other = data[1] + " " + data[2]
name = str(data[1]) + '_' + str(data[2])
if len(data) == 5:
other = data[1] + " " + data[2] + " " + data[3]
name = data[1] + '_' + data[2] + '_' + data[3]
if len(data) == 6:
other = data[1] + " " + data[2] + " " + data[3] + " " + data[4]
name = data[1] + '_' + data[2] + '_' + data[3] + '_' + data[4]
starting = '<html><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8" /> <title>' + other + '- Messages</title> <link rel="stylesheet" href="..\\..\\style.css" type="text/css" /></head><body> <div class="contents"><h1>' + other +'</h1> <div class="thread"> Total number of messages = ' + str(thread.message_count)
Testing = Path_check(other)
folder_name = "Data\\" + other
log_file = folder_name+"\\" + name + ".txt"
filename = folder_name+"\\html\\" + name + ".htm"
file = open(filename, 'wb')
Log_file = open(log_file, 'wb')
file.write(starting)
flag = 1000
num = 0
timestamp = int(19800 + time.time())*1000
if str(data[0]) != '<GROUP':
ID.append(id)
NAME.append(other)
check = 1
while( flag > 999):
messages = client.fetchThreadMessages(thread_id=id, limit=1000, before=timestamp)
timestamp = messages[len(messages)-1].timestamp
for message in messages:
try:
if check == 0:
if message.author not in ID:
USER = client.fetchUserInfo(message.author)[message.author]
other = USER.name
ID.append(message.author)
NAME.append(other)
else:
for i in range(len(ID)):
if message.author == ID[i]:
other = NAME[i]
break
if message.extensible_attachment:
if message.extensible_attachment['story_attachment']['media']:
if message.extensible_attachment['story_attachment']['media']['is_playable']:
add = message.extensible_attachment['story_attachment']['media']['playable_url']
Filename = folder_name + "\\shares\\" + str(message.timestamp) + '.mp4'
if add is not None:
try:
download_file(add, Filename)
if message.author == uid:
file.write('<div class="message"><div class="message_header"><span class="user">' + self + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <video width="500" controls> <source src="..\\..\\..\\' + Filename + '" type="video/mp4"></p> \n' )
else:
file.write('<div class="message"><div class="message_header"><span class="user">' + other + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <video width="500" controls> <source src="..\\..\\..\\' + Filename + '" type="video/mp4"></p> \n' )
except:
Log_file.write("Getting some error now on url -: " + add + '\n\n')
else:
Log_file.write("Look at this separately--" + str(message.extensible_attachment) + '\n\n')
elif message.attachments:
for attachment in message.attachments:
# For Image
time.sleep(.1)
Filename = attachment['filename']
if Filename.split("-")[0] == 'image':
add = attachment['large_preview']['uri']
name = folder_name +"\\images\\"+ attachment['filename']+'.' +attachment['original_extension']
try:
download_file(add, name)
if message.author == uid:
file.write('<div class="message"><div class="message_header"><span class="user">' + self + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <a href="..\\..\\..\\'+ name +'"> <img src="..\\..\\..\\'+ name + '" alt="Folder" width="500" > </a></p> \n' )
else:
file.write('<div class="message"><div class="message_header"><span class="user">' + other + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <a href="..\\..\\..\\'+ name +'"> <img src="..\\..\\..\\'+ name + '" alt="Folder" width="500" > </a></p> \n' )
except:
Log_file.write( "Getting some error now on url -: " + add + '\n\n')
elif len(Filename.split(".")) > 1 and Filename.split(".")[len(Filename.split("."))-1] in docs:
add = attachment['url']
test = urllib.urlopen(add)
temp = test.read().split('replace("')[1]
temp = temp.split('");</script>')[0]
temp = temp.replace("\\","")
Temp = Filename
Filename = folder_name + "\\docs\\" + Filename
try:
download_file(temp, Filename)
if message.author == uid:
file.write('<div class="message"><div class="message_header"><span class="user">' + self + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <a href="..\\..\\..\\'+ Filename +'">' + Temp + '</a></p> \n' )
else:
file.write('<div class="message"><div class="message_header"><span class="user">' + other + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <a href="..\\..\\..\\'+ Filename +'">' + Temp + '</a></p> \n' )
except:
Log_file.write( "Getting some error now on url -: " + temp + '\n\n')
elif len(Filename.split(".")) > 1 and Filename.split(".")[len(Filename.split("."))-1] in media:
try:
add = attachment['playable_url']
Filename = folder_name + "\\media\\" + Filename
download_file(add, Filename)
if message.author == uid:
file.write('<div class="message"><div class="message_header"><span class="user">' + self + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></div></div><p> <video width="500" controls> <source src="..\\..\\..\\' + Filename + '" type="video/mp4"></p> \n' )
else:
file.write('<div class="message"><div class="message_header"><span class="user">' + other + ' </span><span class="meta"> ')
file.write(str(datetime.fromtimestamp(float(int(message.timestamp)/1000))))
file.write('</span></di
| bitcrystal/buildtools-BaseTools | Source/Python/Eot/ParserWarning.py | Python | bsd-2-clause | 1,000 | 0.01 |
## @file
# Warning information of Eot
#
# Copyright (c) 2007 - 2010, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
class Warning (Exception):
## The constructor
#
# @param self The object pointer
# @param Str The message to record
# @param File The FDF name
# @param Line The Line number that error occurs
#
def __init__(self, Str, File = None, Line = None):
self.message = Str
self.FileName = File
self.LineNumber = Line
self.ToolName = 'EOT'
| dumel93/project- | type_page/migrations/0005_footballtype_comments.py | Python | mit | 471 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-12 14:48
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('type_page', '0004_auto_20170711_1241'),
]
operations = [
migrations.AddField(
model_name='footballtype',
name='comments',
field=models.CharField(max_length=128, null=True),
),
]
| benoitc/offset | examples/demo_ticker.py | Python | mit | 302 | 0.003311 |
# -*- coding: utf-8 -*-
#
# This file is part of offset. See the NOTICE for more information.
from offset import run, maintask
from offset.time import Ticker, SECOND
@maintask
def main():
ticker = Ticker(0.1 * SECOND)
for i in range(3):
print(ticker.c.recv())
ticker.stop()
run()
| JakeColtman/bartpy | examples/score/core/sin.py | Python | mit | 1,238 | 0 |
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from bartpy.sklearnmodel import SklearnModel
def run(size=100,
alpha=0.95,
beta=2.0,
n_trees=50):
import warnings
warnings.simplefilter("error", UserWarning)
x = np.linspace(0, 5, size)
X = pd.DataFrame(x)
y = np.random.normal(0, 0.1, size=size) + np.sin(x)
model = SklearnModel(
n_samples=100,
n_burn=50,
n_trees=n_trees,
alpha=alpha,
beta=beta,
n_jobs=1,
n_chains=1)
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.33,
random_state=42,
shuffle=True)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
plt.scatter(y_test, y_pred)
plt.show()
rmse = np.sqrt(np.sum(np.square(y_test - y_pred)))
print(rmse)
if __name__ == "__main__":
run(50, 0.95, 2.0)
| tntrung/youCVML | misc/patch.py | Python | gpl-2.0 | 222 | 0.022523 |
import numpy as np
# ===== Patch normalization by mean intensity ========================
def mean_intensity_norm(patch):
    mu = np.sum(patch) * 1.0 / (patch.shape[0] * patch.shape[1])
    return patch - mu
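# A minimal, hypothetical check of the fixed helper above (not part of
# the original file): after mean subtraction the patch mean is ~0.
if __name__ == '__main__':
    demo = np.arange(6, dtype=float).reshape(2, 3)
    print(mean_intensity_norm(demo).mean())  # 0.0 up to float error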
| simon-wenmouth/dealerships | dealerships/manufacturers/jeep/download_data.py | Python | mit | 628 | 0.009554 |
#!/usr/bin/env python3
import os
import errno
import requests
url='http://www.jeep.com/hostd/getlocatedealers.json?zipCode=60202&zipDistance=2500'
directory_name=os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'data'))
try:
os.makedirs(directory_name)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(directory_name):
pass
else:
raise
file_name=os.path.join(directory_name, 'jeep.json')
response = requests.get(url, stream=True)
with open(file_name, 'wb') as fd:
for chunk in response.iter_content(chunk_size=1024):
fd.write(chunk)
| RRCKI/panda-server | pandaserver/test/testDQ.py | Python | apache-2.0 | 6,730 | 0.021545 |
import commands
from dataservice.DDM import ddm
#print ddm.DQ2ProductionClient.generateUUID()
#print ddm.DQ2.getFilesFromCatalog('aho.xml')
#print ddm.DQ2ProductionClient.dq2_makeblocks('input.data')
ids=['pandatest.000003.dd.input._00047.junk','09801b0a-9fd0-4237-8caf-a37932c26e39',
'pandatest.000003.dd.input._00050.junk','6dd3d367-4aa3-4e1a-9ac3-9ad14b7311f4',
'pandatest.000003.dd.input._00037.junk','817c2c92-467b-4a1b-9482-f2ec8468cf2e',
'pandatest.000003.dd.input._00021.junk','7720527f-817e-40c7-9e29-ce237f59edfa',
'pandatest.000003.dd.input._00023.junk','5f1f9982-85a3-4d1a-9ee9-f1de22c02544',
'pandatest.000003.dd.input._00042.junk','610cc91a-c731-4bce-ac7a-ff5133e7d18b',
'pandatest.000003.dd.input._00027.junk','bd987478-3c59-4551-b12b-2853bac25613',
'pandatest.000003.dd.input._00032.junk','9d0424f3-7552-4282-92f2-dfe74e9a6c12',
'pandatest.000003.dd.input._00009.junk','dce33d4a-4569-49ee-95c5-b619b161c777',
'pandatest.000003.dd.input._00036.junk','2fc9836b-82d6-41b0-b966-a5c37662172d',
'pandatest.000003.dd.input._00031.junk','65b957e0-5ecc-44bb-a1f9-cccb61ca2d16',
'pandatest.000003.dd.input._00025.junk','be29fe82-17e2-4122-b4c8-f49a0b76c81f',
'pandatest.000003.dd.input._00029.junk','afa4322f-409b-4327-9169-229d8d48ad5a',
'pandatest.000003.dd.input._00013.junk','cf236d3b-45fd-4b58-bdfb-59abc983c886',
'pandatest.000003.dd.input._00020.junk','b02f98da-0138-4b58-89ba-a88f37214a89',
'pandatest.000003.dd.input._00001.junk','12ab5bb9-944e-4e75-bb90-b64c462d4cd8',
'pandatest.000003.dd.input._00001.junk','12ab5bb9-944e-4e75-bb90-b64c462d4cd8',
'pandatest.000003.dd.input._00006.junk','c0a422ad-e9f1-44bb-9539-cfef7e739da2',
'pandatest.000003.dd.input._00034.junk','da670db3-3638-4f06-b650-a9315eb2bd63',
'pandatest.000003.dd.input._00046.junk','2fcef270-2e41-472d-83c0-53749b401b74',
'pandatest.000003.dd.input._00012.junk','5e212fa1-201f-494d-a2b2-420b229b08fc',
'pandatest.000003.dd.input._00044.junk','87c8ebcc-a637-4204-b77b-8219e68b98d7',
'pandatest.000003.dd.input._00030.junk','87ad811f-7d39-43d9-8a13-e117079bb208',
'pandatest.000003.dd.input._00022.junk','6b902506-1ee1-46b1-a105-1521a8c0dbca',
'pandatest.000003.dd.input._00017.junk','2bbed213-943c-41be-b9d7-7d86a309b0b2',
'pandatest.000003.dd.input._00049.junk','8366e269-f9ae-4b9c-bd98-df4027c992c7',
'pandatest.000003.dd.input._00015.junk','f3c5f37c-b4c2-4933-9633-467ba3a7c364',
'pandatest.000003.dd.input._00004.junk','35d66be2-9d21-44a3-96f7-903a7abf4a87',
'pandatest.000003.dd.input._00010.junk','2279ea3e-ebbb-4b19-9a69-9868f0cce694',
'pandatest.000003.dd.input._00040.junk','a847dbbb-4f98-4b5b-b353-e29e3e3b3fd5',
'pandatest.000003.dd.input._00007.junk','abfef002-62ca-4d84-9813-6329764e38bd',
'pandatest.000003.dd.input._00048.junk','52854023-67d8-4a0f-99ac-bb1f0bd1dc98',
'pandatest.000003.dd.input._00016.junk','bddf7441-6ac9-4087-bafe-32e47448cdc1',
'pandatest.000003.dd.input._00041.junk','c76999ba-4cdf-49e9-bfa5-ff3525fbf1ab',
'pandatest.000003.dd.input._00003.junk','4865119e-367f-4dd8-bdff-505bd878dfde',
'pandatest.000003.dd.input._00019.junk','b9fce1fd-8d4c-4fc4-932f-12b13263ca0c',
'pandatest.000003.dd.input._00011.junk','f93a4e08-fd4f-45fc-b324-91ff59555b1c',
'pandatest.000003.dd.input._00018.junk','e4894561-9589-40d8-871b-b57d70564384',
'pandatest.000003.dd.input._00002.junk','58934980-5ab3-4a66-b3da-55f86d4b54bd',
'pandatest.000003.dd.input._00005.junk','5993fe60-bc8c-4fd8-aac1-dfd55700c9c3',
'pandatest.000003.dd.input._00028.junk','6c19e1fc-ee8c-4bae-bd4c-c9e5c73aca27',
'pandatest.000003.dd.input._00033.junk','98f79ba1-1793-4253-aac7-bdf90a51d1ee',
'pandatest.000003.dd.input._00039.junk','33660dd5-7cef-422a-a7fc-6c24cb10deb1',
'pandatest.000003.dd.input._00014.junk','5c0e9ed8-05a6-41c4-8c07-39b2be33ebc1',
'pandatest.000003.dd.input._00008.junk','b0c184d1-5f5e-45a6-9cc8-8b0f20a85463',
'pandatest.000003.dd.input._00038.junk','b9171997-4d2b-4075-b154-579ebe9438fa',
'pandatest.000003.dd.input._00026.junk','89e5bdf1-15de-44ae-a388-06c1e7d7e2fc',
'pandatest.000003.dd.input._00024.junk','c77b77a2-e6d1-4360-8751-19d9fb77e1f1',
'pandatest.000003.dd.input._00043.junk','cc6ac2a1-4616-4551-80a7-d96f79252b64',
'pandatest.000003.dd.input._00045.junk','ddbed17a-6d65-4e8d-890a-21e1eaa3e9d6',
'pandatest.000003.dd.input._00035.junk','8ed1875a-eb90-4906-8fc4-0449d300ddfe'
]
for i in range(1):
datasetName='testDQ.%s' % commands.getoutput('/usr/bin/uuidgen')
print datasetName
#['pandatest.000003.dd.input._00004.junk','35d66be2-9d21-44a3-96f7-903a7abf4a87']
#'pandatest.000003.dd.input._00028.junk','6c19e1fc-ee8c-4bae-bd4c-c9e5c73aca27',
# 'pandatest.000003.dd.input._00033.junk','98f79ba1-1793-4253-aac7-bdf90a51d1ee']
print (['registerNewDataset','-c',datasetName]+ids[i*2:i*2+2])
ddm.DQ2.main(['registerNewDataset','-c',datasetName]+ids[i*2:i*2+2])
'''
status,out = ddm.RepositoryClient.main(['queryDatasetByName',datasetName])
exec "vuids = %s" % out.split('\n')[0]
if vuids.has_key(datasetName):
vuid = vuids[datasetName]
print vuid
status,out = ddm.RepositoryClient.main(['resolveVUID',vuid])
status,out = ddm.DQ2.getFilesFromCatalog('baka.xml')
exec "rets = %s" % out.split('\n')[0]
print rets[0]
exec "ids = %s" % out
print ddm.DQ2.main(['addFilesToDataset',datasetName]+ids)
status,out = ddm.DQ2.main(['listFilesInDataset',datasetName])
print out
'''
print (['registerDatasetLocations','-c',datasetName,'http://dms02.usatlas.bnl.gov/sites/bnl/lrc'])
ddm.DQ2.main(['registerDatasetLocations','-c',datasetName,
'http://dms02.usatlas.bnl.gov/sites/bnl/lrc'])
print (['registerDatasetSubscription',datasetName,'http://doe-dhcp241.bu.edu:8000/dq2/'])
ddm.DQ2.main(['registerDatasetSubscription',datasetName,'http://doe-dhcp241.bu.edu:8000/dq2/'])
#print ddm.DQ2.main(['eraseDataset',datasetName])
#print ddm.DQ2.main(['eraseDataset',datasetName])
#print ddm.DQ2ProductionClient.dq2_create_dataset(datasetName)
#status,out = ddm.DQ2ProductionClient.dq2_assign_destination(datasetName,'BNL_SE')
#print out
#print ddm.DQ2.main(['eraseDataset',datasetName])
#status,out = ddm.DQ2.main(['listFilesInDataset','panda.destDB.11aed982-8079-4db9-964c-37a284b8597a'])
#print out
ddm.DQ2_iter.listFileReplicasBySites('mc11_7TeV.151900.madgraph_SM_SG_SS_direct_1200_600_395.merge.AOD.e1095_a131_s1353_a145_r2993_tid723983_00',
0,['SARA-MATRIX_DATADISK'],
0,300)
| findapad/find_a_pad | find_a_pad_app/migrations/0004_auto_20170709_1432.py | Python | mit | 653 | 0 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-09 14:32
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('find_a_pad_app', '0003_auto_20170709_1432'),
]
operations = [
migrations.AlterField(
model_name='organization',
name='email',
field=models.CharField(blank=True, max_length=100),
),
migrations.AlterField(
model_name='organization',
name='phone_number',
field=models.CharField(blank=True, max_length=12),
),
]
| albertz/music-player | mac/pyobjc-framework-Cocoa/Examples/AppKit/PyObjCLauncher/FileSettings.py | Python | bsd-2-clause | 6,020 | 0.001495 |
from Foundation import *
from AppKit import *
class FileSettings(NSObject):
fsdefault_py = None
fsdefault_pyw = None
fsdefault_pyc = None
default_py = None
default_pyw = None
default_pyc = None
factorySettings = None
prefskey = None
settings = None
def getFactorySettingsForFileType_(cls, filetype):
if filetype == u'Python Script':
curdefault = cls.fsdefault_py
elif filetype == u'Python GUI Script':
curdefault = cls.fsdefault_pyw
elif filetype == u'Python Bytecode Document':
curdefault = cls.fsdefault_pyc
else:
NSLog(u'Funny File Type: %s\n', filetype)
curdefault = cls.fsdefault_py
filetype = u'Python Script'
if curdefault is None:
curdefault = FileSettings.alloc().initForFSDefaultFileType_(filetype)
return curdefault
getFactorySettingsForFileType_ = classmethod(getFactorySettingsForFileType_)
def getDefaultsForFileType_(cls, filetype):
if filetype == u'Python Script':
curdefault = cls.default_py
elif filetype == u'Python GUI Script':
curdefault = cls.default_pyw
elif filetype == u'Python Bytecode Document':
curdefault = cls.default_pyc
else:
NSLog(u'Funny File Type: %s', filetype)
curdefault = cls.default_py
filetype = u'Python Script'
if curdefault is None:
curdefault = FileSettings.alloc().initForDefaultFileType_(filetype)
return curdefault
getDefaultsForFileType_ = classmethod(getDefaultsForFileType_)
def newSettingsForFileType_(cls, filetype):
return FileSettings.alloc().initForFileType_(filetype)
newSettingsForFileType_ = classmethod(newSettingsForFileType_)
def initWithFileSettings_(self, source):
self = super(FileSettings, self).init()
self.settings = source.fileSettingsAsDict().copy()
self.origsource = None
return self
def initForFileType_(self, filetype):
defaults = FileSettings.getDefaultsForFileType_(filetype)
self = self.initWithFileSettings_(defaults)
self.origsource = defaults
return self
def initForFSDefaultFileType_(self, filetype):
self = super(FileSettings, self).init()
if type(self).factorySettings is None:
bndl = NSBundle.mainBundle()
path = bndl.pathForResource_ofType_(u'factorySettings', u'plist')
type(self).factorySettings = NSDictionary.dictionaryWithContentsOfFile_(path)
if type(self).factorySettings is None:
NSLog(u'Missing %s', path)
return None
dct = type(self).factorySettings.get(filetype)
if dct is None:
NSLog(u'factorySettings.plist misses file type "%s"', filetype)
return None
self.applyValuesFromDict_(dct)
interpreters = dct[u'interpreter_list']
mgr = NSFileManager.defaultManager()
self.settings['interpreter'] = u'no default found'
for filename in interpreters:
filename = filename.nsstring().stringByExpandingTildeInPath()
if mgr.fileExistsAtPath_(filename):
self.settings['interpreter'] = filename
break
self.origsource = None
return self
def applyUserDefaults_(self, filetype):
dct = NSUserDefaults.standardUserDefaults().dictionaryForKey_(filetype)
if dct:
self.applyValuesFromDict_(dct)
def initForDefaultFileType_(self, filetype):
fsdefaults = FileSettings.getFactorySettingsForFileType_(filetype)
self = self.initWithFileSettings_(fsdefaults)
if self is None:
return self
self.settings['interpreter_list'] = fsdefaults.settings['interpreter_list']
self.settings['scriptargs'] = u''
self.applyUserDefaults_(filetype)
self.prefskey = filetype
return self
def reset(self):
if self.origsource:
self.updateFromSource_(self.origsource)
else:
fsdefaults = FileSettings.getFactorySettingsForFileType_(self.prefskey)
self.updateFromSource_(fsdefaults)
def updateFromSource_(self, source):
self.settings.update(source.fileSettingsAsDict())
if self.origsource is None:
NSUserDefaults.standardUserDefaults().setObject_forKey_(self.fileSettingsAsDict(), self.prefskey)
def applyValuesFromDict_(self, dct):
if self.settings is None:
self.settings = {}
self.settings.update(dct)
def commandLineForScript_(self, script):
cur_interp = None
if self.settings['honourhashbang']:
try:
line = file(script, 'rU').next().rstrip()
except:
pass
else:
if line.startswith('#!'):
cur_interp = line[2:]
if cur_interp is None:
cur_interp = self.settings['interpreter']
cmd = []
cmd.append('"'+cur_interp.replace('"', '\\"')+'"')
if self.settings['debug']:
cmd.append('-d')
if self.settings['verbose']:
cmd.append('-v')
if self.settings['inspect']:
cmd.append('-i')
if self.settings['optimize']:
cmd.append('-O')
if self.settings['nosite']:
cmd.append('-S')
if self.settings['tabs']:
cmd.append('-t')
others = self.settings['others']
if others:
cmd.append(others)
cmd.append('"'+script.replace('"', '\\"')+'"')
cmd.append(self.settings['scriptargs'])
if self.settings['with_terminal']:
cmd.append("""&& echo "Exit status: $?" && python -c 'import sys;sys.stdin.readline()' && exit 1""")
else:
cmd.append('&')
return ' '.join(cmd)
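# Hedged illustration with assumed settings (not from the original file):
# with only debug and verbose enabled, no terminal, empty scriptargs and
# an assumed interpreter, commandLineForScript_('/tmp/t.py') yields
#   "/usr/bin/python" -d -v "/tmp/t.py"  &
# (the double space comes from the empty scriptargs element).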
def fileSettingsAsDict(self):
return self.settings
| pivonroll/Qt_Creator | tests/system/shared/classes.py | Python | gpl-3.0 | 7,863 | 0.004324 |
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import operator
# for easier re-usage (because Python hasn't an enum type)
class Targets:
ALL_TARGETS = map(lambda x: 2 ** x, range(7))
(DESKTOP_474_GCC,
DESKTOP_480_DEFAULT,
SIMULATOR,
EMBEDDED_LINUX,
DESKTOP_521_DEFAULT,
DESKTOP_531_DEFAULT,
DESKTOP_541_GCC) = ALL_TARGETS
@staticmethod
def desktopTargetClasses():
desktopTargets = (sum(Targets.ALL_TARGETS) & ~Targets.SIMULATOR & ~Targets.EMBEDDED_LINUX)
if platform.system() == 'Darwin':
desktopTargets &= ~Targets.DESKTOP_541_GCC
return desktopTargets
@staticmethod
def qt4Classes():
return (Targets.DESKTOP_474_GCC | Targets.DESKTOP_480_DEFAULT
| Targets.SIMULATOR | Targets.EMBEDDED_LINUX)
@staticmethod
def getStringForTarget(target):
if target == Targets.DESKTOP_474_GCC:
return "Desktop 474 GCC"
elif target == Targets.DESKTOP_480_DEFAULT:
if platform.system() in ('Windows', 'Microsoft'):
return "Desktop 480 MSVC2010"
else:
return "Desktop 480 GCC"
elif target == Targets.SIMULATOR:
return "Qt Simulator"
elif target == Targets.EMBEDDED_LINUX:
return "Embedded Linux"
elif target == Targets.DESKTOP_521_DEFAULT:
return "Desktop 521 default"
elif target == Targets.DESKTOP_531_DEFAULT:
return "Desktop 531 default"
elif target == Targets.DESKTOP_541_GCC:
return "Desktop 541 GCC"
else:
return None
@staticmethod
def getTargetsAsStrings(targets):
if not isinstance(targets, (tuple,list)):
test.fatal("Wrong usage... This function handles only tuples or lists.")
return None
result = map(Targets.getStringForTarget, targets)
if None in result:
test.fatal("You've passed at least one unknown target!")
return result
@staticmethod
def intToArray(targets):
return filter(lambda x: x & targets, Targets.ALL_TARGETS)
@staticmethod
def arrayToInt(targetArr):
return reduce(operator.or_, targetArr, 0)
@staticmethod
def getDefaultKit():
return Targets.DESKTOP_521_DEFAULT
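# Illustration, not in the original file: targets are single-bit flags,
# so a set of targets round-trips through its bitmask.
#   arr = Targets.intToArray(Targets.DESKTOP_474_GCC | Targets.SIMULATOR)
#   # arr == [Targets.DESKTOP_474_GCC, Targets.SIMULATOR]
#   Targets.arrayToInt(arr) == Targets.DESKTOP_474_GCC | Targets.SIMULATOR  # True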
# this class holds some constants for easier usage inside the Projects view
class ProjectSettings:
BUILD = 1
RUN = 2
# this class defines some constants for the views of the creator's MainWindow
class ViewConstants:
WELCOME, EDIT, DESIGN, DEBUG, PROJECTS, HELP = range(6)
FIRST_AVAILABLE = 0
    # always adjust the following to the highest value of the available ViewConstants when adding new ones
LAST_AVAILABLE = HELP
# this function returns a regex of the tooltip of the FancyTabBar elements
# this is needed because the keyboard shortcut is OS specific
# if the provided argument does not match any of the ViewConstants it returns None
@staticmethod
def getToolTipForViewTab(viewTab):
if viewTab == ViewConstants.WELCOME:
toolTip = ur'Switch to <b>Welcome</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
elif viewTab == ViewConstants.EDIT:
toolTip = ur'Switch to <b>Edit</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
elif viewTab == ViewConstants.DESIGN:
toolTip = ur'Switch to <b>Design</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
elif viewTab == ViewConstants.DEBUG:
toolTip = ur'Switch to <b>Debug</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
elif viewTab == ViewConstants.PROJECTS:
toolTip = ur'Switch to <b>Projects</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
elif viewTab == ViewConstants.HELP:
toolTip = ur'Switch to <b>Help</b> mode <span style="color: gray; font-size: small">(Ctrl\+|\u2303)%d</span>'
else:
return None
return toolTip % (viewTab + 1)
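# Hedged example (added; not in the original): the value returned above is a
# regular expression because the shortcut differs per OS -- 'Ctrl+' on
# Linux/Windows, the U+2303 control glyph on macOS. For the EDIT tab:
#
#   import re
#   pattern = ViewConstants.getToolTipForViewTab(ViewConstants.EDIT)
#   re.match(pattern, u'Switch to <b>Edit</b> mode <span style="color: gray; '
#                     u'font-size: small">Ctrl+2</span>')  # matches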
class SubprocessType:
QT_WIDGET=0
QT_QUICK_APPLICATION=1
QT_QUICK_UI=2
USER_DEFINED=3
@staticmethod
def getWindowType(subprocessType, qtQuickVersion="1.1"):
if subprocessType == SubprocessType.QT_WIDGET:
return "QMainWindow"
if subprocessType == SubprocessType.QT_QUICK_APPLICATION:
qqv = "2"
if qtQuickVersion[0] == "1":
qqv = "1"
return "QtQuick%sApplicationViewer" % qqv
if subprocessType == SubprocessType.QT_QUICK_UI:
if qtQuickVersion == "1.1":
return "QDeclarativeViewer"
else:
return "QQuickView"
if subprocessType == SubprocessType.USER_DEFINED:
return "user-defined"
test.fatal("Could not determine the WindowType for SubprocessType %s" % subprocessType)
return None
class QtInformation:
QT_VERSION = 0
QT_BINPATH = 1
QT_LIBPATH = 2
class LibType:
SHARED = 0
STATIC = 1
QT_PLUGIN = 2
@staticmethod
def getStringForLib(libType):
if libType == LibType.SHARED:
return "Shared Library"
if libType == LibType.STATIC:
return "Statically Linked Library"
if libType == LibType.QT_PLUGIN:
return "Qt Plugin"
return None
class Qt5Path:
DOCS = 0
EXAMPLES = 1
@staticmethod
def getPaths(pathSpec):
if pathSpec == Qt5Path.DOCS:
path52 = "/doc"
path53 = "/Docs/Qt-5.3"
path54 = "/Docs/Qt-5.4"
elif pathSpec == Qt5Path.EXAMPLES:
path52 = "/examples"
path53 = "/Examples/Qt-5.3"
path54 = "/Examples/Qt-5.4"
else:
test.fatal("Unknown pathSpec given: %s" % str(pathSpec))
return []
if platform.system() in ('Microsoft', 'Windows'):
return ["C:/Qt/Qt5.2.1/5.2.1/msvc2010" + path52,
"C:/Qt/Qt5.3.1" + path53, "C:/Qt/Qt5.4.1" + path54]
elif platform.system() == 'Linux':
if __is64BitOS__():
return map(os.path.expanduser, ["~/Qt5.2.1/5.2.1/gcc_64" + path52,
"~/Qt5.3.1" + path53, "~/Qt5.4.1" + path54])
return map(os.path.expanduser, ["~/Qt5.2.1/5.2.1/gcc" + path52,
"~/Qt5.3.1" + path53, "~/Qt5.4.1" + path54])
else:
return map(os.path.expanduser, ["~/Qt5.2.1/5.2.1/clang_64" + path52,
"~/Qt5.3.1" + path53])
|
tomlepaine/jobber
|
jobber/scripts/progress.py
|
Python
|
bsd-3-clause
| 130
| 0
|
from jobber import jobber
from redis import Redis
client = Redis()
progress = jobber.JobProgress(client=client)
progress.run()
|
divergentdave/inspectors-general
|
inspectors/agriculture.py
|
Python
|
cc0-1.0
| 11,641
| 0.007559
|
#!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from utils import utils, inspector
# http://www.usda.gov/oig/rptsaudits.htm
archive = 1978
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
# - Some reports have links with a '.PDF' extension, but they can only be
# accessed using a '.pdf' extension. There is a 404 otherwise. The
# `LOWER_PDF_REPORT_IDS` constant contains a list of the report ids that this
# applies to.
# - The link to the congressional testimony statement from 2/26/2003 should
# point to http://www.usda.gov/oig/webdocs/Testimonybudgt-2004.pdf, not
# http://www.usda.gov/oig/webdocs/IGtestimony110302.pdf
SEMIANNUAL_REPORTS_URL = "http://www.usda.gov/oig/rptssarc.htm"
AGENCY_BASE_URL = "http://www.usda.gov/oig/"
TESTIMONIES_URL = "http://www.usda.gov/oig/rptsigtranscripts.htm"
INVESTIGATION_URLS = "http://www.usda.gov/oig/newinv.htm"
OTHER_REPORT_TYPES = {
"investigation": INVESTIGATION_URLS,
"semiannual_report": SEMIANNUAL_REPORTS_URL,
"testimony": TESTIMONIES_URL,
}
AGENCY_URLS = {
"AARC": "rptsauditsaarc.htm",
"AMS": "rptsauditsams.htm",
"APHIS": "rptsauditsaphis.htm",
"ARS": "rptsauditsars.htm",
"CR": "rptsauditscr.htm",
"CCC": "rptsauditsccc.htm",
"CSRE": "rptsauditscsrees.htm",
"FSA": "rptsauditsfsa.htm",
"FNS": "rptsauditsfns.htm",
"FSIS": "rptsauditsfsis.htm",
"FAS": "rptsauditsfas.htm",
"FS": "rptsauditsfs.htm",
"GIPSA": "rptsauditsgipsa.htm",
"NASS": "rptsauditsnass.htm",
"NIFA": "rptsauditsnifa.htm",
"NRCS": "rptsauditsnrcs.htm",
"REE": "rptsauditsree.htm",
"RMA": "rptsauditsrma.htm",
"RBS": "rptsauditsrbs.htm",
"RBEG": "rptsauditsrbeg.htm",
"RD": "rptsauditsrd.htm",
"RHS": "rptsauditsrhs.htm",
"RUS": "rptsauditsrus.htm",
"USDA": "rptsauditsmulti.htm",
}
AGENCY_NAMES = {
"AARC": "Alternative Agricultural Research & Comm. Center",
"AMS": "Agricultural Marketing Service",
"APHIS": "Animal Plant Health Inspection Service",
"ARS": "Agricultural Research Service",
"CR": "Civil Rights",
"CCC": "Commodity Credit Corporation",
"CSRE": "Cooperative State Research, Ed. & Extension Service",
"FSA": "Farm Service Agency",
"FNS": "Food and Nutrition Service",
"FSIS": "Food Safety and Inspection Service",
"FAS": "Foreign Agricultural Service",
"FS": "Forest Service",
"GIPSA": "Grain Inspection, Packers and Stockyards Administration",
"NASS": "National Agricultural Statistics Service",
"NIFA": "National Institute of Food and Agriculture",
"NRCS": "Natural Resources Conservation Service",
"REE": "Research, Education, and Economics",
"RMA": "Risk Management Agency",
"RBS": "Rural Business-Cooperative Service",
"RBEG": "Rural Business Enterprise Grant",
"RD": "Rural Development",
"RHS": "Rural Housing Service",
"RUS": "Rural Utilities Service",
"USDA": "USDA (Multi-Agency)",
}
REPORT_PUBLISHED_MAPPING = {
"TestimonyBlurb2": datetime.datetime(2004, 7, 14),
}
# These reports have links that end with a '.PDF' extension, but can only
# be accessed using a '.pdf' extension.
LOWER_PDF_REPORT_IDS = [
"sarc1978_2_Part_1",
"sarc1979_2",
"sarc1980_2",
"sarc1981_2",
"sarc1982_2",
"sarc1983_2",
"sarc1984_2",
"sarc1985_2",
"sarc1986_2",
"sarc1987_2",
"sarc1988_2",
"sarc1989_2",
"sarc1990_2",
"sarc1991_2",
"sarc1992_2",
"sarc1993_2",
"sarc1994_2",
"sarc
|
1995_2",
"sarc1996_2",
"sarc1997_2",
]
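# Hedged sketch (added; assumed consumer code, not from this scraper): the
# list above is typically applied by lowercasing the extension before
# downloading, e.g.:
#
#   if report_id in LOWER_PDF_REPORT_IDS and report_url.endswith(".PDF"):
#     report_url = report_url[:-4] + ".pdf"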
def run(options):
year_range = inspector.year_range(options, archive)
# Pull the audit reports
all_audit_reports = {}
for agency_slug, agency_path in AGENCY_URLS.items():
agency_url = urljoin(AGENCY_BASE_URL, agency_path)
doc = utils.beautifulsoup_from_url(agency_url)
results = doc.select("ul li")
if not results:
results = [ancestor_tag_by_name(x, 'tr') for x in
doc.select('img[src$="pdf-pic1.gif"]')]
if not results:
raise inspector.NoReportsFoundError("Department of Agriculture (%s)" % agency_slug)
for result in results:
report = report_from(result, agency_url, year_range,
report_type='audit', agency_slug=agency_slug)
if report:
report_id = report["report_id"]
title = report["title"]
key = (report_id, title)
if key in all_audit_reports:
all_audit_reports[key]["agency"] = all_audit_reports[key]["agency"] \
+ ", " + agency_slug.lower()
else:
all_audit_reports[key] = report
for report in all_audit_reports.values():
inspector.save_report(report)
for report_type, url in OTHER_REPORT_TYPES.items():
doc = utils.beautifulsoup_from_url(url)
results = doc.select("ul li")
if not results:
raise inspector.NoReportsFoundError("Department of Agriculture (other reports)")
for result in results:
report = report_from(result, url, year_range, report_type=report_type)
if report:
inspector.save_report(report)
DATE_FORMATS = ['%m/%d/%Y', '%m/%Y']
def report_from(result, page_url, year_range, report_type, agency_slug="agriculture"):
published_on = None
try:
# Try to find the link with text first. Sometimes there are hidden links
# (no text) that we want to ignore.
link = result.find_all("a", text=True)[0]
except IndexError:
# If none of the links have text, try the first one with an image
for temp in result.find_all("a"):
if temp.img:
link = temp
break
# Fallback: pick the first link
else:
link = result.find_all("a")[0]
report_url = urljoin(page_url, link.get('href').strip())
if result.name == 'li':
title = link.text.strip()
elif result.name == 'tr':
# Remove the date and parenthetical metadata from the result, and save
# the date for later. What's left will be the title.
published_on_element = result.strong.extract()
if result.em:
while result.em:
result.em.extract()
title = result.text.strip()
else:
title = result.text
title = title[:title.find('(')].strip()
published_on_text = published_on_element.text.strip().rstrip(":")
for date_format in DATE_FORMATS:
try:
published_on = datetime.datetime.strptime(published_on_text, date_format)
except ValueError:
pass
# Normalize titles
title = title.rstrip(",")
if title.endswith("(PDF)"):
title = title[:-5]
if title.endswith("(PDF), (Report No: 30601-01-HY, Size: 847,872 bytes)"):
title = title[:-52]
title = title.rstrip(" ")
title = title.replace("..", ".")
title = title.replace(" ", " ")
title = title.replace("REcovery", "Recovery")
title = title.replace("Directy ", "Direct ")
if title == title.upper():
title = title.title()
# These entries on the IG page have the wrong URLs associated with them. The
# correct URLs were guessed or retrieved from an earlier version of the page,
# via the Internet Archive Wayback Machine.
if (report_url == "http://www.usda.gov/oig/webdocs/IGtestimony110302.pdf" and
title == "Statement Of Phyllis K. Fong Inspector General: Before "
"The House Appropriations Subcommittee On Agriculture, Rural "
"Development, Food And Drug Administration And Related Agencies"):
report_url = "http://www.usda.gov/oig/webdocs/Testimonybudgt-2004.pdf"
elif (report_url == "http://www.usda.gov/oig/webdocs/Ebt.PDF" and
title == "Statement Of Roger C. Viadero: Before The U.S. House Of "
"Representatives Committee On Agriculture Subcommittee On Department "
"Operations, Oversight, Nutrition, And Forestry on the Urban "
"Resources Partnership Program"):
report_url = "http://www.usda.gov/oig/webdocs/URP-Testimony.PDF"
elif (report_url == "http://www.usda.gov/oig/webdocs/foodaidasst.PDF" and
title == "Testimony Of Roger C. Viadero: Before The United States "
"Senate Committee On Agriculture, Nutrition, And Forestry On The "
"Department's Processing Of Civil Rights Complaints"):
report_url = "http://www.usda.gov/oig/webdocs/IGstestimony.PDF
|
PaesslerAG/django-act-as-auth
|
tests/testapp/tests.py
|
Python
|
bsd-3-clause
| 17,168
| 0
|
import django
from django.utils.six.moves.urllib import parse
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth.models import User
from django.contrib.auth import signals as auth_signals, REDIRECT_FIELD_NAME
from django.contrib.auth.forms import AuthenticationForm
from django.test import TransactionTestCase
from django.test.utils import override_settings
from djactasauth.backends import \
FilteredModelBackend, ActAsBackend, OnlySuperuserCanActAsBackend
from djactasauth.util import act_as_login_url, get_login_url
from testapp.sixmock import patch, call
django_11_or_later = django.VERSION[:2] >= (1, 11)
def create_user(
username, password='password', is_superuser=False, is_staff=False):
user = User(username=username, is_superuser=is_superuser)
user.set_password(password)
user.save()
return user
def auth_through_backend(backend, **kwargs):
if django_11_or_later:
args = [None] # request
else:
args = []
return backend.authenticate(*args, **kwargs)
class FilteredBackendTestCase(TransactionTestCase):
def test_it_is_a_model_backend(self):
self.assertTrue(
issubclass(FilteredModelBackend, ModelBackend),
FilteredModelBackend.__mro__)
def test_can_declare_filters_which_apply_to_get_user(self):
staff = create_user(
username='staff', is_staff=True, is_superuser=False)
superuser = create_user(
username='superuser', is_staff=True, is_superuser=True)
customer = create_user(
username='customer', is_staff=False, is_superuser=False)
for u in [staff, superuser, customer]:
u.set_password('password')
u.save()
class TestFilteredBackend(FilteredModelBackend):
def __init__(self, filter_kwargs):
self.filter_kwargs = filter_kwargs
def run_scenarios_with(test_method):
self.assertEqual(staff, test_method(staff, dict()))
self.assertEqual(superuser, test_method(superuser, dict()))
self.assertEqual(customer, test_method(customer, dict()))
self.assertEqual(None, test_method(customer, dict(is_staff=True)))
self.assertEqual(
superuser, test_method(superuser, dict(is_superuser=True)))
self.assertEqual(
customer, test_method(
customer, dict(username__startswith='c')))
self.assertEqual(
None, test_method(superuser, dict(username__startswith='c')))
def get_user(user, filter_kwargs):
backend = TestFilteredBackend(filter_kwargs)
return backend.get_user(user.pk)
run_scenarios_with(get_user)
def authenticate(user, filter_kwargs):
backend = TestFilteredBackend(filter_kwargs)
return auth_through_backend(
backend=backend, username=user.username, password='password')
run_scenarios_with(authenticate)
class TestableBackend(object):
def __init__(self):
self.reset()
def authenticate(self, *a, **kw):
if django_11_or_later:
kw.pop('request')
self.calls.append((a, kw))
return self.authenticated_user
def reset(self):
self.calls = []
self.authenticated_user = None
def patched_get_backends(backends):
method_to_patch = \
'_get_backends' if django_11_or_later else 'get_backends'
return patch(
'django.contrib.auth.{}'.format(method_to_patch),
return_value=backends
)
class ActAsBackendAuthenticateTestCase(TransactionTestCase):
def setUp(self):
super(ActAsBackendAuthenticateTestCase, self).setUp()
self.first_test_backend = TestableBackend()
self.second_test_backend = TestableBackend()
self.third_test_backend_not_in_get_backends = TestableBackend()
self.act_as_auth_backend = ActAsBackend()
self.backends = [
self.first_test_backend,
self.act_as_auth_backend,
self.second_test_backend
]
def patched_get_backends(self):
return patched_get_backends(self.backends)
def test_does_not_inherit_from_any_backend(self):
self.assertEqual(
(ActAsBackend, object),
ActAsBackend.__mro__
)
def test_fails_if_multiple_act_as_backends_are_configured(self):
"""
        while I can see how one might like to have multiple rules for
        when one can become another user, I foresee complexity, unexpected
        bugs, corner cases, etc., and thus would much rather place the burden
        of managing the complexity/interaction between these various rules
        on the user of this library - break the rules apart into multiple
        methods, and compose them in your own code, so this library can
        remain simple
"""
self.backends.append(ActAsBackend())
with self.patched_get_backends():
with self.assertRaises(ValueError):
auth_through_backend(
self.act_as_auth_backend,
username='foo/bar', password='password')
def test_it_tries_all_other_configured_backends(self):
with self.patched_get_backends():
auth_through_backend(
self.act_as_auth_backend,
username='foo/bar', password='password')
self.assertEqual(
[(tuple(), {'password': 'password', 'username': 'foo'})],
self.first_test_backend.calls)
self.assertEqual(
[(tuple(), {'password': 'password', 'username': 'foo'})],
self.second_test_backend.calls)
self.assertEqual([], self.third_test_backend_not_in_get_backends.calls)
def test_first_successful_backend_returned_later_ones_not_called(self):
self.first_test_backend.authenticated_user = User()
with self.patched_get_backends():
auth_through_backend(
self.act_as_auth_backend,
username='foo/bar', password='password')
self.assertEqual(
[(tuple(), {'password': 'password', 'username': 'foo'})],
self.first_test_backend.calls)
self.assertEqual([], self.second_test_backend.calls)
    def test_cannot_authenticate_regular_user(self):
with self.patched_get_backends():
self.assertEqual(
None,
auth_through_backend(
self.act_as_auth_backend,
username='foo', password='password'))
self.assertEqual([], self.first_test_backend.calls)
self.assertEqual([], self.second_test_backend.calls)
def test_can_become_another_user_with_own_password(self):
create_user(username='admin', password='admin password')
user = create_user(username='user', password='user password')
self.assertEqual(
None, self.authenticate(
username='admin/user', password='user password'))
self.assertEqual(
user, self.authenticate(
username='admin/user', password='admin password'))
@patch("djactasauth.backends.log")
def test_usernames_with_multiple_sepchars_trigger_log_warning(self,
mock_log):
create_user(username='admin', password='foo')
self.assertEqual(None, self.authenticate(username='admin/user/',
password='foo'))
self.assertEqual(None, self.authenticate(username='admin//user',
password='foo'))
self.assertEqual(None, self.authenticate(username='admin/us/er',
password='foo'))
self.assertEqual(None, self.authenticate(username='/admin/user',
password='foo'))
calls = [call(ActAsBackend.too_many_sepchar_msg) for i in range(4)]
mock_log.warn.assert_has_calls(calls)
def test_cannot_become_nonexistent_user(self):
create_user
|
lukecwik/incubator-beam
|
sdks/python/apache_beam/utils/annotations.py
|
Python
|
apache-2.0
| 5,756
| 0.003648
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Deprecated and experimental annotations.
Experimental: Signifies that a public API (public class, method or field) is
subject to incompatible changes, or even removal, in a future release. Note that
the presence of this annotation implies nothing about the quality or performance
of the API in question, only the fact that the API or behavior may change in any
way.
Deprecated: Signifies that users are discouraged from using a public API
typically because a better alternative exists, and the current form might be
removed in a future version.
Usage:
For internal use only; no backwards-compatibility guarantees.
Annotations come in two flavors: deprecated and experimental
The 'deprecated' annotation requires a 'since' parameter to specify
what version deprecated it.
Both 'deprecated' and 'experimental' annotations can specify the
current recommended version to use by means of a 'current' parameter.
The following example illustrates how to annotate coexisting versions of the
same function 'multiply'.::
def multiply(arg1, arg2):
print(arg1, '*', arg2, '=', end=' ')
return arg1*arg2
# This annotation marks 'old_multiply' as deprecated since 'v.1' and suggests
# using 'multiply' instead.::
@deprecated(since='v.1', current='multiply')
def old_multiply(arg1, arg2):
result = 0
for i in xrange(arg1):
result += arg2
print(arg1, '*', arg2, '(the old way)=', end=' ')
return result
# This annotation marks 'exp_multiply' as experimental and suggests
# using 'multiply' instead.::
@experimental(since='v.1', current='multiply')
def exp_multiply(arg1, arg2):
print(arg1, '*', arg2, '(the experimental way)=', end=' ')
return (arg1*arg2)*(arg1/arg2)*(arg2/arg1)
  # If a custom message is needed, on both annotation types the
  # arg custom_message can be used.::
  @experimental(since='v.1', current='multiply',
                custom_message='Experimental since %since%
                Please use %current% instead.')
def exp_multiply(arg1, arg2):
print(arg1, '*', arg2, '(the experimental way)=', end=' ')
return (arg1*arg2)*(arg1/arg2)*(arg2/arg1)
# Set a warning filter to control how often warnings are produced.::
warnings.simplefilter("always")
print(multiply(5, 6))
print(old_multiply(5,6))
print(exp_multiply(5,6))
"""
# pytype: skip-file
import warnings
from functools import partial
from functools import wraps
class BeamDeprecationWarning(DeprecationWarning):
"""Beam-specific deprecation warnings."""
# Don't ignore BeamDeprecationWarnings.
warnings.simplefilter('once', BeamDeprecationWarning)
def annotate(label, since, current, extra_message, custom_message=None):
"""Decorates an API with a deprecated or experimental annotation.
Args:
label: the kind of annotation ('deprecated' or 'experimental').
since: the version that causes the annotation.
current: the suggested replacement function.
extra_message: an optional additional message.
    custom_message: if the default message does not suffice, the message
      can be changed using this argument. A string
      with replacement tokens.
      A replacement token is where the previous args will
      be placed in the custom message.
      The following replacement strings can be used:
        %name% -> API.__name__
        %since% -> since (mandatory for the deprecated annotation)
%current% -> current
%extra% -> extra_message
Returns:
The decorator for the API.
"""
def _annotate(fnc):
@wraps(fnc)
def inner(*args, **kwargs):
if label == 'deprecated':
warning_type = BeamDeprecationWarning
else:
warning_type = FutureWarning
if custom_message is None:
message = '%s is %s' % (fnc.__name__, label)
if label == 'deprecated':
message += ' since %s' % since
message += '. Use %s instead.' % current if current else '.'
if extra_message:
message += ' ' + extra_message
else:
      if label == 'deprecated' and '%since%' not in custom_message:
        raise TypeError(
            "Replacement string %since% not found in custom message")
emptyArg = lambda x: '' if x is None else x
message = custom_message\
.replace('%name%', fnc.__name__)\
.replace('%since%', emptyArg(since))\
.replace('%current%', emptyArg(current))\
.replace('%extra%', emptyArg(extra_message))
warnings.warn(message, warning_type, stacklevel=2)
return fnc(*args, **kwargs)
return inner
return _annotate
# Use partial application to customize each annotation.
# 'current' will be optional in both deprecated and experimental
# while 'since' will be mandatory for deprecated.
deprecated = partial(
annotate, label='deprecated', current=None, extra_message=None)
experimental = partial(
annotate,
label='experimental',
current=None,
since=None,
extra_message=None)
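# Minimal usage sketch (added; the version number is illustrative, not from
# this module's tests). The partials above accept the remaining keyword
# arguments of annotate():
#
#   @deprecated(since='2.24.0', current='new_multiply')
#   def old_multiply(a, b):
#     return a * b
#
#   old_multiply(2, 3)
#   # warns: "old_multiply is deprecated since 2.24.0. Use new_multiply instead."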
|
ezScrum/ezScrum
|
robotTesting/keywords/lib/DatabaseHandler.py
|
Python
|
gpl-2.0
| 1,547
| 0.007111
|
import MySQLdb
class DatabaseHandler:
def __init__(self):
pass
def is_delete(self, tableName):
reservedTableNameList = ["mantis_user_table", "mantis_tokens_table", "mantis_config_table"]
isDeleteFlag = 1
for name in reservedTableNameList:
isIdentical = cmp(tableName, name)
if isIdentical == 0:
isDeleteFlag = 0
break
return isDeleteFlag
    def Clean_Database(self, hostUrl, account, password, databaseName):
print 'clean database1'
db = MySQLdb.connect(host=hostUrl, user=account, passwd=password, db=databaseName)
cursor = db.cursor()
cursor.execute("Show Tables from " + datab
|
aseName)
result = cursor.fetchall()
for record in result:
tableName = record[0]
isDelete = self.is_delete(tableName)
if isDelete == 0:
print "Reserve " + tableName
        else:
print "TRUNCATE TABLE `" + tableName + "`"
cursor.execute("TRUNCATE TABLE `" + tableName + "`")
print 'Add admin'
cursor.execute("INSERT INTO `account` VALUES (1, 'admin', 'admin', 'example@ezScrum.tw', '21232f297a57a5a743894a0e4a801fc3', 1, 1379910191599, 1379910191599)")
cursor.execute("INSERT INTO `system` VALUES (1, 1)")
db.commit()
#if __name__ == '__main__':
# databaseHandler = DatabaseHandler()
#    databaseHandler.Clean_Database("localhost", "spark", "spark", "robottest")
|
Bugheist/website
|
comments/migrations/0004_auto_20170727_1308.py
|
Python
|
agpl-3.0
| 559
| 0.001789
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-27 13:08
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('comments', '0003_auto_20170726_1348'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='parent',
field=models.ForeignKey(default=-1, null=True, on_delete=django.db.models.deletion.CASCADE,
to='comments.Comment'),
),
]
|
voussoir/etiquette
|
frontends/etiquette_flask/backend/endpoints/album_endpoints.py
|
Python
|
bsd-2-clause
| 9,106
| 0.003075
|
import flask; from flask import request
import os
import urllib.parse
from voussoirkit import flasktools
from voussoirkit import gentools
from voussoirkit import stringtools
import etiquette
from .. import common
site = common.site
session_manager = common.session_manager
# Individual albums ################################################################################
@site.route('/album/<album_id>')
def get_album_html(album_id):
album = common.P_album(album_id, response_type='html')
response = common.render_template(
request,
'album.html',
album=album,
view=request.args.get('view', 'grid'),
)
return response
@site.route('/album/<album_id>.json')
def get_album_json(album_id):
album = common.P_album(album_id, response_type='json')
album = album.jsonify()
return flasktools.json_response(album)
@site.route('/album/<album_id>.zip')
def get_album_zip(album_id):
album = common.P_album(album_id, response_type='html')
recursive = request.args.get('recursive', True)
recursive = stringtools.truthystring(recursive)
streamed_zip = etiquette.helpers.zip_album(album, recursive=recursive)
if album.title:
download_as = f'album {album.id} - {album.title}.zip'
else:
download_as = f'album {album.id}.zip'
download_as = etiquette.helpers.remove_path_badchars(download_as)
download_as = urllib.parse.quote(download_as)
outgoing_headers = {
'Content-Type': 'application/octet-stream',
'Content-Disposition': f'attachment; filename*=UTF-8\'\'{download_as}',
}
return flask.Response(streamed_zip, headers=outgoing_headers)
@site.route('/album/<album_id>/add_child', methods=['POST'])
@flasktools.required_fields(['child_id'], forbid_whitespace=True)
def post_album_add_child(album_id):
album = common.P_album(album_id, response_type='json')
child_ids = stringtools.comma_space_split(request.form['child_id'])
children = list(common.P_albums(child_ids, response_type='json'))
print(children)
album.add_children(children, commit=True)
response = album.jsonify()
return flasktools.json_response(response)
@site.route('/album/<album_id>/remove_child', methods=['POST'])
@flasktools.required_fields(['child_id'], forbid_whitespace=True)
def post_album_remove_child(album_id):
album = common.P_album(album_id, response_type='json')
child_ids = stringtools.comma_space_split(request.form['child_id'])
children = list(common.P_albums(child_ids, response_type='json'))
album.remove_children(children, commit=True)
response = album.jsonify()
return flasktools.json_response(response)
@site.route('/album/<album_id>/remove_thumbnail_photo', methods=['POST'])
def post_album_remove_thumbnail_photo(album_id):
album = common.P_album(album_id, response_type='json')
album.set_thumbnail_photo(None)
common.P.commit(message='album remove thumbnail photo endpoint')
return flasktools.json_response(album.jsonify())
@site.route('/album/<album_id>/refresh_directories', methods=['POST'])
def post_album_refresh_directories(album_id):
album = common.P_album(album_id, response_type='json')
for directory in album.get_associated_directories():
if not directory.is_dir:
continue
digest = common.P.digest_directory(directory, new_photo_ratelimit=0.1)
gentools.run(digest)
common.P.commit(message='refresh album directories endpoint')
return flasktools.json_response({})
@site.route('/album/<album_id>/set_thumbnail_photo', methods=['POST'])
@flasktools.required_fields(['photo_id'], forbid_whitespace=True)
def post_album_set_thumbnail_photo(album_id):
album = common.P_album(album_id, response_type='json')
photo = common.P_photo(request.form['photo_id'], response_type='json')
album.set_thumbnail_photo(photo)
common.P.commit(message='album set thumbnail photo endpoint')
return flasktools.json_response(album.jsonify())
# Album photo operations ###########################################################################
@site.route('/album/<album_id>/add_photo', methods=['POST'])
@flasktools.required_fields(['photo_id'], forbid_whitespace=True)
def post_album_add_photo(album_id):
'''
Add a photo or photos to this album.
'''
album = common.P_album(album_id, response_type='json')
photo_ids = stringtools.comma_space_split(request.form['photo_id'])
photos = list(common.P_photos(photo_ids, response_type='json'))
album.add_photos(photos, commit=True)
response = album.jsonify()
return flasktools.json_response(response)
@site.route('/album/<album_id>/remove_photo', methods=['POST'])
@flasktools.required_fields(['photo_id'], forbid_whitespace=True)
def post_album_remove_photo(album_id):
'''
Remove a photo or photos from this album.
'''
album = common.P_album(album_id, response_type='json')
photo_ids = stringtools.comma_space_split(request.form['photo_id'])
photos = list(common.P_photos(photo_ids, response_type='json'))
album.remove_photos(photos, commit=True)
response = album.jsonify()
return flasktools.json_response(response)
# Album tag operations #############################################################################
@site.route('/album/<album_id>/add_tag', methods=['POST'])
def post_album_add_tag(album_id):
'''
Apply a tag to every photo in the album.
'''
response = {}
album = common.P_album(album_id, response_type='json')
tag = request.form['tagname'].strip()
try:
tag = common.P_tag(tag, response_type='json')
except etiquette.exceptions.NoSuchTag as exc:
response = exc.jsonify()
return flasktools.json_response(response, status=404)
recursive = request.form.get('recursive', False)
recursive = stringtools.truthystring(recursive)
album.add_tag_to_all(tag, nested_children=recursive, commit=True)
response['action'] = 'add_tag'
response['tagname'] = tag.name
return flasktools.json_response(response)
# Album metadata operations ########################################################################
@site.route('/album/<album_id>/edit', methods=['POST'])
def post_album_edit(album_id):
'''
Edit the title / description.
'''
album = common.P_album(album_id, response_type='json')
title = request.form.get('title', None)
description = request.form.get('description', None)
album.edit(title=title, description=description, commit=True)
response = album.jsonify(minimal=True)
return flasktools.json_response(response)
@site.route('/album/<album_id>/show_in_folder', methods=['POST'])
def post_album_show_in_folder(album_id):
if not request.is_localhost:
flask.abort(403)
album = common.P_album(album_id, response_type='json')
directories = album.get_associated_directories()
if len(directories) != 1:
flask.abort(400)
directory = directories.pop()
if os.name == 'nt':
        command = f'start explorer.exe "{directory.absolute_path}"'
os.system(command)
return flasktools.json_response({})
flask.abort(501)
# Album listings ###################################################################################
@site.route('/all_albums.json')
@flasktools.cached_endpoint(max_age=15)
def get_all_album_names():
all_albums = {album.id: album.display_name for album in common.P.get_albums()}
response = {'albums': all_albums}
return flasktools.json_response(response)
def get_albums_core():
albums = list(common.P.get_root_albums())
albums.sort(key=lambda x: x.display_name.lower())
return albums
@site.route('/albums')
def get_albums_html():
albums = get_albums_core()
response = common.render_template(
request,
'album.html',
albums=albums,
view=request.args.get('view', 'grid'),
)
return response
@site.route('/albums.json')
def get_albums_json():
albums = get_albums_core()
albums = [album.jsonify(minimal=True) for album in albums]
return flasktools.json_response(albums)
# Album create and delete #########################
|
googleapis/python-talent
|
samples/generated_samples/jobs_v4beta1_generated_profile_service_delete_profile_sync.py
|
Python
|
apache-2.0
| 1,401
| 0.000714
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteProfile
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-talent
# [START jobs_v4beta1_generated_ProfileService_DeleteProfile_sync]
from google.cloud import talent_v4beta1
def sample_delete_profile():
# Create a client
client = talent_v4beta1.ProfileServiceClient()
# Initialize request argument(s)
request = talent_v4beta1.DeleteProfileRequest(
name="name_value",
)
# Make the request
client.delete_profile(request=request)
# [END jobs_v4beta1_generated_ProfileService_DeleteProfile_sync]
|
NeuromorphicProcessorProject/snn_toolbox
|
tests/core/test_config.py
|
Python
|
mit
| 1,363
| 0
|
# coding=utf-8
"""Test configuration of toolbox."""
import importlib
import os
import pytest
from snntoolbox.bin.utils import update_setup
from snntoolbox.utils.utils import import_configparser
with open(os.path.abspath(os.path.join(os.path.dirname(__file__),
'..', '..', 'requirements.txt'))) as f:
requirements = []
for s in f.readlines():
requirements.append(s.rstrip('\n').split('==')[0])
@pytest.mark.parametrize('required_module', requirements)
def test_imports_from_requirements(required_module):
assert importlib.import_module(required_module)
# Todo: Add configuration that is expected to pass.
_in_and_out = [
({}, False),
({'paths': {'path_wd': os.path.dirname(__file__),
                'dataset_path': os.path.dirname(__file__),
'filename_ann': '98.96'}}, False)
]
@pytest.mark.parametrize('params, expect_pass', _in_and_out)
def test_updating_settings(params, expect_pass, _path_wd):
configparser = import_configparser()
config = configparser.ConfigParser()
config.read_dict(params)
configpath = os.path.join(str(_path_wd), 'config')
with open(configpath, 'w') as file:
config.write(file)
if expect_pass:
assert update_setup(configpath)
else:
pytest.raises(AssertionError, update_setup, configpath)
|
edx-solutions/edx-platform
|
openedx/core/lib/tests/assertions/events.py
|
Python
|
agpl-3.0
| 9,836
| 0.004778
|
"""Assertions related to event validation"""
import json
import pprint
import six
def assert_event_matches(expected, actual, tolerate=None):
"""
Compare two event dictionaries.
Fail if any discrepancies exist, and output the list of all discrepancies. The intent is to produce clearer
error messages than "{ some massive dict } != { some other massive dict }", instead enumerating the keys that
differ. Produces period separated "paths" to keys in the output, so "context.foo" refers to the following
structure:
{
'context': {
'foo': 'bar' # this key, value pair
}
}
The other key difference between this comparison and `assertEquals` is that it supports differing levels of
tolerance for discrepancies. We don't want to litter our tests full of exact match tests because then anytime we
add a field to all events, we have to go update every single test that has a hardcoded complete event structure in
it. Instead we support making partial assertions about structure and content of the event. So if I say my expected
event looks like this:
{
'event_type': 'foo.bar',
'event': {
'user_id': 10
}
}
This method will raise an assertion error if the actual event either does not contain the above fields in their
exact locations in the hierarchy, or if it does contain them but has different values for them. Note that it will
*not* necessarily raise an assertion error if the actual event contains other fields that are not listed in the
expected event. For example, the following event would not raise an assertion error:
{
'event_type': 'foo.bar',
        'referer': 'http://example.com',
'event': {
'user_id': 10
}
}
Note that the extra "referer" field is not considered an error by default.
The `tolerate` parameter takes a set that allows you to specify varying degrees of tolerance for some common
eventing related issues. See the `EventMatchTolerates` class for more information about the various flags that are
supported here.
Example output if an error is found:
Unexpected differences found in structs:
* <path>: not found in actual
* <path>: <expected_value> != <actual_value> (expected != actual)
Expected:
    { <expected event> }
Actual:
{ <actual event> }
"<path>" is a "." separated string indicating the key that differed. In the examples above "event.user_id" would
refer to the value of the "user_id" field contained within the dictionary referred to by the "event" field in the
root dictionary.
"""
differences = get_event_differences(expected, actual, tolerate=tolerate)
if len(differences) > 0:
debug_info = [
'',
'Expected:',
block_indent(expected),
'Actual:',
block_indent(actual),
'Tolerating:',
block_indent(EventMatchTolerates.default_if_not_defined(tolerate)),
]
differences = ['* ' + d for d in differences]
message_lines = differences + debug_info
raise AssertionError('Unexpected differences found in structs:\n\n' + '\n'.join(message_lines))
class EventMatchTolerates(object):
"""
Represents groups of flags that specify the level of tolerance for deviation between an expected event and an actual
event.
These are common event specific deviations that we don't want to handle with special case logic throughout our
tests.
"""
# Allow the "event" field to be a string, currently this is the case for all browser events.
STRING_PAYLOAD = 'string_payload'
# Allow unexpected fields to exist in the top level event dictionary.
ROOT_EXTRA_FIELDS = 'root_extra_fields'
    # Allow unexpected fields to exist in the "context" dictionary. This is where new fields that appear in multiple
    # events are most commonly added, so we frequently want to tolerate variation here.
CONTEXT_EXTRA_FIELDS = 'context_extra_fields'
# Allow unexpected fields to exist in the "event" dictionary. Typically in unit tests we don't want to allow this
# type of variance since there are typically only a small number of tests for a particular event type.
PAYLOAD_EXTRA_FIELDS = 'payload_extra_fields'
@classmethod
def default(cls):
"""A reasonable set of tolerated variations."""
# NOTE: "payload_extra_fields" is deliberately excluded from this list since we want to detect erroneously added
# fields in the payload by default.
return {
cls.STRING_PAYLOAD,
cls.ROOT_EXTRA_FIELDS,
cls.CONTEXT_EXTRA_FIELDS,
}
@classmethod
def lenient(cls):
"""Allow all known variations."""
return cls.default() | {
cls.PAYLOAD_EXTRA_FIELDS
}
@classmethod
def strict(cls):
"""Allow no variation at all."""
return frozenset()
@classmethod
def default_if_not_defined(cls, tolerates=None):
"""Use the provided tolerance or provide a default one if None was specified."""
if tolerates is None:
return cls.default()
else:
return tolerates
def assert_events_equal(expected, actual):
"""
Strict comparison of two events.
This asserts that every field in the real event exactly matches the expected event.
"""
assert_event_matches(expected, actual, tolerate=EventMatchTolerates.strict())
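# Illustrative usage (added; assumed, not part of this module): a partial
# assertion that additionally tolerates extra payload fields:
#
#   assert_event_matches(
#       {'event_type': 'foo.bar', 'event': {'user_id': 10}},
#       actual_event,
#       tolerate=EventMatchTolerates.lenient(),
#   )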
def get_event_differences(expected, actual, tolerate=None):
"""Given two events, gather a list of differences between them given some set of tolerated variances."""
tolerate = EventMatchTolerates.default_if_not_defined(tolerate)
# Some events store their payload in a JSON string instead of a dict. Comparing these strings can be problematic
# since the keys may be in different orders, so we parse the string here if we were expecting a dict.
if EventMatchTolerates.STRING_PAYLOAD in tolerate:
expected = parse_event_payload(expected)
actual = parse_event_payload(actual)
def should_strict_compare(path):
"""
We want to be able to vary the degree of strictness we apply depending on the testing context.
Some tests will want to assert that the entire event matches exactly, others will tolerate some variance in the
context or root fields, but not in the payload (for example).
"""
if path == [] and EventMatchTolerates.ROOT_EXTRA_FIELDS in tolerate:
return False
elif path == ['event'] and EventMatchTolerates.PAYLOAD_EXTRA_FIELDS in tolerate:
return False
elif path == ['context'] and EventMatchTolerates.CONTEXT_EXTRA_FIELDS in tolerate:
return False
else:
return True
return compare_structs(expected, actual, should_strict_compare=should_strict_compare)
def block_indent(text, spaces=4):
"""
Given a multi-line string, indent every line of it by the given number of spaces.
If `text` is not a string it is formatted using pprint.pformat.
"""
return '\n'.join([(' ' * spaces) + l for l in pprint.pformat(text).splitlines()])
def parse_event_payload(event):
"""
Given an event, parse the "event" field as a JSON string.
Note that this may simply return the same event unchanged, or return a new copy of the event with the payload
parsed. It will never modify the event in place.
"""
if 'event' in event and isinstance(event['event'], six.string_types):
event = event.copy()
try:
event['event'] = json.loads(event['event'])
except ValueError:
pass
return event
def compare_structs(expected, actual, should_strict_compare=None, path=None):
"""
Traverse two structures to ensure that the `actual` structure contains all of the elements within the `expected`
one.
Note that this performs a "deep" comparison, descending in
|
Zetten/pypkgmirror
|
pypkgmirror/__init__.py
|
Python
|
gpl-3.0
| 3,339
| 0.000898
|
# Copyright 2015 Peter van Zetten
#
# This file is part of pypkgmirror.
#
# pypkgmirror is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pypkgmirror is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pypkgmirror. If not, see <http://www.gnu.org/licenses/>.
""" Entry point and main function for pypkgmirror. """
import errno
import multiprocessing
import os
import subprocess
from pypkgmirror.util import conf, log
def mkdir(d):
try:
os.makedirs(d)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def main():
"""
Script entry point for pypkgmirror.
Parses the configuration and assembles a collection of subprocess calls,
then invokes them.
"""
from pypkgmirror.agents import DebmirrorAgent, RsyncAgent, AptlyAgent
if 'loglevel' in conf:
log.setLevel(conf['loglevel'])
mirrors = []
aptly_mirrors = [] # aptly shares a database so these should not be parallel
for _ in conf.get('mirrors', {}).get('debmirror', []):
mirrors.append(DebmirrorAgent(_, conf['mirrors']['debmirror'][_]))
for _ in conf.get('mirrors', {}).get('rsync', []):
mirrors.append(RsyncAgent(_, conf['mirrors']['rsync'][_]))
for _ in conf.get('mirrors', {}).get('aptly', []):
aptly_mirrors.append(AptlyAgent(_, conf['mirrors']['aptly'][_]))
pool = multiprocessing.Pool(2)
pool.map(start_sync, mirrors)
pool.close()
pool.join()
pool = multiprocessing.Pool(1)
pool.map(start_sync, aptly_mirrors)
pool.close()
pool.join()
_subprocess_call(['hardlink', '-fpot', conf['basedir']])
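# Hedged sketch of the expected `conf` shape, inferred only from the lookups
# in main() and start_sync() above; all values are placeholders:
#
#   conf = {
#       'basedir': '/srv/mirror',
#       'logdir': '/var/log/pypkgmirror',
#       'loglevel': 'INFO',    # optional
#       'noop': False,         # optional: log the calls without running them
#       'mirrors': {
#           'debmirror': {'debian': {...}},
#           'rsync': {'centos': {...}},
#           'aptly': {'internal': {...}},
#       },
#   }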
def start_sync(agent):
"""
Performs a full mirror update with the given agent. This should typically
download any new or updated packages from a remote repository, and update
any necessary indexes.
"""
log.info("Syncing repository '%s' hosted at %s", agent.name, agent.host)
outfile_path = "%s/%s.out" % (conf['logdir'], agent.name)
errfile_path = "%s/%s.err" % (conf['logdir'], agent.name)
mkdir(os.path.dirname(outfile_path))
mkdir(agent.basedir)
with open(outfile_path, 'w') as outfile, open(errfile_path, 'w') as errfile:
for call in agent.get_calls():
log.debug(' '.join(call))
if conf.get('noop'):
continue
_subprocess_call(call, outfile, errfile)
def _subprocess_call(call, stdout=None, stderr=None):
"""
    Trigger a subprocess execution with optional stdout/stderr redirection and
trivial handling of missing executables.
"""
try:
        subprocess.call(call, stdout=stdout, stderr=stderr)
except OSError as e:
        if e.errno == errno.ENOENT:
log.error("The required program %s was not found, no packages synced", call[0])
else:
raise
if __name__ == "__main__":
main()
|
elbeardmorez/quodlibet
|
quodlibet/tests/test_library_libraries.py
|
Python
|
gpl-2.0
| 24,563
| 0.000366
|
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from gi.repository import Gtk
import os
import shutil
from senf import fsnative
from quodlibet.formats import AudioFileError
from quodlibet import config
from quodlibet.util import connect_obj, is_windows
from quodlibet.formats import AudioFile
from quodlibet.compat import text_type, iteritems, iterkeys, itervalues
from tests import TestCase, get_data_path, mkstemp, mkdtemp, skipIf
from .helper import capture_output, get_temp_copy
from quodlibet.library.libraries import Library, PicklingMixin, SongLibrary, \
FileLibrary, AlbumLibrary, SongFileLibrary, iter_paths
class Fake(int):
def __init__(self, _):
self.key = int(self)
def Frange(*args):
return list(map(Fake, range(*args)))
class FakeSong(Fake):
def list(self, tag):
# Turn tag_values into a less-than query, for testing.
if tag <= self:
return []
else:
return [int(self)]
def rename(self, newname):
self.key = newname
class AlbumSong(AudioFile):
"""A mock AudioFile belong to one of three albums,
based on a single number"""
def __init__(self, num, album=None):
super(AlbumSong, self).__init__()
self["~filename"] = fsnative(u"file_%d.mp3" % (num + 1))
self["title"] = "Song %d" % (num + 1)
self["artist"] = "Fakeman"
if album is None:
self["album"] = "Album %d" % (num % 3 + 1)
else:
self["album"] = album
self["labelid"] = self["album"]
class FakeSongFile(FakeSong):
_valid = True
_exists = True
_mounted = True
@property
def mountpoint(self):
return "/" if self._mounted else "/FAKE"
def valid(self):
return self._valid
def exists(self):
return self._exists
def reload(self):
if self._exists:
self._valid = True
else:
raise IOError("doesn't exist")
def mounted(self):
return self._mounted
# Custom range functions, to generate lists of song-like objects
def FSFrange(*args):
return list(map(FakeSongFile, range(*args)))
def FSrange(*args):
return list(map(FakeSong, range(*args)))
def ASrange(*args):
return list(map(AlbumSong, range(*args)))
class TLibrary(TestCase):
Fake = Fake
Frange = staticmethod(Frange)
Library = Library
def setUp(self):
self.library = self.Library()
self.added = []
self.changed = []
self.removed = []
connect_obj(self.library, 'added', list.extend, self.added)
connect_obj(self.library, 'changed', list.extend, self.changed)
connect_obj(self.library, 'removed', list.extend, self.removed)
def test_add(self):
self.library.add(self.Frange(12))
self.failUnlessEqual(self.added, self.Frange(12))
del(self.added[:])
self.library.add(self.Frange(12, 24))
self.failUnlessEqual(self.added, self.Frange(12, 24))
def test_remove(self):
self.library.add(self.Frange(10))
self.assertTrue(self.library.remove(self.Frange(3, 6)))
self.failUnlessEqual(self.removed, self.Frange(3, 6))
# Neither the objects nor their keys should be present.
self.failIf(self.Fake(3) in self.library)
self.failUnless(self.Fake(6) in self.library)
self.failIf(3 in self.library)
self.failUnless(6 in self.library)
def test_remove_when_not_present(self):
self.assertFalse(self.library.remove([self.Fake(12)]))
def test_changed(self):
self.library.add(self.Frange(10))
self.library.changed(self.Frange(5))
while Gtk.events_pending():
Gtk.main_iteration()
self.failUnlessEqual(self.changed, self.Frange(5))
def test_changed_not_present(self):
self.library.add(self.Frange(10))
self.library.changed(self.Frange(2, 20, 3))
while Gtk.events_pending():
Gtk.main_iteration()
        self.failUnlessEqual(set(self.changed), {2, 5, 8})
def test_changed_none_present(self):
self.library.changed(self.Frange(5))
while Gtk.events_pending():
Gtk.main_iteration()
def test___iter__(self):
self.library.add(self.Frange(10))
self.failUnlessEqual(sorted(list(self.library)), self.Frange(10))
def test___iter___empty(self):
self.failIf(list(self.library))
def test___len__(self):
self.failUnlessEqual(len(self.library), 0)
self.library.add(self.Frange(10))
self.failUnlessEqual(len(self.library), 10)
self.library.remove(self.Frange(3))
self.failUnlessEqual(len(self.library), 7)
def test___getitem__(self):
self.library.add(self.Frange(10))
self.failUnlessEqual(self.library[5], 5)
new = self.Fake(12)
new.key = 100
self.library.add([new])
self.failUnlessEqual(self.library[100], 12)
self.failIf(12 in self.library)
def test___getitem___not_present(self):
self.library.add(self.Frange(10))
self.failUnlessRaises(KeyError, self.library.__getitem__, 12)
def test___contains__(self):
self.library.add(self.Frange(10))
new = self.Fake(12)
new.key = 100
self.library.add([new])
for value in [0, 1, 2, 6, 9, 100, new]:
# 0, 1, 2, 6, 9: all added by self.Frange
# 100: key for new
# new: is itself present
self.failUnless(value in self.library, "didn't find %d" % value)
for value in [-1, 10, 12, 101]:
            # -1, 10, 101: boundary values
# 12: equal but non-key-equal to new
self.failIf(value in self.library, "found %d" % value)
def test_get(self):
self.failUnless(self.library.get(12) is None)
self.failUnless(self.library.get(12, "foo") == "foo")
new = self.Fake(12)
new.key = 100
self.library.add([new])
self.failUnless(self.library.get(12) is None)
self.failUnless(self.library.get(100) is new)
def test_keys(self):
items = []
for i in range(20):
items.append(self.Fake(i))
items[-1].key = i + 100
self.library.add(items)
self.failUnlessEqual(
sorted(self.library.keys()), list(range(100, 120)))
self.failUnlessEqual(
sorted(iterkeys(self.library)), list(range(100, 120)))
def test_values(self):
items = []
for i in range(20):
items.append(self.Fake(i))
items[-1].key = i + 100
self.library.add(items)
self.failUnlessEqual(sorted(self.library.values()), list(range(20)))
self.failUnlessEqual(
sorted(itervalues(self.library)), list(range(20)))
def test_items(self):
items = []
for i in range(20):
items.append(self.Fake(i))
items[-1].key = i + 100
self.library.add(items)
expected = list(zip(range(100, 120), range(20)))
self.failUnlessEqual(sorted(self.library.items()), expected)
self.failUnlessEqual(sorted(iteritems(self.library)), expected)
def test_has_key(self):
self.failIf(self.library.has_key(10))
new = self.Fake(10)
new.key = 20
self.library.add([new])
self.failIf(self.library.has_key(10))
self.failUnless(self.library.has_key(20))
def tearDown(self):
self.library.destroy()
class FakeAudioFile(AudioFile):
def __init__(self, key):
self["~filename"] = fsnative(text_type(key))
def FakeAudioFileRange(*args):
return list(map(FakeAudioFile, range(*args)))
class TPicklingMixin(TestCase):
class PicklingMockLibrary(PicklingMixin, Library):
"""A library-like class that implements enough to test PicklingMixin"""
def __init__(self):
PicklingMixin.__init__(self)
self._c
|
lkash/test
|
dpkt/stp.py
|
Python
|
bsd-3-clause
| 1,695
| 0.00059
|
# $Id: stp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Spanning Tree Protocol."""
import dpkt
class STP(dpkt.Packet):
__hdr__ = (
('proto_id', 'H', 0),
('v', 'B', 0),
('type', 'B', 0),
('flags', 'B', 0),
('root_id', '8s', ''),
('root_path', 'I', 0),
('bridge_id', '8s', ''),
('port_id', 'H', 0),
('_age', 'H', 0),
('_max_age', 'H', 0),
('_hello', 'H', 0),
('_fd', 'H', 0)
)
@property
def age(self):
return self._age >> 8
@age.setter
def age(self, age):
self._age = age << 8
@property
def max_age(self):
return self._max_age >> 8
@max_age.setter
def max_age(self, max_age):
self._max_age = max_age << 8
@property
def hello(self):
return self._hello >> 8
@hello.setter
def hello(self, hello):
self._hello = hello << 8
@property
def fd(self):
return self._fd >> 8
@fd.setter
def fd(self, fd):
self._fd = fd << 8
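# Clarifying note (added): 802.1D encodes its timer fields in units of
# 1/256 s, so the whole-second value lives in the high byte of each 2-byte
# field; the paired >> 8 / << 8 properties above convert between wire format
# and seconds.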
def test_stp():
buf = '\x00\x00\x02\x02\x3e\x80\x00\x08\x00\x27\xad\xa3\x41\x00\x00\x00\x00\x80\x00\x08\x00\x27\xad\xa3\x41\x80\x01\x00\x00\x14\x00\x02\x00\x0f\x00\x00\x00\x00\x00\x02\x00\x14\x00'
stp = STP(buf)
assert stp.proto_id == 0
assert stp.port_id == 0x8001
assert stp.age == 0
assert stp.max_age == 20
assert stp.hello == 2
assert stp.fd == 15
assert str(stp) == buf
stp.fd = 100
assert stp.pack_hdr()[-2:] == '\x64\x00' # 100 << 8
if __name__ == '__main__':
# Runs all the test associated with this class/file
test_stp()
print 'Tests Successful...'
|
ofer43211/unisubs
|
apps/comments/__init__.py
|
Python
|
agpl-3.0
| 763
| 0.006553
|
# Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
|
stackforge/solum
|
solum/api/controllers/v1/component.py
|
Python
|
apache-2.0
| 4,074
| 0
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from pecan import rest
import wsmeext.pecan as wsme_pecan
from solum.api.controllers.v1.datamodel import component
from solum.api.handlers import component_handler
from solum.common import exception
from solum.common import policy
from solum import objects
class ComponentController(rest.RestController):
"""Manages operations on a single component."""
def __init__(self, component_id):
super(ComponentController, self).__init__()
self._id = component_id
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(component.Component)
def get(self):
"""Return this component."""
policy.check('show_component',
pecan.request.security_context)
handler = component_handler.ComponentHandler(
pecan.request.security_context)
host_url = pecan.request.application_url.rstrip('/')
return component.Component.from_db_model(handler.get(self._id),
host_url)
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(component.Component, body=component.Component)
def put(self, data):
"""Modify this component."""
policy.check('update_component',
pecan.request.security_context)
handler = component_handler.ComponentHandler(
pecan.request.security_context)
res = handler.update(self._id,
data.as_dict(objects.registry.Component))
host_url = pecan.request.application_url.rstrip('/')
return component.Component.from_db_model(res, host_url)
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(None, status_code=204)
def delete(self):
"""Delete this component."""
policy.check('delete_component',
pecan.request.security_context)
handler = component_handler.ComponentHandler(
pecan.request.security_context)
return handler.delete(self._id)
class ComponentsController(rest.RestController):
"""Manages operations on the components collection."""
@pecan.expose()
def _lookup(self, component_id, *remainder):
if remainder and not remainder[-1]:
remainder = remainder[:-1]
return ComponentController(component_id), remainder
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose(component.Component, body=component.Component,
status_code=201)
def post(self, data):
"""Create a new component."""
policy.check('create_component',
pecan.request.security_context)
handler = component_handler.ComponentHandler(
pecan.request.security_context)
host_url = pecan.request.application_url.rstrip('/')
return component.Component.from_db_model(
handler.create(data.as_dict(objects.registry.Component)), host_url)
@exception.wrap_wsme_pecan_controller_exception
@wsme_pecan.wsexpose([component.Component])
def get_all(self):
"""Return all components, based on the query provided."""
policy.check('get_components',
pecan.request.security_context)
handler = component_handler.ComponentHandler(
pecan.request.security_context)
host_url = pecan.request.application_url.rstrip('/')
return [component.Component.from_db_model(ser, host_url)
for ser in handler.get_all()]
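# Route sketch for the two controllers above (assuming the collection is
# mounted at /v1/components; the mount point lives outside this file):
#   GET    /v1/components        -> ComponentsController.get_all
#   POST   /v1/components        -> ComponentsController.post
#   GET    /v1/components/<id>   -> ComponentController.get  (via _lookup)
#   PUT    /v1/components/<id>   -> ComponentController.put
#   DELETE /v1/components/<id>   -> ComponentController.delete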
|
xwdeng/MiniProxyPool
|
miniproxypool/config.py
|
Python
|
mit
| 2,154
| 0.011142
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Xiaowei Deng
#
# This file is part of Mini Proxy Pool
#
# This program is free software and it is distributed under
# the terms of the MIT license. Please see LICENSE file for details.
PROXY_DB_FILE = "_proxies.db"
VALIDATOR_TIMEOUT = 1 # seconds
VALIDATOR_URL = "http://www.google.ca"
VALIDATOR_THREAD_POOL_SIZE = 20
VALIDATOR_CONNECTIONS_PER_THREAD = 20
INVALID_PROXY_TIMES = 5 # if a proxy cannot be connected INVALID_PROXY_TIMES times, it is defined as invalid
INVALID_PROXY_IF_DELETE = True
VALIDATE_THREAD_RUN_PERIOD = 5 * 60 # seconds wait after each validation
LOAD_PORXIES_FROM_SOURCES_THREAD_RUN_PERIOD = 30 * 60 # seconds wait after each loading from sites
REST_SRV_IP = "0.0.0.0"
REST_SRV_PORT = 9876
REST_API_PATH_GET_ALL_VALID = "/api/v1/proxies/*"
# Free proxy sites
PROXY_SOURCE_SITES = [
{
'url_base': "https://free-proxy-list.net",
'pattern': "((?:\d{1,3}\.){1,3}\d{1,3})<\/td><td>(\d{1,6})(.{1,200})<td class='hx'>(.{2,3})",
'ip_ind': 0,
'port_ind': 1,
'protocal_ind': 3
},
{
'url_base': 'https://www.us-proxy.org',
'pattern': "((?:\d{1,3}\.){1,3}\d{1,3})<\/td><td>(\d{1,6})(.{1,200})<td class='hx'>(.{2,3})",
'ip_ind': 0,
'port_ind': 1,
'protocal_ind': 3 # todo: to specify the protocol: http or https
},
{
'url_base': "http://spys.me/proxy.txt",
'pattern': '((?:\d{1,3}\.){1,3}\d{1,3}):(\d{1,6})',
'ip_ind': 0,
'port_ind': 1,
'protocal_ind': None
}
]
PROXY_SOURCE_FILES = [
'custom_proxies_list.txt'
]
DEFAULT_HEADERS = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2693.2 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.5',
'Connection': 'keep-alive',
'Accept-Encoding': 'gzip, deflate',
}
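# Consumption sketch for the PROXY_SOURCE_SITES entries above (illustrative
# only; the real loader lives elsewhere in miniproxypool). Each regex
# match's groups() tuple is indexed by ip_ind/port_ind/protocal_ind:
#   import re
#   m = re.search(PROXY_SOURCE_SITES[2]['pattern'], "1.2.3.4:8080 US-A")
#   m.groups()  # -> ('1.2.3.4', '8080'); ip_ind=0, port_ind=1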
|
pombredanne/lineup
|
lineup/framework.py
|
Python
|
mit
| 1,964
| 0.000509
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import socket
import traceback
from lineup.datastructures import Queue
class Node(object):
def __init__(self, *args, **kw):
self.initialize(*args, **kw)
def initialize(self, *args, **kw):
pass
@property
def id(self):
return '|'.join([self.get_hostname(), str(os.getpid())])
@property
def taxonomy(self):
class_name = self.__class__.__name__
module_name = self.__class__.__module__
return '.'.join([module_name, class_name])
def get_name(self):
return getattr(self, 'name', None) or self.taxonomy
def get_hostname(self):
return socket.gethostname()
def make_worker(self, Worker, index):
return Worker(self, self.input, self.output)
def start(self):
for worker in self.workers:
worker.start()
def feed(self, item):
self.input.put(item)
def enqueue_error(self, source_class, instructions, exception):
print exception, source_class, instructions
def wait_and_get_work(self):
return self.output.get()
@property
def running(self):
return all([w.alive for w in self.workers])
def are_running(self):
if self.running:
return True
self.start()
return self.running
class Pipeline(Node):
def initialize(self, *args, **kw):
# get_queues() takes no arguments; the step classes come from the
# (optional) 'steps' attribute defined on subclasses.
steps = getattr(self, 'steps', None) or []
self.queues = self.get_queues()
self.workers = [self.make_worker(Worker, index) for index, Worker in enumerate(steps)]
@property
def input(self):
return self.queues[0]
@property
def output(self):
return self.queues[-1]
def get_queues(self):
steps = getattr(self, 'steps', None) or []
return [Queue() for _ in steps] + [Queue()]
def make_worker(self, Worker, index):
return Worker(self, self.queues[index], self.queues[index + 1])
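# Queue topology sketch for a pipeline with steps = [A, B, C] (illustrative):
#   get_queues() -> [Q0, Q1, Q2, Q3]   # len(steps) + 1 queues
#   worker A: Q0 -> Q1, worker B: Q1 -> Q2, worker C: Q2 -> Q3
# so feed() puts work on Q0 (input) and wait_and_get_work() reads Q3 (output).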
|
allotory/basilinna
|
config.py
|
Python
|
mit
| 850
| 0.002353
|
# -*- coding: utf-8 -*-
import os
basedir = os.path.abspath(os.path.dirname(__file__))
CSRF_ENABLED = True
SECRET_KEY = 'UsTd+_P&kv#jdQ!3Oc.Kb$yd,ey/B2i-aM8em'
SITE_NAME = 'basilinna'
MYSQL_DB = 'basilinna'
MYSQL_USER = 'root'
MYSQL_PASSWD = 'root'
MYSQL_HOST = 'localhost'
MYSQL_POST = 3306
SQLALCHEMY_DATABASE_URI = 'mysql+mysqlconnector://%s:%s@%s:%s/%s?charset=utf8' % (MYSQL_USER, MYSQL_PASSWD, MYSQL_HOST, MYSQL_POST, MYSQL_DB)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'migrations')
HOST = 'http://localhost:5000/'
UPLOAD_FOLDER = '.\\app\\static\\uploads'
UPLOAD_AVATAR_FOLDER = '.\\app\\static\\avatars'
ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg', 'gif'])
# 16M
MAX_CONTENT_LENGTH = 16 * 1024 * 1024
# pagination
POSTS_PER_PAGE = 5
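# Typical consumption sketch (standard Flask pattern; the app module is an
# assumption, not part of this file):
#   app = Flask(__name__)
#   app.config.from_object('config')  # picks up SQLALCHEMY_DATABASE_URI etc.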
|
2015fallhw/cdw11
|
static/plugin/liquid_tags/test_audio.py
|
Python
|
agpl-3.0
| 1,494
| 0
|
from . import audio
import pytest
import re
@pytest.mark.parametrize('input,expected', [
('http://foo.bar https://bar.foo',
('http://foo.bar', 'https://bar.foo', None)),
('http://test.foo',
('http://test.foo', None, None)),
('https://test.foo',
('https://test.foo', None, None)),
('http://foo.foo https://bar.bar http://zonk.zonk',
('http://foo.foo', 'https://bar.bar', 'http://zonk.zonk'))
])
def test_regex(input, expected):
assert re.match(audio.AUDIO, input).groups() == expected
@pytest.mark.parametrize('input,expected', [
('http://foo.foo/foo.mp3',
('<audio controls>'
'<source src="http://foo.foo/foo.mp3" type="audio/mpeg">'
'Your browser does not support the audio element.</audio>')),
('https://foo.foo/foo.ogg http://bar.bar/bar.opus',
('<audio controls>'
'<source src="https:
|
//foo.foo/foo.ogg" type="audio/ogg">'
'<source src="http://bar.bar/bar.opus" type="audio/ogg">'
'Your browser does not support the audio element.</audio>')),
('http://1.de/1.wav http://2.de/2.mp4 http://3.de/3.ogg',
('<audio controls>'
'<source src="http://1.de/1.wav" type="audio/wav">'
'<source src="http://2.de/2.mp4" type="audio/mp4">'
'<source src="http://3.de/3.ogg" type="audio/ogg">'
'Your browser does not support the audio element.</audio>'))
])
def test_create_html(input, expected):
assert audio.create_html(input) == expected
|
hei-hilman/microblog
|
app/forms.py
|
Python
|
bsd-3-clause
| 1,059
| 0.004721
|
from flask.ext.wtf import Form
from wtforms import StringField, BooleanField, TextAreaField
from wtforms.validators import DataRequired, Length
from app.models import User
class LoginForm(Form):
openid = StringField('openid', validators=[DataRequired()])
remember_me = BooleanField('remember_me', default=False)
class EditForm(Form):
nickname = StringField('nickname', validators=[DataRequired()])
about_me = TextAreaField('about_me', validators=[Length(min=0, max=140)])
def __init__(self, original_nickname, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
self.original_nickname = original_nickname
def validate(self):
if not Form.validate(self):
return False
if self.nickname.data == self.original_nickname:
return True
user = User.query.filter_by(nickname=self.nickname.data).first()
if user != None:
self.nickname.errors.append('This nickname is already in use. Please choose another one.')
return False
return True
|
SiLab-Bonn/online_monitor
|
online_monitor/stop_online_monitor.py
|
Python
|
mit
| 362
| 0.002762
|
#!/usr/bin/env python
import psutil
def main():
for proc in psutil.process_iter():
if any(name in proc.name() for name in ['start_producer', 'start_converter', 'start_online']) or any(name in ''.join(proc.cmdline()) for name in ['start_producer', 'start_converter', 'start_online']):
proc.kill()
if __name__ == '__main__':
main()
|
robosafe/mc-vs-bdi
|
tiago_simulator/scripts/grasp_demo.py
|
Python
|
gpl-3.0
| 1,252
| 0.023962
|
#!/usr/bin/env python
import rospy
import actionlib
from play_motion_msgs.msg import PlayMotionAction, PlayMotionGoal
from sensor_msgs.msg import JointState
if __name__ == "__main__":
rospy.init_node("grasp_demo")
rospy.loginfo("Waiting for play_motion...")
client = actionlib.SimpleActionClient("/play_motion", PlayMotionAction)
client.wait_for_server()
rospy.loginfo("...connected.")
rospy.wait_for_message("/joint_states", JointState)
rospy.sleep(3.0)
rospy.loginfo("Grasping demo...")
goal = PlayMotionGoal()
goal.motion_name = 'home'
goal.skip_planning = True
client.send_goal(goal)
client.wait_for_result(rospy.Duration(15.0))
goal.motion_name = 'look_at_object_demo'
goal.skip_planning = True
client.send_goal(goal)
client.wait_for_result(rospy.Duration(5.0))
goal.motion_name = 'pregrasp_demo'
goal.skip_planning = True
client.send_goal(goal)
client.wait_for_result(rospy.Duration(40.0))
goal.motion_name = 'grasp_demo'
goal.skip_planning = True
client.send_goal(goal)
client.wait_for_result(rospy.Duration(80.0))
goal.motion_name = 'pick_demo'
goal.skip_planning = True
client.send_goal(goal)
client.wait_for_result(rospy.Duration(40.0))
rospy.loginfo("Grasping demo OK.")
|
moto-timo/robotframework
|
src/robot/running/timeouts/__init__.py
|
Python
|
apache-2.0
| 3,987
| 0.000251
|
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from robot.utils import (Sortable, py2to3, secs_to_timestr, timestr_to_secs,
IRONPYTHON, JYTHON, WINDOWS)
from robot.errors import TimeoutError, DataError, FrameworkError
if JYTHON:
from .jython import Timeout
elif IRONPYTHON:
from .ironpython import Timeout
elif WINDOWS:
from .windows import Timeout
else:
from .posix import Timeout
@py2to3
class _Timeout(Sortable):
def __init__(self, timeout=None, message='', variables=None):
self.string = timeout or ''
self.message = message
self.secs = -1
self.starttime = -1
self.error = None
if variables:
self.replace_variables(variables)
@property
def active(self):
return self.starttime > 0
def replace_variables(self, variables):
try:
self.string = variables.replace_string(self.string)
if not self:
return
self.secs = timestr_to_secs(self.string)
self.string = secs_to_timestr(self.secs)
self.message = variables.replace_string(self.message)
except (DataError, ValueError) as err:
self.secs = 0.000001 # to make timeout active
self.error = (u'Setting %s timeout failed: %s'
% (self.type.lower(), err))
def start(self):
if self.secs > 0:
self.starttime = time.time()
def time_left(self):
if not self.active:
return -1
elapsed = time.time() - self.starttime
# Timeout granularity is 1ms. Without rounding some timeout tests fail
# intermittently on Windows, probably due to threading.Event.wait().
return round(self.secs - elapsed, 3)
def timed_out(self):
return self.active and self.time_left() <= 0
def __unicode__(self):
return self.string
@property
def _sort_key(self):
return (not self.active, self.time_left())
def __nonzero__(self):
return bool(self.string and self.string.upper() != 'NONE')
def run(self, runnable, args=None, kwargs=None):
if self.error:
raise DataError(self.error)
if not self.active:
raise FrameworkError('Timeout is not active')
timeout = self.time_left()
if timeout <= 0:
raise TimeoutError(self.get_message())
executable = lambda: runnable(*(args or ()), **(kwargs or {}))
return Timeout(timeout, self._timeout_error).execute(executable)
def get_message(self):
if not self.active:
return '%s timeout not active.' % self.type
if not self.timed_out():
return '%s timeout %s active. %s seconds left.' \
% (self.type, self.string, self.time_left())
return self._timeout_error
@property
def _timeout_error(self):
if self.message:
return self.message
return '%s timeout %s exceeded.' % (self.type, self.string)
class TestTimeout(_Timeout):
type = 'Test'
_keyword_timeout_occurred = False
def set_keyword_timeout(self, timeout_occurred):
if timeout_occurred:
self._keyword_timeout_occurred = True
def any_timeout_occurred(self):
return self.timed_out() or self._keyword_timeout_occurred
class KeywordTimeout(_Timeout):
type = 'Keyword'
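# Usage sketch (timeout string and keyword callable are hypothetical):
#   t = KeywordTimeout('2 seconds', variables=variables)
#   t.start()
#   t.run(keyword, args=(arg,))  # raises TimeoutError once 2 s have elapsed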
|
roofit-dev/parallel-roofit-scripts
|
int2ext_precision_solution.py
|
Python
|
apache-2.0
| 1,674
| 0.001792
|
import numpy as np
# for the double bounded trafo
def i2e(v, u, l):
return l + 0.5 * (u - l) * (np.sin(v) + 1)
def e2i(v, u, l, eps2):
piby2 = 2. * np.arctan(1.)
distnn = 8. * np.sqrt(eps2)
vlimhi = piby2 - distnn
vlimlo = -piby2 + distnn
yy = 2. * (v - l) / (u - l) - 1.
yy2 = yy * yy
if yy2 > (1. - eps2):
if yy < 0.:
print("vlimlo")
return vlimlo
else:
print("vlimhi")
return vlimhi
else:
print("arcsin")
return np.arcsin(yy)
def i2e2i(v_hex, u, l):
eps2 = float.fromhex('0x1p-24')
i = np.longdouble(float.fromhex(v_hex))
e = i2e(i, u, l)
i2 = e2i(e, u, l, eps2)
return i, i2
def print_i2e2i(i, i2):
print(i, i2)
print(float.hex(float(i)), float.hex(float(i2)))
# this is the first number in my test that goes wrong
right1, wrong1 = i2e2i('-0x1.abadef0339ab8p-3', 3, -3)
print_i2e2i(right1, wrong1)
# prints:
# -0.20882784584610703 -0.208827845846
# -0x1.abadef0339ab8p-3 -0x1.abadef0339ab9p-3
# i.e. the last bit is now one higher than before
# let's try another:
print_i2e2i(*i2e2i('-0x1.abadef0339ab9p-3', 3, -3))
# that goes fine...
# another:
print_i2e2i(*i2e2i('-0x1.abadef0339abap-3', 3, -3))
# aha, this also goes wrong, with same result!
print_i2e2i(*i2e2i('-0x1.abadef0339abbp-3', 3, -3))
# also!
print_i2e2i(*i2e2i('-0x1.abadef0339ab7p-3', 3, -3))
# also! still same value!
print_i2e2i(*i2e2i('-0x1.abadef0339ab6p-3', 3, -3))
# still wrong, now different value.
print_i2e2i(*i2e2i('-0x1.abadef0339ab5p-3', 3, -3))
# that is a correct one again.
# So basically in this range ~1/3 of results are wrong...
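# For reference, the analytic pair exercised above (the MINUIT-style double
# bounded transformation between internal v and external e on [l, u]):
#   e = l + (u - l) * (sin(v) + 1) / 2
#   v = arcsin(2 * (e - l) / (u - l) - 1)
# The flipped last bits appear to come from rounding through double
# precision on the way out of i2e before e2i inverts it.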
|
google/tf-quant-finance
|
tf_quant_finance/models/legacy/brownian_motion.py
|
Python
|
apache-2.0
| 19,227
| 0.003173
|
# Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""N-dimensional Brownian Motion.
Implements the Ito process defined by:
```
dX_i = a_i(t) dt + Sum[S_{ij}(t) dW_{j}, 1 <= j <= n] for each i in {1,..,n}
```
where `dW_{j}, 1 <= j <= n` are n independent 1D Brownian increments. The
coefficient `a_i` is the drift and the matrix `S_{ij}` is the volatility of the
process.
For more details, see Ref [1].
#### References:
[1]: Brent Oksendal. Stochastic Differential Equations: An Introduction with
Applications. Springer. 2010.
"""
import tensorflow.compat.v2 as tf
from tf_quant_finance.math.random_ops import multivariate_normal as mvn
from tf_quant_finance.models.legacy import brownian_motion_utils as bmu
from tf_quant_finance.models.legacy import ito_process
class BrownianMotion(ito_process.ItoProcess):
"""The multi dimensional Brownian Motion."""
def __init__(self,
dim=1,
drift=None,
volatility=None,
total_drift_fn=None,
total_covariance_fn=None,
dtype=None,
name=None):
"""Initializes the Brownian motion class.
Represents the Ito process:
```None
dX_i = a_i(t) dt + Sum(S_{ij}(t) dW_j for j in [1 ... n]), 1 <= i <= n
```
`a_i(t)` is the drift rate of this process and the `S_{ij}(t)` is the
volatility matrix. Associated to these parameters are the integrated
drift and covariance functions. These are defined as:
```None
total_drift_{i}(t1, t2) = Integrate(a_{i}(t), t1 <= t <= t2)
total_covariance_{ij}(t1, t2) = Integrate(inst_covariance_{ij}(t),
t1 <= t <= t2)
inst_covariance_{ij}(t) = (S.S^T)_{ij}(t)
```
Sampling from the Brownian motion process with time dependent parameters
can be done efficiently if the total drift and total covariance functions
are supplied. If the parameters are constant, the total parameters can be
automatically inferred and it is not worth supplying them explicitly.
Currently, it is not possible to infer the total drift and covariance from
the instantaneous values if the latter are functions of time. In this case,
we use a generic sampling method (Euler-Maruyama) which may be
inefficient. It is advisable to supply the total covariance and total drift
in the time dependent case where possible.
#### Example
The following is an example of a 1 dimensional brownian motion using default
arguments of zero drift and unit volatility.
```python
process = bm.BrownianMotion()
times = np.array([0.2, 0.33, 0.7, 0.9, 1.88])
num_samples = 10000
with tf.Session() as sess:
paths = sess.run(process.sample_paths(
times,
num_samples=num_samples,
initial_state=np.array(0.1),
seed=1234))
# Compute the means at the specified times.
means = np.mean(paths, axis=0)
print (means) # Mean values will be near 0.1 for each time
# Compute the covariances at the given times
covars = np.cov(paths.reshape([num_samples, 5]), rowvar=False)
# covars is a 5 x 5 covariance matrix.
# Expected result is that Covar(X(t), X(t')) = min(t, t')
expected = np.minimum(times.reshape([-1, 1]), times.reshape([1, -1]))
print ("Computed Covars: {}, True Covars: {}".format(covars, expected))
```
Args:
dim: Python int greater than or equal to 1. The dimension of the Brownian
motion.
Default value: 1 (i.e. a one dimensional brownian process).
drift: The drift of the process. The type and shape of the value must be
one of the following (in increasing order of generality) (a) A real
scalar `Tensor`. This corresponds to a time and component independent
drift. Every component of the Brownian motion has the same drift rate
equal to this value. (b) A real `Tensor` of shape `[dim]`. This
corresponds to a time independent drift with the `i`th component as the
drift rate of the `i`th component of the Brownian motion. (c) A Python
callable accepting a single positive `Tensor` of general shape (referred
to as `times_shape`) and returning a `Tensor` of shape `times_shape +
[dim]`. The input argument is the times at which the drift needs to be
evaluated. This case corresponds to a general time and direction
dependent drift rate.
Default value: None which maps to zero drift.
volatility: The volatility of the process. The type and shape of the
supplied value must be one of the following (in increasing order of
generality) (a) A positive real scalar `Tensor`. This corresponds to a
time independent, diagonal volatility matrix. The `(i, j)` component of
the full volatility matrix is equal to zero if `i != j` and equal to the
supplied value otherwise. (b) A positive real `Tensor` of shape `[dim]`.
This corresponds to a time independent volatility matrix with zero
correlation. The `(i, j)` component of the full volatility matrix is
equal to zero if `i != j` and equal to the `i`th component of the supplied
value otherwise. (c) A positive definite real `Tensor` of shape `[dim,
dim]`. The full time independent volatility matrix. (d) A Python
callable accepting a single positive `Tensor` of general shape (referred
to as `times_shape`) and returning a `Tensor` of shape `times_shape +
[dim, dim]`. The input argument are the times at which the volatility
needs to be evaluated. This case corresponds to a general time and axis
dependent volatility matrix.
Default value: None which maps to a volatility matrix equal to identity.
total_drift_fn: Optional Python callable to compute the integrated drift
rate between two times. The callable should accept two real `Tensor`
arguments. The first argument contains the start times and the second,
the end times of the time intervals for which the total drift is to be
computed. Both the `Tensor` arguments are of the same dtype and shape.
The return value of the callable should be a real `Tensor` of the same
dtype as the input arguments and of shape `times_shape + [dim]` where
`times_shape` is the shape of the times `Tensor`. Note that it is an
error to supply this parameter if the `drift` is not supplied.
Default value: None.
total_covariance_fn: A Python callable returning the integrated covariance
rate between two times. The callable should accept two real `Tensor`
arguments. The first argument is the start times and the second is the
end times of the time intervals for which the total covariance is
needed. Both the `Tensor` arguments are of the same dtype and shape. The
return value of the callable is a real `Tensor` of the same dtype as the
input arguments and of shape `times_shape + [dim, dim]` where
`times_shape` is the shape of the times `Tensor`. Note that it is an
error to supply this argument if the `volatility` is not supplied.
Default value: None.
dtype: The default dtype to use when converting values to `Tensor`s.
Default value: None which means that default dtypes inferred by
TensorFlow are used.
name: str. The name scope under which ops created by the methods of this
class are nested.
Default value: None which maps to the default name `brownian_motion`.
|
certik/pyjamas
|
pgen/test_support.py
|
Python
|
apache-2.0
| 18,242
| 0.003728
|
"""Supporting definitions for the Python regression tests."""
if __name__ != 'test.test_support':
raise ImportError, 'test_support must be imported from the test package'
import sys
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
class TestSkipped(Error):
"""Test skipped.
This can be raised to indicate that a test was deliberately
skipped, but not because a feature wasn't available. For
example, if some resource can't be used, such as the network
appears to be unavailable, this should be raised instead of
TestFailed.
"""
class ResourceDenied(TestSkipped):
"""Test skipped because it requested a disallowed resource.
This is raised when a test calls requires() for a resource that
has not been enabled. It is used to distinguish between expected
and unexpected skips.
"""
verbose = 1 # Flag set to 0 by regrtest.py
use_resources = None # Flag set to [] by regrtest.py
max_memuse = 0 # Disable bigmem tests (they will still be run with
# small sizes, to make sure they work.)
# _original_stdout is meant to hold stdout at the time regrtest began.
# This may be "the real" stdout, or IDLE's emulation of stdout, or whatever.
# The point is to have some flavor of stdout the user can actually see.
_original_stdout = None
def record_original_stdout(stdout):
global _original_stdout
_original_stdout = stdout
def get_original_stdout():
return _original_stdout or sys.stdout
def unload(name):
try:
del sys.modules[name]
except KeyError:
pass
def unlink(filename):
import os
try:
os.unlink(filename)
except OSError:
pass
def forget(modname):
'''"Forget" a module was ever imported by removing it from sys.modules and
deleting any .pyc and .pyo files.'''
unload(modname)
import os
for dirname in sys.path:
unlink(os.path.join(dirname, modname + os.extsep + 'pyc'))
# Deleting the .pyo file cannot be within the 'try' for the .pyc since
# the chance exists that there is no .pyc (and thus the 'try' statement
# is exited) but there is a .pyo file.
unlink(os.path.join(dirname, modname + os.extsep + 'pyo'))
def is_resource_enabled(resource):
"""Test whether a resource is enabled. Known resources are set by
regrtest.py."""
return use_resources is not None and resource in use_resources
def requires(resource, msg=None):
"""Raise ResourceDenied if the specified resource is not available.
If the caller's module is __main__ then automatically return True. The
possibility of False being returned occurs when regrtest.py is executing."""
# see if the caller's module is __main__ - if so, treat as if
# the resource was set
if sys._getframe().f_back.f_globals.get("__name__") == "__main__":
return
if not is_resource_enabled(resource):
if msg is None:
msg = "Use of the `%s' resource not enabled" % resource
raise ResourceDenied(msg)
def bind_port(sock, host='', preferred_port=54321):
"""Try to bind the sock to a port. If we are running multiple
tests and we don't try multiple ports, the test can fail. This
makes the test more robust."""
import socket, errno
# Find some random ports that hopefully no one is listening on.
# Ideally each test would clean up after itself and not continue listening
# on any ports. However, this isn't the case. The last port (0) is
# a stop-gap that asks the O/S to assign a port. Whenever the warning
# message below is printed, the test that is listening on the port should
# be fixed to close the socket at the end of the test.
# Another reason why we can't use a port is another process (possibly
# another instance of the test suite) is using the same port.
for port in [preferred_port, 9907, 10243, 32999, 0]:
try:
sock.bind((host, port))
if port == 0:
port = sock.getsockname()[1]
return port
except socket.error, (err, msg):
if err != errno.EADDRINUSE:
raise
print >>sys.__stderr__, \
' WARNING: failed to listen on port %d, trying another' % port
raise TestFailed, 'unable to find port to listen on'
FUZZ = 1e-6
def fcmp(x, y): # fuzzy comparison function
if type(x) == type(0.0) or type(y) == type(0.0):
try:
x, y = coerce(x, y)
fuzz = (abs(x) + abs(y)) * FUZZ
if abs(x-y) <= fuzz:
return 0
except:
pass
elif type(x) == type(y) and type(x) in (type(()), type([])):
for i in range(min(len(x), len(y))):
outcome = fcmp(x[i], y[i])
if outcome != 0:
return outcome
return cmp(len(x), len(y))
return cmp(x, y)
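# e.g. fcmp(0.1 + 0.2, 0.3) returns 0: the difference (~5.5e-17) is below
# fuzz = (abs(x) + abs(y)) * FUZZ ~= 6e-7, whereas the exact comparison
# cmp(0.1 + 0.2, 0.3) returns 1.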
try:
unicode
have_unicode = 1
except NameError:
have_unicode = 0
is_jython = sys.platform.startswith('java')
import os
# Filename used for testing
if os.name == 'java':
# Jython disallows @ in module names
TESTFN = '$test'
elif os.name == 'riscos':
TESTFN = 'testfile'
else:
TESTFN = '@test'
# Unicode name only used if TEST_FN_ENCODING exists for the platform.
if have_unicode:
# Assuming sys.getfilesystemencoding()!=sys.getdefaultencoding()
# TESTFN_UNICODE is a filename that can be encoded using the
# file system encoding, but *not* with the default (ascii) encoding
if isinstance('', unicode):
# python -U
# XXX perhaps unicode() should accept Unicode strings?
TESTFN_UNICODE = "@test-\xe0\xf2"
else:
# 2 latin characters.
TESTFN_UNICODE = unicode("@test-\xe0\xf2", "latin-1")
TESTFN_ENCODING = sys.getfilesystemencoding()
# TESTFN_UNICODE_UNENCODEABLE is a filename that should *not* be
# able to be encoded by *either* the default or filesystem encoding.
# This test really only makes sense on Windows NT platforms
# which have special Unicode support in posixmodule.
if (not hasattr(sys, "getwindowsversion") or
sys.getwindowsversion()[3] < 2): # 0=win32s or 1=9x/ME
TESTFN_UNICODE_UNENCODEABLE = None
else:
# Japanese characters (I think - from bug 846133)
TESTFN_UNICODE_UNENCODEABLE = eval('u"@test-\u5171\u6709\u3055\u308c\u308b"')
try:
# XXX - Note - should be using TESTFN_ENCODING here - but for
# Windows, "mbcs" currently always operates as if in
# errors=ignore' mode - hence we get '?' characters rather than
# the exception. 'Latin1' operates as we expect - ie, fails.
# See [ 850997 ] mbcs encoding ignores errors
TESTFN_UNICODE_UNENCODEABLE.encode("Latin1")
except UnicodeEncodeError:
pass
else:
print \
'WARNING: The filename %r CAN be encoded by the filesystem. ' \
'Unicode filename tests may not be effective' \
% TESTFN_UNICODE_UNENCODEABLE
# Make sure we can write to TESTFN, try in /tmp if we can't
fp = None
try:
fp = open(TESTFN, 'w+')
except IOError:
TMP_TESTFN = os.path.join('/tmp', TESTFN)
try:
fp = open(TMP_TESTFN, 'w+')
TESTFN = TMP_TESTFN
del TMP_TESTFN
except IOError:
print ('WARNING: tests will fail, unable to write to: %s or %s' %
(TESTFN, TMP_TESTFN))
if fp is not None:
fp.close()
unlink(TESTFN)
del os, fp
def findfile(file, here=__file__):
"""Try to find a file on sys.path and the working directory. If it is not
found the argument passed to the function is returned (this does not
necessarily signal failure; could still be the legitimate path)."""
import os
if os.path.isabs(file):
return file
path = sys.path
path = [os.path.dirname(here)] + path
for dn in path:
|
bennylope/pygeocodio
|
docs/conf.py
|
Python
|
bsd-3-clause
| 8,264
| 0.000242
|
# -*- coding: utf-8 -*-
#
# pygeocodio documentation build configuration file, created by
# sphinx-quickstart on Wed Jan 22 14:09:09 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"pygeocodio"
copyright = u"2014-2019, Ben Lopatin"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.11"
# The full version, including alpha/beta/rc tags.
release = "0.11.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "pygeocodiodoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
("index", "pygeocodio.tex", u"pygeocodio Documentation", u"Ben Lopatin", "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "pygeocodio", u"pygeocodio Documentation", [u"Ben Lopatin"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"pygeocodio",
u"pygeocodio Documentation",
u"Ben Lopatin",
"pygeocodio",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do n
|
pylada/pylada-light
|
tests/ipython/conftest.py
|
Python
|
gpl-3.0
| 2,390
| 0.000837
|
###############################
# This file is part of PyLaDa.
#
# Copyright (C) 2013 National Renewable Energy Lab
#
# PyLaDa is a high throughput computational platform for Physics. It aims to
# make it easier to submit large numbers of jobs on supercomputers. It
# provides a python interface to physical input, such as crystal structures,
# as well as to a number of DFT (VASP, CRYSTAL) and atomic potential programs.
# It is able to organise and launch computational jobs on PBS and SLURM.
#
# PyLaDa is free software: you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# PyLaDa is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyLaDa. If not, see <http://www.gnu.org/licenses/>.
###############################
from pytest import fixture
@fixture
def shell():
from IPython.core.interactiveshell import InteractiveShell
shell = InteractiveShell.instance()
shell.magic("load_ext pylada")
return shell
def Extract(outdir=None):
from os.path import exists
from os import getcwd
from collections import namedtuple
from pickle import load
from pylada.misc import chdir
if outdir == None:
outdir = getcwd()
Extract = namedtuple("Extract", ["success", "directory", "indiv", "functional"])
if not exists(outdir):
return Extract(False, outdir, None, functional)
with chdir(outdir):
if not exists("OUTCAR"):
return Extract(False, outdir, None, functional)
with open("OUTCAR", "rb") as file:
indiv, value = load(file)
return Extract(True, outdir, indiv, functional)
def call_functional(indiv, outdir=None, value=False, **kwargs):
from pylada.misc import local_path
from pickle import dump
path = local_path(outdir)
path.ensure(dir=True)
dump((indiv, value), path.join("OUTCAR").open("wb"))
return Extract(outdir)
call_functional.Extract = Extract
@fixture
def functional():
return call_functional
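# Usage sketch in a test (names and values illustrative, not part of the
# fixtures above):
#   def test_roundtrip(tmpdir, functional):
#       extract = functional("indiv", outdir=str(tmpdir), value=1.5)
#       assert extract.success and extract.indiv == "indiv"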
|
thecrackofdawn/Peach2.3
|
Peach/Analyzers/shark.py
|
Python
|
mit
| 32,258
| 0.048112
|
'''
Wireshark Analyzer(s)
@author: Michael Eddington
@version: $Id$
'''
#
# Copyright (c) Michael Eddington
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# Michael Eddington (mike@phed.org)
# $Id$
from Peach.analyzer import *
from Peach.Engine.common import *
class WireSharkAnalyzer(Analyzer):
'''
Analyzers produce data and state models. Examples of analyzers would be
the parsing of Peach PIT XML files, tokenizing a string, building a data
model based on XML file, etc.
'''
#: Does analyzer support asCommandLine()
supportCommandLine = True
def asParser(self, uri):
'''
Called when Analyzer is used as default PIT parser.
Should produce a Peach DOM.
'''
raise Exception("asParser not supported")
def asDataElement(self, parent, args, dataBuffer):
'''
Called when Analyzer is used in a data model.
Should return a DataElement such as Block, Number or String.
'''
raise Exception("asDataElement not supported")
def asCommandLine(self, args):
'''
Called when Analyzer is used from command line. Analyzer
should produce Peach PIT XML as output.
'''
inFile = args["in"]
if args.has_key("proto"):
proto = args["proto"]
else:
proto = None
if args.has_key("out"):
outFile = args["out"]
else:
outFile = None
xml = DoTheShark(inFile, proto)
if outFile != None:
fd = open(outFile, "wb+")
fd.write(xml)
fd.close()
else:
print xml
def asTopLevel(self, peach, args):
'''
Called when Analyzer is used from top level.
From the top level producing zero or more data models and
state models is possible.
'''
raise Exception("asTopLevel not supported")
import sys, struct, re
from Ft.Xml import Parse
def debug(str):
sys.stderr.write("debug: %s\n" % str)
#pdml/packet/proto
# method
# 1. Check for children, if we have them make block and recurse
# 2. Look for value show attribute and see if it contains a sub portion of the
# data (treat this different)
# 3. Look for items labeled "len" or "length" and try and match them up
# 4. Optionally look at RFC's and try and match things up
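# Illustrative PDML fragment this walker consumes (values made up):
#   <proto name="tcp" size="20">
#     <field name="tcp.len" show="8" size="2" value="0008"/>
#     <field name="tcp.payload" size="8" value="..."/>
#   </proto>
# findSizes() flags tcp.len by name, findSizeGetSize() reads 8 from its
# 'show' attribute, and findSizeRelation() pairs it with tcp.payload
# because that sibling's size matches.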
class PeachShark:
def __init__(self):
self._currentPos = 0
self._regexIp = re.compile("^\d+\.\d+\.\d+\.\d+$")
self._regexFlagBit1 = re.compile("^(\.*)(\d+)(\.*)")
self._relations = {}
self._findStack = []
self._templates = []
def inStr(self, str, values):
str = str.lower()
for value in values:
if str.find(value) > -1:
#debug("found str")
return True
#debug("No: %s" % str)
return False
def findSizeRelation(self, sizeNode, node):
# We know two things:
#
# 1. Sizes always come first
# 2. It will be the size of something :P
#
# Prevent infinite looping
if node in self._findStack:
return None
self._findStack.append(node)
size = self.findSizeGetSize(sizeNode)
# Search from us forward
sibling = sizeNode.nextSibling
while sibling != None:
checkSize = self._getNodeSize(sibling)
if checkSize == size:
return sibling
sibling = sibling.nextSibling
# That didn't work look from parent
for child in node.childNodes:
if child != sizeNode:
checkSize = self._getNodeSize(child)
if checkSize == size:
return child
ret = self.findSizeRelation(sizeNode, child)
if ret != None:
return ret
# Search from parent forward
sibling = node.nextSibling
while sibling != None:
if not sibling.hasAttributeNS(None, 'size'):
sibling = sibling.nextSibling
continue
checkSize = int(sibling.getAttributeNS(None, 'size'))
if checkSize == size:
return sibling
ret = self.findSizeRelation(sizeNode, sibling)
if ret != None:
return ret
sibling = sibling.nextSibling
# !!!TODO!!! Sometimes length can indicate the rest of our siblings
# but they may not be in a block of there own.
# -> Detect
# -> Force into a bock
#
#sibling = node.previousSibling
#while sibling != None:
# sizeUptoMe += int(sibling.getAttributeNS(None, 'size'))
# sibling = sibling.previousSibling
#
## This is good, but not what we want!
#if (parentSize - sizeUptoMe) == size:
# return True
#else:
# debug("Nope: ParentSize: %d - SizeUptoMe: %d -- Size: %d" % (parentSize, sizeUptoMe, size))
return None
def findSizes(self, nodes):
'''
Find nodes that could be sizes or lengths.
'''
if nodes == None:
return []
findValues = ["length", "size"]
sizeNodes = []
for node in nodes:
if node == None:
continue
name = node.getAttributeNS(None, 'name')
show = node.getAttributeNS(None, 'show')
showName = node.getAttributeNS(None, 'showname')
if self.inStr(show, findValues) or self.inStr(showName, findValues) or self.inStr(name, findValues):
#debug("findSizes(): Found size: %s:%s" % (node.nodeName, name))
sizeNodes.append(node)
for n in self.findSizes(node.childNodes):
sizeNodes.append(n)
return sizeNodes
def findSizeGetSize(self, node):
'''
Take a size/length node and figure out its value.
'''
ret = None
if node.hasAttributeNS(None, 'show') and len(node.getAttributeNS(None, 'show')) > 0:
try:
return int(node.getAttributeNS(None, 'show'))
except:
pass
if node.hasAttributeNS(None, 'value') and len(node.getAttributeNS(None, 'value')) > 0:
try:
return int(node.getAttributeNS(None, 'value'), 16)
except:
pass
try:
return int(re.compile(r"(\d+)").search(node.getAttributeNS(None, 'show')).group(1))
except:
pass
debug(str("Failed on %s:%s" % (node.getAttributeNS(None, 'name'), node.nodeName)))
debug(str("Show: " + node.getAttributeNS(None, 'show')))
debug(str("Value: "+ node.getAttributeNS(None, 'value')))
raise Exception("OMG!!")
def findSizeRelationCheckSelf(self, node):
'''
Check if parent - me + prior siblings == size
'''
parentSize = self._getNodeSize(node.parentNode)
sizeUptoMe = self._getNodeSize(node)
size = self.findSizeGetSize(node)
#debug("%d:%d" % (parentSize,size))
# If our parent is the size we are indicating
# then return True!
if parentSize == size:
return True
return False
def findSizeRelations(self, nodes):
'''
Find and resolve size relations.
'''
debug("Finding relations: " + nodes[0].nodeName)
if nodes[0].nodeName == 'proto':
parentNode = nodes[0]
else:
parentNode = nodes[0].parentNode
for node in self.findSizes(nodes):
#debug("findSizeRelations()... %s:%s" % (node.nodeName, node.getAttributeNS(None, 'name')))
if self.findSizeRelationCheckSelf(node):
debug("findSizeRelations: Found relation to parent: %s and %s" % (node.getAttributeNS(None, 'name'), node.parentNode.getAttributeNS(None, 'name')))
self._relations[node] = node.parentNode
else:
ret = self.findSizeRelation(node, parentNode)
if ret != None:
debug("findSizeRelations: Found relation: %s and %s" % (node.getAttributeNS(None, 'name'), ret.getAttributeNS(None, 'name')))
self._relations[node] = ret
def removeTextNodes(self, node):
for child in node.childNodes:
if child
|
mooseman/pdteco
|
pdteco.py
|
Python
|
unlicense
| 6,753
| 0.041636
|
# pdteco.py
# A public-domain Python implementation of the core commands of TECO.
# This code is released to the public domain.
# "Share and enjoy....." ;)
#
# *** To do...... ***
# NOTE - the built-in functions f.tell() and f.seek() should be very
# useful.
# From the Python docs -
# f.tell() returns an integer giving the file object’s current position
# in the file, measured in bytes from the beginning of the file.
# To change the file object’s position, use f.seek(offset, from_what).
# The position is computed from adding offset to a reference point;
# the reference point is selected by the from_what argument.
# A from_what value of 0 measures from the beginning of the file,
# 1 uses the current file position, and 2 uses the end of the file
# as the reference point.
# from_what can be omitted and defaults to 0, using the beginning of
# the file as the reference point.
# NOTE - Most TECO commands follow this general pattern -
# nX string ESC
# We need to implement the following types of commands -
# a) File commands -
# - ERfname$ - open file "fname" for read access
# - EBfname$ - open file for read/write with backup
# - EWfname$ - open file for writing.
# - EX$$ - close output file and exit.
# b) The eight basic Teco functions
# - DOT (current value of POINT)
# - nC - Move POINT \T{n} characters forward.
# - nD - Delete \T{n} characters.
# - Istring\A{ESC} - Insert text.
# - nJ - Move POINT to absolute position \T{n}
# - m,nK - Kill a range of characters.
# - Sstring\A{ESC} - Search for a string.
# - Z - Current buffer size.
# c) Line-oriented commands -
# - nL - Move to beginning of $\T{n}^{th}$ line from \POINT{}.
# - nK - Kill from point to beginning of $\T{n}^{th}$ following
# line.
# d) Looping -
# - n< - Begin \T{n}-iteration loop.
# - > - End loop.
# - n; - Exit loop if $\T{n} \geq 0$.
# e) Conditionals -
# - n"x - ( To be completed..... )
# f) "Q-registers", to store results.
# g) Conversion functions, from numbers to strings and vice versa.
# Helper functions
# Move n characters left or right from current position
# Use f.seek(n, 1) where 1 denotes "measure from current position"
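# For example (a sketch, assuming f is an already-open file object):
#   f.seek(5, 0)   # nJ: move POINT to absolute byte 5
#   f.seek(-2, 1)  # -2C: move POINT two characters back from here
#   f.seek(0, 2)   # move POINT to the end of the buffer (Z)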
import string, linecache, os, fileinput, curses, curses.ascii, traceback
class editor(object):
def __init__(self):
self.dot = 0
self.buf = []
# The "Q-registers" (variables)
self.qregs = {}
self.fname = None
self.pos = self.line = 0
# Open a file
def open(self, fname):
#self.f = f.readlines()
self.f = open(fname, 'r+')
# Move to a line
def move2line(self, line):
pass
# Move by a given number of bytes from the current position
def moveinline(self, n):
self.f.seek(n, 1)
# Show the current position of the pointer.
def showptr(self):
return self.f.tell()
# Print a given number of bytes
def display(self, n):
self.f.read(n)
# Search for some text
def search(self, str):
pass
# Replace some text
def replace(self, target, repwith):
pass
# Insert some text
def ins_text(self, txt):
pass
# Delete some text
def del_text(self, txt):
pass
# Now the curses side of things.
# A class to handle keystrokes
class keyhandler:
def __init__(self, scr):
self.scr = scr
# Dictionary to store our data in. This uses the line-number as
# the key, and the line text as the data.
self.data = {}
self.stuff = ""
# A variable to save the line-number of text.
self.win_y = self.win_x = 0
# The screen size (number of rows and columns).
(self.max_y, self.max_x) = self.scr.getmaxyx()
# The top and bottom lines. These are defined because they help
# with page-up and page-down.
self.topline = 0
self.bottomline = self.max_y - 1
# Set page size (for page-up and page-down)
self.pagesize = self.max_y-1
curses.noecho()
self.scr.keypad(1)
self.scr.scrollok(1)
self.scr.idlok(1)
self.scr.setscrreg(0, self.max_y-1)
self.scr.refresh()
def action(self):
while (1):
curses.echo()
(y, x) = self.scr.getyx()
c=self.scr.getch() # Get a keystroke
if c in (curses.KEY_ENTER, 10):
#self.nextline()
pass
elif c==curses.KEY_BACKSPACE:
pass
elif c==curses.KEY_DC:
curses.noecho()
#self.removechar()
self.scr.refresh()
elif c==curses.KEY_UP:
curses.noecho()
self.scr.refresh()
# Ctrl-G quits the app
elif c==curses.ascii.BEL:
break
elif 0<c<256:
c=chr(c)
if x < self.max_x-2:
self.stuff += c
else:
self.nextline()
# Main loop
def main(stdscr):
a = keyhandler(stdscr)
a.action()
# Run the code from the command-line
if __name__ == '__main__':
try:
stdscr = curses.initscr()
curses.noecho() ; curses.cbreak()
stdscr.keypad(1)
main(stdscr) # Enter the main loop
# Set everything back to normal
stdscr.keypad(0)
curses.echo() ; curses.nocbreak()
curses.endwin() # Terminate curses
except:
# In the event of an error, restore the terminal
# to a sane state.
stdscr.keypad(0)
curses.echo() ; curses.nocbreak()
curses.endwin()
traceback.print_exc() # Print the exception
|
waymo-research/waymo-open-dataset
|
waymo_open_dataset/metrics/ops/motion_metrics_ops_test.py
|
Python
|
apache-2.0
| 13,287
| 0.001806
|
# Copyright 2021 The Waymo Open Dataset Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from waymo_open_dataset.metrics.ops import py_metrics_ops
from waymo_open_dataset.protos import motion_metrics_pb2
class MotionMetricsOpsTest(tf.test.TestCase):
"""Unit tests for motion metrics."""
def _BuildConfig(self, additional_config_str=''):
"""Builds a metrics config."""
config = motion_metrics_pb2.MotionMetricsConfig()
config_text = """
track_steps_per_second: 10
prediction_steps_per_second: 10
track_history_samples: 0
track_future_samples: 4
step_configurations {
measurement_step: 3
lateral_miss_threshold: 1.0
longitudinal_miss_threshold: 2.0
}
max_predictions: 6
speed_scale_lower: 1.0
speed_scale_upper: 1.0
speed_lower_bound: 1.4
speed_upper_bound: 11.0
""" + additional_config_str
text_format.Parse(config_text, config)
return config
def _CreateTestScenario(self):
gt_scenario_id = ['test']
gt_object_id = [[1, 2]]
gt_object_type = [[1, 1]]
gt_is_valid = np.ones([1, 2, 5], dtype=np.bool)
gt_trajectory = np.reshape([[[2, 2, 1, 1, 0.78539816, 20.0, 20.0],
[4, 4, 1, 1, 0.78539816, 20.0, 20.0],
[6, 6, 1, 1, 0.78539816, 20.0, 20.0],
[8, 8, 1, 1, 0.78539816, 20.0, 20.0],
[10, 10, 1, 1, 0.78539816, 20.0, 20.0]],
[[-1, 0, 1, 1, 3.14159, -10.0, 0.0],
[-2, 0, 1, 1, 3.14159, -10.0, 0.0],
[-3, 0, 1, 1, 3.14159, -10.0, 0.0],
[-4, 0, 1, 1, 3.14159, -10.0, 0.0],
[-5, 0, 1, 1, 3.14159, -10.0, 0.0]]],
[1, 2, 5, 7])
pred_gt_indices = np.reshape([0, 1], (1, 1, 2))
pred_gt_indices_mask = np.ones((1, 1, 2)) > 0.0
return {
'scenario_id': gt_scenario_id,
'object_id': gt_object_id,
'object_type': gt_object_type,
'gt_is_valid': gt_is_valid,
'gt_trajectory': gt_trajectory,
'pred_gt_indices': pred_gt_indices,
'pred_gt_indices_mask': pred_gt_indices_mask,
}
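# Shape conventions implied by the reshapes above (a reading of this test,
# not an API guarantee): gt_trajectory is [batch, num_agents, num_steps, 7]
# with the last axis (x, y, length, width, heading, velocity_x, velocity_y);
# pred_gt_indices is [batch, num_joint_predictions, num_agents_per_prediction].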
def setUp(self):
super(MotionMetricsOpsTest, self).setUp()
self._config = self._BuildConfig()
self._gt = self._CreateTestScenario()
def _RunEval(self, pred_score, pred_trajectory, gt=None, config=None):
if not gt:
gt = self._gt
if not config:
config = self._config
g = tf.Graph()
with g.as_default():
(min_ade, min_fde, miss_rate, overlap_rate,
mean_ap) = py_metrics_ops.motion_metrics(
config=config.SerializeToString(),
prediction_trajectory=pred_trajectory,
prediction_score=pred_score,
ground_truth_trajectory=gt['gt_trajectory'],
ground_truth_is_valid=gt['gt_is_valid'],
prediction_ground_truth_indices=gt['pred_gt_indices'],
prediction_ground_truth_indices_mask=gt['pred_gt_indices_mask'],
object_type=gt['object_type'],
object_id=gt['object_id'],
scenario_id=gt['scenario_id'])
with self.test_session(graph=g) as sess:
return sess.run([min_ade, min_fde, miss_rate, overlap_rate, mean_ap])
def testComputeMissRateNoMisses(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
(1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 1.0)
def testComputeMissRateNoMisses2(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[-2, 0], [-3, 0], [-4, 0], [-5, 0]],
[[4, 4], [6, 6], [8, 8], [10, 10]]],
(1, 1, 1, 2, 4, 2))
gt = copy.deepcopy(self._gt)
gt['pred_gt_indices'] = np.reshape([1, 0], (1, 1, 2))
val = self._RunEval(pred_score, pred_trajectory, gt=gt)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 1.0)
def testComputeMissRateLateral_2(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape(
[[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 1.01], [-3, 1.01], [-4, 1.01], [-5, 1.01]]], (1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.0)
def testComputeMissRateLateral_1(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [9.292, 10.708]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
(1, 1, 1, 2, 4, 2))
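    # The final waypoint (9.292, 10.708) sits just over 1.0 m lateral of the
    # ground truth endpoint (10, 10) relative to the 45-degree heading,
    # exceeding the 1.0 m lateral_miss_threshold.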
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.0)
def testComputeMissRateLongitudinal_2(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-7.01, 0]]],
(1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.0)
def testComputeMissRateLongitudinal_1(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [11.415, 11.415]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
(1, 1, 1, 2, 4, 2))
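    # The final waypoint (11.415, 11.415) lies ~2.001 m beyond the ground
    # truth endpoint (10, 10) along the heading, just past the 2.0 m
    # longitudinal threshold; the next test uses 11.414 to land just inside.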
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 1.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 0.0)
def testComputeNoMissLongitudinal_1(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [11.414, 11.414]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
(1, 1, 1, 2, 4, 2))
val = self._RunEval(pred_score, pred_trajectory)
# miss_rate of Vehicle.
self.assertEqual(val[2][0], 0.0)
# mean_ap of Vehicle.
self.assertEqual(val[4][0], 1.0)
def testComputeVelocityScalingLatitudinal(self):
pred_score = np.reshape([0.5], (1, 1, 1))
pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [10, 10]],
[[-2, 0], [-3, 0], [-4, 0], [-5, 0.75]]],
(1, 1, 1, 2, 4, 2))
config = motion_metrics_pb2.MotionMetricsConfig()
config.CopyFrom(self._config)
config.speed_scale_lower = 0.5
config.speed_scale_upper = 1.0
config.speed_lower_bound = 1.0
config.speed_upper_bound = 3.0
val = self._RunEval(pred_score, pred_trajectory, config=config)
# miss_rate of Vehicle.
    # Completion inferred from the config: the ground truth speed (10 m/s)
    # exceeds speed_upper_bound, so the scale saturates at speed_scale_upper
    # (1.0) and the 0.75 m lateral offset stays within the 1.0 m threshold.
    self.assertEqual(val[2][0], 0.0)
|
blakerohde/sidewalk
|
sidewalk/core/exceptions.py
|
Python
|
isc
| 1,924
| 0.04262
|
"""
sidewalk.exceptions
This module contains custom exceptions that can be thrown by Sidewalk.
:copyright: (c) 2013 by Blake Rohde.
:license: ISC, see LICENSE for more details.
"""
class SidewalkSettingsFileIOError(Exception):
"""Settings file IOError."""
def __init__(self, filename, permission):
self.filename = filename
self.permission = permission
def __str__(self):
return repr('%s %s' % (
self.filename,
self.permission
))
class SidewalkSectionNotDefined(Exception):
"""The specified settings file does not contain a required section."""
def __init__(self, filename, section):
self.filename = filename
self.section = section
def __str__(self):
return repr('%s %s' % (
self.filename,
self.section
))
class SidewalkKeyDoesNotExist(Exception):
"""Activity processor requested is not defined."""
def __init__(self, key):
self.key = key
def __str__(self):
return repr(self.key)
class SidewalkGroupDoesNotExist(Exception):
"""Activity pro
|
cessor group requested is not defined."""
def __init__(self, group_key):
self.group_key = group_key
def __str__(self):
return repr(self.group_key)
class SidewalkModuleImportError(Exception):
"""Activity processor module could not be imported."""
    def __init__(self, module):
self.module = module
def __str__(self):
return repr(self.module)
class SidewalkMethodDoesNotExist(Exception):
"""The Activity processor (method) does exist in the specified module."""
def __init__(self, module, method):
self.module = module
self.method = method
def __str__(self):
return repr('%s %s' % (
self.module,
self.method
))
class SidewalkRogueActivityProcessor(Exception):
"""The Activity processor threw an unhandled exception."""
def __init__(self, activity_processor):
self.activity_processor = activity_processor
def __str__(self):
return repr(self.activity_processor)
|
viswanathgs/EratoSCP
|
src/mountremote.py
|
Python
|
gpl-3.0
| 2,361
| 0.034307
|
import gtk
import pygtk
import pexpect
import gio
import commands
import gobject
class RemoteMounter:
def login_remote(self, host, port, username, password):
'''
Mount the remote file system and update the remote file
chooser to the corresponding location.
Any remote filesystem, previously mounted by the application,
is first unmounted.
'''
if self.is_mounted:
self.unmount_remote()
self.mount_remote(host, port, username, password)
remote_uri = 'file:///home/' + self.local_username + '/.gvfs/'
self.remote_file_chooser.set_current_folder_uri(remote_uri)
# gobject.idle_add(self.remote_file_chooser.set_uri, remote_uri)
def unmount_remote(self):
'''
Unmount a previously mounted remote file system.
Also, set the remote file chooser widget insensitive.
'''
if self.is_mounted:
(status, output) = commands.getstatusoutput('gvfs-mount -u sftp://' + self.last_mount)
self.is_mounted = False
gobject.idle_add(self.remote_file_chooser.set_sensitive, False)
def already_mounted(self, host, username):
'''
Return True if the remote filesystem has already been mounted,
else return False.
'''
(status, output) = commands.getstatusoutput('ls /home/' + self.local_username + '/.gvfs/')
        if output.find('sftp for ' + username + ' on ' + host) != -1:
return True
return False
def mount_remote(self, host, port, username, password):
'''
        Mount the remote filesystem if it is not mounted already.
Also, set the remote file chooser widget sensitive.
'''
if port == '':
port = 22
remote = username + '@' + host + ':' + str(port)
if not self.already_mounted(host, username):
child = pexpect.spawn('gvfs-mount sftp://' + remote)
child.expect('Password:\s*')
child.sendline(password)
child.expect(pexpect.EOF)
self.is_mounted = True
self.last_mount = remote
gobject.idle_add(self.remote_file_chooser.set_sensitive, True)
def __init__(self, remote_file_chooser):
'''
        Constructor. Assign a data member to point to the remote
file chooser widget.
Initialize data members related to previous mounts
to their defaults.
Obtain the name of the local user.
'''
self.remote_file_chooser = remote_file_chooser
self.is_mounted = False
self.last_mount = ''
(status, self.local_username) = commands.getstatusoutput('whoami')
|
dcf21/4most-4gp
|
src/pythonModules/fourgp_specsynth/fourgp_specsynth/__init__.py
|
Python
|
mit
| 666
| 0.001502
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This python module defines a wrapper for synthesising spectra using TurboSpectrum.
"""
import logging
from numpy import RankWarning
from warnings import simplefilter
from .turbospectrum import TurboSpectrum
from .solar_abundances import solar_abundances
__version__ = "20190301.1"
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)  # TODO: Remove this when stable.
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
"%(asctime)s [%(levelname)-8s] %(message)s"))
logger.addHandler(handler)
simplefilter("ignore", RankWarning)
simplefilter("ignore", RuntimeWarning)
|
SphinxKnight/kuma
|
kuma/wiki/tests/conftest.py
|
Python
|
mpl-2.0
| 11,242
| 0.000089
|
# -*- coding: utf-8 -*-
"""py.test fixtures for kuma.wiki.tests."""
import base64
import json
from collections import namedtuple
from datetime import datetime
import pytest
from django.contrib.auth.models import Permission
from waffle.testutils import override_flag
from ..models import Document, DocumentDeletionLog, Revision
BannedUser = namedtuple('BannedUser', 'user ban')
Contributors = namedtuple('Contributors', 'valid banned inactive')
DocWithContributors = namedtuple('DocWithContributors', 'doc contributors')
DocHierarchy = namedtuple('DocHierarchy', 'top middle_top middle_bottom bottom')
KumaScriptToolbox = namedtuple(
'KumaScriptToolbox',
'errors errors_as_headers macros_response'
)
@pytest.fixture
def inactive_wiki_user(db, django_user_model):
"""An inactive test user."""
return django_user_model.objects.create(
is_active=False,
username='wiki_user_slacker',
email='wiki_user_slacker@example.com',
date_joined=datetime(2017, 4, 19, 10, 58))
@pytest.fixture
def banned_wiki_user(db, django_user_model, wiki_user):
"""A banned test user."""
user = django_user_model.objects.create(
username='bad_wiki_user',
email='bad_wiki_user@example.com',
date_joined=datetime(2017, 4, 18, 9, 15)
)
ban = user.bans.create(by=wiki_user, reason='because')
return BannedUser(user=user, ban=ban)
@pytest.fixture
def wiki_moderator(db, django_user_model):
"""A user with moderator permissions."""
moderator = django_user_model.objects.create(
username='moderator',
email='moderator@example.com',
date_joined=datetime(2018, 8, 21, 18, 19))
moderator.user_permissions.add(
Permission.objects.get(codename='purge_document'),
Permission.objects.get(codename='delete_document'),
Permission.objects.get(codename='restore_document')
)
return moderator
@pytest.fixture
def moderator_client(client, wiki_moderator):
"""A test client with wiki_moderator logged in."""
wiki_moderator.set_password('password')
wiki_moderator.save()
client.login(username=wiki_moderator.username, password='password')
with override_flag('kumaediting', True):
yield client
@pytest.fixture
def edit_revision(root_doc, wiki_user):
"""A revision that edits an English document."""
root_doc.current_revision = Revision.objects.create(
document=root_doc,
creator=wiki_user,
content='<p>The root document.</p>',
comment='Done with initial version.',
created=datetime(2017, 4, 14, 12, 30))
root_doc.save()
return root_doc.current_revision
@pytest.fixture
def trans_revision(trans_doc):
return trans_doc.current_revision
@pytest.fixture
def trans_edit_revision(trans_doc, edit_revision, wiki_user):
"""A further edit to the translated document."""
trans_doc.current_revision = Revision.objects.create(
document=trans_doc,
creator=wiki_user,
based_on=edit_revision,
content='<p>Le document racine.</p>',
title='Racine du Document',
created=datetime(2017, 4, 14, 20, 25))
trans_doc.save()
return trans_doc.current_revision
@pytest.fixture
def deleted_doc(wiki_moderator):
"""A recently deleted but unpurged document."""
deleted_doc = Document.objects.create(
locale='en-US', slug='Doomed', title='Doomed Document')
Revision.objects.create(
document=deleted_doc,
creator=wiki_moderator,
content='<p>This document is doomed...</p>',
title='Doomed Document',
created=datetime(2018, 8, 21, 17, 3))
deleted_doc.delete()
DocumentDeletionLog.objects.create(
user=wiki_moderator,
reason="Deleted doomed document",
locale='en-US',
slug='Doomed')
DocumentDeletionLog.objects.filter(user=wiki_moderator).update(
timestamp=datetime(2018, 8, 21, 17, 22))
return deleted_doc
@pytest.fixture
def doc_hierarchy(wiki_user, wiki_user_2, wiki_user_3):
top_doc = Document.objects.create(
locale='en-US',
slug='top',
title='Top Document'
)
Revision.objects.create(
document=top_doc,
creator=wiki_user,
content='<p>Top...</p>',
title='Top Document',
created=datetime(2017, 4, 24, 13, 49)
)
top_de_doc = Document.objects.create(
locale='de',
slug='oben',
title='Oben Dokument',
rendered_html='<p>Oben...</p>',
parent=top_doc
)
Revision.objects.create(
document=top_de_doc,
creator=wiki_user_2,
based_on=top_doc.current_revision,
content='<p>Oben...</p>',
title='Oben Dokument',
created=datetime(2017, 4, 30, 10, 3)
)
top_fr_doc = Document.objects.create(
locale='fr',
slug='haut',
title='Haut Document',
rendered_html='<p>Haut...</p>',
parent=top_doc
)
Revision.objects.create(
document=top_fr_doc,
creator=wiki_user_3,
based_on=top_doc.current_revision,
content='<p>Haut...</p>',
title='Haut Document',
is_approved=True,
created=datetime(2017, 4, 30, 12, 1)
)
top_it_doc = Document.objects.create(
locale='it',
slug='superiore',
title='Superiore Documento',
rendered_html='<p>Superiore...</p>',
parent=top_doc
)
Revision.objects.create(
document=top_it_doc,
creator=wiki_user_2,
based_on=top_doc.current_revision,
content='<p>Superiore...</p>',
title='Superiore Documento',
created=datetime(2017, 4, 30, 11, 17)
)
middle_top_doc = Document.objects.create(
locale='en-US',
slug='top/middle-top',
title='Middle-Top Document',
parent_topic=top_doc
)
Revision.objects.create(
document=middle_top_doc,
creator=wiki_user,
content='<p>Middle-Top...</p>',
title='Middle-Top Document',
created=datetime(2017, 4, 24, 13, 50)
)
middle_bottom_doc = Document.objects.create(
locale='en-US',
slug='top/middle-top/middle-bottom',
title='Middle-Bottom Document',
parent_topic=middle_top_doc
)
Revision.objects.create(
document=middle_bottom_doc,
creator=wiki_user,
content='<p>Middle-Bottom...</p>',
title='Middle-Bottom Document',
created=datetime(2017, 4, 24, 13, 51)
)
bottom_doc = Document.objects.create(
locale='en-US',
slug='top/middle-top/middle-bottom/bottom',
title='Bottom Document',
parent_topic=middle_bottom_doc
)
Revision.objects.create(
document=bottom_doc,
creator=wiki_user,
content='<p>Bottom...</p><div id="Quick_Links"><p>sidebar</p></div>',
title='Bottom Document',
created=datetime(2017, 4, 24, 13, 52)
)
return DocHierarchy(
top=top_doc,
middle_top=middle_top_doc,
        middle_bottom=middle_bottom_doc,
bottom=bottom_doc,
)
@pytest.fixture
def root_doc_with_mixed_contributors(root_doc, wiki_user, wiki_user_2,
inactive_wiki_user, banned_wiki_user):
"""
A top-level English document with mixed contributors (some are valid,
some are banned, and some are inactive).
"""
root_doc.current_revision = Revision.objects.create(
document=root_doc,
creator=wiki_user_2,
        content='<p>The root document.</p>',
comment='Done with the initial version.',
created=datetime(2017, 4, 17, 12, 35))
root_doc.save()
root_doc.current_revision = Revision.objects.create(
document=root_doc,
creator=inactive_wiki_user,
content='<p>The root document re-envisioned.</p>',
comment='Done with the second revision.',
created=datetime(2017, 4, 18, 10, 15))
root_doc.save()
root_doc.current_revision = Revision.objects.create(
document=root_doc,
creator=banned_wiki_user.user,
content='<p>The root document re-envisioned with malice.</
|
percyfal/bokeh
|
examples/plotting/file/tap.py
|
Python
|
bsd-3-clause
| 898
| 0.005568
|
import numpy as np
from bokeh.plotting import figure, show, output_file
from bokeh.models import TapTool
xx, yy = np.meshgrid(range(0,101,4), range(0,101,4))
x = xx.flatten()
y = yy.flatten()
N = len(x)
inds = [str(i) for i in np.arange(N)]
radii = np.random.random(size=N)*0.4 + 1.7
colors = [
"#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)
]
TOOLS="crosshair,pan,wheel_zoom,box_zoom,reset,tap,save"
p = figure(title="Tappy Scatter", tools=TOOLS)
cr = p.circle(x, y, radius=radii,
              fill_color=colors, fill_alpha=0.6, line_color=None)
tr = p.text(x, y, text=inds, alpha=0.5, text_font_size="5pt",
text_baseline="middle", text_align="center")
# in the browser console, you will see messages when circles are clicked
p.select_one(TapTool).renderers = [cr]
output_file("tap.html", title="tap.py example")
show(p) # open a browser
|
guyrt/teaching
|
2017/Com597I/babynames/problem8.py
|
Python
|
mit
| 607
| 0.003295
|
import ssadata
# how many boys names are also girls names?
# implementation details:
# find all names in the boys data set that are
# also keys in the girls data set.
num_shared_names = 0
for name in ssadata.boys:
if name in ssadata.girls:
num_shared_names = num_shared_names + 1
print(str(num_shared_names) + " names out of " + str(len(ssadata.boys)) + " are shared.")
num_shared_names = 0
for name in ssadata.girls:
if name in ssadata.boys:
        num_shared_names = num_shared_names + 1
print(str(num_shared_names) + " names out of " + str(len(ssadata.girls)) + " are shared.")
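# Note: both loops count the same intersection of names, so the two totals
# are equal; only the denominators (total boys vs. girls names) differ.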
|
balanced/balanced-python
|
balanced/exc.py
|
Python
|
mit
| 2,557
| 0.000391
|
from __future__ import unicode_literals
import httplib
import wac
class BalancedError(Exception):
def __str__(self):
attrs = ', '.join([
'{0}={1}'.format(k, repr(v))
for k, v in self.__dict__.iteritems()
])
return '{0}({1})'.format(self.__class__.__name__, attrs)
class ResourceError(BalancedError):
pass
class NoResultFound(BalancedError):
pass
class MultipleResultsFound(BalancedError):
pass
class FundingSourceNotCreditable(Exception):
pass
def convert_error(ex):
if not hasattr(ex.response, 'data'):
return ex
return HTTPError.from_response(**ex.response.data)(ex)
class HTTPError(BalancedError, wac.Error):
class __metaclass__(type):
def __new__(meta_cls, name, bases, dikt):
cls = type.__new__(meta_cls, name, bases, dikt)
cls.types = [
getattr(cls, k)
for k in dir(cls)
if k.isupper() and isinstance(getattr(cls, k), basestring)
]
cls.type_to_error.update(zip(cls.types, [cls] * len(cls.types)))
return cls
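    # The metaclass above collects every upper-case string attribute of each
    # subclass into `types` and registers those values in `type_to_error`,
    # which from_response() uses to map an error category code to the most
    # specific exception class.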
def __init__(self, requests_ex):
super(wac.Error, self).__init__(requests_ex)
self.status_code = requests_ex.response.status_code
data = getattr(requests_ex.response, 'data', {})
for k, v in data.get('errors', [{}])[0].iteritems():
setattr(self, k, v)
@classmethod
def format_message(cls, requests_ex):
data = getattr(requests_ex.response, 'data', {})
        status = httplib.responses[requests_ex.response.status_code]
error = data['errors'][0]
status = error.pop('status', status)
status_code = error.pop('status_code',
requests_ex.response.status_code)
desc = error.pop('description', None)
        message = ': '.join(str(v) for v in [status, status_code, desc] if v)
return message
@classmethod
def from_response(cls, **data):
try:
err = data['errors'][0]
exc = cls.type_to_error.get(err['category_code'], HTTPError)
except:
exc = HTTPError
return exc
type_to_error = {}
class FundingInstrumentVerificationFailure(HTTPError):
pass
class BankAccountVerificationFailure(FundingInstrumentVerificationFailure):
AUTH_NOT_PENDING = 'bank-account-authentication-not-pending'
AUTH_FAILED = 'bank-account-authentication-failed'
AUTH_DUPLICATED = 'bank-account-authentication-already-exists'
|
jencce/stuff
|
py/class.py
|
Python
|
gpl-2.0
| 693
| 0.049062
|
#! /usr/bin/env python
class myclass:
"zx class"
i = 11111
def f(self):
return 'hw'
def pi(self):
print "pi i={0}".format(self.i)
def si(self, v):
self.i = v
def pp(self):
print 'pp',
self.pi()
x = myclass()
x.f()
x.pi()
x.si(9)
x.pi()
x.pp()
print '============================'
class newclass(myclass):
j = 2222
def pj(self):
print "j={0}".format(self.j)
def sj(self, v):
self.j = v
# override baseclass method with same name
# nothing to do with args, but can fail
# compatibility
def pi(self):
print 'new pi {}'.format(self.i)
y = newclass()
y.f()
y.pi()
y.pj()
y.sj(9)
y.pj()
y.pp()
myclass.pi(y)
print "========================="
z=myclass()
z.pp()
|
Odingod/mne-python
|
mne/io/fiff/tests/test_raw.py
|
Python
|
bsd-3-clause
| 38,869
| 0
|
from __future__ import print_function
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Denis Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import os
import os.path as op
import glob
from copy import deepcopy
import warnings
import itertools as itt
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_equal)
from nose.tools import assert_true, assert_raises, assert_not_equal
from mne.datasets import testing
from mne.io.constants import FIFF
from mne.io import Raw, concatenate_raws, read_raw_fif
from mne.io.tests.test_raw import _test_concat
from mne import (concatenate_events, find_events, equalize_channels,
compute_proj_raw, pick_types, pick_channels)
from mne.utils import (_TempDir, requires_pandas, slow_test,
requires_mne, run_subprocess, run_tests_if_main)
from mne.externals.six.moves import zip, cPickle as pickle
from mne.io.proc_history import _get_sss_rank
from mne.io.pick import _picks_by_type
warnings.simplefilter('always') # enable b/c these tests throw warnings
data_dir = op.join(testing.data_path(download=False), 'MEG', 'sample')
fif_fname = op.join(data_dir, 'sample_audvis_trunc_raw.fif')
base_dir = op.join(op.dirname(__file__), '..', '..', 'tests', 'data')
test_fif_fname = op.join(base_dir, 'test_raw.fif')
test_fif_gz_fname = op.join(base_dir, 'test_raw.fif.gz')
ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
fif_bad_marked_fname = op.join(base_dir, 'test_withbads_raw.fif')
bad_file_works = op.join(base_dir, 'test_bads.txt')
bad_file_wrong = op.join(base_dir, 'test_wrong_bads.txt')
hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt')
hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
@slow_test
def test_concat():
"""Test RawFIF concatenation"""
_test_concat(read_raw_fif, test_fif_fname)
@testing.requires_testing_data
def test_hash_raw():
"""Test hashing raw objects
"""
raw = read_raw_fif(fif_fname)
assert_raises(RuntimeError, raw.__hash__)
raw = Raw(fif_fname).crop(0, 0.5, False)
raw.preload_data()
raw_2 = Raw(fif_fname).crop(0, 0.5, False)
raw_2.preload_data()
assert_equal(hash(raw), hash(raw_2))
# do NOT use assert_equal here, failing output is terrible
assert_equal(pickle.dumps(raw), pickle.dumps(raw_2))
raw_2._data[0, 0] -= 1
assert_not_equal(hash(raw), hash(raw_2))
@testing.requires_testing_data
def test_subject_info():
"""Test reading subject information
"""
tempdir = _TempDir()
    raw = Raw(fif_fname).crop(0, 1, False)
assert_true(raw.info['subject_info'] is None)
# fake some subject data
keys = ['id', 'his_id', 'last_name', 'first_name', 'birthday', 'sex',
'hand']
vals = [1, 'foobar', 'bar', 'foo', (1901, 2, 3), 0, 1]
subject_info = dict()
for key, val in zip(keys, vals):
subject_info[key] = val
raw.info['subject_info'] = subject_info
out_fname = op.join(tempdir, 'test_subj_info_raw.fif')
raw.save(out_fname, overwrite=True)
raw_read = Raw(out_fname)
for key in keys:
assert_equal(subject_info[key], raw_read.info['subject_info'][key])
raw_read.anonymize()
assert_true(raw_read.info.get('subject_info') is None)
out_fname_anon = op.join(tempdir, 'test_subj_info_anon_raw.fif')
raw_read.save(out_fname_anon, overwrite=True)
raw_read = Raw(out_fname_anon)
assert_true(raw_read.info.get('subject_info') is None)
@testing.requires_testing_data
def test_copy_append():
"""Test raw copying and appending combinations
"""
raw = Raw(fif_fname, preload=True).copy()
raw_full = Raw(fif_fname)
raw_full.append(raw)
data = raw_full[:, :][0]
assert_equal(data.shape[1], 2 * raw._data.shape[1])
@slow_test
@testing.requires_testing_data
def test_rank_estimation():
"""Test raw rank estimation
"""
iter_tests = itt.product(
[fif_fname, hp_fif_fname], # sss
['norm', dict(mag=1e11, grad=1e9, eeg=1e5)]
)
for fname, scalings in iter_tests:
raw = Raw(fname)
(_, picks_meg), (_, picks_eeg) = _picks_by_type(raw.info,
meg_combined=True)
n_meg = len(picks_meg)
n_eeg = len(picks_eeg)
raw = Raw(fname, preload=True)
if 'proc_history' not in raw.info:
expected_rank = n_meg + n_eeg
else:
mf = raw.info['proc_history'][0]['max_info']
expected_rank = _get_sss_rank(mf) + n_eeg
assert_array_equal(raw.estimate_rank(scalings=scalings), expected_rank)
assert_array_equal(raw.estimate_rank(picks=picks_eeg,
scalings=scalings),
n_eeg)
raw = Raw(fname, preload=False)
if 'sss' in fname:
tstart, tstop = 0., 30.
raw.add_proj(compute_proj_raw(raw))
raw.apply_proj()
else:
tstart, tstop = 10., 20.
raw.apply_proj()
n_proj = len(raw.info['projs'])
assert_array_equal(raw.estimate_rank(tstart=tstart, tstop=tstop,
scalings=scalings),
expected_rank - (1 if 'sss' in fname else n_proj))
@testing.requires_testing_data
def test_output_formats():
"""Test saving and loading raw data using multiple formats
"""
tempdir = _TempDir()
formats = ['short', 'int', 'single', 'double']
tols = [1e-4, 1e-7, 1e-7, 1e-15]
# let's fake a raw file with different formats
raw = Raw(test_fif_fname).crop(0, 1, copy=False)
temp_file = op.join(tempdir, 'raw.fif')
for ii, (fmt, tol) in enumerate(zip(formats, tols)):
# Let's test the overwriting error throwing while we're at it
if ii > 0:
assert_raises(IOError, raw.save, temp_file, fmt=fmt)
raw.save(temp_file, fmt=fmt, overwrite=True)
raw2 = Raw(temp_file)
raw2_data = raw2[:, :][0]
assert_allclose(raw2_data, raw[:, :][0], rtol=tol, atol=1e-25)
assert_equal(raw2.orig_format, fmt)
def _compare_combo(raw, new, times, n_times):
for ti in times: # let's do a subset of points for speed
orig = raw[:, ti % n_times][0]
# these are almost_equals because of possible dtype differences
assert_allclose(orig, new[:, ti][0])
@slow_test
@testing.requires_testing_data
def test_multiple_files():
"""Test loading multiple files simultaneously
"""
# split file
tempdir = _TempDir()
raw = Raw(fif_fname).crop(0, 10, False)
raw.preload_data()
raw.preload_data() # test no operation
split_size = 3. # in seconds
sfreq = raw.info['sfreq']
nsamp = (raw.last_samp - raw.first_samp)
tmins = np.round(np.arange(0., nsamp, split_size * sfreq))
tmaxs = np.concatenate((tmins[1:] - 1, [nsamp]))
tmaxs /= sfreq
tmins /= sfreq
assert_equal(raw.n_times, len(raw.times))
# going in reverse order so the last fname is the first file (need later)
raws = [None] * len(tmins)
for ri in range(len(tmins) - 1, -1, -1):
fname = op.join(tempdir, 'test_raw_split-%d_raw.fif' % ri)
raw.save(fname, tmin=tmins[ri], tmax=tmaxs[ri])
raws[ri] = Raw(fname)
events = [find_events(r, stim_channel='STI 014') for r in raws]
last_samps = [r.last_samp for r in raws]
first_samps = [r.first_samp for r in raws]
# test concatenation of split file
assert_raises(ValueError, concatenate_raws, raws, True, events[1:])
all_raw_1, events1 = concatenate_raws(raws, preload=False,
events_list=events)
assert_equal(raw.first_samp, all_raw_1.first_samp)
assert_equal(raw.last_samp, all_raw_1.last_samp)
assert_allclose(raw[:, :][0], all_raw_1[:, :][0])
raws[0] = Raw(fname)
all_raw_2 = concatenate_raws(raws, preload=True)
assert_allclose(raw[:, :][0], all_raw_2[:, :][0])
# test proper event treatment for split files
ev
|
batxes/4Cin
|
SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/SHH_WT_models12702.py
|
Python
|
gpl-3.0
| 17,583
| 0.025081
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((3171.68, 9029.21, -1139.01), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((3173.32, 7641.65, -7.59267), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((3803.4, 8043.31, 1799.07), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((2666.72, 9556.45, 456.26), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
	marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((2401.45, 10810.3, 1711.38), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((3826.97, 10319.9, 3558.07), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((5267.4, 10161.5, 4525.44), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((4933.12, 10729, 4046.72), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((6683.05, 9391.65, 5497.93), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((8208.82, 10014.2, 5535.15), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((8867.56, 8468.95, 6393.47), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((8619.61, 7593.49, 5646.83), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((9013.94, 6288.27, 4803.55), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((8872.4, 7041.74, 3460.97), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((10190.2, 5903.46, 2028.9), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((9719.44, 3094.41, 731.581), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((8077.39, 2602.05, 1658.32), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((8868.48, 2528.27, 2892.63), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((9165.75, 3921.61, 3841.11), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((10301.5, 4486.01, 4688.54), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((8522.51, 5837.01, 5636.16), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((9493.49, 4172.69, 4953.76), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((9034.89, 3774.53, 5847.71), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((8777.35, 2523.89, 5440.65), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((8573.36, 2540.82, 4012.57), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((8998.9, 1643.92, 2683.61), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((8990.55, 2742.59, 3735.47), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((8309.5, 4779.28, 4133.88), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((8469.04, 4310.3, 5552.9), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((8405.08, 5264.39, 6396.31), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((8116.19, 4748.65, 6198.76), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((8458.16, 6381.13, 5980.73), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"
|
Azure/azure-sdk-for-python
|
sdk/search/azure-mgmt-search/azure/mgmt/search/operations/_shared_private_link_resources_operations.py
|
Python
|
mit
| 27,117
| 0.00579
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SharedPrivateLinkResourcesOperations(object):
"""SharedPrivateLinkResourcesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.search.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _create_or_update_initial(
self,
resource_group_name, # type: str
search_service_name, # type: str
shared_private_link_resource_name, # type: str
shared_private_link_resource, # type: "_models.SharedPrivateLinkResource"
search_management_request_options=None, # type: Optional["_models.SearchManagementRequestOptions"]
**kwargs # type: Any
):
# type: (...) -> Optional["_models.SharedPrivateLinkResource"]
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.SharedPrivateLinkResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_client_request_id = None
if search_management_request_options is not None:
_client_request_id = search_management_request_options.client_request_id
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'searchServiceName': self._serialize.url("search_service_name", search_service_name, 'str'),
'sharedPrivateLinkResourceName': self._serialize.url("shared_private_link_resource_name", shared_private_link_resource_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _client_request_id is not None:
header_parameters['x-ms-client-request-id'] = self._serialize.header("client_request_id", _client_request_id, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(shared_private_link_resource, 'SharedPrivateLinkResource')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('SharedPrivateLinkResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Search/searchServices/{searchServiceName}/sharedPrivateLinkResources/{sharedPrivateLinkResourceName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
search_service_name, # type: str
shared_private_link_resource_name, # type: str
shared_private_link_resource, # type: "_models.SharedPrivateLinkResource"
search_management_request_options=None, # type: Optional["_models.SearchManagementRequestOptions"]
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.SharedPrivateLinkResource"]
"""Initiates the creation or update of a shared private link resource managed by the search
service in the given resource group.
:param resource_group_name: The name of the resource group within the current subscription. You
can obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param search_service_name: The name of the Azure Cognitive Search service associated with the
specified resource group.
:type search_service_name: str
:param shared_private_link_resource_name: The name of the shared private link resource managed
by the Azure Cognitive Search service within the specified resource group.
:type shared_private_link_resource_name: str
:param shared_private_link_resource: The definition of the shared private link resource to
create or update.
:type shared_private_link_resource: ~azure.mgmt.search.models.SharedPrivateLinkResource
:param search_management_request_options: Parameter group.
:type search_management_request_options: ~azure.mgmt.search.models.SearchManagementRequestOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either SharedPrivateLinkResource or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.search.models.SharedPrivateLinkR
|
robofit/artable
|
art_simple_tracker/scripts/fake_detector.py
|
Python
|
lgpl-2.1
| 2,036
| 0.002456
|
#!/usr/bin/env python
import rospy
from art_msgs.msg import ObjInstance, InstancesArray
import time
import sys
import random
from tf import transformations
from math import pi
def a2q(q, arr):
q.x = arr[0]
q.y = arr[1]
q.z = arr[2]
q.w = arr[3]
class FakeDetector:
def __init__(self, obj_id, frame_id, pos, rpy, noise):
self.object_publisher = rospy.Publisher('/art/object_detector/object',
InstancesArray, queue_size=10, latch=True)
self.frame_id = frame_id
self.pos = pos
self.noise = noise
self.obj = ObjInstance()
self.obj.object_id = obj_id
self.obj.object_type = "fake_object_type"
angles = list(rpy)
for idx in range(0, len(angles)):
angles[idx] = angles[idx] / 360.0 * 2 * pi
# TODO apply noise also to orientation
a2q(self.obj.pose.orientation, transformations.quaternion_from_euler(*angles))
self.timer = rospy.Timer(rospy.Duration(0.1), self.timer_callback)
def timer_callback(self, evt):
ia = InstancesArray()
ia.header.stamp = rospy.Time.now()
ia.header.frame_id = self.frame_id
self.obj.pose.position.x = self.pos[0] + random.uniform(-self.noise, self.noise)
self.obj.pose.position.y = self.pos[1] + random.uniform(-self.noise, self.noise)
self.obj.pose.position.z = self.pos[2] + random.uniform(-self.noise, self.noise)
ia.instances = [self.obj]
        self.object_publisher.publish(ia)
if __name__ == '__main__':
rospy.init_node('fake_detector', anonymous=True)
    try:
pos = (float(sys.argv[3]), float(sys.argv[4]), float(sys.argv[5]))
rpy = (float(sys.argv[6]), float(sys.argv[7]), float(sys.argv[8]))
FakeDetector(sys.argv[1], sys.argv[2], pos, rpy, float(sys.argv[9]))
rospy.spin()
except rospy.ROSInterruptException:
pass
except IndexError:
print("Arguments: obj_id frame_id x y z r p y noise")
|
vagonbar/GNUnetwork
|
gwn/utils/testing/__init__.py
|
Python
|
gpl-3.0
| 90
| 0.011111
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Testing and testing-related modules.
'''
|
jumpserver/jumpserver
|
apps/assets/api/asset.py
|
Python
|
gpl-3.0
| 10,108
| 0.001187
|
# -*- coding: utf-8 -*-
#
from rest_framework.viewsets import ModelViewSet
from rest_framework.generics import RetrieveAPIView, ListAPIView
from django.shortcuts import get_object_or_404
from django.db.models import Q
from common.utils import get_logger, get_object_or_none
from common.mixins.api import SuggestionMixin
from users.models import User, UserGroup
from users.serializers import UserSerializer, UserGroupSerializer
from users.filters import UserFilter
from perms.models import AssetPermission
from perms.serializers import AssetPermissionSerializer
from perms.filters import AssetPermissionFilter
from orgs.mixins.api import OrgBulkModelViewSet
from orgs.mixins import generics
from assets.api import FilterAssetByNodeMixin
from ..models import Asset, Node, Platform
from .. import serializers
from ..tasks import (
update_assets_hardware_info_manual, test_assets_connectivity_manual,
test_system_users_connectivity_a_asset, push_system_users_a_asset
)
from ..filters import FilterAssetByNodeFilterBackend, LabelFilterBackend, IpInFilterBackend
logger = get_logger(__file__)
__all__ = [
'AssetViewSet', 'AssetPlatformRetrieveApi',
'AssetGatewayListApi', 'AssetPlatformViewSet',
'AssetTaskCreateApi', 'AssetsTaskCreateApi',
'AssetPermUserListApi', 'AssetPermUserPermissionsListApi',
'AssetPermUserGroupListApi', 'AssetPermUserGroupPermissionsListApi',
]
class AssetViewSet(SuggestionMixin, FilterAssetByNodeMixin, OrgBulkModelViewSet):
"""
API endpoint that allows Asset to be viewed or edited.
"""
model = Asset
filterset_fields = {
'hostname': ['exact'],
'ip': ['exact'],
'system_users__id': ['exact'],
'platform__base': ['exact'],
'is_active': ['exact'],
'protocols': ['exact', 'icontains']
}
search_fields = ("hostname", "ip")
ordering_fields = ("hostname", "ip", "port", "cpu_cores")
ordering = ('hostname', )
serializer_classes = {
'default': serializers.AssetSerializer,
'suggestion': serializers.MiniAssetSerializer
}
rbac_perms = {
'match': 'assets.match_asset'
}
extra_filter_backends = [FilterAssetByNodeFilterBackend, LabelFilterBackend, IpInFilterBackend]
def set_assets_node(self, assets):
if not isinstance(assets, list):
assets = [assets]
node_id = self.request.query_params.get('node_id')
if not node_id:
return
        node = get_object_or_none(Node, pk=node_id)
if not node:
return
node.assets.add(*assets)
def perform_create(self, serializer):
        assets = serializer.save()
self.set_assets_node(assets)
class AssetPlatformRetrieveApi(RetrieveAPIView):
queryset = Platform.objects.all()
serializer_class = serializers.PlatformSerializer
rbac_perms = {
'retrieve': 'assets.view_gateway'
}
def get_object(self):
asset_pk = self.kwargs.get('pk')
asset = get_object_or_404(Asset, pk=asset_pk)
return asset.platform
class AssetPlatformViewSet(ModelViewSet):
queryset = Platform.objects.all()
serializer_class = serializers.PlatformSerializer
filterset_fields = ['name', 'base']
search_fields = ['name']
def check_object_permissions(self, request, obj):
if request.method.lower() in ['delete', 'put', 'patch'] and obj.internal:
self.permission_denied(
request, message={"detail": "Internal platform"}
)
return super().check_object_permissions(request, obj)
class AssetsTaskMixin:
def perform_assets_task(self, serializer):
data = serializer.validated_data
action = data['action']
assets = data.get('assets', [])
if action == "refresh":
task = update_assets_hardware_info_manual.delay(assets)
else:
# action == 'test':
task = test_assets_connectivity_manual.delay(assets)
return task
def perform_create(self, serializer):
task = self.perform_assets_task(serializer)
self.set_task_to_serializer_data(serializer, task)
def set_task_to_serializer_data(self, serializer, task):
data = getattr(serializer, '_data', {})
data["task"] = task.id
setattr(serializer, '_data', data)
class AssetTaskCreateApi(AssetsTaskMixin, generics.CreateAPIView):
model = Asset
serializer_class = serializers.AssetTaskSerializer
def create(self, request, *args, **kwargs):
pk = self.kwargs.get('pk')
request.data['asset'] = pk
request.data['assets'] = [pk]
return super().create(request, *args, **kwargs)
def check_permissions(self, request):
action = request.data.get('action')
action_perm_require = {
'refresh': 'assets.refresh_assethardwareinfo',
'push_system_user': 'assets.push_assetsystemuser',
'test': 'assets.test_assetconnectivity',
'test_system_user': 'assets.test_assetconnectivity'
}
perm_required = action_perm_require.get(action)
has = self.request.user.has_perm(perm_required)
if not has:
self.permission_denied(request)
def perform_asset_task(self, serializer):
data = serializer.validated_data
action = data['action']
if action not in ['push_system_user', 'test_system_user']:
return
asset = data['asset']
system_users = data.get('system_users')
if not system_users:
system_users = asset.get_all_system_users()
if action == 'push_system_user':
task = push_system_users_a_asset.delay(system_users, asset=asset)
elif action == 'test_system_user':
task = test_system_users_connectivity_a_asset.delay(system_users, asset=asset)
else:
task = None
return task
def perform_create(self, serializer):
task = self.perform_asset_task(serializer)
if not task:
task = self.perform_assets_task(serializer)
self.set_task_to_serializer_data(serializer, task)
class AssetsTaskCreateApi(AssetsTaskMixin, generics.CreateAPIView):
model = Asset
serializer_class = serializers.AssetsTaskSerializer
class AssetGatewayListApi(generics.ListAPIView):
serializer_class = serializers.GatewayWithAuthSerializer
rbac_perms = {
'list': 'assets.view_gateway'
}
def get_queryset(self):
asset_id = self.kwargs.get('pk')
asset = get_object_or_404(Asset, pk=asset_id)
if not asset.domain:
return []
queryset = asset.domain.gateways.filter(protocol='ssh')
return queryset
class BaseAssetPermUserOrUserGroupListApi(ListAPIView):
def get_object(self):
asset_id = self.kwargs.get('pk')
asset = get_object_or_404(Asset, pk=asset_id)
return asset
def get_asset_related_perms(self):
asset = self.get_object()
nodes = asset.get_all_nodes(flat=True)
perms = AssetPermission.objects.filter(Q(assets=asset) | Q(nodes__in=nodes))
return perms
class AssetPermUserListApi(BaseAssetPermUserOrUserGroupListApi):
filterset_class = UserFilter
search_fields = ('username', 'email', 'name', 'id', 'source', 'role')
serializer_class = UserSerializer
def get_queryset(self):
perms = self.get_asset_related_perms()
users = User.objects.filter(
Q(assetpermissions__in=perms) | Q(groups__assetpermissions__in=perms)
).distinct()
return users
class AssetPermUserGroupListApi(BaseAssetPermUserOrUserGroupListApi):
serializer_class = UserGroupSerializer
def get_queryset(self):
perms = self.get_asset_related_perms()
user_groups = UserGroup.objects.filter(assetpermissions__in=perms).distinct()
return user_groups
class BaseAssetPermUserOrUserGroupPermissionsListApiMixin(generics.ListAPIView):
model = AssetPermission
serializer_class = AssetPermissionSerializer
filterset_class = AssetPermissionFilter
search_fields = ('name',)
rbac_perms = {
'list': 'perms.v
|
fako/datascope
|
src/online_discourse/elastic.py
|
Python
|
gpl-3.0
| 1,813
| 0
|
from elasticsearch import Elasticsearch
from django.conf import settings
def get_es_client(silent=False):
"""
Returns the elasticsearch client which uses the configuration file
"""
es_client = Elasticsearch([settings.ELASTIC_SEARCH_HOST],
scheme='http',
port=9200,
http_compress=True)
# test if it works
if not silent and not es_client.cat.health(request_timeout=30):
raise ValueError('Credentials do not work for Elastic search')
return es_client
def get_index_config(lang):
"""
Returns the elasticsearch index configuration.
Configures the analysers based on the language passed in.
"""
return {
"settings": {
"index": {
"number_of_shards": 1,
"number_of_replicas": 0
}
},
'mappings': {
'_doc': {
'properties': {
'title': {
'type': 'text',
'analyzer': settings.ELASTIC_SEARCH_ANALYSERS[lang]
},
'content': {
'type': 'text',
'analyzer': settings.ELASTIC_SEARCH_ANALYSERS[lang]
},
'url': {'type': 'text'},
'title_plain': {'type': 'text'},
'content_plain': {'type': 'text'},
'author': {
'type': 'keyword'
},
'source': {
'type': 'keyword'
},
'argument_score': {
'type': 'float'
}
}
}
}
}
|
asdf2014/superset
|
superset/cache_util.py
|
Python
|
apache-2.0
| 1,105
| 0
|
from superset import tables_cache
from flask import request
def view_cache_key(*unused_args, **unused_kwargs):
args_hash = hash(frozenset(request.args.items()))
return 'view/{}/{}'.format(request.path, args_hash)
def memoized_func(timeout=5 * 60, key=view_cache_key):
"""Use this decorator to cache functions that have predefined first arg.
    memoized_func uses simple_cache and stores the data in memory.
    Key is a callable function that takes function arguments and
    returns the caching key.
"""
def wrap(f):
if tables_cache:
def wrapped_f(cls, *args, **kwargs):
cache_key = key(*args, **kwargs)
o = tables_cache.get(cache_key)
                if not kwargs['force'] and o is not None:
                    return o
o = f(cls, *args, **kwargs)
tables_cache.set(cache_key, o, timeout=timeout)
return o
else:
# noop
def wrapped_f(cls, *args, **kwargs):
return f(cls, *args, **kwargs)
return wrapped_f
return wrap
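# A minimal usage sketch (hypothetical class and query, not part of this
# module). Callers must pass `force` as a keyword argument, since the cached
# wrapper reads kwargs['force'] to decide whether to bypass the cache:
#
# class Datasource(object):
#     @memoized_func(timeout=10 * 60)
#     def get_table_names(self, schema, force=False):
#         return run_expensive_query(schema)
#
# ds = Datasource()
# ds.get_table_names('main', force=False)  # served from tables_cache if present
# ds.get_table_names('main', force=True)   # recomputed and re-cached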
|
calaldees/KaraKara
|
processmedia2/processmedia_libs/processed_files_manager.py
|
Python
|
gpl-3.0
| 3,137
| 0.000638
|
import os
import shutil
import hashlib
from collections import namedtuple, defaultdict
from calaldees.files.scan import fast_scan
ProcessedFileType = namedtuple('ProcessedFileType', ('source_hash_group', 'dict_key', 'attachment_type', 'ext', 'salt'))
class ProcessedFilesManager(object):
FILE_TYPES = (
ProcessedFileType('media', 'image1', 'image', 'jpg', ''),
ProcessedFileType('media', 'image2', 'image', 'jpg', ''),
ProcessedFileType('media', 'image3', 'image', 'jpg', ''),
ProcessedFileType('media', 'image4', 'image', 'jpg', ''),
ProcessedFileType('media', 'video', 'video', 'mp4', ''),
ProcessedFileType('media', 'preview', 'preview', 'mp4', ''),
ProcessedFileType('data', 'srt', 'srt', 'srt', ''),
ProcessedFileType('data', 'tags', 'tags', 'txt', ''),
)
FILE_TYPE_LOOKUP = {
processed_file_type.attachment_type: processed_file_type
for processed_file_type in FILE_TYPES
}
def __init__(self, path):
self.path = path
def get_processed_files(self, hash_dict):
if not hash_dict:
return {}
return {
file_type.dict_key: ProcessedFile(
self.path,
(hash_dict[file_type.source_hash_group], file_type.dict_key, file_type.salt),
file_type
)
for file_type in self.FILE_TYPES
}
@property
def scan(self):
return fast_scan(self.path)
class ProcessedFile(object):
def __init__(self, path, hashs, processed_file_type):
self.hash = gen_string_hash(hashs)
self.processed_file_type = processed_file_type
self.path = path
@property
def ext(self):
return self.processed_file_type.ext
@property
def attachment_type(self):
return self.processed_file_type.attachment_type
@property
def folder(self):
return self.hash[0]
@property
def relative(self):
return os.path.join(self.folder, '{}.{}'.format(self.hash, self.ext))
@property
def absolute(self):
return os.path.abspath(os.path.join(self.path, self.relative))
    def _create_folders_if_needed(self):
os.makedirs(os.path.join(self.path, self.folder), exist_ok=True)
def move(self, source_file):
"""
It is important that 'move' is used rather than opening a stream to the
        absolute path directly.
The remote destination could be 'scp' or another remote service.
Always using move allows for this abstraction at a later date
"""
self._create_folders_if_needed()
shutil.move(source_file, self.absolute)
def copy(self, source_file):
self._create_folders_if_needed()
shutil.copy2(source_file, self.absolute)
@property
def exists(self):
return os.path.exists(self.absolute)
def gen_string_hash(hashs):
if isinstance(hashs, str):
hash_str = hashs
else:
hasher = hashlib.sha256()
hasher.update(''.join(sorted(hashs)).encode('utf-8'))
hash_str = hasher.hexdigest()
return hash_str
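# For example, gen_string_hash(('b', 'a')) equals gen_string_hash(('a', 'b')):
# the parts are sorted and joined before hashing, so the result is order
# independent. A plain string is passed through unchanged, treated as an
# already-computed hash.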
|
masterkorp/obfsproxy
|
obfsproxy/common/hmac_sha256.py
|
Python
|
bsd-3-clause
| 230
| 0.004348
|
import hashlib
import hmac
def hmac_sha256_digest(key, msg):
"""
Return the HMAC-SHA256 message authentication code of the message
    'msg' with key 'key'.
"""
return hmac.new(key, msg, hashlib.sha256).digest()
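# Example (Python 2 str-as-bytes): hmac_sha256_digest('k', 'msg') returns a
# raw 32-byte digest string.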
|
nvbn/thefuck
|
tests/rules/test_git_remote_seturl_add.py
|
Python
|
mit
| 920
| 0
|
import pytest
from thefuck.rules.git_remote_seturl_add import match, get_new_command
from thefuck.types import Command
@pytest.mark.parametrize('command', [
    Command('git remote set-url origin url', "fatal: No such remote")])
def test_match(command):
assert match(command)
@pytest.mark.parametrize('command', [
Command('git remote set-url origin url', ""),
Command('git remote add origin url', ''),
Command('git remote remove origin', ''),
Command('git remote prune origin', ''),
Command('git remote set-branches origin branch', '')])
def test_not_match(command):
assert not match(command)
@pytest.mark.parametrize('command, new_command', [
(Command('git remote set-url origin git@github.com:nvbn/thefuck.git', ''),
'git remote add origin git@github.com:nvbn/thefuck.git')])
def test_get_new_command(command, new_command):
assert get_new_command(command) == new_command
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractMyTranslations.py
|
Python
|
bsd-3-clause
| 355
| 0.028169
|
def extractMyTranslations(item):
"""
Parser for 'My Translations'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'WATTT' in item['tags']:
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False
|
awslabs/chalice
|
chalice/local.py
|
Python
|
apache-2.0
| 29,826
| 0
|
"""Dev server used for running a chalice app locally.
This is intended only for local development purposes.
"""
from __future__ import print_function
import re
import threading
import time
import uuid
import base64
import functools
import warnings
from collections import namedtuple
import json
from six.moves.BaseHTTPServer import HTTPServer
from six.moves.BaseHTTPServer import BaseHTTPRequestHandler
from six.moves.socketserver import ThreadingMixIn
from typing import (
List,
Any,
Dict,
Tuple,
Callable,
Optional,
Union,
) # noqa
from chalice.app import Chalice # noqa
from chalice.app import CORSConfig # noqa
from chalice.app import ChaliceAuthorizer # noqa
from chalice.app import CognitoUserPoolAuthorizer # noqa
from chalice.app import RouteEntry # noqa
from chalice.app import Request # noqa
from chalice.app import AuthResponse # noqa
from chalice.app import BuiltinAuthConfig # noqa
from chalice.config import Config # noqa
from chalice.compat import urlparse, parse_qs
MatchResult = namedtuple('MatchResult', ['route', 'captured', 'query_params'])
EventType = Dict[str, Any]
ContextType = Dict[str, Any]
HeaderType = Dict[str, Any]
ResponseType = Dict[str, Any]
HandlerCls = Callable[..., 'ChaliceRequestHandler']
ServerCls = Callable[..., 'HTTPServer']
class Clock(object):
def time(self):
# type: () -> float
return time.time()
def create_local_server(app_obj, config, host, port):
# type: (Chalice, Config, str, int) -> LocalDevServer
app_obj.__class__ = LocalChalice
return LocalDevServer(app_obj, config, host, port)
class LocalARNBuilder(object):
ARN_FORMAT = ('arn:aws:execute-api:{region}:{account_id}'
':{api_id}/{stage}/{method}/{resource_path}')
LOCAL_REGION = 'mars-west-1'
LOCAL_ACCOUNT_ID = '123456789012'
LOCAL_API_ID = 'ymy8tbxw7b'
LOCAL_STAGE = 'api'
def build_arn(self, method, path):
# type: (str, str) -> str
# In API Gateway the method and URI are separated by a / so typically
# the uri portion omits the leading /. In the case where the entire
# url is just '/' API Gateway adds a / to the end so that the arn ends
# with a '//'.
if path != '/':
path = path[1:]
return self.ARN_FORMAT.format(
region=self.LOCAL_REGION,
account_id=self.LOCAL_ACCOUNT_ID,
api_id=self.LOCAL_API_ID,
stage=self.LOCAL_STAGE,
method=method,
resource_path=path
)
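# Illustrative usage (not part of the original source):
# >>> LocalARNBuilder().build_arn('GET', '/foo')
# 'arn:aws:execute-api:mars-west-1:123456789012:ymy8tbxw7b/api/GET/foo'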
class ARNMatcher(object):
def __init__(self, target_arn):
# type: (str) -> None
self._arn = target_arn
def _resource_match(self, resource):
# type: (str) -> bool
# ARN matching supports two special case characters that are not
# escapable. * represents a glob which translates to a non-greedy
# match of any number of characters; ? matches any single character.
# These are easy to translate to a regex using .*? and . respectively.
escaped_resource = re.escape(resource)
resource_regex = escaped_resource.replace(r'\?', '.').replace(
r'\*', '.*?')
resource_regex = '^%s$' % resource_regex
return re.match(resource_regex, self._arn) is not None
def does_any_resource_match(self, resources):
# type: (List[str]) -> bool
for resource in resources:
if self._resource_match(resource):
return True
return False
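# Illustrative usage (not part of the original source): '*' expands to a
# non-greedy '.*?' and '?' to '.', so a wildcard ARN matches the local one.
# >>> arn = ('arn:aws:execute-api:mars-west-1:123456789012'
# ...        ':ymy8tbxw7b/api/GET/foo')
# >>> ARNMatcher(arn).does_any_resource_match(
# ...     ['arn:aws:execute-api:*:*:ymy8tbxw7b/api/GET/*'])
# True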
class RouteMatcher(object):
def __init__(self, route_urls):
# type: (List[str]) -> None
# Sorting the route_urls ensures we always check
# the concrete routes for a prefix before the
# variable/capture parts of the route, e.g
# '/foo/bar' before '/foo/{capture}'
self.route_urls = sorted(route_urls)
def match_route(self, url):
# type: (str) -> MatchResult
"""Match the url against known routes.
This method takes a concrete route "/foo/bar", and
matches it against a set of routes. These routes can
use param substitution corresponding to API gateway patterns.
For example::
match_route('/foo/bar') -> '/foo/{name}'
"""
# Otherwise we need to check for param substitution
parsed_url = urlparse(url)
query_params = parse_qs(parsed_url.query, keep_blank_values=True)
path = parsed_url.path
# API Gateway removes the trailing slash if the route is not the root
# path. We do the same here so our route matching works the same way.
if path != '/' and path.endswith('/'):
path = path[:-1]
parts = path.split('/')
captured = {}
for route_url in self.route_urls:
url_parts = route_url.split('/')
if len(parts) == len(url_parts):
for i, j in zip(parts, url_parts):
if j.startswith('{') and j.endswith('}'):
captured[j[1:-1]] = i
continue
if i != j:
break
else:
return MatchResult(route_url, captured, query_params)
raise ValueError("No matching route found for: %s" % url)
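# Illustrative usage (not part of the original source): concrete segments
# match verbatim while '{...}' segments capture path parameters.
# >>> m = RouteMatcher(['/foo/{name}', '/foo/bar'])
# >>> m.match_route('/foo/baz?x=1').captured
# {'name': 'baz'}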
class LambdaEventConverter(object):
LOCAL_SOURCE_IP = '127.0.0.1'
"""Convert an HTTP request to an event dict used by lambda."""
def __init__(self, route_matcher, binary_types=None):
# type: (RouteMatcher, List[str]) -> None
self._route_matcher = route_matcher
if binary_types is None:
binary_types = []
self._binary_types = binary_types
def _is_binary(self, headers):
# type: (Dict[str,Any]) -> bool
return headers.get('content-type', '') in self._binary_types
def create_lambda_event(self, method, path, headers, body=None):
# type: (str, str, Dict[str, str], str) -> EventType
view_route = self._route_matcher.match_route(path)
event = {
'requestContext': {
'httpMethod': method,
'resourcePath': view_route.route,
'identity': {
'sourceIp': self.LOCAL_SOURCE_IP
},
'path': path.split('?')[0],
},
'headers': {k.lower(): v for k, v in headers.items()},
'pathParameters': view_route.captured,
'stageVariables': {},
}
if view_route.query_params:
event['multiValueQueryStringParameters'] = view_route.query_params
else:
# If no query parameters are provided, API gateway maps
# this to None so we're doing this for parity.
event['multiValueQueryStringParameters'] = None
if self._is_binary(headers) and body is not None:
event['body'] = base64.b64encode(body).decode('ascii')
event['isBase64Encoded'] = True
else:
event['body'] = body
return event
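# Illustrative shape of the produced event (a sketch, not authoritative):
# >>> conv = LambdaEventConverter(RouteMatcher(['/hello/{name}']))
# >>> event = conv.create_lambda_event('GET', '/hello/world', {'X-Y': 'z'})
# >>> event['pathParameters'], event['headers']
# ({'name': 'world'}, {'x-y': 'z'})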
class LocalGatewayException(Exception):
CODE = 0
def __init__(self, headers, body=None):
# type: (HeaderType, Optional[bytes]) -> None
self.headers = headers
self.body = body
class InvalidAuthorizerError(LocalGatewayException):
CODE = 500
class ForbiddenError(LocalGatewayException):
CODE = 403
class NotAuthorizedError(LocalGatewayException):
CODE = 401
class LambdaContext(object):
def __init__(self, function_name, memory_size,
max_runtime_ms=3000, time_source=None):
# type: (str, int, int, Optional[Clock]) -> None
if time_source is None:
time_source = Clock()
self._time_source = time_source
self._start_time = self._current_time_millis()
self._max_runtime = max_runtime_ms
# Below are properties that are found on the real LambdaContext passed
# by lambda and their associated documentation.
# Name of the Lambda function that is executing.
self.function_name = function_name
# The Lambda function version that is executing. If an alias is used
|
irzaip/chippy
|
cp_hand.py
|
Python
|
lgpl-3.0
| 1,479
| 0.006085
|
###########################################
# #
# cp_hand.py #
# author: irza pulungan #
# #
# this py will forward incoming MQTT #
# message to Serial USB port arduino #
# loaded with custom sketch #
# #
###########################################
import serial
import paho.mqtt.client as mqtt
ser = serial.Serial("/dev/ttyACM0",9600)
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Conne
|
cted with result code "+str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("$SYS/#")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
if (msg.topic == "servo"):
print(str(msg.payload))
ser.write(str(msg.payload)+"\n\r")
ser.flushInput()
ser.flushOutput()
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("192.168.0.100", 1883, 60)
client.subscribe("servo",0)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
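# A quick manual test (assumes the broker address above and that the
# mosquitto clients are installed): publishing to the 'servo' topic should
# forward the payload over the serial link, e.g.
#   mosquitto_pub -h 192.168.0.100 -t servo -m "90"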
|
ericgarrigues/ansible-modules-extras
|
cloud/gandi/gandi_iface.py
|
Python
|
gpl-3.0
| 7,326
| 0.000546
|
#!/usr/bin/python
# Copyright 2013 Gandi SAS
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gandi_iface
version_added: "2.0"
short_description: create, attach, detach or delete Gandi network interfaces
description:
- Manage Gandi network interfaces
options:
state:
description:
- desired state of the resource
required: false
default: "created"
choices: ["created", "deleted"]
aliases: []
datacenter:
description:
- datacenter location for servers
required: true
choices: ["Saint Denis", "Bissen", "Baltimore"]
bandwidth:
description:
- bandwidth of the interface in bits/s (float)
required: false
vlan:
description:
- private vlan name the interface belongs to (str)
required: false
default: null
ip_address:
description:
- CIDR IPv4|IPv6 address of the interface on the vlan (str)
required: false
default: null
ip_version:
description:
- ip version of the interface (str)
required: false
default: null
requirements: [ "libcloud" ]
author: Eric Garrigues <eric@gandi.net>
'''
EXAMPLES = '''
# Basic provisioning example. Create a new iface on vlan mypvlan
# Luxembourg datacenter
- gandi_iface:
vlan: mypvlan
datacenter: "Bissen"
ip_address: 192.168.0.1
ip_version: 4
bandwidth: 50000.0
'''
import sys
USER_AGENT_PRODUCT = "Ansible-gandi"
USER_AGENT_VERSION = "v0.1"
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.gandi import GandiException
_ = Provider.GANDI
except ImportError:
print("failed=True " +
"msg='libcloud with Gandi support required for this module'")
sys.exit(1)
# Load in the libcloud secrets file
try:
import secrets
except ImportError:
secrets = None
ARGS = getattr(secrets, 'GANDI_PARAMS', ())
if not ARGS:
print("failed=True " +
"msg='Missing Gandi connection in libcloud secrets file.'")
sys.exit(1)
def unexpected_error_msg(error):
"""Create an error string based on passed in error."""
# XXX : better error management
return error
def _get_by_name(name, entities):
find = [x for x in entities if x.name == name]
return find[0] if find else None
def _get_by_id(id, entities):
find = [x for x in entities if x.id == id]
return find[0] if find else None
def get_datacenter(driver, name):
"""Get datacenter by name
"""
dcs = driver.list_locations()
return _get_by_name(name, dcs)
def get_pvlan(driver, name):
pvlans = driver.ex_list_pvlans()
return _get_by_name(name, pvlans)
def get_iface(driver, id):
ifaces = driver.ex_list_ifaces()
return _get_by_id(id, ifaces)
def get_iface_info(iface):
"""Retrieves interface information from an interace object and returns it
as a dictionary.
"""
return({
'vlan': iface.vlan is not None and iface.vlan.name or None,
'bandwidth': iface.extra.get('bandwidth'),
'datacenter_id': iface.extra.get('datacenter_id')
})
def create_iface(module, driver):
"""Creates a new pvlan.
module : AnsibleModule object
driver: authenticated libcloud driver on Gandi provider
Returns:
A Dictionary with information about the interface that was created.
"""
iface = {}
ip_address = module.params.get('ip_address')
ip_version = module.params.get('ip_version')
pvlan_name = module.params.get('vlan')
bandwidth = module.params.get('bandwidth')
datacenter = module.params.get('datacenter')
changed = False
lc_location = get_datacenter(driver, datacenter)
if not lc_location:
module.fail_json(msg='Invalid datacenter %s' % datacenter,
changed=False)
pvlan = get_pvlan(driver, pvlan_name)
# module.fail_json(msg=pvlan, changed=False)
if not pvlan and not ip_version:
module.fail_json(msg='ip_version is mandatory when not a vlan',
changed=False)
try:
iface = driver.ex_create_iface(location=lc_location,
ip_version=ip_version,
ip_address=ip_address,
vlan=pvlan,
bandwidth=bandwidth)
changed = True
except GandiException as e:
module.fail_json(msg='Unexpected error attempting to create iface')
iface_json_data = get_iface_info(iface)
return (changed, iface_json_data)
def delete_iface(module, driver, iface_id):
"""Delete an interface.
module: Ansible module object
driver: authenticated Gandi connection object
iface_id: int id of the interface
Returns a tuple with the operation status.
"""
changed = False
pvlan = None
try:
iface = get_iface(driver, iface_id)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if iface:
driver.ex_delete_iface(iface)
changed = True
return (changed, iface_id)
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(choices=['created', 'deleted'],
default='created'),
datacenter=dict(default='Bissen'),
iface_id=dict(),
ip_version=dict(),
ip_address=dict(),
vlan=dict(),
bandwidth=dict()
)
)
ip_version = module.params.get('ip_version')
ip_address = module.params.get('ip_address')
vlan_name = module.params.get('vlan')
bandwidth = module.params.get('bandwidth')
state = module.params.get('state')
dc = module.params.get('datacenter')
iface_id = module.params.get('iface_id')
changed = False
try:
gandi = get_driver(Provider.GANDI)(*ARGS)
gandi.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if not dc and state in ['created']:
module.fail_json(msg='Must specify a "datacenter"', changed=False)
json_output = {'datacenter': dc}
if state in ['deleted']:
json_output['state'] = 'deleted'
(changed, iface_id) = delete_iface(module, gandi, iface_id)
json_output['iface_id'] = iface_id
elif state in ['created']:
json_output['state'] = 'created'
(changed, iface_data) = create_iface(module, gandi)
json_output['iface_data'] = iface_data
json_output['changed'] = changed
print(json.dumps(json_output))
sys.exit(0)
from ansible.module_utils.basic import *
main()
|
ericpp/hippyvm
|
hippy/module/date/dateinterval_klass.py
|
Python
|
mit
| 9,931
| 0.003927
|
from rpython.rtyper.lltypesystem import rffi, lltype
from rpython.rlib.rarithmetic import intmask
from hippy import consts
from hippy.error import PHPException
from hippy.builtin import wrap_method, ThisUnwrapper, StringArg
from hippy.builtin_klass import GetterSetterWrapper, k_Exception
from hippy.klass import def_class
from hippy.module.date import timelib
from hippy.objects.instanceobject import W_InstanceObject
class W_DateInterval(W_InstanceObject):
pass
@wrap_method(['interp', ThisUnwrapper(W_DateInterval), StringArg(None)],
name='DateInterval::__construct')
def construct(interp, this, spec):
exc_obj = k_Exception.call_args(
interp, [interp.space.wrap('Unknown or bad format (%s)' % spec)]
)
if not (len(spec) > 1 and spec[0] == 'P'):
raise PHPException(exc_obj)
index = 1
time = False
formats = {'y': 0, 'm': 0, 'd': 0, 'h': 0, 'i': 0, 's': 0}
while index < len(spec):
format = None
times = 0
if spec[index] == 'T':
index += 1
time = True
while spec[index].isdigit():
times = times * 10
times = times + (ord(spec[index]) - ord('0'))
index += 1
if times:
if spec[index] == 'Y':
format = 'y'
elif spec[index] == 'M' and not time:
format = 'm'
elif spec[index] == 'D':
format = 'd'
elif spec[index] == 'W':
format = 'd'
times *= 7
elif spec[index] == 'H':
format = 'h'
elif spec[index] == 'M' and time:
format = 'i'
elif spec[index] == 'S':
format = 's'
if format is None or formats[format]:
    raise PHPException(exc_obj)
formats[format] = times
index += 1
this.time_diff = timelib.timelib_rel_time_ctor()
this.time_diff.c_y = rffi.cast(lltype.Signed, formats['y'])
this.time_diff.c_m = rffi.cast(lltype.Signed, formats['m'])
this.time_diff.c_d = rffi.cast(lltype.Signed, formats['d'])
this.time_diff.c_h = rffi.cast(lltype.Signed, formats['h'])
this.time_diff.c_i = rffi.cast(lltype.Signed, formats['i'])
this.time_diff.c_s = rffi.cast(lltype.Signed, formats['s'])
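# Illustrative behaviour (a sketch, not from the original source): an
# ISO-8601 duration such as 'P1Y2M3DT4H5M6S' fills time_diff with y=1, m=2,
# d=3, h=4, i=5, s=6, and a week designator like 'P2W' is folded into days
# (d=14).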
@wrap_method(['interp', StringArg(None)],
name='DateInterval::createFromDateString', flags=consts.ACC_STATIC)
def create_from_date_string(interp, string):
spec = "P%sY%sM%sDT%sH%sM%sS" % timelib.str_interval_to_time(string)
return k_DateInterval.call_args(interp, [interp.space.wrap(spec)])
@wrap_method(['interp', ThisUnwrapper(W_DateInterval), StringArg(None)], name='DateInterval::format')
def format(interp, this, format):
y = this.time_diff.c_y
m = this.time_diff.c_m
d = this.time_diff.c_d
h = this.time_diff.c_h
i = this.time_diff.c_i
s = this.time_diff.c_s
index = 0
results = []
while index < len(format):
c = format[index]
if c == '%':
index += 1
next_c = format[index]
if next_c == 'Y':
results.append(timelib.format_to(2, y))
elif next_c == 'y':
results.append("%d" % y)
elif next_c == 'M':
results.append(timelib.format_to(2, m))
elif next_c == 'm':
results.append("%d" % m)
elif next_c == 'D':
results.append(timelib.format_to(2, d))
elif next_c == 'd':
results.append("%d" % d)
elif next_c == 'H':
results.append(timelib.format_to(2, h))
elif next_c == 'h':
results.append("%d" % h)
elif next_c == 'I':
results.append(timelib.format_to(2, i))
elif next_c == 'i':
results.append("%d" % i)
elif next_c == 'S':
results.append(timelib.format_to(2, s))
elif next_c == 's':
results.append("%d" % s)
elif next_c == 'a':
if this.time_diff.c_d != -99999:
results.append("%d" % this.time_diff.c_days)
else:
results.append("(unknown)")
elif next_c == 'r':
results.append("-" if int(this.time_diff.c_invert) else "")
elif next_c == 'R':
results.append("-" if int(this.time_diff.c_invert) else "+")
elif next_c == '%':
results.append('%')
else:
results.append("%%%s" % next_c)
else:
results.append(c)
index += 1
return interp.space.wrap("".join(results))
def get_y(interp, this):
return interp.space.wrap(this.time_diff.c_y)
def set_y(interp, this, w_newvalue):
this.time_diff.c_y = interp.space.int_w(w_newvalue)
def get_m(interp, this):
return interp.space.wrap(this.time_diff.c_m)
def set_m(interp, this, w_newvalue):
this.time_diff.c_m = interp.space.int_w(w_newvalue)
def get_d(interp, this):
return interp.space.wrap(this.time_diff.c_d)
def set_d(interp, this, w_newvalue):
this.time_diff.c_d = interp.space.int_w(w_newvalue)
def get_h(interp, this):
return interp.space.wrap(this.time_diff.c_h)
def set_h(interp, this, w_newvalue):
this.time_diff.c_h = interp.space.int_w(w_newvalue)
def get_i(interp, this):
return interp.space.wrap(this.time_diff.c_i)
def set_i(interp, this, w_newvalue):
this.time_diff.c_i = interp.space.int_w(w_newvalue)
def get_s(interp, this):
return interp.space.wrap(this.time_diff.c_s)
def set_s(interp, this, w_newvalue):
this.time_diff.c_s = interp.space.int_w(w_newvalue)
def get_invert(interp, this):
return interp.space.wrap(intmask(this.time_diff.c_invert))
def set_invert(interp, this, w_newvalue):
this.time_diff.c_invert = rffi.cast(rffi.INT, interp.space.int_w(w_newvalue))
def get_days(interp, this):
return interp.space.wrap(this.time_diff.c_days or False)
def set_days(interp, this, w_newvalue):
this.time_diff.c_days = interp.space.int_w(w_newvalue)
def get_weekday(interp, this):
return interp.space.wrap(intmask(this.time_diff.c_weekday))
def set_weekday(interp, this, value):
raise NotImplementedError("bogus cast!")
this.time_diff.c_weekday = rffi.cast(rffi.INT, value)
def get_weekday_behavior(interp, this):
return interp.space.wrap(intmask(this.time_diff.c_weekday_behavior))
def set_weekday_behavior(interp, this, value):
raise NotImplementedError("bogus cast!")
this.time_diff.c_weekday_behavior = rffi.cast(rffi.INT, value)
def get_first_last_day_of(interp, this):
return interp.space.wrap(intmask(this.time_diff.c_first_last_day_of))
def set_first_last_day_of(interp, this, value):
raise NotImplementedError("bogus cast!")
this.time_diff.c_first_last_day_of = rffi.cast(rffi.INT, value)
def get_special_type(interp, this):
return interp.space.wrap(intmask(this.time_diff.c_special.c_type))
def set_special_type(interp, this, value):
raise NotImplementedError("bogus cast!")
this.time_diff.c_special.c_type = rffi.cast(rffi.UINT, value)
def get_special_amount(interp, this):
return interp.space.wrap(intmask(this.time_diff.c_special.c_amount))
def set_special_amount(interp, this, value):
raise NotImplementedError("bogus cast!")
this.time_diff.c_special.c_amount = rffi.cast(lltype.Signed, value)
def get_have_weekday_relative(interp
|
, this):
return interp.space.wrap(intmask(this.time_diff.c_have_weekday_relative))
def set_have_weekday_relative(interp, this, value):
raise NotImplementedError("bogus cast!")
this.time_diff.c_have_weekday_relative = rffi.cast(rffi.UINT, value)
def get_have_special_relative(interp, this):
return interp.space.wrap(intmask(this.time_diff.c_have_special_relative))
def set_have_special_relative(interp, this, value):
raise NotImplementedError("bogus cast!")
this.time_diff.
|
c_have_special_relative = rffi.cast(rffi.UINT, value)
k_DateInterval = def_class(
'DateInterval',
[const
|
lmjohns3/theanets
|
theanets/graph.py
|
Python
|
mit
| 28,912
| 0.001349
|
# -*- coding: utf-8 -*-
r'''This module contains a base class for modeling computation graphs.'''
import downhill
import gzip
import hashlib
import numpy as np
import pickle
import theano
import time
import warnings
from . import layers
from . import losses
from . import regularizers
from . import trainer
from . import util
class Network(object):
'''The network class encapsulates a network computation graph.
Notes
-----
Computation graphs are organized into :ref:`layers <layers>`. Each layer
receives one or more arrays of input data, transforms them, and generates
one or more arrays of output data.
Outputs in a computation graph are named according to their layer and output
type, so the 'pre' output of a layer named 'hid1' would be named 'hid1:pre'.
The 'out' output is the default output for a layer. By default the last
layer in a network is named 'out'.
The parameters in a network graph are optimized by minimizing a :ref:`loss
function <losses>` with respect to some set of training data. Typically the
value produced by 'out:out' is compared to some target value, creating an
error value of some sort. This error value is then propagated back through
the computation graph to update the parameters in the model.
Parameters
----------
layers : int, tuple, dict, or :class:`Layer <theanets.layers.base.Layer>`
A sequence of values specifying the layer configuration for the network.
For more information, please see :ref:`guide-creating-specifying-layers`.
loss : str or :class:`Loss <theanets.losses.Loss>`
The name of a loss function to optimize when training this network
model.
weighted : bool, optional
If True, optimize this model using a "weighted" loss. Weighted losses
typically require an additional array as input during optimization.
For more information, see :ref:`losses-weighted`. Defaults to False.
rng : int or RandomState, optional
A seed or numpy ``RandomState`` instance for generating randomness in
the model. Defaults to 13.
Attributes
----------
layers : list of :class:`Layer <theanets.layers.base.Layer>`
A list of the layers in this network model.
losses : list of :class:`Loss <theanets.losses.Loss>`
A list of losses to be computed when optimizing this network model.
'''
DEFAULT_OUTPUT_ACTIVATION = 'linear'
'''Default activation for the output layer.'''
INPUT_NDIM = 2
'''Number of dimensions for holding input data arrays.'''
OUTPUT_NDIM = 2
'''Number of dimensions for holding output data arrays.'''
def __init__(self, layers=(), loss='mse', weighted=False, rng=13):
self._graphs = {} # cache of symbolic computation graphs
self._functions = {} # cache of callable feedforward functions
self._rng = rng
# create layers based on specs provided in the constructor.
self.layers = []
for i, layer in enumerate(layers):
first = i == 0
last = i == len(layers) - 1
name = 'in' if first else 'out' if last else 'hid{}'.format(i)
activation = self.DEFAULT_OUTPUT_ACTIVATION if last else 'relu'
self.add_layer(layer=layer, name=name, activation=activation)
# bind layers to this graph after construction. this finalizes layer
# shapes and does other consistency checks based on the entire graph.
[l.bind(self) for l in self.layers]
# create a default loss (usually).
self.losses = []
if loss and self.layers:
self.set_loss(loss,
weighted=weighted,
target=self.OUTPUT_NDIM,
output_name=self.layers[-1].output_name)
def add_layer(self, layer=None, **kwargs):
'''Add a :ref:`layer <layers>` to our network graph.
Parameters
----------
layer : int, tuple, dict, or :class:`Layer <theanets.layers.base.Layer>`
A value specifying the layer to add. For more information, please
see :ref:`guide-creating-specifying-layers`.
'''
# if the given layer is a Layer instance, just add it and move on.
if isinstance(layer, layers.Layer):
self.layers.append(layer)
return
form = kwargs.pop('form', 'ff' if self.layers else 'input').lower()
if isinstance(layer, util.basestring):
if not layers.Layer.is_registered(layer):
raise util.ConfigurationError('unknown layer type: {}'.format(layer))
form = layer
layer = None
# if layer is a tuple/list of integers, assume it's a shape.
if isinstance(layer, (tuple, list)) and all(isinstance(x, int) for x in layer):
kwargs['shape'] = tuple(layer)
layer = None
# if layer is some other tuple/list, assume it's a list of:
# - the name of a layers.Layer class (str)
# - the name of an activation function (str)
# - the number of units in the layer (int)
if isinstance(layer, (tuple, list)):
for el in layer:
if isinstance(el, util.basestring) and layers.Layer.is_registered(el):
form = el
elif isinstance(el, util.basestring):
kwargs['activation'] = el
elif isinstance(el, int):
if 'size' in kwargs:
raise util.ConfigurationError(
'duplicate layer sizes! {}'.format(kwargs))
kwargs['size'] = el
layer = None
# if layer is a dictionary, try to extract a form for the layer, and
# override our default keyword arguments with the rest.
if isinstance(layer, dict):
for key, value in layer.items():
if key == 'form':
form = value.lower()
else:
kwargs[key] = value
layer = None
# if neither shape nor size has been specified yet, check that the
# "layer" param is an int and use it for "size".
if 'shape' not in kwargs and 'size' not in kwargs and isinstance(layer, int):
kwargs['size'] = layer
# if it hasn't been provided in some other way yet, set input
# dimensionality based on the model.
if form == 'input' and 'shape' not in kwargs:
kwargs.setdefault('ndim', self.INPUT_NDIM)
# set some default layer parameters.
if form != 'input':
kwargs.setdefault('inputs', self.layers[-1].output_name)
kwargs.setdefault('rng', self._rng)
if form.lower() == 'tied' and 'partner' not in kwargs:
# we look backward through our list of layers for a partner.
# any "tied" layer that we find increases a counter by one,
# and any "untied" layer decreases the counter by one. our
# partner is the first layer we find with count zero.
#
# this is intended to handle the hopefully common case of a
# (possibly deep) tied-weights autoencoder.
tied = 1
partner = None
for l in self.layers[::-1]:
tied += 1 if isinstance(l, layers.Tied) else -1
if tied == 0:
partner = l.name
break
else:
raise util.ConfigurationError(
'cannot find partner for "{}"'.format(kwargs))
kwargs['partner'] = partner
layer = layers.Layer.build(form, **kwargs)
# check that graph inputs have unique names.
if isinstance(layer, layers.Input):
if any(layer.name == i.name for i in self.inputs):
raise util.ConfigurationError(
'"{}": duplicate input name!'.format(layer.name))
self.layers.append(layer)
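    # Illustrative layer specs accepted above (a sketch, not exhaustive):
    # an int is a size, a tuple of ints is a shape, a tuple such as
    # ('rnn', 'relu', 100) mixes a registered form, an activation and a
    # size, and a dict may carry a 'form' key plus keyword overrides.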
def add_loss(self, loss=None, **kwargs):
'''Add a :ref:`loss function <losses>` to the model.
Parameters
|
Azure/azure-sdk-for-python
|
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_12_01_preview/aio/_container_registry_management_client.py
|
Python
|
mit
| 6,266
| 0.003671
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Awaitable, Optional, TYPE_CHECKING
from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
from .. import models
from ._configuration import ContainerRegistryManagementClientConfiguration
from .operations import ExportPipelinesOperations, ImportPipelinesOperations, Operations, PipelineRunsOperations, PrivateEndpointConnectionsOperations, RegistriesOperations, ReplicationsOperations, WebhooksOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class ContainerRegistryManagementClient:
"""ContainerRegistryManagementClient.
:ivar export_pipelines: ExportPipelinesOperations operations
:vartype export_pipelines:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.ExportPipelinesOperations
:ivar registries: RegistriesOperations operations
:vartype registries:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.RegistriesOperations
:ivar import_pipelines: ImportPipelinesOperations operations
:vartype import_pipelines:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.ImportPipelinesOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.Operations
:ivar pipeline_runs: PipelineRunsOperations operations
:vartype pipeline_runs:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.PipelineRunsOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.PrivateEndpointConnectionsOperations
:ivar replications: ReplicationsOperations operations
:vartype replications:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.ReplicationsOperations
:ivar webhooks: WebhooksOperations operations
:vartype webhooks:
azure.mgmt.containerregistry.v2019_12_01_preview.aio.operations.WebhooksOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: The Microsoft Azure subscription ID.
:type subscription_id: str
:param base_url: Service URL. Default value is 'https://management.azure.com'.
:type base_url: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ContainerRegistryManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.export_pipelines = ExportPipelinesOperations(self._client, self._config, self._serialize, self._deserialize)
self.registries = RegistriesOperations(self._client, self._config, self._serialize, self._deserialize)
self.import_pipelines = ImportPipelinesOperations(self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.pipeline_runs = PipelineRunsOperations(self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.replications = ReplicationsOperations(self._client, self._config, self._serialize, self._deserialize)
self.webhooks = WebhooksOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request: HttpRequest,
**kwargs: Any
) -> Awaitable[AsyncHttpResponse]:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = await client._send_request(request)
<AsyncHttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.AsyncHttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "ContainerRegistryManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
|
rsnakamura/oldape
|
apetools/devices/iosdevice.py
|
Python
|
apache-2.0
| 3,060
| 0.003922
|
#from apetools.commands.ifconfig import IfconfigCommand
#from apetools.commands.iwconfig import Iwconfig
from apetools.commons.enumerations import OperatingSystem
from apetools.devices.basedevice import BaseDevice
from apetools.commons.errors import ConfigurationError
class IosDevice(BaseDevice):
"""
A class to query ios devices (pretty much nothing is implemented on the ipad).
* this is mostly a dummy to hold settings
"""
def __init__(self, *args, **kwargs):
"""
:param:
- `connection`: a connection to the device
- `interface`: the name of the test interface (to get the ip address)
"""
super(IosDevice, self).__init__(*args, **kwargs)
return
@property
def address(self):
"""
:return: the address of the device
:raise: ConfigurationError if not set by user
"""
if self._address is None:
raise ConfigurationError("'test_address' must be set in config for IOS devices")
return self._address
@property
def mac_address(self):
"""
Not implemented
:return: the MAC address of the device
"""
self.logger.warning('mac address query not implemented')
return 'NA'
@property
def bssid(self):
"""
Not implemented
"""
self.logger.warning('bssid query not implemented')
return 'NA'
@property
def ssid(self):
"""
Not implemented
"""
self.logger.warning('ssid query not implemented')
return 'NA'
@property
def noise(self):
"""
Not Implemented
"""
self.logger.warning('noise query not implemented')
return 'NA'
@property
def channel(self):
"""
Not implemented
"""
self.logger.warning('channel not implemented')
return "NA"
@property
def rssi(self):
"""
Not implemented
:return: rssi from the wifi_query
"""
self.logger.warning('rssi query not implemented')
return "NA"
@property
def bitrate(self):
"""
Not implemented
:return: NA
"""
self.logger.warning("bitrate query not implemented")
return "NA"
def disable_wifi(self):
"""
Not implemented
"""
self.logger.warning('disable wifi not implemented')
return
def enable_wifi(self):
"""
Not implemented
"""
self.logger.warning('enable wifi not implemented')
return
def log(self, message):
"""
Sends the message to the syslog (Not implemented)
:param:
- `message`: a string to send to the syslog
:postcondition: message sent to the syslog
"""
# This uses the call interface because the connection has its own logger property
self.logger.warning('log not implemented')
return
# end IosDevice
|
apophys/freeipa
|
ipaserver/install/server/install.py
|
Python
|
gpl-3.0
| 44,536
| 0.000135
|
#
# Copyright (C) 2015 FreeIPA Contributors see COPYING for license
#
from __future__ import print_function
import errno
import logging
import os
import pickle
import shutil
import sys
import tempfile
import textwrap
import six
from ipalib.install import certmonger, sysrestore
from ipapython import ipautil
from ipapython.ipautil import (
format_netloc, ipa_generate_password, run, user_input)
from ipapython.admintool import ScriptError
from ipaplatform import services
from ipaplatform.paths import paths
from ipaplatform.tasks import tasks
from ipalib import api, errors, x509
from ipalib.constants import DOMAIN_LEVEL_0
from ipalib.util import (
validate_domain_name,
no_matching_interface_for_ip_address_warning,
)
import ipaclient.install.ntpconf
from ipaserver.install import (
adtrust, bindinstance, ca, dns, dsinstance,
httpinstance, installutils, kra, krbinstance,
ntpinstance, otpdinstance, custodiainstance, replication, service,
sysupgrade)
from ipaserver.install.installutils import (
IPA_MODULES, BadHostError, get_fqdn, get_server_ip_address,
is_ipa_configured, load_pkcs12, read_password, verify_fqdn,
update_hosts_file)
if six.PY3:
unicode = str
try:
from ipaserver.install import adtrustinstance
_server_trust_ad_installed = True
except ImportError:
_server_trust_ad_installed = False
NoneType = type(None)
logger = logging.getLogger(__name__)
SYSRESTORE_DIR_PATH = paths.SYSRESTORE
def validate_dm_password(password):
if len(password) < 8:
raise ValueError("Password must be at least 8 characters long")
if any(ord(c) < 0x20 for c in password):
raise ValueError("Password must not contain control characters")
if any(ord(c) >= 0x7F for c in password):
raise ValueError("Password must only contain ASCII characters")
# Disallow characters that pkisilent doesn't process properly:
bad_characters = '\\'
if any(c in bad_characters for c in password):
raise ValueError('Password must not contain these characters: %s' %
', '.join('"%s"' % c for c in bad_characters))
# TODO: Check https://fedorahosted.org/389/ticket/47849
# Actual behavior of setup-ds.pl is that it does not accept white
# space characters in password when called interactively but does when
# provided such password in INF file. But it ignores leading and trailing
# white spaces in INF file.
# Disallow leading/trailing whitespace
if password.strip() != password:
raise ValueError('Password must not start or end with whitespace.')
def validate_admin_password(password):
if len(password) < 8:
raise ValueError("Password must be at least 8 characters long")
if any(ord(c) < 0x20 for c in password):
raise ValueError("Password must not contain control characters")
if any(ord(c) >= 0x7F for c in password):
raise ValueError("Password must only contain ASCII characters")
# Disallow characters that pkisilent doesn't process properly:
bad_characters = '\\'
if any(c in bad_characters for c in password):
raise ValueError('Password must not contain these characters: %s' %
', '.join('"%s"' % c for c in bad_characters))
def read_cache(dm_password):
"""
Returns a dict of cached answers or empty dict if no cache file exists.
"""
if not os.path.isfile(paths.ROOT_IPA_CACHE):
return {}
top_dir = tempfile.mkdtemp("ipa")
fname = "%s/cache" % top_dir
try:
installutils.decrypt_file(paths.ROOT_IPA_CACHE,
fname,
dm_password,
top_dir)
except Exception as e:
shutil.rmtree(top_dir)
raise Exception("Decryption of answer cache in %s failed, please "
"check your password." % paths.ROOT_IPA_CACHE)
try:
with open(fname, 'rb') as f:
try:
optdict = pickle.load(f)
except Exception as e:
raise Exception("Parse error in %s: %s" %
(paths.ROOT_IPA_CACHE, str(e)))
except IOError as e:
raise Exception("Read error in %s: %s" %
(paths.ROOT_IPA_CACHE, str(e)))
finally:
shutil.rmtree(top_dir)
# These are the only ones that may be overridden
try:
del optdict['external_cert_files']
except KeyError:
pass
return optdict
def write_cache(options):
"""
Takes a dict as input and writes a cached file of answers
"""
top_dir = tempfile.mkdtemp("ipa")
fname = "%s/cache" % top_dir
try:
with open(fname, 'wb') as f:
pickle.dump(options, f)
installutils.encrypt_file(fname,
paths.ROOT_IPA_CACHE,
options['dm_password'],
top_dir)
except IOError as e:
raise Exception("Unable to cache command-line options %s" % str(e))
finally:
shutil.rmtree(top_dir)
def read_host_name(host_default, no_host_dns=False):
print("Enter the fully qualified domain name of the computer")
print("on which you're setting up server software. Using the form")
print("<hostname>.<domainname>")
print("Example: master.example.com.")
print("")
print("")
if host_default == "":
host_default = "master.example.com"
host_name = user_input("Server host name", host_default, allow_empty=False)
print("")
verify_fqdn(host_name, no_host_dns)
return host_name
def read_domain_name(domain_name, unattended):
print("The domain name has been determined based on the host name.")
print("")
if not unattended:
domain_name = str(user_input("Please confirm the domain name",
domain_name))
print("")
return domain_name
def read_realm_name(domain_name, unattended):
print("The kerberos protocol requires a Realm name to be defined.")
print("This is typically the domain name converted to uppercase.")
print("")
if unattended:
return domain_name.upper()
realm_name = str(user_input("Please provide a realm name",
domain_name.upper()))
upper_dom = realm_name.upper()
if upper_dom != realm_name:
print("An upper-case realm name is required.")
if not user_input("Do you want to use " + upper_dom +
" as realm name?", True):
raise ScriptError(
"An upper-case realm name is required. Unable to continue.")
else:
realm_name = upper_dom
print("")
return realm_name
def read_dm_password():
print("Certain directory server operations require an administrative user.")
print("This user is referred to as the Directory Manager and has full "
"access")
print("to the Directory for system management tasks and will be added to "
"the")
print("instance of directory server created for IPA.")
print("The password must be at least 8 characters long.")
print("")
# TODO: provide the option of generating a random password
dm_password = read_password("Directory Manager",
validator=validate_dm_password)
return dm_password
def read_admin_password():
print("The IPA server requires an administrative user, named 'admin'.")
print("This user is a regular system account used for IPA server "
"administration.")
print("")
# TODO: provide the option of generating a random password
admin_password = read_password("IPA admin",
validator=validate_admin_password)
return admin_password
def check_dirsrv(unattended):
(ds_unsecure, ds_secure) = dsinstance.check_ports()
if not ds_unsecure or not ds_secure:
msg = ("IPA requires ports 389 and 636 for the Directory Server.\n"
"These are currently in use:\n")
if not ds_unsecure:
msg += "\t389\n"
if
|
TaiSakuma/scribblers
|
tests/compare_obj.py
|
Python
|
bsd-3-clause
| 1,134
| 0.003527
|
# Tai Sakuma <tai.sakuma@gmail.com>
import numbers
##__________________________________________________________________||
def cmp_obj_list_almost_equal(list1, list2, rtol=1e-05, atol=1e-08):
if not len(list1) == len(list2):
return False
for obj1, obj2 in zip(list1, list2):
if not cmp_obj_almost_equal(obj1, obj2, rtol=rtol, atol=atol):
return False
return True
def cmp_obj_almost_equal(obj1, obj2, rtol=1e-05, atol=1e-08):
# use the same formula as in
# https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.isclose.html
attrs1 = list(obj1._attrdict.keys())
attrs2 = list(obj2._attrdict.keys())
if not attrs1 == attrs2:
return False
for attr in attrs2:
v1 = getattr(obj1, attr)
v2 = getattr(obj2, attr)
if v1 == v2:
continue
if isinstance(v2, numbers.Integral):
return False
if isinstance(v2, numbers.Real):
if abs(v1 - v2) > (atol + rtol * abs(v2)):
return False
return True
##__________________________________________________________________||
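# Illustrative usage (assumes objects expose an ``_attrdict`` mapping, as in
# the scribblers object model):
# >>> class Obj(object):
# ...     def __init__(self, **kw):
# ...         self._attrdict = kw
# ...         self.__dict__.update(kw)
# >>> cmp_obj_almost_equal(Obj(x=1.0), Obj(x=1.0 + 1e-09))
# True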
|
edisonlz/fruit
|
web_project/base/site-packages/gridfs/__init__.py
|
Python
|
apache-2.0
| 11,413
| 0
|
# Copyright 2009-2012 10gen, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GridFS is a specification for storing large objects in Mongo.
The :mod:`gridfs` package is an implementation of GridFS on top of
:mod:`pymongo`, exposing a file-like interface.
.. mongodoc:: gridfs
"""
from gridfs.errors import (NoFile,
UnsupportedAPI)
from gridfs.grid_file import (GridIn,
GridOut)
from pymongo import (ASCENDING,
DESCENDING)
from pymongo.database import Database
class GridFS(object):
"""An instance of GridFS on top of a single Database.
"""
def __init__(self, database, collection="fs"):
"""Create a new instance of :class:`GridFS`.
Raises :class:`TypeError` if `database` is not an instance of
:class:`~pymongo.database.Database`.
:Parameters:
- `database`: database to use
- `collection` (optional): root collection to use
.. versionadded:: 1.6
The `collection` parameter.
.. mongodoc:: gridfs
"""
if not isinstance(database, Database):
raise TypeError("database must be an instance of Database")
self.__database = database
self.__collection = database[collection]
self.__files = self.__collection.files
self.__chunks = self.__collection.chunks
connection = database.connection
if not hasattr(connection, 'is_primary') or connection.is_primary:
self.__chunks.ensure_index([("files_id", ASCENDING),
("n", ASCENDING)],
unique=True)
def new_file(self, **kwargs):
"""Create a new file in GridFS.
Returns a new :class:`~gridfs.grid_file.GridIn` instance to
which data can be written. Any keyword arguments will be
passed through to :meth:`~gridfs.grid_file.GridIn`.
If the ``"_id"`` of the file is manually specified, it must
not already exist in GridFS. Otherwise
:class:`~gridfs.errors.FileExists` is raised.
:Parameters:
- `**kwargs` (optional): keyword arguments for file creation
.. versionadded:: 1.6
"""
return GridIn(self.__collection, **kwargs)
def put(self, data, **kwargs):
"""Put data in GridFS as a new file.
Equivalent to doing::
try:
f = new_file(**kwargs)
f.write(data)
finally:
f.close()
`data` can be either an instance of :class:`str` (:class:`bytes`
in python 3) or a file-like object providing a :meth:`read` method.
If an `encoding` keyword argument is passed, `data` can also be a
:class:`unicode` (:class:`str` in python 3) instance, which will
be encoded as `encoding` before being written. Any keyword arguments
will be passed through to the created file - see
:meth:`~gridfs.grid_file.GridIn` for possible arguments. Returns the
``"_id"`` of the created file.
If the ``"_id"`` of the file is manually specified, it must
not already exist in GridFS. Otherwise
:class:`~gridfs.errors.FileExists` is raised.
:Parameters:
- `data`: data to be written as a file.
- `**kwargs` (optional): keyword arguments for file creation
.. versionadded:: 1.9
The ability to write :class:`unicode`, if an `encoding` has
been specified as a keyword argument.
.. versionadded:: 1.6
"""
grid_file = GridIn(self.__collection, **kwargs)
# Start a request - necessary if w=0, harmless otherwise
request = self.__collection.database.connection.start_request()
try:
try:
grid_file.write(data)
finally:
grid_file.close()
finally:
# Ensure request is ended even if close() throws error
request.end()
return grid_file._id
def get(self, file_id):
"""Get a file from GridFS by ``"_id"``.
Returns an instance of :class:`~gridfs.grid_file.GridOut`,
which provides a file-like interface for reading.
:Parameters:
- `file_id`: ``"_id"`` of the file to get
.. versionadded:: 1.6
"""
return GridOut(self.__collection, file_id)
def get_version(self, filename=None, version=-1, **kwargs):
"""Get a file from GridFS by ``"filename"`` or metadata fields.
Returns a version of the file in GridFS whose filename matches
`filename` and whose metadata fields match the supplied keyword
arguments, as an instance of :class:`~gridfs.grid_file.GridOut`.
Version numbering is a convenience atop the GridFS API provided
by MongoDB. If more than one file matches the query (either by
`filename` alone, by metadata fields, or by a combination of
both), then version ``-1`` will be the most recently uploaded
matching file, ``-2`` the second most recently
uploaded, etc. Version ``0`` will be the first version
uploaded, ``1`` the second version, etc. So if three versions
have been uploaded, then version ``0`` is the same as version
``-3``, version ``1`` is the same as version ``-2``, and
version ``2`` is the same as version ``-1``.
Raises :class:`~gridfs.errors.NoFile` if no such version of
that file exists.
An index on ``{filename: 1, uploadDate: -1}`` will
automatically be created when this method is called the first
time.
:Parameters:
- `filename`: ``"filename"`` of the file to get, or `None`
- `version` (optional): version of the file to get (defaults
to -1, the most recent version uploaded)
- `**kwargs` (optional): find files by custom metadata.
.. versionchanged:: 1.11
`filename` defaults to None;
.. versionadded:: 1.11
Accept keyword arguments to find files by custom metadata.
.. versionadded:: 1.9
"""
connection = self.__database.connection
if not hasattr(connection, 'is_primary') or connection.is_primary:
self.__files.ensure_index([("filename", ASCENDING),
("uploadDate", DESCENDING)])
query = kwargs
if filename is not None:
query["filename"] = filename
cursor = self.__files.find(query)
if version < 0:
skip = abs(version) - 1
cursor.limit(-1).skip(skip).sort("uploadDate", DESCENDING)
else:
cursor.limit(-1).skip(version).sort("uploadDate", ASCENDING)
try:
grid_file = cursor.next()
return GridOut(self.__collection, file_document=grid_file)
except StopIteration:
raise NoFile("no version %d for filename %r" % (version, filename))
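    # Illustrative versioning behaviour (a sketch, not from pymongo's docs):
    #   fs = GridFS(db)
    #   fs.put(b'v1', filename='f'); fs.put(b'v2', filename='f')
    #   fs.get_version('f', 0).read()    # first upload  -> b'v1'
    #   fs.get_version('f', -1).read()   # most recent   -> b'v2'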
def get_last_version(self, filename=None, **kwargs):
"""Get the most recent version of a file in GridFS by ``"filename"``
or metadata fields.
Equivalent to calling :meth:`get_version` with the default
`version` (``-1``).
:Parameters:
- `filename`: ``"filename"`` of the file to get, or `None`
- `**kwargs` (optional): find files by custom metadata.
.. versionchanged:: 1.11
`filename` defaults to None;
.. versionadded:: 1.11
Accept keyword arguments to find files by custom metadata. See
:meth:`get_version`.
.. versionadded:: 1.6
|
xuweiliang/Codelibrary
|
openstack_dashboard/dashboards/admin/overview/views.py
|
Python
|
apache-2.0
| 3,126
| 0.00032
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.template.defaultfilters import floatformat # noqa
from django.utils import translation
from django.utils.translation import ugettext_lazy as _
from horizon import views
from horizon import exceptions
from horizon.utils import csvbase
from oslo_log import log as logging
from openstack_dashboard import api
from openstack_dashboard import usage
LOG = logging.getLogger(__name__)
class GlobalUsageCsvRenderer(csvbase.BaseCsvResponse):
columns = [_("Project Name"), _("VCPUs"), _("RAM (MB)"),
_("Disk (GB)"), _("Usage (Hours)")]
def get_row_data(self):
for u in self.context['usage'].usage_list:
yield (u.project_name or u.tenant_id,
u.vcpus,
u.memory_mb,
u.local_gb,
floatformat(u.vcpu_hours, 2))
class GlobalOverview(usage.UsageView):
table_class = usage.GlobalUsageTable
usage_class = usage.GlobalUsage
template_name = 'admin/overview/usage.html'
csv_response_class = GlobalUsageCsvRenderer
def get_context_data(self, **kwargs):
context = super(GlobalOverview, self).get_context_data(**kwargs)
# #context['monitoring'] = getattr(settings, 'EXTERNAL_MONITORING', [])
# LOG.info("usage =================== %s" % context['usage'].__dict__)
LOG.info("aaaaaaaaaaaaaaaaaaaaaaa =================== %s" % context)
return context
# def get_data(self):
# data = super(GlobalOverview, self).get_data()
# # Pre-fill project names
# try:
# projects, has_more = api.keystone.tenant_list(self.request)
# except Exception:
# projects = []
# exceptions.handle(self.request,
# _('Unable to retrieve project list.'))
# for instance in data:
# project = [t for t in projects if t.id == instance.tenant_id]
# # If we could not get the project name, show the tenant_id with
# # a 'Deleted' identifier instead.
# if project:
# instance.project_name = getattr(project[0], "name", None)
# else:
# deleted = _("Deleted")
# instance.project_name = translation.string_concat(
# instance.tenant_id, " (", deleted, ")")
# return data
|
fcurella/cookiejar
|
tests/test_client.py
|
Python
|
mit
| 2,942
| 0.004079
|
import os
from unittest import TestCase
from cookiejar.client import CookiejarClient
class ClientTests(TestCase):
maxDiff = None
def test_pagination(self):
index = os.path.join((os.path.dirname(os.path.abspath(__file__))), 'index.1.json')
client = CookiejarClient(index=index)
expected = [
{
u'id': 1,
u'name': u'audreyr/pypackage',
u'url': u'https://github.com/audreyr/cookiecutter-pypackage/archive/fe165c5242cc889db0c58476abde905cecf14dfa.zip',
u'version': u'0.0.1',
u'author': u'Audrey Roy',
u'description': u'Cookiecutter template for a Python package.',
u'checksum': "md5$a79cc0ef3897d14eeb3b5be6a37a5ff8",
u'user': u'audreyr',
},
{
u'id': 2,
u'name': u'sloria/flask',
u'url': u'https://github.com/sloria/cookiecutter-flask/archive/97e835461d31c00e9f16ac79ef3af9aeb13ae84a.zip',
u'version': u'0.0.1',
u'author': u'Steven Loria',
u'description': u'A flask template with Twitter Bootstrap 3, starter templates, and basic registration/authentication.',
u'checksum': "md5$72aa94d5768756231c66d8ce03ca51cc",
u'user': u'sloria',
},
{
u'id': 3,
u'name': u'pydanny/django',
u'url': u'https://github.com/pydanny/cookiecutter-django/archive/172036f8f34b82c29bdc0bb3f31f5b703d0ce8f8.zip',
u'version': u'0.0.1',
u'author': u'Daniel Greenfeld',
u'description': u'A cookiecutter template for creating reusable Django projects quickly.',
u'checksum': "md5$874ce3c00faabde6a11fb3c9d3909649",
u'user': u'pydanny',
}
]
results = client.filter()
res = list(results)
self.assertEqual(len(res), len(expected))
self.assertEqual(res, expected)
def test_get(self):
index = os.path.join((os.path.dirname(os.path.abspath(__file__))), 'index.1.json')
client = CookiejarClient(index=index)
expected = {
u'id': 2,
u'name': u'sloria/flask',
u'url': u'https://github.com/sloria/cookiecutter-flask/archive/97e835461d31c00e9f16ac79ef3af9aeb13ae84a.zip',
u'version': u'0.0.1',
u'author': u'Steven Loria',
u'description': u'A flask template with Twitter Bootstrap 3, starter templates, and basic registration/authentication.',
u'checksum': "md5$72aa94d5768756231c66d8ce03ca51cc",
u'user': u'sloria',
}
client.fetch()
result = client.get('sloria/flask')
self.assertEqual(result, expected)
        self.assertRaises(RuntimeError, client.get, 'nonexistent_template')
|
SalesforceEng/Providence
|
providence.py
|
Python
|
bsd-3-clause
| 10,230
| 0.006158
|
'''
Copyright (c) 2015, Salesforce.com, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of Salesforce.com nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
#-- Load libraries
import datetime
import imp
import os, sys
import getpass
import json
import pytz
import logging
import argparse
from uuid import uuid4
from apscheduler.scheduler import Scheduler
from alerts import Alert
from Empire.creds import CredentialManager
#-- Load configuration
#-- This method may change in the future
import config
#-- remember command line settings
import settings
def set_global_config():
configuration = config.Configuration('config.json')
config.providence_configuration = configuration
return configuration
def set_logging_from_configuration(configuration):
#-- Setup Logging
logging.basicConfig(filename=configuration.get(('logging','filename')),
filemode='w',
level=logging.DEBUG if configuration.get(('logging','loglevel')) == 'debug' else logging.INFO,
format=configuration.get(('logging','stringfmt')),
datefmt=configuration.get(('logging','datefmt')))
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter(configuration.get(('logging','formatter')))
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
if __name__ == "__main__":
configuration = set_global_config()
set_logging_from_configuration(configuration)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(description='Providence Monitor Framework')
parser.add_argument('--tests','-t', help="run plugin tests", action='store_true')
parser.add_argument('--mode', help="specify production for production mode, or anything otherwise. Database will be reset if not in production, and providence will start from the current commit")
parser.add_argument('--p4change', help="specify the p4 change number to debug. Must not be in production mode")
parser.add_argument('--timestamp', help="timestamp in PST to pull commits from, in the format YYYY-MM-DD HH:MM:SS")
args = parser.parse_args()
settings.init(args.mode, args.p4change)
#-- Basic Credentials setup
credentials_file = configuration.get('credentials_file')
credential_key = os.environ.get('CREDENTIAL_KEY')
if credential_key is None:
credential_key = getpass.getpass('Credential Key:')
credential_manager = CredentialManager(credentials_file, credential_key)
config.credential_manager = credential_manager
from models import Base
from db import engine
#-- reset db if not in production or timestamp specified
if not settings.in_production() or args.timestamp:
Base.metadata.drop_all(engine)
Base.metadata.create_all(engine)
from repos import repotracker
from plugins import RepoWatcher, Plugins
from plugins.base import PluginTestError
#-- Load plugins
loaded_plugins = Plugins(configuration)
if args.tests:
print "======================= Loading plugins ======================="
plugins = loaded_plugins.enabled_plugins()
print "======================= Running Plugin Tests ======================="
for plugin_area in plugins:
for plugin in plugins[plugin_area]:
print "Running test for ", plugin.__module__
try:
plugin.test()
except PluginTestError, pte:
print "Test Failed for ", plugin.__module__
print pte.message
sys.exit(1)
print "======================= Tests Successful ======================="
sys.exit(0)
def run_watchers(startTime=None):
# run watcher plugins
logger.info("Running watchers")
plugins = loaded_plugins.enabled_plugins()
repositories = loaded_plugins.get_repositories(plugins["repositories"])
watchers = loaded_plugins.get_watchers(plugins["watchers"])
        tracker = repotracker.RepoTracker()
        for repository_name, repository_data in repositories.items():
repository_watchers_by_path = watchers.get(repository_name)
logger.info("In repo %s", repository_name)
if repository_watchers_by_path is None:
continue
for repository_path, repo_watchers in repository_watchers_by_path.items():
repository_db_identifier = repository_name
if repository_path is not None:
repository_db_identifier += "::" + repository_path
def commit_started_callback(repo_commit):
if repo_watchers:
for repo_watcher in repo_watchers:
plugin = repo_watcher.plugin
if hasattr(plugin, 'commit_started'):
plugin.commit_started(repository_name, repo_commit)
def commit_finished_callback(repo_commit):
if repo_watchers:
for repo_watcher in repo_watchers:
plugin = repo_watcher.plugin
if hasattr(plugin, 'commit_finished'):
plugin.commit_finished(repository_name, repo_commit)
if repo_commit.identifier:
new_identifier = repo_commit.identifier
tracker.update_identifier(repository_db_identifier, new_identifier)
def patch_callback(repo_patch):
if repo_watchers:
for repo_watcher in repo_watchers:
plugin = repo_watcher.plugin
if hasattr(plugin, 'patch'):
plugin.patch(repository_name, repo_patch)
last_run_completed = tracker.last_run_completed(repository_db_identifier)
if repository_data.get("check-every-x-minutes"):
now = datetime.datetime.utcnow()
if last_run_completed:
                    if (now - last_run_completed) < datetime.timedelta(minutes=repository_data.get("check-every-x-minutes")):
                        # Ran within the check interval; skip this repository path.
                        continue
try:
last_identifier = tracker.last_identifier(repository_db_identifier)
if not last_identifier and startTime:
last_identifier = startTime + " PST"
repository_data["source"].processSinceIdentifier(last_identifier,
commit_started_callback=commit_started_callback,
|
flyinactor91/Raspi-Hardware
|
7Segment/Counter.py
|
Python
|
mit
| 481
| 0.035343
|
#!/usr/bin/python
##--Michael duPont (flyinactor91.com)
##--Display increasing values on the seven-segment display
from Adafruit_7Segment import SevenSegment
import time
segment = SevenSegment(address=0x70)
num = 0
rest = float(raw_input('Step: '))
while True:
segment.setColon((num / 10000)%2)
	segment.writeDigit(0 , (num / 1000)%10)
segment.writeDigit(1 , (num / 100)%10)
segment.writeDigit(3 , (num / 10)%10)
segment.writeDigit(4 , num % 10)
num += 1
time.sleep(rest)
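# Editor's note: a small sketch generalizing the digit-extraction
# arithmetic used above; the helper name is invented for illustration.
# Uses // so it behaves the same under Python 2 and 3.
def digits(n, places=4):
    # Return the last `places` decimal digits of n, most significant first.
    return [(n // 10 ** i) % 10 for i in range(places - 1, -1, -1)]
# e.g. digits(1234) == [1, 2, 3, 4]; writing these to display positions
# 0, 1, 3, 4 reproduces the loop above (position 2 is the colon).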
|
agartland/utils
|
.ipynb_checkpoints/quickr-checkpoint.py
|
Python
|
mit
| 5,563
| 0.007011
|
import subprocess
import pandas as pd
import tempfile
import os
__all__ = ['runRscript']
def runRscript(Rcmd, inDf=None, outputFiles=0, removeTempFiles=None):
"""Runs an R cmd with option to provide a DataFrame as input and file
as output.
Params
------
Rcmd : str
String containing the R-script to run.
inDf : pd.DataFrame or list of pd.DataFrame's
Data to be passed to the R script via a CSV file.
Object should be referenced in the script as "INPUTDF" or "INPUTDF0" etc. if list
outputFiles : int
Number of output CSV files available for writing by the R-script.
The contents of the file are returned as a pd.DataFrame.
        File names should be referenced as "OUTPUTFN0", "OUTPUTFN1", etc. in the R-script
removeTempFiles : True, False or None
For debugging. If True then the temporary script and data files will
always be removed. If None then they will be removed if there is not an error.
If False they will not be removed.
Returns
-------
stdout : str
Output of the R-script at the terminal (including stderr)
output : pd.DataFrame or list of pd.DataFrames
Optionally, the contents of CSV file(s) written by the R-script as a pd.DataFrame"""
"""Write data to a tempfile if required"""
if not inDf is None:
if not type(inDf) is list:
inputH, inputFn = tempfile.mkstemp(suffix='.csv', prefix='tmp-Rinput-', text=True)
            readCmd = 'INPUTDF <- read.csv("%s")\n' % inputFn
Rcmd = readCmd + Rcmd
os.close(inputH)
inDf.to_csv(inputFn)
else:
inputFilenames = []
for i, idf in enumerate(inDf):
inputH, inputFn = tempfile.mkstemp(suffix='.csv', prefix='tmp-Rinput%d-' % i, text=True)
readCmd = 'INPUTDF%d <- read.csv("%s")\n' % (i, inputFn)
                Rcmd = readCmd + Rcmd
os.close(inputH)
idf.to_csv(inputFn)
inputFilenames.append(inputFn)
"""Set up an output file if required"""
outFn = []
for outi in range(outputFiles):
outputH, outputFn = tempfile.mkstemp(suffix='.txt', prefix='tmp-Routput-', text=True)
outCmd = 'OUTPUTFN%d <- "%s"\n' % (outi, outputFn)
Rcmd = outCmd + Rcmd
outFn.append(outputFn)
os.close(outputH)
"""Write script to tempfile"""
scriptH, scriptFn = tempfile.mkstemp(suffix='.R', prefix='tmp-Rscript-', text=True)
with open(scriptFn, 'w') as fh:
fh.write(Rcmd)
os.close(scriptH)
"""Run the R script and collect output"""
try:
cmdList = ['Rscript', '--vanilla', scriptFn]
res = subprocess.check_output(cmdList, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
res = bytes('STDOUT:\n%s\nSTDERR:\n%s' % (e.stdout, e.stderr), 'utf-8')
print('R process returned an error')
if removeTempFiles is None:
print('Leaving tempfiles for debugging.')
print(' '.join(cmdList))
if not inDf is None:
print(inputFn)
for outputFn in outFn:
print(outputFn)
removeTempFiles = False
"""Read the ouptfile if required"""
outDf = []
for outputFn in outFn:
try:
tmp = pd.read_csv(outputFn)
outDf.append(tmp)
except:
print('Cannot read output CSV: reading as text (%s)' % outputFn)
with open(outputFn, 'r') as fh:
tmp = fh.read()
if len(tmp) == 0:
print('Output file is empty! (%s)' % outputFn)
tmp = None
outDf.append(tmp)
# outDf = [pd.read_csv(outputFn) for outputFn in outFn]
if len(outDf) == 0:
outDf = None
elif len(outDf) == 1:
outDf = outDf[0]
"""Cleanup the temporary files"""
if removeTempFiles is None or removeTempFiles:
os.remove(scriptFn)
if not inDf is None:
if not type(inDf) is list:
os.remove(inputFn)
else:
for inputFn in inputFilenames:
os.remove(inputFn)
else:
print('Leaving tempfiles for debugging.')
print(' '.join(cmdList))
if not inDf is None:
print(inputFn)
for outputFn in outFn:
print(outputFn)
if outputFiles == 0:
return res.decode('utf-8')
else:
return res.decode('utf-8'), outDf
def _test_simple():
Rcmd = """ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
lm.D9 <- lm(weight ~ group)
lm.D90 <- lm(weight ~ group - 1) # omitting intercept
anova(lm.D9)
summary(lm.D90)"""
res = runRscript(Rcmd)
print(res)
def _test_io():
ctrl = [4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14]
trt = [4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69]
inDf = pd.DataFrame({'weight':ctrl + trt,
'group': ['Ctl']*len(ctrl) + ['Trt']*len(trt)})
Rcmd = """print(head(INPUTDF))
lm.D9 <- lm(weight ~ group, data=INPUTDF)
lm.D90 <- lm(weight ~ group - 1, data=INPUTDF) # omitting intercept
anova(lm.D9)
summary(lm.D90)
    write.csv(data.frame(summary(lm.D90)$coefficients), OUTPUTFN0)
"""
res, outputFile = runRscript(Rcmd, inDf=inDf, outputFiles=1)
print(res)
print(outputFile)
|
ioGrow/iogrowCRM
|
crm/tests/test_mail.py
|
Python
|
agpl-3.0
| 811
| 0
|
import unittest
from google.appengine.api import mail
from google.appengine.ext import testbed
class MailTestCase(unittest.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
        self.testbed.activate()
self.testbed.init_mail_stub()
self.mail_stub = self.testbed.get_stub(testbed.MAIL_SERVICE_NAME)
def tearDown(self):
self.testbed.deactivate()
def testMailSent(self):
mail.send_mail(to='alice@example.com',
subject='This is a test',
sender='bob@example.com',
body='This is a test e-mail')
        messages = self.mail_stub.get_sent_messages(to='alice@example.com')
self.assertEqual(1, len(messages))
self.assertEqual('alice@example.com', messages[0].to)
|
davy39/eric
|
MultiProject/MultiProject.py
|
Python
|
gpl-3.0
| 32,000
| 0.004
|
# -*- coding: utf-8 -*-
# Copyright (c) 2008 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing the multi project management functionality.
"""
from __future__ import unicode_literals
import os
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QFileInfo, QFile, \
QIODevice, QObject
from PyQt5.QtGui import QCursor
from PyQt5.QtWidgets import QMenu, QApplication, QDialog, QToolBar
from Globals import recentNameMultiProject
from E5Gui.E5Action import E5Action, createActionGroup
from E5Gui import E5FileDialog, E5MessageBox
import UI.PixmapCache
import Preferences
import Utilities
class MultiProject(QObject):
"""
Class implementing the project management functionality.
@signal dirty(bool) emitted when the dirty state changes
@signal newMultiProject() emitted after a new multi project was generated
@signal multiProjectOpened() emitted after a multi project file was read
@signal multiProjectClosed() emitted after a multi project was closed
@signal multiProjectPropertiesChanged() emitted after the multi project
properties were changed
@signal showMenu(string, QMenu) emitted when a menu is about to be shown.
The name of the menu and a reference to the menu are given.
@signal projectDataChanged(project data dict) emitted after a project entry
has been changed
@signal projectAdded(project data dict) emitted after a project entry
has been added
@signal projectRemoved(project data dict) emitted after a project entry
has been removed
@signal projectOpened(filename) emitted after the project has been opened
"""
dirty = pyqtSignal(bool)
newMultiProject = pyqtSignal()
multiProjectOpened = pyqtSignal()
multiProjectClosed = pyqtSignal()
multiProjectPropertiesChanged = pyqtSignal()
showMenu = pyqtSignal(str, QMenu)
projectDataChanged = pyqtSignal(dict)
projectAdded = pyqtSignal(dict)
projectRemoved = pyqtSignal(dict)
projectOpened = pyqtSignal(str)
def __init__(self, project, parent=None, filename=None):
"""
Constructor
@param project reference to the project object (Project.Project)
@param parent parent widget (usually the ui object) (QWidget)
@param filename optional filename of a multi project file to open
(string)
"""
super(MultiProject, self).__init__(parent)
self.ui = parent
        self.projectObject = project
self.__initData()
self.recent = []
self.__loadRecent()
if filename is not None:
self.openMultiProject(filename)
def __initData(self):
"""
Private method to initialize the multi project data part.
"""
self.loaded = False # flag for the loaded status
self.__dirty = False # dirty flag
self.pfile = "" # name of the multi project file
self.ppath = "" # name of the multi project directory
self.description = "" # description of the multi project
self.name = ""
self.opened = False
self.projects = []
# list of project info; each info entry is a dictionary
# 'name' : name of the project
# 'file' : project file name
# 'master' : flag indicating the master
# project
# 'description' : description of the project
# 'category' : name of the group
# 'uid' : unique identifier
self.categories = []
def __loadRecent(self):
"""
Private method to load the recently opened multi project filenames.
"""
self.recent = []
Preferences.Prefs.rsettings.sync()
rp = Preferences.Prefs.rsettings.value(recentNameMultiProject)
if rp is not None:
for f in rp:
if QFileInfo(f).exists():
self.recent.append(f)
def __saveRecent(self):
"""
Private method to save the list of recently opened filenames.
"""
Preferences.Prefs.rsettings.setValue(
recentNameMultiProject, self.recent)
Preferences.Prefs.rsettings.sync()
def getMostRecent(self):
"""
Public method to get the most recently opened multiproject.
@return path of the most recently opened multiproject (string)
"""
if len(self.recent):
return self.recent[0]
else:
return None
def setDirty(self, b):
"""
Public method to set the dirty state.
It emits the signal dirty(int).
@param b dirty state (boolean)
"""
self.__dirty = b
self.saveAct.setEnabled(b)
self.dirty.emit(bool(b))
def isDirty(self):
"""
Public method to return the dirty state.
@return dirty state (boolean)
"""
return self.__dirty
def isOpen(self):
"""
Public method to return the opened state.
@return open state (boolean)
"""
return self.opened
def getMultiProjectPath(self):
"""
Public method to get the multi project path.
@return multi project path (string)
"""
return self.ppath
def getMultiProjectFile(self):
"""
Public method to get the path of the multi project file.
@return path of the multi project file (string)
"""
return self.pfile
def __checkFilesExist(self):
"""
Private method to check, if the files in a list exist.
        The project files are checked for existence in the
        filesystem. Non-existent projects are removed from the list and the
dirty state of the multi project is changed accordingly.
"""
removelist = []
for project in self.projects:
if not os.path.exists(project['file']):
removelist.append(project)
if removelist:
for project in removelist:
self.projects.remove(project)
self.setDirty(True)
def __extractCategories(self):
"""
Private slot to extract the categories used in the project definitions.
"""
for project in self.projects:
if project['category'] and \
project['category'] not in self.categories:
self.categories.append(project['category'])
def getCategories(self):
"""
Public method to get the list of defined categories.
@return list of categories (list of string)
"""
return [c for c in self.categories if c]
def __readMultiProject(self, fn):
"""
Private method to read in a multi project (.e4m, .e5m) file.
@param fn filename of the multi project file to be read (string)
@return flag indicating success
"""
f = QFile(fn)
if f.open(QIODevice.ReadOnly):
from E5XML.MultiProjectReader import MultiProjectReader
reader = MultiProjectReader(f, self)
reader.readXML()
f.close()
if reader.hasError():
return False
else:
QApplication.restoreOverrideCursor()
E5MessageBox.critical(
self.ui,
self.tr("Read multiproject file"),
self.tr(
"<p>The multiproject file <b>{0}</b> could not be"
" read.</p>").format(fn))
return False
self.pfile = os.path.abspath(fn)
self.ppath = os.path.abspath(os.path.dirname(fn))
self.__extractCategories()
# insert filename into list of recently opened multi projects
self.__syncRecent()
self.name = os.path.splitext(os.path.basename(fn))[0]
# check, if the files of the multi pro
|
nkremerh/cctools
|
prune/src/prune/class_item.py
|
Python
|
gpl-2.0
| 6,649
| 0.048428
|
import json, time
import glob
import timer
from utils import *
class Item(object):
__slots__ = ( 'type', 'cbid', 'dbid', 'wfid', 'step', 'when', 'meta', 'body', 'repo', 'path', 'size', 'id' )
def __init__( self, obj={}, **kwargs ):
        # Three situations: Network streamed data (obj), Database data (obj), Newly created data (kwargs)
kwargs.update( obj )
self.type = kwargs['type']
self.cbid = kwargs['cbid'] if 'cbid' in kwargs else None
        self.dbid = kwargs['dbid'] if 'dbid' in kwargs else None
self.wfid = kwargs['wfid'] if 'wfid' in kwargs else glob.workflow_id
self.step = kwargs['step'] if 'step' in kwargs else glob.workflow_step
self.when = kwargs['when'] if 'when' in kwargs else time.time()
self.id = kwargs['id'] if 'id' in kwargs else None
if 'meta' in kwargs:
if isinstance( kwargs['meta'], basestring ):
self.meta = json.loads( kwargs['meta'] )
else:
self.meta = kwargs['meta']
else:
self.meta = {}
self.body = None
self.repo = None
self.path = None
self.size = 0
if 'body' in kwargs and kwargs['body'] != None:
if isinstance( kwargs['body'], basestring ):
self.body = json.loads( kwargs['body'] )
tmp_str = kwargs['body']
else:
self.body = kwargs['body']
tmp_str = json.dumps( kwargs['body'], sort_keys=True )
self.size = len(tmp_str)
if not self.cbid:
self.cbid = hashstring(tmp_str)
elif 'repo' in kwargs and kwargs['repo'] != None:
self.repo = kwargs['repo']
if not self.cbid:
log.error("No cbid for an object in a remote repository. There is no way to obtain it.")
elif 'path' in kwargs and kwargs['path'] != None:
self.path = kwargs['path']
if not self.cbid:
if 'new_path' in kwargs:
self.cbid, self.size = hashfile_copy(self.path, kwargs['new_path'])
self.path = kwargs['new_path']
else:
self.cbid, self.size = hashfile(self.path)
elif 'size' in kwargs and kwargs['size'] != None:
self.size = int(kwargs['size'])
elif os.path.isfile(self.path):
statinfo = os.stat(self.path)
self.size = statinfo.st_size
elif os.path.isfile(glob.data_file_directory+self.path):
statinfo = os.stat(glob.data_file_directory+self.path)
self.size = statinfo.st_size
elif os.path.isfile(glob.cache_file_directory+self.path):
statinfo = os.stat(glob.cache_file_directory+self.path)
self.size = statinfo.st_size
else:
print "Can't find the file!!!"
def __str__( self ):
obj = dict(type=self.type, cbid=self.cbid, when=self.when)
if self.dbid: obj['dbid'] = self.dbid
if self.wfid: obj['wfid'] = self.wfid
if self.step: obj['step'] = self.step
if self.meta:
if isinstance( self.meta, basestring ):
#obj['meta'] = self.meta
obj['meta'] = json.loads(self.meta)
else:
#obj['meta'] = json.dumps(self.meta, sort_keys=True)
obj['meta'] = self.meta
if self.size:
obj['size'] = self.size
if self.body:
if isinstance( self.body, basestring ):
#obj['body'] = self.body[0:20]+' ... '+self.body[-20:]
obj['body'] = json.loads(self.body)
else:
#obj['body'] = json.dumps(self.body, sort_keys=True)
obj['body'] = self.body
elif self.repo:
obj['repo'] = self.repo
elif self.path:
obj['path'] = self.path
return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': ')) + "\n"
def stream( self, active_stream ):
obj = dict(type=self.type, cbid=self.cbid, when=self.when)
summary = ''
if self.dbid: obj['dbid'] = self.dbid
if self.wfid: obj['wfid'] = self.wfid
if self.step: obj['step'] = self.step
if self.meta: obj['meta'] = self.meta
if self.size: obj['size'] = self.size
if self.body:
obj['body'] = self.body
active_stream.write( json.dumps(obj, sort_keys=True) + "\n" )
elif self.repo:
obj['repo'] = self.repo
active_stream.write( json.dumps(obj, sort_keys=True) + "\n" )
elif self.path:
active_stream.write( json.dumps(obj, sort_keys=True) + "\n" )
summary = self.stream_content( active_stream )
return json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': ')) + "\n" + summary + "\n"
def export( self ):
obj = dict(type=self.type, cbid=self.cbid, when=self.when)
if self.dbid: obj['dbid'] = self.dbid
if self.wfid: obj['wfid'] = self.wfid
if self.step: obj['step'] = self.step
if self.meta: obj['meta'] = self.meta
if self.size: obj['size'] = self.size
if self.body:
obj['body'] = self.body
elif self.repo:
obj['repo'] = self.repo
elif self.path:
obj['path'] = self.path
return obj
def dump( self ):
return json.dumps(self.export(), sort_keys=True)
def stream_content( self, active_stream ):
if self.body:
if isinstance( self.body, basestring ):
wstr = self.body
else:
wstr = json.dumps(self.body, sort_keys=True)
active_stream.write( wstr )
if len(wstr)>45:
return wstr[0:20] + ' ... ' + wstr[-20:]
else:
return wstr
elif self.repo:
print "Stream from repository not implemented..."
return None
elif self.path:
if self.type=='temp':
pathname = glob.cache_file_directory + self.path
else:
pathname = glob.data_file_directory + self.path
with open( pathname, 'r' ) as f:
buf = f.read(1024*1024)
lastbuf = ''
summary = buf[0:20] + ' ... ' if len(buf)>20 else buf
while buf:
active_stream.write( buf )
lastbuf = buf
buf = f.read(1024*1024)
summary = summary + buf[-20:] if len(lastbuf)>20 else summary + buf
return summary
def sqlite3_insert( self ):
keys = ['type','cbid','"when"']
vals = [self.type, self.cbid, self.when]
if self.id:
keys.append( 'id' )
vals.append( self.id )
if self.dbid:
keys.append( 'dbid' )
vals.append( self.dbid )
if self.wfid:
keys.append( 'wfid' )
vals.append( self.wfid )
if self.step:
keys.append( 'step' )
vals.append( self.step )
if self.meta:
keys.append( 'meta' )
vals.append( json.dumps(self.meta, sort_keys=True) )
if self.size:
keys.append( 'size' )
vals.append( self.size )
if self.body:
keys.append( 'body' )
vals.append( json.dumps(self.body, sort_keys=True) )
elif self.repo:
keys.append( 'repo' )
vals.append( self.repo )
elif self.path:
keys.append( 'path' )
vals.append( self.path )
qs = ['?'] * len(keys)
ins = 'INSERT INTO items (%s) VALUES (%s);' % (','.join(keys), ','.join(qs))
return ins, tuple(vals)
#def __add__( self, other ):
# return str(self) + other
#def __radd__( self, other ):
# return other + str(self)
def __eq__(self, other):
return self.cbid == other.cbid
def __ne__(self, other):
return not self.__eq__(other)
def __len__( self ):
return len(str(self))
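# Editor's sketch: a standalone illustration of the sqlite3_insert()
# pattern above -- building a parameterized INSERT from only the fields
# that are set. The table schema and helper name are invented for the
# example; this is not part of the prune codebase.
import sqlite3

def insert_row(conn, table, **fields):
    cols = sorted(k for k, v in fields.items() if v is not None)
    sql = 'INSERT INTO %s (%s) VALUES (%s);' % (
        table, ','.join(cols), ','.join('?' * len(cols)))
    conn.execute(sql, tuple(fields[k] for k in cols))

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE items (type TEXT, cbid TEXT, size INTEGER);')
insert_row(conn, 'items', type='temp', cbid='abc123', size=42)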
|
h4r5h1t/django-hauthy
|
tests/model_forms/models.py
|
Python
|
bsd-3-clause
| 12,503
| 0.00192
|
"""
XX. Generating HTML forms from models
This is mostly just a reworking of the ``form_for_model``/``form_for_instance``
tests to use ``ModelForm``. As such, the text may not make sense in all cases,
and the examples are probably a poor fit for the ``ModelForm`` syntax. In other
words, most of these tests should be rewritten.
"""
from __future__ import unicode_literals
import datetime
import os
import tempfile
from django.core import validators
from django.core.exceptions import ValidationError
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils import six
from django.utils._os import upath
from django.utils.encoding import python_2_unicode_compatible
from django.utils.six.moves import range
temp_storage_dir = tempfile.mkdtemp()
temp_storage = FileSystemStorage(temp_storage_dir)
ARTICLE_STATUS = (
(1, 'Draft'),
(2, 'Pending'),
(3, 'Live'),
)
ARTICLE_STATUS_CHAR = (
('d', 'Draft'),
('p', 'Pending'),
('l', 'Live'),
)
class Person(models.Model):
name = models.CharField(max_length=100)
@python_2_unicode_compatible
class Category(models.Model):
name = models.CharField(max_length=20)
slug = models.SlugField(max_length=20)
url = models.CharField('The URL', max_length=40)
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
@python_2_unicode_compatible
class Writer(models.Model):
name = models.CharField(max_length=50, help_text='Use both first and last names.')
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
headline = models.CharField(max_length=50)
slug = models.SlugField()
pub_date = models.DateField()
created = models.DateField(editable=False)
writer = models.ForeignKey(Writer)
article = models.TextField()
categories = models.ManyToManyField(Category, blank=True)
status = models.PositiveIntegerField(choices=ARTICLE_STATUS, blank=True, null=True)
def save(self, *args, **kwargs):
if not self.id:
self.created = datetime.date.today()
return super(Article, self).save(*args, **kwargs)
def __str__(self):
return self.headline
class ImprovedArticle(models.Model):
article = models.OneToOneField(Article)
class ImprovedArticleWithParentLink(models.Model):
article = models.OneToOneField(Article, parent_link=True)
class BetterWriter(Writer):
score = models.IntegerField()
@python_2_unicode_compatible
class Publication(models.Model):
title = models.CharField(max_length=30)
date_published = models.DateField()
def __str__(self):
return self.title
class Author(models.Model):
publication = models.OneToOneField(Publication, null=True, blank=True)
full_name = models.CharField(max_length=255)
class Author1(models.Model):
publication = models.OneToOneField(Publication, null=False)
full_name = models.CharField(max_length=255)
@python_2_unicode_compatible
class WriterProfile(models.Model):
writer = models.OneToOneField(Writer, primary_key=True)
age = models.PositiveIntegerField()
def __str__(self):
return "%s is %s" % (self.writer, self.age)
class Document(models.Model):
myfile = models.FileField(upload_to='unused', blank=True)
@python_2_unicode_compatible
class TextFile(models.Model):
description = models.CharField(max_length=20)
file = models.FileField(storage=temp_storage, upload_to='tests', max_length=15)
def __str__(self):
return self.description
class CustomFileField(models.FileField):
def save_form_data(self, instance, data):
been_here = getattr(self, 'been_saved', False)
assert not been_here, "save_form_data called more than once"
setattr(self, 'been_saved', True)
class CustomFF(models.Model):
f = CustomFileField(upload_to='unused', blank=True)
class FilePathModel(models.Model):
path = models.FilePathField(path=os.path.dirname(upath(__file__)), match=".*\.py$", blank=True)
try:
from PIL import Image # NOQA: detect if Pillow is installed
test_images = True
@python_2_unicode_compatible
class ImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
# Deliberately put the image field *after* the width/height fields to
# trigger the bug in #10404 with width/height not getting assigned.
width = models.IntegerField(editable=False)
height = models.IntegerField(editable=False)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height')
path = models.CharField(max_length=16, blank=True, default='')
def __str__(self):
return self.description
@python_2_unicode_compatible
class OptionalImageFile(models.Model):
def custom_upload_path(self, filename):
path = self.path or 'tests'
return '%s/%s' % (path, filename)
description = models.CharField(max_length=20)
image = models.ImageField(storage=temp_storage, upload_to=custom_upload_path,
width_field='width', height_field='height',
blank=True, null=True)
width = models.IntegerField(editable=False, null=True)
height = models.IntegerField(editable=False, null=True)
path = models.CharField(max_length=16, blank=True, default='')
def __str__(self):
return self.description
except ImportError:
test_images = False
@python_2_unicode_compatible
class CommaSeparatedInteger(models.Model):
field = models.CommaSeparatedIntegerField(max_length=20)
def __str__(self):
return self.field
class Homepage(models.Model):
url = models.URLField()
@python_2_unicode_compatible
class Product(models.Model):
slug = models.SlugField(unique=True)
def __str__(self):
return self.slug
@python_2_unicode_compatible
class Price(models.Model):
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.PositiveIntegerField()
def __str__(self):
return "%s for %s" % (self.quantity, self.price)
class Meta:
unique_together = (('price', 'quantity'),)
class Triple(models.Model):
left = models.IntegerField()
middle = models.IntegerField()
right = models.IntegerField()
class Meta:
unique_together = (('left', 'middle'), ('middle', 'right'))
class ArticleStatus(models.Model):
status = models.CharField(max_length=2, choices=ARTICLE_STATUS_CHAR, blank=True, null=True)
@python_2_unicode_compatible
class Inventory(models.Model):
barcode = models.PositiveIntegerField(unique=True)
parent = models.ForeignKey('self', to_field='barcode', blank=True, null=True)
name = models.CharField(blank=False, max_length=20)
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
class Book(models.Model):
title = models.CharField(max_length=40)
author = models.ForeignKey(Writer, blank=True, null=True)
special_id = models.IntegerField(blank=True, null=True, unique=True)
class Meta:
unique_together = ('title', 'author')
class BookXtra(models.Model):
isbn = models.CharField(max_length=16, unique=True)
suffix1 = models.IntegerField(blank=True, default=0)
    suffix2 = models.IntegerField(blank=True, default=0)
class Meta:
unique_together = (('suffix1', 'suffix2'))
abstract = True
class DerivedBook(Book, BookXtra):
pass
@python_2_unicode_compatible
class ExplicitPK(models.Model):
key = models.CharField(max_length=20, primary_key=True)
desc = models.CharField(max_length=20, blank=True, unique=True)
class Meta:
unique_together = ('key', 'desc')
def __str__(self):
|
Tayamarn/socorro
|
e2e-tests/pages/crash_report_page.py
|
Python
|
mpl-2.0
| 2,049
| 0.00244
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from pypom import Region
from selenium.webdriver.common.by import By
from pages.base_page import CrashStatsBasePage
class CrashReport(CrashStatsBasePage):
_reports_tab_locator = (By.ID, 'reports')
_results_count_locator = (By.CSS_SELECTOR, 'span.totalItems')
_reports_row_locator = (By.CSS_SELECTOR, '#reports-list tbody tr')
_report_tab_button_locator = (By.CSS_SELECTOR, '#panels-nav .reports')
_summary_table_locator = (By.CSS_SELECTOR, '.content')
def wait_for_page_to_load(self):
super(CrashReport, self).wait_for_page_to_load()
self.wait.until(lambda s: self.is_element_displayed(*self._summary_table_locator))
return self
@property
def reports(self):
return [self.Report(self, el) for el in self.find_elements(*self._reports_row_locator)]
@property
def results_count_total(self):
return int(self.find_element(*self._results_count_locator).text.replace(",", ""))
def click_reports_tab(self):
self.find_element(*self._report_tab_button_locator).click()
self.wait.until(lambda s: len(self.reports))
class Report(Region):
_product_locator = (By.CSS_SELECTOR, 'td:nth-of-type(3)')
        _version_locator = (By.CSS_SELECTOR, 'td:nth-of-type(4)')
_report_date_link_locator = (By.CSS_SELECTOR, '#reports-list a.external-link')
@property
        def product(self):
return self.find_element(*self._product_locator).text
@property
def version(self):
return self.find_element(*self._version_locator).text
def click_report_date(self):
self.find_element(*self._report_date_link_locator).click()
from uuid_report import UUIDReport
return UUIDReport(self.selenium, self.page.base_url).wait_for_page_to_load()
|
oracc/nammu
|
python/nammu/test/test_yaml_update.py
|
Python
|
gpl-3.0
| 2,813
| 0
|
'''
Copyright 2015 - 2018 University College London.
This file is part of Nammu.
Nammu is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Nammu is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Nammu. If not, see <http://www.gnu.org/licenses/>.
'''
import filecmp
import os
import yaml
from python.nammu.controller.NammuController import NammuController
from ..utils import get_home_env_var, update_yaml_config
def test_update_yaml_config():
"""
Ensure that, upon updating yaml settings files from jar, a user's
default project settings are not overwritten.
"""
pth = "resources/test/"
local_file = os.path.join(pth, "user_settings.yaml")
jar_file = os.path.join(pth, "jar_settings.yaml")
new_config = update_yaml_config(path_to_jar=jar_file,
yaml_path=local_file,
path_to_config=local_file,
test_mode=True)
with open(local_file, "r") as f:
orig_config = yaml.safe_load(f)
# Make sure the user (project) setting is not overwritten
assert (new_config["projects"]["default"] ==
orig_config["projects"]["default"])
def test_settings_copied_correctly(monkeypatch, tmpdir):
"""
Check that the settings are initialised correctly at first launch.
More specifically, this test ensures that, if the user starts Nammu without
already having any configuration files, then local configuration files with
the correct content will be created, without affecting the original files.
"""
# Mock the user's home directory
home_env_var = get_home_env_var() # will vary depending on OS
monkeypatch.setitem(os.environ, home_env_var, str(tmpdir))
assert os.listdir(str(tmpdir)) == [] # sanity check!
NammuController() # start up Nammu, but don't do anything with it
settings_dir = os.path.join(os.environ[home_env_var], '.nammu')
for filename in ['settings.yaml', 'logging.yaml']:
target_file = os.path.join(settings_dir, filename)
original_file = os.path.join('resources', 'config', filename)
assert os.path.isfile(target_file)
assert filecmp.cmp(target_file, original_file)
# Check that the original config files have not been emptied (see #347)
with open(original_file, 'r') as orig:
assert orig.readlines()
|
resmo/ansible
|
lib/ansible/modules/identity/keycloak/keycloak_client.py
|
Python
|
gpl-3.0
| 33,175
| 0.003044
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Eike Frost <ei@kefro.st>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: keycloak_client
short_description: Allows administration of Keycloak clients via Keycloak API
version_added: "2.5"
description:
- This module allows the administration of Keycloak clients via the Keycloak REST API. It
requires access to the REST API via OpenID Connect; the user connecting and the client being
used must have the requisite access rights. In a default Keycloak installation, admin-cli
and an admin user would work, as would a separate client definition with the scope tailored
to your needs and a user having the expected roles.
- The names of module options are snake_cased versions of the camelCase ones found in the
Keycloak API and its documentation at U(http://www.keycloak.org/docs-api/3.3/rest-api/).
Aliases are provided so camelCased versions can be used as well.
    - The Keycloak API does not always sanity-check inputs, e.g. you can set
SAML-specific settings on an OpenID Connect client for instance and vice versa. Be careful.
If you do not specify a setting, usually a sensible default is chosen.
options:
state:
description:
- State of the client
- On C(present), the client will be created (or updated if it exists already).
- On C(absent), the client will be removed if it exists
choices: ['present', 'absent']
default: 'present'
realm:
description:
- The realm to create the client in.
client_id:
description:
- Client id of client to be worked on. This is usually an alphanumeric name chosen by
you. Either this or I(id) is required. If you specify both, I(id) takes precedence.
This is 'clientId' in the Keycloak REST API.
aliases:
- clientId
id:
description:
- Id of client to be worked on. This is usually an UUID. Either this or I(client_id)
is required. If you specify both, this takes precedence.
name:
description:
- Name of the client (this is not the same as I(client_id))
description:
description:
- Description of the client in Keycloak
root_url:
description:
- Root URL appended to relative URLs for this client
This is 'rootUrl' in the Keycloak REST API.
aliases:
- rootUrl
admin_url:
description:
- URL to the admin interface of the client
This is 'adminUrl' in the Keycloak REST API.
aliases:
- adminUrl
base_url:
description:
- Default URL to use when the auth server needs to redirect or link back to the client
This is 'baseUrl' in the Keycloak REST API.
aliases:
- baseUrl
enabled:
description:
- Is this client enabled or not?
type: bool
client_authenticator_type:
description:
- How do clients authenticate with the auth server? Either C(client-secret) or
C(client-jwt) can be chosen. When using C(client-secret), the module parameter
I(secret) can set it, while for C(client-jwt), you can use the keys C(use.jwks.url),
C(jwks.url), and C(jwt.credential.certificate) in the I(attributes) module parameter
to configure its behavior.
This is 'clientAuthenticatorType' in the Keycloak REST API.
choices: ['client-secret', 'client-jwt']
aliases:
- clientAuthenticatorType
secret:
description:
- When using I(client_authenticator_type) C(client-secret) (the default), you can
            specify a secret here (otherwise one will be generated if it does not exist). If
changing this secret, the module will not register a change currently (but the
changed secret will be saved).
registration_access_token:
description:
- The registration access token provides access for clients to the client registration
service.
This is 'registrationAccessToken' in the Keycloak REST API.
aliases:
- registrationAccessToken
default_roles:
description:
- list of default roles for this client. If the client roles referenced do not exist
yet, they will be created.
This is 'defaultRoles' in the Keycloak REST API.
aliases:
- defaultRoles
redirect_uris:
description:
- Acceptable redirect URIs for this client.
This is 'redirectUris' in the Keycloak REST API.
aliases:
- redirectUris
web_origins:
description:
- List of allowed CORS origins.
This is 'webOrigins' in the Keycloak REST API.
aliases:
- webOrigins
not_before:
description:
- Revoke any tokens issued before this date for this client (this is a UNIX timestamp).
This is 'notBefore' in the Keycloak REST API.
aliases:
- notBefore
bearer_only:
description:
- The access type of this client is bearer-only.
This is 'bearerOnly' in the Keycloak REST API.
aliases:
- bearerOnly
type: bool
consent_required:
description:
- If enabled, users have to consent to client access.
This is 'consentRequired' in the Keycloak REST API.
aliases:
- consentRequired
type: bool
standard_flow_enabled:
description:
- Enable standard flow for this client or not (OpenID connect).
This is 'standardFlowEnabled' in the Keycloak REST API.
aliases:
- standardFlowEnabled
type: bool
implicit_flow_enabled:
description:
- Enable implicit flow for this client or not (OpenID connect).
This is 'implicitFlowEnabled' in the Keycloak REST API.
aliases:
- implicitFlowEnabled
type: bool
direct_access_grants_enabled:
description:
- Are direct access grants enabled for this client or not (OpenID connect).
This is 'directAccessGrantsEnabled' in the Keycloak REST API.
aliases:
- directAccessGrantsEnabled
type: bool
service_accounts_enabled:
description:
- Are service accounts enabled for this client or not (OpenID connect).
This is 'serviceAccountsEnabled' in the Keycloak REST API.
aliases:
- serviceAccountsEnabled
type: bool
authorization_services_enabled:
description:
- Are authorization services enabled for this client or not (OpenID connect).
This is 'authorizationServicesEnabled' in the Keycloak REST API.
aliases:
- authorizationServicesEnabled
type: bool
public_client:
description:
- Is the access type for this client public or not.
This is 'publicClient' in the Keycloak REST API.
aliases:
- publicClient
type: bool
frontchannel_logout:
description:
- Is frontchannel logout enabled for this client or not.
This is 'frontchannelLogout' in the Keycloak REST API.
aliases:
- frontchannelLogout
type: bool
protocol:
description:
        - Type of client (either C(openid-connect) or C(saml)).
choices: ['openid-connect', 'saml']
full_scope_allowed:
description:
- Is the "Full Scope Allowed" feature set for
|
saisankargochhayat/algo_quest
|
Company-Based/SAP/social_sabatical_name.py
|
Python
|
apache-2.0
| 938
| 0.007463
|
# Find the name with the most distinct letters (ignoring spaces); if tied, choose alphabetical order.
# import sys
# text = "".join(sys.stdin.readlines())
# name_list = text.split("\n")
inputList = ["kylan charles", "raymond strickland", "julissa shepard", "andrea meza", "destiny alvarado"]
inputList2 = ["maria garcia", "smith hernandez", "hernandez smith", "mary martinez", "james johnson"]
inputList3 = ["Sheldon Cooper", "Howord Wolowitz", "Amy Farrah Fowler", "Leonard Hofstadter", "Bernadette R"]
name_store = {}
for name in inputList3:
name_store[name] = len(set(name.lower().replace(" ", ""))) # Remove spaces using replace and remove duplicates using set
res = []
maxLen = -float("inf")
for name in name_store.keys():
if name_store.get(name) > maxLen:
res.clear()
res.append(name)
maxLen = name_store.get(name)
elif name_store.get(name) == maxLen:
res.append(name)
res.sort()
print(res[0])
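# Editor's note: the counting loop above can be collapsed with min() and a
# composite key -- negate the distinct-letter count so ties fall back to
# alphabetical order. The helper name is invented for illustration.
def best_name(names):
    return min(names, key=lambda n: (-len(set(n.lower().replace(" ", ""))), n))
# best_name(inputList3) returns the same winner as print(res[0]) above.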
|
bastorer/SPINSpy
|
spinspy/isdim.py
|
Python
|
mit
| 178
| 0.011236
|
import os
from spinspy import local_data
def isdim(dim):
if os.path.isfile('{0:s}{1:s}grid'.format(local_data.path,dim)):
return True
else:
return False
|
sheshkovsky/jaryan
|
links/utils.py
|
Python
|
apache-2.0
| 1,792
| 0.029576
|
from django.db.models import Q
from links.models import Post
from comments.models import ThreadedComment as comments
from django.utils import timezone
from datetime import datetime, timedelta
from django.contrib import messages
KARMA_LOW = 100
KARMA_MEDIUM = 1000
KARMA_HIGH = 5000
INTERVAL_LOW = 3600
INTERVAL_MEDIUM = 360
INTERVAL_HIGH = 36
COMMENT_PER_INTERVAL = 20
COMMENT_MAX = 80
def allowed_to_comment(user):
karma = user.userprofile.karma
now = timezone.now()
time_threshold = now - timedelta(seconds=3600)
    # 'and' between Q objects short-circuits to the second Q; use & to combine conditions.
    comments_number = comments.objects.filter(Q(user=user) & Q(submit_date__gt=time_threshold)).count()
if karma < KARMA_HIGH:
if comments_number > COMMENT_PER_INTERVAL:
return False
else:
return True
else:
if comments_number > COMMENT_MAX:
return False
else:
return True
def allowed_to_post(request, user):
karma = user.userprofile.karma
print karma
now = timezone.now()
try:
posted = Post.objects.filter(post__submitter__exact=user).latest('submit_date')
diff = now - posted.submit_date
diff = diff.seconds
except:
diff = INTERVAL_LOW + 1
print diff
if karma < KARMA_LOW:
result = diff > INTERVAL_LOW
if not result:
            messages.success(request, 'Please try in an hour!')
return result
elif karma > KARMA_LOW and karma < KARMA_HIGH:
result = diff > INTERVAL_MEDIUM
if not result:
messages.success(request, 'Please try in ten minutes!')
return result
else:
result = diff > INTERVAL_HIGH
if not result:
messages.warning(request, 'Please try in 30 sec')
return result
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
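# Editor's sketch: exercising get_client_ip() with a stub request object to
# show the X-Forwarded-For fallback. The stub class is invented for the
# example and is not part of the original module.
class _StubRequest(object):
    def __init__(self, meta):
        self.META = meta

assert get_client_ip(_StubRequest({'HTTP_X_FORWARDED_FOR': '1.2.3.4, 10.0.0.1'})) == '1.2.3.4'
assert get_client_ip(_StubRequest({'REMOTE_ADDR': '10.0.0.9'})) == '10.0.0.9'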
|
nyaruka/motome
|
motome/public/views.py
|
Python
|
bsd-3-clause
| 11,780
| 0.005772
|
from decimal import Decimal
from customers.models import Customer
from django.contrib import auth
from products.models import *
from django_quickblocks.models import *
import re
from smartmin.views import *
from django.http import HttpResponse, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response, get_object_or_404
from orders.models import Order
from locales.models import Country, Currency
from locales.widgets import CoordinatesPickerField
from customers.models import Location
from transactions.models import Credit, Debit
from django.db.models import Sum
# check if the user is trusted
def is_trusted(request):
return request.session.get('trusted', False)
# gate keeper to verify for visitors with/out secret pass
def has_secret_pass(request):
SECRET_PASS = 'iamhungry'
    # .get avoids a KeyError when no password parameter is supplied
    return request.GET.get('password') == SECRET_PASS
class LoginForm (forms.Form):
phone_number = forms.CharField()
def clean_phone_number(self):
phone_number = self.cleaned_data['phone_number']
phone_number = re.sub("[^0-9]", "", phone_number)
if len(phone_number) != 10:
raise forms.ValidationError("Please enter a phone number with 10 digits, e.g. 0788 55 55 55")
return phone_number
def home(request):
country = Country.objects.get(country_code='RW')
request.session['currency'] = country.currency
# populate favorite stores, for now will be loading all in rwanda
favorite_stores = []
for store in Store.objects.filter(country=country):
favorite_stores.append(store)
context = dict(product_list=Product.objects.filter(is_active=True), country=country, favorite_stores=favorite_stores, currency=country.currency)
if has_secret_pass(request) or is_trusted(request):
request.session['trusted'] = True
return render_to_response('public/home.html', context, context_instance=RequestContext(request))
else:
return render_to_response('public/home_login.html', context, context_instance=RequestContext(request))
def cart(request):
order = Order.from_request(request)
country = Country.objects.get(country_code='RW')
if request.method == 'POST':
if 'add_product' in request.REQUEST:
product = Product.objects.get(id=request.REQUEST['add_product'], is_active=True)
order.add_single(product)
if set(('update', 'checkout', 'shop')).intersection(set(request.REQUEST.keys())):
for item in order.items.all():
if 'remove_%d' % item.id in request.REQUEST:
order.items.filter(pk=item.id).update(is_active=False)
for addon in item.product.addons.all():
exists_in_order = item.addons.filter(addon=addon)
form_name = 'addon_%d_%d' % (item.id, addon.id)
exists_in_form = form_name in request.REQUEST
if exists_in_order and not exists_in_form:
exists_in_order.update(is_active=False)
elif exists_in_form and not exists_in_order:
item.add_on(addon)
if 'checkout' in request.REQUEST:
return HttpResponseRedirect(reverse('public_checkout'))
elif 'shop' in request.REQUEST:
return HttpResponseRedirect("%s?password=iamhungry" % reverse('public_home'))
context = dict(cart=True, order=order, country=country, currency=country.currency)
return render_to_response('public/cart.html', context, context_instance=RequestContext(request))
def checkout(request):
order = Order.from_request(request)
country = Country.objects.get(country_code='RW')
initial_data = dict()
if request.method == 'POST':
form = LoginForm(request.POST)
if form.is_valid():
phone_number = form.cleaned_data['phone_number']
            phone_number = country.derive_international_number(phone_number)
            customer = Customer.get_or_create_customer(phone_number)
customer.send_password()
request.session['customer'] = customer
return HttpResponseRedirect(reverse('public_login'))
else:
        form = LoginForm(initial_data)
context = dict(order=order, country=country, currency=country.currency, form=form)
return render_to_response('public/checkout.html', context, context_instance=RequestContext(request))
class PasswordForm(forms.Form):
password = forms.CharField()
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user')
super(PasswordForm, self).__init__(*args, **kwargs)
def clean_password(self):
password = self.cleaned_data['password']
password = re.sub("[^0-9]", "", password)
if not self.user.check_password(password):
raise forms.ValidationError("Sorry, that password doesn't match, try again.")
return password
class CustomerForm(forms.Form):
first_name = forms.CharField()
last_name = forms.CharField()
email = forms.EmailField()
def login(request):
user = request.session['customer']
if request.method == 'POST':
password_form = PasswordForm(request.POST, user=user)
customer_form = CustomerForm(request.POST)
if password_form.is_valid() and customer_form.is_valid():
customer = request.session['customer']
customer.first_name = customer_form.cleaned_data['first_name']
customer.last_name = customer_form.cleaned_data['last_name']
customer.email = customer_form.cleaned_data['email']
customer.save()
user = auth.authenticate(username=customer.username, password=password_form.cleaned_data['password'])
auth.login(request, user)
order = Order.from_request(request)
order.user = user
order.save()
if 'location' in request.session:
location = request.session['location']
return HttpResponseRedirect("%s?lat=%s&lng=%s" % (reverse('public.location_create'), location.lat, location.lng))
else:
return HttpResponseRedirect(reverse('public.location_create'))
else:
password_form = PasswordForm(user=user)
customer_form = CustomerForm(initial={'first_name':user.first_name, 'last_name':user.last_name, 'email': user.email})
context = dict(password_form=password_form, customer_form=customer_form, user=user)
return render_to_response('public/login.html', context, context_instance=RequestContext(request))
class LocationForm(forms.ModelForm):
coordinates = CoordinatesPickerField(required=True)
def clean(self):
        clean = self.cleaned_data
if 'coordinates' in clean and self.instance:
self.instance.lat = clean['coordinates']['lat']
self.instance.lng = clean['coordinates']['lng']
return clean
class Meta:
model = Location
fields = ('building', 'business', 'hints', 'coordinates')
class LocationCRUDL(SmartCRUDL):
model = Location
actions = ('create',)
permissions = False
class Create(SmartCreateView):
form_class = LocationForm
fields = ('building', 'business', 'hints', 'coordinates')
def derive_initial(self):
if self.object and self.object.lat and self.object.lng:
return dict(coordinates=(dict(lat=self.object.lat, lng=self.object.lng))) #pragma: no cover
else:
country = Country.objects.get(country_code='RW')
return dict(coordinates=(dict(lat=country.bounds_lat, lng=country.bounds_lng)))
def get_context_data(self, **kwargs):
context = super(LocationCRUDL.Create, self).get_context_data(**kwargs)
context['display_fields'] = ['hints', 'nickname']
context['order'] = Order.from_request(self.request)
# add our country and it's root locations
context['country'] = Country.objects.get(country_code='RW')
# set the country on our form's location picker
|
Havate/havate-openstack
|
proto-build/gui/horizon/openstack-dashboard/config/views.py
|
Python
|
apache-2.0
| 15,052
| 0.007109
|
import os
import subprocess
import yaml
from django.views.generic import View
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from django.core.urlresolvers import reverse
from django.core import serializers
from django.conf import settings
from django.template.loader import render_to_string
from braces.views import JSONResponseMixin, AjaxResponseMixin
from config.forms import ClusterSettingsForm, UCSMSettingsForm, OSSettingsForm, HostSettingsForm, NodeSettingsForm, NetworkSettingsForm, OpenstackSettingsForm
from config.models import OpenstackSettings, NodeSettings
from config.helpers import construct_conf_file
def traverse_tree(dictionary):
try:
for key, value in dictionary.items():
if value == None:
dictionary[key] = u""
traverse_tree(value)
except Exception, e:
pass
return
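# Editor's sketch: a stricter variant of traverse_tree() that also walks
# lists and avoids the blanket except. Offered as an illustration under
# those assumptions, not as a drop-in replacement.
def scrub_none(node):
    # Recursively replace None values with u"" in dicts and lists, in place.
    if isinstance(node, dict):
        pairs = list(node.items())
    elif isinstance(node, list):
        pairs = list(enumerate(node))
    else:
        return node
    for key, value in pairs:
        node[key] = u"" if value is None else scrub_none(value)
    return node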
class HomePageView(TemplateView):
template_name = "home.html"
class SettingsTextView(TemplateView):
template_name = "config.template"
def get_context_data(self, **kwargs):
context = super(SettingsTextView, self).get_context_data(**kwargs)
try:
context['nodes'] = serializers.serialize('python', NodeSettings.objects.all())
context['settings'] = serializers.serialize('python', OpenstackSettings.objects.all())
except IndexError:
pass
return context
class SettingsView(TemplateView):
template_name = "os_template.html"
def get_context_data(self, **kwargs):
context = super(SettingsView, self).get_context_data(**kwargs)
context['cluster_form'] = ClusterSettingsForm()
context['ucsm_form'] = UCSMSettingsForm()
context['os_form'] = OSSettingsForm()
context['network_form'] = NetworkSettingsForm
context['host_form'] = HostSettingsForm()
context['node_form'] = NodeSettingsForm()
context['settings_form'] = OpenstackSettingsForm()
context['nodes'] = NodeSettings.objects.all()
context['settings'] = {}
try:
context['settings'] = OpenstackSettings.objects.all()[0]
context['settings_form'] = OpenstackSettingsForm(instance=context['settings'])
except IndexError:
pass
scenario_list = []
print settings.PROJECT_PATH
for filename in os.listdir(os.path.join(settings.PROJECT_PATH, 'static-raw', 'scenarios')):
if filename.endswith(".yaml"):
scenario_list.append(filename.split('.')[0])
context['scenario_list'] = scenario_list
return context
class SubmitSettingsView(FormView):
template_name = "os_template.html"
form_class = OpenstackSettingsForm
# # add the request to the kwargs
# def get_form_kwargs(self):
# kwargs = super(RegisterView, self).get_form_kwargs()
# kwargs['request'] = self.request
# return kwargs
def form_invalid(self, form):
return super(SubmitSettingsView, self).form_valid(form)
def form_valid(self, form):
OpenstackSettings.objects.all().delete()
config = form.save()
if self.request.POST.get('summary-table-settings', 0) == 'scenario':
try:
iplist_file_path = os.path.join(settings.IPLIST_DESTINATION, 'iplist.yaml')
iplist_content = ""
processed_iplist_content = {}
if os.path.isfile(iplist_file_path):
with open(iplist_file_path, 'r') as content_file:
iplist_content = content_file.read()
processed_iplist_content = yaml.load(iplist_content)
nodes = int(self.request.POST.get('scenario_node_number', 0))
iplist = {}
for x in range(nodes):
hostname = self.request.POST.get('scenario_hostname__'+str(x), "")
ip = self.request.POST.get('scenario_ip__'+str(x), "")
role = self.request.POST.get('role-'+str(x), "")
                    pndn = 'sys/chassis-' + self.request.POST.get('chassis_number__'+str(x), '0') + '/blade-' + self.request.POST.get('blade_number__'+str(x), '0')
if hostname and ip and role:
iplist[pndn] = {'name': hostname, 'ip':ip, 'role':role, 'type':role}
processed_iplist_content['iplist'] = iplist
traverse_tree(processed_iplist_content)
with open(iplist_file_path, 'w') as content_file:
content_file.write( yaml.safe_dump(processed_iplist_content, default_flow_style=False))
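                # Sketch of the iplist.yaml this block writes (hostnames/IPs
                # below are made up for illustration, not from the repo):
                #   iplist:
                #     sys/chassis-1/blade-1:
                #       ip: 10.0.0.11
                #       name: control-01
                #       role: control
                #       type: control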
cobbler_file_path = os.path.join(settings.COBBLER_DESTINATION, 'cobbler.yaml')
cobbler_content = ""
processed_cobbler_content = {}
if os.path.isfile(cobbler_file_path):
with open(cobbler_file_path, 'r') as content_file:
cobbler_content = content_file.read()
processed_cobbler_content = yaml.load(cobbler_content)
for x in range(nodes):
hostname = self.request.POST.get('scenario_hostname__'+str(x), "")
ip = self.request.POST.get('scenario_ip__'+str(x), "")
role = self.request.POST.get('role-'+str(x), "")
if hostname and ip and role:
if hostname in processed_cobbler_content:
processed_cobbler_content[hostname]['hostname'] = hostname
                            processed_cobbler_content[hostname]['power_address'] = ip
else:
processed_cobbler_content[hostname] = {'hostname': hostname, 'power_address':ip}
traverse_tree(processed_cobbler_content)
# with open(cobbler_file_path, 'w') as content_file:
# content_file.write( yaml.safe_dump(processed_cobbler_content, default_flow_style=False))
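                # Sketch (illustrative values, not from the repo) of the
                # per-host entries this loop builds in cobbler.yaml:
                #   control-01:
                #     hostname: control-01
                #     power_address: 10.0.0.11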
except Exception, e:
pass
else:
            NodeSettings.objects.all().delete()
nodes = int(self.request.POST.get('node_number', 0))
for x in range(nodes):
node_name = self.request.POST.get('node_name__'+str(x), "")
node_number = x
chassis_number = int(self.request.POST.get('chassis_number__'+str(x), 0))
blade_number = int(self.request.POST.get('blade_number__'+str(x), 0))
aio = (x == int(self.request.POST.get('aio', 0)))
compute = ('compute__' + str(x) ) in self.request.POST
network = (x == int(self.request.POST.get('network', 0)))
swift = ('swift__' + str(x) ) in self.request.POST
cinder = ('cinder__' + str(x) ) in self.request.POST
NodeSettings(node_name=node_name, node_number=node_number, aio=aio, compute=compute, network=network,
swift=swift, cinder=cinder, chassis_number=chassis_number, blade_number=blade_number).save()
config_nodes = serializers.serialize('python', NodeSettings.objects.all())
config_settings = serializers.serialize('python', OpenstackSettings.objects.all())
config_text = render_to_string('config.template', {'nodes': config_nodes, 'settings':config_settings})
config_file_path = os.path.join(settings.PROJECT_PATH, 'openstack_settings.txt')
config_file = open(config_file_path, 'w')
config_file.write(config_text)
config_file.close()
construct_conf_file(config=config, query_str_dict = self.request.POST)
return super(SubmitSettingsView, self).form_valid(form)
def get_success_url(self):
return reverse('settings')
class NodeDiscoveryView(JSONResponseMixin, AjaxResponseMixin, View):
def post_ajax(self, request, *args, **kwargs):
hostname = request.POST.get('hostname', '')
username = request.POST.get('username', '')
password = request.POST.get('password', '')
        script_path = os.path.join(settings.PROJECT_PATH, 'static-raw', 'scripts', 'NodeInventory.py')
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.3/Lib/plat-mac/lib-scriptpackages/CodeWarrior/CodeWarrior_suite.py
|
Python
|
mit
| 23,097
| 0.012296
|
"""Suite CodeWarrior suite: Terms for scripting the CodeWarrior IDE
Level 0, version 0
Generated from /Volumes/Sap/Applications (Mac OS 9)/Metrowerks CodeWarrior 7.0/Metrowerks CodeWarrior/CodeWarrior IDE 4.2.5
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'CWIE'
class CodeWarrior_suite_Events:
_argmap_add = {
'new' : 'kocl',
'with_data' : 'data',
'to_targets' : 'TTGT',
'to_group' : 'TGRP',
}
def add(self, _object, _attributes={}, **_arguments):
"""add: add elements to a project or target
Required argument: an AE object reference
Keyword argument new: the class of the new element or elements to add
Keyword argument with_data: the initial data for the element or elements
Keyword argument to_targets: the targets to which the new element or elements will be added
Keyword argument to_group: the group to which the new element or elements will be added
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'ADDF'
aetools.keysubst(_arguments, self._argmap_add)
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
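    # Hypothetical usage sketch (not part of the generated suite): these event
    # methods are intended to be mixed into an aetools.TalkTo subclass, e.g.
    #   class CodeWarrior(CodeWarrior_suite_Events, aetools.TalkTo): pass
    #   cw = CodeWarrior('CWIE')
    #   cw.build()           # sends the 'CWIE'/'MAKE' AppleEvent
    #   cw.check(some_file)  # syntax-checks a file in the current project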
def build(self, _no_object=None, _attributes={}, **_arguments):
"""build: build a project or target (equivalent of the Make men
|
u command)
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'MAKE'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def check(self, _object=None, _attributes={}, **_arguments):
"""check: check the syntax of a file in a project or target
Required argument: the file or files to be checked
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'CHEK'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def compile_file(self, _object=None, _attributes={}, **_arguments):
"""compile file: compile a file in a project or target
Required argument: the file or files to be compiled
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'COMP'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def disassemble_file(self, _object=None, _attributes={}, **_arguments):
"""disassemble file: disassemble a file in a project or target
Required argument: the file or files to be disassembled
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'DASM'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
_argmap_export = {
'in_' : 'kfil',
}
def export(self, _no_object=None, _attributes={}, **_arguments):
"""export: Export the project file as an XML file
Keyword argument in_: the XML file in which to export the project
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'EXPT'
aetools.keysubst(_arguments, self._argmap_export)
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def remove_object_code(self, _no_object=None, _attributes={}, **_arguments):
"""remove object code: remove object code from a project or target
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'RMOB'
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def remove_target_files(self, _object, _attributes={}, **_arguments):
"""remove target files: remove files from a target
Required argument: an AE object reference
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'RMFL'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def run_target(self, _no_object=None, _attributes={}, **_arguments):
"""run target: run a project or target
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'RUN '
if _arguments: raise TypeError, 'No optional args expected'
if _no_object != None: raise TypeError, 'No direct arg expected'
_reply, _arguments, _attributes = self.send(_code, _subcode,
_arguments, _attributes)
if _arguments.get('errn', 0):
raise aetools.Error, aetools.decodeerror(_arguments)
# XXXX Optionally decode result
if _arguments.has_key('----'):
return _arguments['----']
def touch_file(self, _object=None, _attributes={}, **_arguments):
"""touch file: touch a file in a project or target for compilation
Required argument: the file or files to be touched
Keyword argument _attributes: AppleEvent attribute dictionary
"""
_code = 'CWIE'
_subcode = 'TOCH'
if _arguments: raise TypeError, 'No optional args expected'
_arguments['----'] = _object
        _reply, _arguments, _attributes = self.send(_code, _subcode,
                _arguments, _attributes)
        if _arguments.get('errn', 0):
            raise aetools.Error, aetools.decodeerror(_arguments)
        # XXXX Optionally decode result
        if _arguments.has_key('----'):
            return _arguments['----']
|
bkonersman/utils
|
seqCheck.py
|
Python
|
artistic-2.0
| 1,631
| 0.010423
|
#! /usr/bin/env python
#edited on c9
import os
import array as array
import sys
if (len(sys.argv)==1):
print "usage >>seqCheck.py [fileRootName, path ('.') for cwd]"
sys.exit()
elif (len(sys.argv)==2):
fileRootName = sys.argv[1]
rootpath = os.getcwd()
elif (len(sys.argv)==3):
fileRootName = sys.argv[1]
    rootpath = os.path.join(os.getcwd(), sys.argv[2])
print rootpath
else:
print "usage >>seqCheck.py [fileRootname, path ('.') for cwd]"
'''
mypath = os.getcwd()
print("Path at terminal when executing this file")
print(os.getcwd() + "\n")
print("This file path, relative to os.getcwd()")
print(__file__ + "\n")
print("This file full path (following symlinks)")
full_path = os.path.realpath(__file__)
print(full_path + "\n")
print("This file directory and name")
path, file = os.path.split(full_path)
print(path + ' --> ' + file + "\n")
print("This file directory only")
print(os.path.dirname(full_path))
'''
f = []
for (dirpath,dirnames,filenames) in os.walk(rootpath):
#print filenames
f.extend(filenames)
break
print len(f)
num = []
#narray = array("i")
for x in f:
y = x.split(".")
if len(y) >1:
        try:
            z = int(y[1])
            num.append(z)  # only record values that actually parsed as ints
        except ValueError:
            pass
#narray.append(z)
mylist = sorted(num)
end = len(mylist)
start = mylist[0]
print("start is " + str(start))
for x in mylist:
#print(str(x) + " "+ str(start))
if x != start:
print(str(start) + " is missing")
break
start += 1
print("There are "+ str(end) + " files")
#x = os.walk(mypath)
#for y in x:
# print y
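# Illustrative run (hypothetical file set, not from the original repo):
# with seq.1.dat, seq.2.dat and seq.4.dat in the working directory,
#   $ ./seqCheck.py seq
# prints the total file count, "start is 1", then "3 is missing" and
# "There are 3 files". Note fileRootName is parsed but never used to filter.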
|