repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
gangadharkadam/contributionerp | refs/heads/develop | erpnext/manufacturing/report/completed_production_orders/__init__.py | 12133432 | |
Kazade/NeHe-Website | refs/heads/master | google_appengine/lib/django-1.4/tests/regressiontests/httpwrappers/models.py | 12133432 | |
craigderington/studentloan5 | refs/heads/master | studentloan5/Lib/site-packages/django/contrib/sessions/__init__.py | 809 | default_app_config = 'django.contrib.sessions.apps.SessionsConfig'
|
hkawasaki/kawasaki-aio8-2 | refs/heads/gacco2/0701_kim_again | i18n/tests/__init__.py | 12133432 | |
ericholscher/django | refs/heads/master | django/contrib/gis/maps/__init__.py | 12133432 | |
lociii/googleads-python-lib | refs/heads/master | examples/adspygoogle/adwords/v201306/remarketing/__init__.py | 12133432 | |
rbian/avocado-vt | refs/heads/master | virttest/qemu_devices/__init__.py | 12133432 | |
vvv1559/intellij-community | refs/heads/master | python/testData/codeInsight/liveTemplates/expanding/IterableVariableFromImplicitImports/m.py | 31 | MY_GLOBAL = [] |
apocquet/django | refs/heads/master | tests/dates/tests.py | 293 | from __future__ import unicode_literals
import datetime
from django.core.exceptions import FieldError
from django.test import TestCase
from django.utils import six
from .models import Article, Category, Comment
class DatesTests(TestCase):
    """Tests for ``QuerySet.dates()`` when traversing model relations."""
    def test_related_model_traverse(self):
        """dates() on a related lookup ("article__pub_date") returns the
        distinct truncated dates taken from the *related* rows."""
        a1 = Article.objects.create(
            title="First one",
            pub_date=datetime.date(2005, 7, 28),
        )
        a2 = Article.objects.create(
            title="Another one",
            pub_date=datetime.date(2010, 7, 28),
        )
        a3 = Article.objects.create(
            title="Third one, in the first day",
            pub_date=datetime.date(2005, 7, 28),
        )
        a1.comments.create(
            text="Im the HULK!",
            pub_date=datetime.date(2005, 7, 28),
        )
        a1.comments.create(
            text="HULK SMASH!",
            pub_date=datetime.date(2005, 7, 29),
        )
        a2.comments.create(
            text="LMAO",
            pub_date=datetime.date(2010, 7, 28),
        )
        a3.comments.create(
            text="+1",
            pub_date=datetime.date(2005, 8, 29),
        )
        c = Category.objects.create(name="serious-news")
        c.articles.add(a1, a3)
        # "year" truncates to Jan 1 and duplicates collapse into one entry.
        self.assertQuerysetEqual(
            Comment.objects.dates("article__pub_date", "year"), [
                datetime.date(2005, 1, 1),
                datetime.date(2010, 1, 1),
            ],
            lambda d: d,
        )
        # "month" truncates to the first of the month.
        self.assertQuerysetEqual(
            Comment.objects.dates("article__pub_date", "month"), [
                datetime.date(2005, 7, 1),
                datetime.date(2010, 7, 1),
            ],
            lambda d: d
        )
        self.assertQuerysetEqual(
            Comment.objects.dates("article__pub_date", "day"), [
                datetime.date(2005, 7, 28),
                datetime.date(2010, 7, 28),
            ],
            lambda d: d
        )
        # Reverse direction: article dates drawn from its comments.
        self.assertQuerysetEqual(
            Article.objects.dates("comments__pub_date", "day"), [
                datetime.date(2005, 7, 28),
                datetime.date(2005, 7, 29),
                datetime.date(2005, 8, 29),
                datetime.date(2010, 7, 28),
            ],
            lambda d: d
        )
        # None of the comments above set approval_date, so nothing comes back.
        self.assertQuerysetEqual(
            Article.objects.dates("comments__approval_date", "day"), []
        )
        self.assertQuerysetEqual(
            Category.objects.dates("articles__pub_date", "day"), [
                datetime.date(2005, 7, 28),
            ],
            lambda d: d,
        )
    def test_dates_fails_when_no_arguments_are_provided(self):
        """dates() requires the field-name and kind arguments."""
        self.assertRaises(
            TypeError,
            Article.objects.dates,
        )
    def test_dates_fails_when_given_invalid_field_argument(self):
        """An unknown field name raises FieldError listing valid choices."""
        # The u? makes the regex match both Python 2 (u'...') and Python 3.
        six.assertRaisesRegex(
            self,
            FieldError,
            "Cannot resolve keyword u?'invalid_field' into field. Choices are: "
            "categories, comments, id, pub_date, title",
            Article.objects.dates,
            "invalid_field",
            "year",
        )
    def test_dates_fails_when_given_invalid_kind_argument(self):
        """kind must be one of 'year', 'month' or 'day'."""
        six.assertRaisesRegex(
            self,
            AssertionError,
            "'kind' must be one of 'year', 'month' or 'day'.",
            Article.objects.dates,
            "pub_date",
            "bad_kind",
        )
    def test_dates_fails_when_given_invalid_order_argument(self):
        """order must be either 'ASC' or 'DESC'."""
        six.assertRaisesRegex(
            self,
            AssertionError,
            "'order' must be either 'ASC' or 'DESC'.",
            Article.objects.dates,
            "pub_date",
            "year",
            order="bad order",
        )
|
cheral/orange3 | refs/heads/master | Orange/canvas/gui/stackedwidget.py | 2 | """
=====================
AnimatedStackedWidget
=====================
A widget similar to :class:`QStackedWidget` supporting animated
transitions between widgets.
"""
import logging
from AnyQt.QtWidgets import QWidget, QFrame, QStackedLayout, QSizePolicy
from AnyQt.QtGui import QPixmap, QPainter
from AnyQt.QtCore import Qt, QPoint, QRect, QSize, QPropertyAnimation
from AnyQt.QtCore import pyqtSignal as Signal, pyqtProperty as Property
from .utils import updates_disabled
log = logging.getLogger(__name__)
def clipMinMax(size, minSize, maxSize):
    """
    Clamp `size` into the [minSize, maxSize] range.

    `expandedTo` raises the size up to `minSize`; `boundedTo` then caps
    the result at `maxSize`.
    """
    grown = size.expandedTo(minSize)
    return grown.boundedTo(maxSize)
def fixSizePolicy(size, hint, policy):
    """
    Adjust `size` so it conforms to the size `policy` and the size `hint`.

    Starting from the hint, each axis is grown to fill `size` when the
    policy expands in that direction, clamped for a `Maximum` policy, and
    the result is finally bounded by `size`.
    """
    expanding = policy.expandingDirections()
    width = hint.width()
    height = hint.height()

    if expanding & Qt.Horizontal:
        width = max(width, size.width())
    if policy.horizontalPolicy() == QSizePolicy.Maximum:
        width = min(width, size.width())

    if expanding & Qt.Vertical:
        height = max(height, size.height())
    if policy.verticalPolicy() == QSizePolicy.Maximum:
        # NOTE(review): this clamps against hint.height() while the
        # horizontal branch clamps against size.width(); kept as-is since
        # the final boundedTo(size) makes both forms equivalent — confirm
        # the asymmetry is intentional.
        height = min(height, hint.height())

    return QSize(width, height).boundedTo(size)
class StackLayout(QStackedLayout):
    """
    A stacked layout whose size hints always track those of the
    `current` widget.
    """
    def __init__(self, parent=None):
        QStackedLayout.__init__(self, parent)
        self.currentChanged.connect(self._onCurrentChanged)

    def sizeHint(self):
        """Size hint of the current widget, clamped to its min/max sizes."""
        widget = self.currentWidget()
        if not widget:
            return QStackedLayout.sizeHint(self)
        return clipMinMax(widget.sizeHint(),
                          widget.minimumSize(),
                          widget.maximumSize())

    def minimumSize(self):
        """Minimum size of the current widget (layout default otherwise)."""
        widget = self.currentWidget()
        if not widget:
            return QStackedLayout.minimumSize(self)
        return widget.minimumSize()

    def maximumSize(self):
        """Maximum size of the current widget (layout default otherwise)."""
        widget = self.currentWidget()
        if not widget:
            return QStackedLayout.maximumSize(self)
        return widget.maximumSize()

    def setGeometry(self, rect):
        """Lay out every child inside `rect`, honouring each child's size
        constraints and size policy."""
        QStackedLayout.setGeometry(self, rect)
        for index in range(self.count()):
            child = self.widget(index)
            hint = child.sizeHint()
            size = clipMinMax(rect.size(), child.minimumSize(),
                              child.maximumSize())
            size = fixSizePolicy(size, hint, child.sizePolicy())
            geometry = QRect(rect)
            geometry.setSize(size)
            if geometry != child.geometry():
                child.setGeometry(geometry)

    def _onCurrentChanged(self, index):
        """The current widget changed; recompute the layout."""
        self.invalidate()
class AnimatedStackedWidget(QFrame):
    """
    A :class:`QStackedWidget`-like container that cross-fades between
    pages when the current index changes (see :meth:`setCurrentIndex`).
    """
    # Current widget has changed
    currentChanged = Signal(int)
    # Transition animation has started
    transitionStarted = Signal()
    # Transition animation has finished
    transitionFinished = Signal()
    def __init__(self, parent=None, animationEnabled=True):
        QFrame.__init__(self, parent)
        self.__animationEnabled = animationEnabled
        layout = StackLayout()
        # Extra page used only during a transition; it paints a blend of
        # the outgoing and incoming widgets' snapshots.
        self.__fadeWidget = CrossFadePixmapWidget(self)
        # Animates the fade widget's blendingFactor_ property from 0 to 1.
        self.transitionAnimation = \
            QPropertyAnimation(self.__fadeWidget, b"blendingFactor_", self)
        self.transitionAnimation.setStartValue(0.0)
        self.transitionAnimation.setEndValue(1.0)
        self.transitionAnimation.setDuration(100 if animationEnabled else 0)
        self.transitionAnimation.finished.connect(
            self.__onTransitionFinished
        )
        layout.addWidget(self.__fadeWidget)
        layout.currentChanged.connect(self.__onLayoutCurrentChanged)
        self.setLayout(layout)
        # User-added widgets (the fade widget is excluded) plus the
        # current index and the index a running transition will land on.
        self.__widgets = []
        self.__currentIndex = -1
        self.__nextCurrentIndex = -1
    def setAnimationEnabled(self, animationEnabled):
        """
        Enable/disable transition animations.
        """
        if self.__animationEnabled != animationEnabled:
            self.__animationEnabled = animationEnabled
            # A zero duration makes the fade effectively instantaneous.
            self.transitionAnimation.setDuration(
                100 if animationEnabled else 0
            )
    def animationEnabled(self):
        """
        Is the transition animation enabled.
        """
        return self.__animationEnabled
    def addWidget(self, widget):
        """
        Append the widget to the stack and return its index.
        """
        return self.insertWidget(self.layout().count(), widget)
    def insertWidget(self, index, widget):
        """
        Insert `widget` into the stack at `index`.
        """
        index = min(index, self.count())
        self.__widgets.insert(index, widget)
        # Shift the current index right when inserting at or before it
        # (or initialise it to 0 on the very first insert).
        if index <= self.__currentIndex or self.__currentIndex == -1:
            self.__currentIndex += 1
        return self.layout().insertWidget(index, widget)
    def removeWidget(self, widget):
        """
        Remove `widget` from the stack.
        .. note:: The widget is hidden but is not deleted.
        """
        index = self.__widgets.index(widget)
        self.layout().removeWidget(widget)
        self.__widgets.pop(index)
    def widget(self, index):
        """
        Return the widget at `index`
        """
        return self.__widgets[index]
    def indexOf(self, widget):
        """
        Return the index of `widget` in the stack.
        """
        return self.__widgets.index(widget)
    def count(self):
        """
        Return the number of widgets in the stack.
        """
        # The internal fade widget page is excluded from the count.
        return max(self.layout().count() - 1, 0)
    def setCurrentWidget(self, widget):
        """
        Set the current shown widget.
        """
        index = self.__widgets.index(widget)
        self.setCurrentIndex(index)
    def setCurrentIndex(self, index):
        """
        Set the current shown widget index.
        """
        index = max(min(index, self.count() - 1), 0)
        if self.__currentIndex == -1:
            # First widget ever shown: switch directly, nothing to fade.
            self.layout().setCurrentIndex(index)
            self.__currentIndex = index
            return
        # if not self.animationEnabled():
        # self.layout().setCurrentIndex(index)
        # self.__currentIndex = index
        # return
        # else start the animation
        current = self.__widgets[self.__currentIndex]
        next_widget = self.__widgets[index]
        def has_pending_resize(widget):
            return widget.testAttribute(Qt.WA_PendingResizeEvent) or \
                not widget.testAttribute(Qt.WA_WState_Created)
        current_pix = next_pix = None
        # Only snapshot widgets that are created and fully laid out
        # (no pending resize); others fade from/to an empty image.
        if not has_pending_resize(current):
            current_pix = current.grab()
        if not has_pending_resize(next_widget):
            next_pix = next_widget.grab()
        with updates_disabled(self):
            self.__fadeWidget.setPixmap(current_pix)
            self.__fadeWidget.setPixmap2(next_pix)
            self.__nextCurrentIndex = index
            self.__transitionStart()
    def currentIndex(self):
        """
        Return the current shown widget index.
        """
        return self.__currentIndex
    def sizeHint(self):
        # Never report an invalid/empty hint; normalise it to (0, 0).
        hint = QFrame.sizeHint(self)
        if hint.isEmpty():
            hint = QSize(0, 0)
        return hint
    def __transitionStart(self):
        """
        Start the transition.
        """
        log.debug("Stack transition start (%s)", str(self.objectName()))
        # Set the fade widget as the current widget
        self.__fadeWidget.blendingFactor_ = 0.0
        self.layout().setCurrentWidget(self.__fadeWidget)
        self.transitionAnimation.start()
        self.transitionStarted.emit()
    def __onTransitionFinished(self):
        """
        Transition has finished.
        """
        log.debug("Stack transition finished (%s)" % str(self.objectName()))
        self.__fadeWidget.blendingFactor_ = 1.0
        self.__currentIndex = self.__nextCurrentIndex
        # Swap the real target widget in without intermediate repaints.
        with updates_disabled(self):
            self.layout().setCurrentIndex(self.__currentIndex)
        self.transitionFinished.emit()
    def __onLayoutCurrentChanged(self, index):
        # Suppress transitional __fadeWidget current widget
        if index != self.count():
            self.currentChanged.emit(index)
class CrossFadePixmapWidget(QWidget):
    """
    A widget for cross fading between two pixmaps.
    """
    def __init__(self, parent=None, pixmap1=None, pixmap2=None):
        QWidget.__init__(self, parent)
        self.setPixmap(pixmap1)
        self.setPixmap2(pixmap2)
        # Assigning through the Qt property invokes setBlendingFactor().
        self.blendingFactor_ = 0.0
        self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
    def setPixmap(self, pixmap):
        """
        Set pixmap 1 (the image faded *out*; may be None).
        """
        self.pixmap1 = pixmap
        self.updateGeometry()
    def setPixmap2(self, pixmap):
        """
        Set pixmap 2 (the image faded *in*; may be None).
        """
        self.pixmap2 = pixmap
        self.updateGeometry()
    def setBlendingFactor(self, factor):
        """
        Set the blending factor between the two pixmaps.
        """
        self.__blendingFactor = factor
        self.updateGeometry()
    def blendingFactor(self):
        """
        Pixmap blending factor between 0.0 and 1.0
        """
        return self.__blendingFactor
    # Exposed as a Qt property so QPropertyAnimation can drive the fade.
    blendingFactor_ = Property(float, fget=blendingFactor,
                               fset=setBlendingFactor)
    def sizeHint(self):
        """
        Return an interpolated size between pixmap1.size()
        and pixmap2.size()
        """
        if self.pixmap1 and self.pixmap2:
            size1 = self.pixmap1.size()
            size2 = self.pixmap2.size()
            return size1 + self.blendingFactor_ * (size2 - size1)
        else:
            return QWidget.sizeHint(self)
    def paintEvent(self, event):
        """
        Paint the interpolated pixmap image.
        """
        p = QPainter(self)
        p.setClipRect(event.rect())
        # NOTE(review): the factor is squared, presumably to bias the
        # fade curve toward the first pixmap — confirm this easing is
        # intentional.
        factor = self.blendingFactor_ ** 2
        if self.pixmap1 and 1. - factor:
            p.setOpacity(1. - factor)
            p.drawPixmap(QPoint(0, 0), self.pixmap1)
        if self.pixmap2 and factor:
            p.setOpacity(factor)
            p.drawPixmap(QPoint(0, 0), self.pixmap2)
|
k8s-packages-power/contrib | refs/heads/master | hack/verify-flags-underscore.py | 21 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import mmap
import os
import re
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
parser.add_argument("-e", "--skip-exceptions", help="ignore hack/verify-flags/exceptions.txt and print all output", action="store_true")
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
    """Return true if the given filename is binary.
    @raise EnvironmentError: if the file does not exist or cannot be accessed.
    @attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
    @author: Trent Mick <TrentM@ActiveState.com>
    @author: Jorge Orpinel <jorge@orpinel.com>"""
    CHUNKSIZE = 1024
    try:
        with open(pathname, 'r') as fp:
            for chunk in iter(lambda: fp.read(CHUNKSIZE), ''):
                if '\0' in chunk:
                    # a null byte means the file is binary
                    return True
                if len(chunk) < CHUNKSIZE:
                    # short read: reached end of file
                    break
    except:
        # unreadable (or undecodable) files are treated as binary too
        return True
    return False
def get_all_files(rootdir):
    """Walk `rootdir` and return every text file worth checking.

    Vendored/generated trees and the flag list files themselves are
    pruned, as are image files and anything that looks binary.
    """
    pruned_dirs = ('Godeps', 'third_party', '.git', 'vendor')
    pruned_files = ('exceptions.txt', 'known-flags.txt')
    all_files = []
    for root, dirs, files in os.walk(rootdir):
        # mutating `dirs` in place prunes the walk below those dirs
        for skip in pruned_dirs:
            if skip in dirs:
                dirs.remove(skip)
        for skip in pruned_files:
            if skip in files:
                files.remove(skip)
        for name in files:
            if name.endswith((".svg", ".gliffy")):
                continue
            pathname = os.path.join(root, name)
            if not is_binary(pathname):
                all_files.append(pathname)
    return all_files
def normalize_files(rootdir, files):
    """Drop vendored/irrelevant entries from `files` and make the rest
    absolute (relative paths are resolved against `rootdir`)."""
    skip_markers = ('Godeps', 'vendor', 'third_party',
                    'exceptions.txt', 'known-flags.txt')
    kept = []
    for f in files:
        if any(marker in f for marker in skip_markers):
            continue
        if f.endswith((".svg", ".gliffy")):
            continue
        kept.append(f)
    return [f if os.path.isabs(f) else os.path.join(rootdir, f)
            for f in kept]
def line_has_bad_flag(line, flagre):
    """Return True when `line` uses an underscore-style flag that is not
    one of the known template/yaml false-positive patterns.

    Note: only the first regex match on the line is examined (original
    behaviour, preserved).
    """
    for result in flagre.findall(line):
        if "_" not in result:
            return False
        # jinja2 templates legitimately use _ when the flag is a variable
        if "{% set" + result + "= \"" in line:
            return False
        if "pillar[" + result + "]" in line:
            return False
        if "grains" + result in line:
            return False
        # These are usually yaml definitions
        if result.endswith(":"):
            return False
        # something common in juju variables...
        if "template_data[" + result + "]" in line:
            return False
        return True
    return False
def get_flags(rootdir, files):
    """Return the list of known flag names.

    The list of files might not be the whole repo, so the authoritative
    flag list is preloaded from hack/verify-flags/known-flags.txt. Every
    flag declared in the given golang files is cross-checked against it;
    if a declaration is unknown (or uses an _ without being excluded)
    an error is printed and the process exits, telling the user to
    update the flag lists.
    """
    def load_list(relpath):
        # helper files contain one flag name per line
        with open(os.path.join(rootdir, relpath), 'r') as fp:
            return set(fp.read().splitlines())

    # preload the 'known' flags, and those exempt from the - convention
    flags = load_list("hack/verify-flags/known-flags.txt")
    excluded_flags = load_list("hack/verify-flags/excluded-flags.txt")

    # pflag-style declaration sites; group 1 captures the flag name
    regexs = [re.compile(r'Var[P]?\([^,]*, "([^"]*)"'),
              re.compile(r'.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
              re.compile(r'.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
              re.compile(r'.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
              re.compile(r'.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
              re.compile(r'.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)')]

    new_flags = set()
    new_excluded_flags = set()
    # walk all the files looking for any flags being declared
    for pathname in files:
        if not pathname.endswith(".go"):
            continue
        with open(pathname, 'r') as fp:
            data = fp.read()
        matches = []
        for regex in regexs:
            matches.extend(regex.findall(data))
        for flag in matches:
            if any(x in flag for x in excluded_flags):
                continue
            if "_" in flag:
                new_excluded_flags.add(flag)
            if "-" not in flag:
                continue
            if flag not in flags:
                new_flags.add(flag)

    if new_excluded_flags:
        print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
        print("Are you certain this flag should not have been declared with an - instead?")
        print("%s" % "\n".join(sorted(new_excluded_flags)))
        sys.exit(1)
    if new_flags:
        print("Found flags in golang files not in the list of known flags. Please add these to hack/verify-flags/known-flags.txt")
        print("%s" % "\n".join(sorted(new_flags)))
        sys.exit(1)
    return list(flags)
def flags_to_re(flags):
    """turn the list of all flags we found into a regex find both - and _ versions"""
    separator = re.compile('[-_]')
    flagREs = []
    for flag in flags:
        # each flag should match regardless of whether - or _ was used
        variant = separator.sub('[-_]', flag)
        # only match if there is not a leading or trailing alphanumeric character
        flagREs.append("[^\w${]" + variant + "[^\w]")
    # one large alternation covering every known flag
    return re.compile("|".join(flagREs))
def load_exceptions(rootdir):
    """Load whitelisted (filename, line) pairs from
    hack/verify-flags/exceptions.txt.

    Returns an empty set when --skip-exceptions was given on the
    command line (module-level `args`).
    """
    exceptions = set()
    if args.skip_exceptions:
        return exceptions
    exception_filename = os.path.join(rootdir, "hack/verify-flags/exceptions.txt")
    # use a context manager so the file is closed (original leaked it)
    with open(exception_filename, 'r') as exception_file:
        for exception in exception_file.read().splitlines():
            # each line is "<filename>:<offending line>"
            out = exception.split(":", 1)
            if len(out) != 2:
                # BUG FIX: this was `printf(...)`, which is a NameError in
                # Python whenever a malformed line was encountered.
                print("Invalid line in exceptions file: %s" % exception)
                continue
            filename = out[0]
            line = out[1]
            exceptions.add((filename, line))
    return exceptions
def main():
    """Scan the repo (or the files given on the command line) for
    underscore-style flag usages that are not whitelisted; print them and
    return 1 when any are found."""
    # the script lives in hack/, so the repo root is one level up
    rootdir = os.path.dirname(__file__) + "/../"
    rootdir = os.path.abspath(rootdir)
    exceptions = load_exceptions(rootdir)
    if len(args.filenames) > 0:
        files = args.filenames
    else:
        files = get_all_files(rootdir)
    files = normalize_files(rootdir, files)
    flags = get_flags(rootdir, files)
    flagRE = flags_to_re(flags)
    bad_lines = []
    # walk all the file looking for any flag that was declared and now has an _
    for pathname in files:
        # exceptions.txt stores paths relative to the repo root
        relname = os.path.relpath(pathname, rootdir)
        f = open(pathname, 'r')
        for line in f.read().splitlines():
            if line_has_bad_flag(line, flagRE):
                if (relname, line) not in exceptions:
                    bad_lines.append((relname, line))
        f.close()
    if len(bad_lines) != 0:
        if not args.skip_exceptions:
            print("Found illegal 'flag' usage. If these are false positives you should run `hack/verify-flags-underscore.py -e > hack/verify-flags/exceptions.txt` to update the list.")
        bad_lines.sort()
        for (relname, line) in bad_lines:
            print("%s:%s" % (relname, line))
        # non-zero exit status signals the verification failure
        return 1
if __name__ == "__main__":
sys.exit(main())
|
anu7495/airmozilla | refs/heads/master | airmozilla/surveys/urls.py | 15 | from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns(
    '',
    # /load/<id>/ -> surveys.views.load
    # NOTE(review): patterns('') is the pre-Django-1.8 URLconf style;
    # appropriate only for the Django version this project pins.
    url(r'load/(?P<id>\d+)/$',
        views.load,
        name='load'),
)
|
Voyager1/xbmc | refs/heads/master | lib/gtest/test/gtest_env_var_test.py | 343 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
  """Raises AssertionError, printing both values, unless they compare equal."""
  if expected == actual:
    return
  print('Expected: %s' % (expected,))
  print(' Actual: %s' % (actual,))
  raise AssertionError
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # removing an absent key is a no-op, matching the original's guard
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs gtest_env_var_test_ and returns its output."""
  command_line = [COMMAND] if flag is None else [COMMAND, flag]
  return gtest_test_utils.Subprocess(command_line, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""
  env_var = 'GTEST_' + flag.upper()
  # with the env var set the flag must report test_val; once the var is
  # removed the flag must fall back to its default
  for value, expected in ((test_val, test_val), (None, default_val)):
    SetEnvVar(env_var, value)
    AssertEq(expected, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  """Checks that GTEST_* environment variables set the matching flags."""
  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    TestFlag('break_on_failure', '1', '0')
    TestFlag('color', 'yes', 'auto')
    TestFlag('filter', 'FooTest.Bar', '*')
    SetEnvVar('XML_OUTPUT_FILE', None) # For 'output' test
    TestFlag('output', 'xml:tmp/foo.xml', '')
    TestFlag('print_time', '0', '1')
    TestFlag('repeat', '999', '1')
    TestFlag('throw_on_failure', '1', '0')
    TestFlag('death_test_style', 'threadsafe', 'fast')
    TestFlag('catch_exceptions', '0', '1')
    if IS_LINUX:
      # fork-based death tests and stack traces are only exercised on Linux
      TestFlag('death_test_use_fork', '1', '0')
      TestFlag('stack_trace_depth', '0', '100')
  def testXmlOutputFile(self):
    """Tests that $XML_OUTPUT_FILE affects the output flag."""
    SetEnvVar('GTEST_OUTPUT', None)
    SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
    AssertEq('xml:tmp/bar.xml', GetFlag('output'))
  def testXmlOutputFileOverride(self):
    """Tests that $XML_OUTPUT_FILE is overridden by $GTEST_OUTPUT"""
    SetEnvVar('GTEST_OUTPUT', 'xml:tmp/foo.xml')
    SetEnvVar('XML_OUTPUT_FILE', 'tmp/bar.xml')
    AssertEq('xml:tmp/foo.xml', GetFlag('output'))
if __name__ == '__main__':
gtest_test_utils.Main()
|
apixandru/intellij-community | refs/heads/master | python/testData/refactoring/changeSignature/scatteredKwargsArgsRenameParam.after.py | 27 | def f(x, bar, **kwargs):
print(foo, kwargs)
f(42, bar=None, extra1=1, extra2=2) |
alex/characteristic | refs/heads/master | characteristic.py | 1 | from __future__ import absolute_import, division, print_function
"""
Say 'yes' to types but 'no' to typing!
"""
__version__ = "0.2.0dev"
__author__ = "Hynek Schlawack"
__license__ = "MIT"
__copyright__ = "Copyright 2014 Hynek Schlawack"
def with_cmp(attrs):
    """
    A class decorator that adds comparison methods based on *attrs*.

    Two instances are compared as if the respective values of *attrs* were
    tuples.

    :param attrs: Attributes to work with.
    :type attrs: `list` of native strings
    """
    def key(obj):
        """Tuple of *obj*'s values for *attrs*, used as the comparison key."""
        return tuple(getattr(obj, a) for a in attrs)

    def _cmp_method(op):
        """Build a rich-comparison method delegating to *op* on the keys."""
        def method(self, other):
            if not isinstance(other, self.__class__):
                return NotImplemented
            return op(key(self), key(other))
        return method

    eq = _cmp_method(lambda a, b: a == b)
    lt = _cmp_method(lambda a, b: a < b)
    le = _cmp_method(lambda a, b: a <= b)
    gt = _cmp_method(lambda a, b: a > b)
    ge = _cmp_method(lambda a, b: a >= b)

    def ne(self, other):
        # defined via eq so NotImplemented propagates correctly
        result = eq(self, other)
        if result is NotImplemented:
            return result
        return not result

    def hash_(self):
        return hash(key(self))

    def wrap(cl):
        cl.__eq__, cl.__ne__ = eq, ne
        cl.__lt__, cl.__le__ = lt, le
        cl.__gt__, cl.__ge__ = gt, ge
        cl.__hash__ = hash_
        return cl
    return wrap
def with_repr(attrs):
    """
    A class decorator that adds a human-friendly ``__repr__`` method that
    returns a sensible representation based on *attrs*.

    :param attrs: Attributes to work with.
    :type attrs: Iterable of native strings.
    """
    def repr_(self):
        fields = ", ".join(
            "{0}={1}".format(a, repr(getattr(self, a))) for a in attrs
        )
        return "<{0}({1})>".format(self.__class__.__name__, fields)

    def wrap(cl):
        cl.__repr__ = repr_
        return cl
    return wrap
def with_init(attrs, defaults=None):
    """
    A class decorator that wraps the __init__ method of a class and sets
    *attrs* first using keyword arguments.

    :param attrs: Attributes to work with.
    :type attrs: Iterable of native strings.
    :param defaults: Default values if attributes are omitted on instantiation.
    :type defaults: `dict` or `None`
    :raises ValueError: if a value is neither passed nor defaulted.
    """
    defaults = {} if defaults is None else defaults

    def init(self, *args, **kw):
        for a in attrs:
            # keyword value wins over the declared default
            if a in kw:
                v = kw.pop(a)
            elif a in defaults:
                v = defaults[a]
            else:
                raise ValueError("Missing value for '{0}'.".format(a))
            setattr(self, a, v)
        # remaining arguments go to the class's own __init__
        self.__original_init__(*args, **kw)

    def wrap(cl):
        cl.__original_init__ = cl.__init__
        cl.__init__ = init
        return cl
    return wrap
def attributes(attrs, defaults=None, create_init=True):
    """
    A convenience class decorator that combines :func:`with_cmp`,
    :func:`with_repr`, and optionally :func:`with_init` to avoid code
    duplication.

    :param attrs: Attributes to work with.
    :type attrs: Iterable of native strings.
    :param defaults: Default values if attributes are omitted on instantiation.
    :type defaults: `dict` or `None`
    :param create_init: Also apply :func:`with_init` (default: `True`)
    :type create_init: `bool`
    """
    def wrap(cl):
        # same application order as composing the decorators by hand:
        # repr first, then cmp, then (optionally) init
        decorated = with_repr(attrs)(cl)
        decorated = with_cmp(attrs)(decorated)
        if create_init is True:
            decorated = with_init(attrs, defaults=defaults)(decorated)
        return decorated
    return wrap
|
demonchild2112/travis-test | refs/heads/master | grr/server/grr_response_server/bin/__init__.py | 3 | #!/usr/bin/env python
"""GRR server entry points."""
|
iglpdc/nipype | refs/heads/master | nipype/workflows/fmri/spm/__init__.py | 10 | from .preprocess import (create_spm_preproc, create_vbm_preproc,
create_DARTEL_template)
|
dougbenjamin/panda-harvester | refs/heads/master | pandaharvester/harvestercore/command_spec.py | 2 | """
Command spec class: a panda poller will retrieve commands from panda server and store cache them internally
"""
from .spec_base import SpecBase
class CommandSpec(SpecBase):
    """A single command retrieved from the panda server and cached locally."""
    # attribute names and their SQL column types
    attributesWithTypes = ('command_id:integer primary key',
                           'command:text',
                           'receiver:text',
                           'params:blob',
                           'ack_requested:integer',
                           'processed:integer'
                           )
    # known commands
    COM_reportWorkerStats = 'REPORT_WORKER_STATS'
    COM_setNWorkers = 'SET_N_WORKERS'
    # mapping between command and receiver
    receiver_map = {
        COM_reportWorkerStats: 'propagator',
        COM_setNWorkers: 'submitter'
    }

    def __init__(self):
        """Initialise the underlying SpecBase storage."""
        SpecBase.__init__(self)

    def convert_command_json(self, data):
        """Populate this spec from a command JSON dict.

        :param data: dict with the mandatory keys ``command_id``,
            ``command``, ``params`` and ``ack_requested``; ``processed``
            is optional and defaults to 0.
        """
        # mandatory fields
        self.command_id = data['command_id']
        self.command = data['command']
        self.params = data['params']
        self.ack_requested = data['ack_requested']
        # For the day we want to parse the creation_date:
        #   datetime.strptime(value, "%Y-%m-%dT%H:%M:%S.%f")
        # optional field
        self.processed = data.get('processed', 0)
|
dolanmiu/Automated-Chicken-Coop-2 | refs/heads/master | py/open-door.py | 1 | #!/usr/bin/python
#import Adafruit_MotorHAT, Adafruit_DCMotor, Adafruit_Stepper
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor, Adafruit_StepperMotor
import time
import atexit
# create a default object, no changes to I2C address or frequency
mh = Adafruit_MotorHAT()
# recommended for auto-disabling motors on shutdown!
def turnOffMotors():
mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
atexit.register(turnOffMotors)
myStepper = mh.getStepper(200, 1) # 200 steps/rev, motor port #1
myStepper.setSpeed(30) # 30 RPM
print("Opening Door")
myStepper.step(100, Adafruit_MotorHAT.FORWARD, Adafruit_MotorHAT.SINGLE) |
sinkuri256/python-for-android | refs/heads/master | python3-alpha/python3-src/Lib/distutils/tests/test_util.py | 46 | """Tests for distutils.util."""
import os
import sys
import unittest
from copy import copy
from test.support import run_unittest
from distutils.errors import DistutilsPlatformError, DistutilsByteCompileError
from distutils.util import (get_platform, convert_path, change_root,
check_environ, split_quoted, strtobool,
rfc822_escape, byte_compile)
from distutils import util # used to patch _environ_checked
from distutils.sysconfig import get_config_vars
from distutils import sysconfig
from distutils.tests import support
class UtilTestCase(support.EnvironGuard, unittest.TestCase):
    """Tests for the helpers in distutils.util.

    The tests monkey-patch global interpreter state (os.name, sys.platform,
    sys.version, os.sep, os.path helpers, os.uname and the sysconfig cache);
    setUp saves the originals and tearDown restores them, so each test can
    freely simulate a different platform.
    """
    def setUp(self):
        super(UtilTestCase, self).setUp()
        # saving the environment
        self.name = os.name
        self.platform = sys.platform
        self.version = sys.version
        self.sep = os.sep
        self.join = os.path.join
        self.isabs = os.path.isabs
        self.splitdrive = os.path.splitdrive
        # copy, not alias: tests mutate the sysconfig cache via get_config_vars()
        self._config_vars = copy(sysconfig._config_vars)
        # patching os.uname
        if hasattr(os, 'uname'):
            self.uname = os.uname
            self._uname = os.uname()
        else:
            # platform without os.uname (e.g. Windows): remember to delete it
            self.uname = None
            self._uname = None
        os.uname = self._get_uname
    def tearDown(self):
        # getting back the environment
        os.name = self.name
        sys.platform = self.platform
        sys.version = self.version
        os.sep = self.sep
        os.path.join = self.join
        os.path.isabs = self.isabs
        os.path.splitdrive = self.splitdrive
        if self.uname is not None:
            os.uname = self.uname
        else:
            del os.uname
        sysconfig._config_vars = copy(self._config_vars)
        super(UtilTestCase, self).tearDown()
    def _set_uname(self, uname):
        # Set the fake value returned by the patched os.uname().
        self._uname = uname
    def _get_uname(self):
        return self._uname
    def test_get_platform(self):
        # get_platform() derives its answer from os.name / sys.version /
        # sys.platform / os.uname(), all of which are faked below.
        # windows XP, 32bits
        os.name = 'nt'
        sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
                       '[MSC v.1310 32 bit (Intel)]')
        sys.platform = 'win32'
        self.assertEqual(get_platform(), 'win32')
        # windows XP, amd64
        os.name = 'nt'
        sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
                       '[MSC v.1310 32 bit (Amd64)]')
        sys.platform = 'win32'
        self.assertEqual(get_platform(), 'win-amd64')
        # windows XP, itanium
        os.name = 'nt'
        sys.version = ('2.4.4 (#71, Oct 18 2006, 08:34:43) '
                       '[MSC v.1310 32 bit (Itanium)]')
        sys.platform = 'win32'
        self.assertEqual(get_platform(), 'win-ia64')
        # macbook
        os.name = 'posix'
        sys.version = ('2.5 (r25:51918, Sep 19 2006, 08:49:13) '
                       '\n[GCC 4.0.1 (Apple Computer, Inc. build 5341)]')
        sys.platform = 'darwin'
        self._set_uname(('Darwin', 'macziade', '8.11.1',
                   ('Darwin Kernel Version 8.11.1: '
                    'Wed Oct 10 18:23:28 PDT 2007; '
                    'root:xnu-792.25.20~1/RELEASE_I386'), 'i386'))
        get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.3'
        get_config_vars()['CFLAGS'] = ('-fno-strict-aliasing -DNDEBUG -g '
                                       '-fwrapv -O3 -Wall -Wstrict-prototypes')
        # force a 32-bit interpreter so the platform resolves to plain i386
        cursize = sys.maxsize
        sys.maxsize = (2 ** 31)-1
        try:
            self.assertEqual(get_platform(), 'macosx-10.3-i386')
        finally:
            sys.maxsize = cursize
        # macbook with fat binaries (fat, universal or fat64)
        get_config_vars()['MACOSX_DEPLOYMENT_TARGET'] = '10.4'
        get_config_vars()['CFLAGS'] = ('-arch ppc -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk  '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-fat')
        # the config-vars deployment target wins over the environment here
        os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.1'
        self.assertEqual(get_platform(), 'macosx-10.4-fat')
        get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk  '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-intel')
        get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk  '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-fat3')
        get_config_vars()['CFLAGS'] = ('-arch ppc64 -arch x86_64 -arch ppc -arch i386 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk  '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-universal')
        get_config_vars()['CFLAGS'] = ('-arch x86_64 -arch ppc64 -isysroot '
                                       '/Developer/SDKs/MacOSX10.4u.sdk  '
                                       '-fno-strict-aliasing -fno-common '
                                       '-dynamic -DNDEBUG -g -O3')
        self.assertEqual(get_platform(), 'macosx-10.4-fat64')
        # single-arch builds report the architecture itself
        for arch in ('ppc', 'i386', 'x86_64', 'ppc64'):
            get_config_vars()['CFLAGS'] = ('-arch %s -isysroot '
                                           '/Developer/SDKs/MacOSX10.4u.sdk  '
                                           '-fno-strict-aliasing -fno-common '
                                           '-dynamic -DNDEBUG -g -O3'%(arch,))
            self.assertEqual(get_platform(), 'macosx-10.4-%s'%(arch,))
        # linux debian sarge
        os.name = 'posix'
        sys.version = ('2.3.5 (#1, Jul  4 2007, 17:28:59) '
                       '\n[GCC 4.1.2 20061115 (prerelease) (Debian 4.1.1-21)]')
        sys.platform = 'linux2'
        self._set_uname(('Linux', 'aglae', '2.6.21.1dedibox-r7',
                    '#1 Mon Apr 30 17:25:38 CEST 2007', 'i686'))
        self.assertEqual(get_platform(), 'linux-i686')
        # XXX more platforms to tests here
    def test_convert_path(self):
        # convert_path() translates '/'-separated paths to the local
        # convention; os.sep / os.path.join are faked per platform.
        # linux/mac
        os.sep = '/'
        def _join(path):
            return '/'.join(path)
        os.path.join = _join
        self.assertEqual(convert_path('/home/to/my/stuff'),
                         '/home/to/my/stuff')
        # win
        os.sep = '\\'
        def _join(*path):
            return '\\'.join(path)
        os.path.join = _join
        # absolute or trailing-slash posix paths are rejected on windows
        self.assertRaises(ValueError, convert_path, '/home/to/my/stuff')
        self.assertRaises(ValueError, convert_path, 'home/to/my/stuff/')
        self.assertEqual(convert_path('home/to/my/stuff'),
                         'home\\to\\my\\stuff')
        self.assertEqual(convert_path('.'),
                         os.curdir)
    def test_change_root(self):
        # change_root() re-roots a path under a new prefix; behaviour
        # depends on os.name and the (faked) os.path helpers.
        # linux/mac
        os.name = 'posix'
        def _isabs(path):
            return path[0] == '/'
        os.path.isabs = _isabs
        def _join(*path):
            return '/'.join(path)
        os.path.join = _join
        self.assertEqual(change_root('/root', '/old/its/here'),
                         '/root/old/its/here')
        self.assertEqual(change_root('/root', 'its/here'),
                         '/root/its/here')
        # windows
        os.name = 'nt'
        def _isabs(path):
            return path.startswith('c:\\')
        os.path.isabs = _isabs
        def _splitdrive(path):
            if path.startswith('c:'):
                return ('', path.replace('c:', ''))
            return ('', path)
        os.path.splitdrive = _splitdrive
        def _join(*path):
            return '\\'.join(path)
        os.path.join = _join
        self.assertEqual(change_root('c:\\root', 'c:\\old\\its\\here'),
                         'c:\\root\\old\\its\\here')
        self.assertEqual(change_root('c:\\root', 'its\\here'),
                         'c:\\root\\its\\here')
        # BugsBunny os (it's a great os)
        os.name = 'BugsBunny'
        self.assertRaises(DistutilsPlatformError,
                          change_root, 'c:\\root', 'its\\here')
        # XXX platforms to be covered: os2, mac
    def test_check_environ(self):
        # check_environ() must fill in HOME (posix) and PLAT exactly once.
        util._environ_checked = 0
        if 'HOME' in os.environ:
            del os.environ['HOME']
        # posix without HOME
        if os.name == 'posix':  # this test won't run on windows
            check_environ()
            import pwd
            self.assertEqual(os.environ['HOME'], pwd.getpwuid(os.getuid())[5])
        else:
            check_environ()
        self.assertEqual(os.environ['PLAT'], get_platform())
        self.assertEqual(util._environ_checked, 1)
    def test_split_quoted(self):
        # quotes and backslash escapes are consumed, words are split
        self.assertEqual(split_quoted('""one"" "two" \'three\' \\four'),
                         ['one', 'two', 'three', 'four'])
    def test_strtobool(self):
        yes = ('y', 'Y', 'yes', 'True', 't', 'true', 'True', 'On', 'on', '1')
        no = ('n', 'no', 'f', 'false', 'off', '0', 'Off', 'No', 'N')
        for y in yes:
            self.assertTrue(strtobool(y))
        for n in no:
            self.assertTrue(not strtobool(n))
    def test_rfc822_escape(self):
        # each newline in the header must be followed by 8 spaces
        header = 'I am a\npoor\nlonesome\nheader\n'
        res = rfc822_escape(header)
        wanted = ('I am a%(8s)spoor%(8s)slonesome%(8s)s'
                  'header%(8s)s') % {'8s': '\n'+8*' '}
        self.assertEqual(res, wanted)
    def test_dont_write_bytecode(self):
        # makes sure byte_compile raise a DistutilsError
        # if sys.dont_write_bytecode is True
        old_dont_write_bytecode = sys.dont_write_bytecode
        sys.dont_write_bytecode = True
        try:
            self.assertRaises(DistutilsByteCompileError, byte_compile, [])
        finally:
            sys.dont_write_bytecode = old_dont_write_bytecode
def test_suite():
    """Return a suite containing all tests from UtilTestCase."""
    # unittest.makeSuite is deprecated (and removed in Python 3.13);
    # TestLoader.loadTestsFromTestCase is the equivalent supported API.
    return unittest.TestLoader().loadTestsFromTestCase(UtilTestCase)
# Allow running this test module directly: `python test_util.py`.
if __name__ == "__main__":
    run_unittest(test_suite())
|
Theer108/invenio | refs/heads/master | invenio/modules/deposit/fields/__init__.py | 13 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Init."""
from wtforms.utils import unset_value
from .abstract import *
from .author import *
from .date import *
from .doi import *
from .file_upload import *
from .issn import *
from .journal import *
from .keywords import *
from .language import *
from .notes import *
from .pages_number import *
from .publisher import *
from .record_id import *
from .title import *
from .wtformsext import *
|
mbauskar/Das_Erpnext | refs/heads/develop | erpnext/hr/doctype/department/department.py | 100 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Department(Document):
	"""Server-side controller for the Department document.

	No custom behaviour; all logic comes from the frappe Document base class.
	"""
	pass
shyamalschandra/picochess | refs/heads/master | libs/requests/packages/chardet/universaldetector.py | 744 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import sys
import codecs
from .latin1prober import Latin1Prober # windows-1252
from .mbcsgroupprober import MBCSGroupProber # multi-byte character sets
from .sbcsgroupprober import SBCSGroupProber # single-byte character sets
from .escprober import EscCharSetProber # ISO-2022, etc.
import re
MINIMUM_THRESHOLD = 0.20
ePureAscii = 0
eEscAscii = 1
eHighbyte = 2
class UniversalDetector:
    """Incremental character-encoding detector.

    Feed successive chunks of a byte stream to feed(); when the stream is
    exhausted call close() and read ``self.result``, a dict with
    ``'encoding'`` and ``'confidence'`` keys.
    """

    def __init__(self):
        # Any byte with the high bit set means the data is not pure ASCII.
        self._highBitDetector = re.compile(b'[\x80-\xFF]')
        # ESC or '~{' marks escape-sequence encodings (ISO-2022-*, HZ-GB-2312).
        self._escDetector = re.compile(b'(\033|~{)')
        self._mEscCharSetProber = None
        self._mCharSetProbers = []
        self.reset()

    def reset(self):
        """Reset all state so the detector can be reused on a new stream."""
        self.result = {'encoding': None, 'confidence': 0.0}
        self.done = False
        self._mStart = True
        self._mGotData = False
        self._mInputState = ePureAscii
        self._mLastChar = b''
        if self._mEscCharSetProber:
            self._mEscCharSetProber.reset()
        for prober in self._mCharSetProbers:
            prober.reset()

    def feed(self, aBuf):
        """Consume the next chunk of bytes; may set self.done early."""
        if self.done:
            return
        aLen = len(aBuf)
        if not aLen:
            return
        if not self._mGotData:
            # If the data starts with BOM, we know it is UTF.
            # NOTE: UTF-32 BOMs must be tested before the UTF-16 ones,
            # since BOM_LE/BOM_BE are prefixes of the UTF-32 BOMs.
            if aBuf[:3] == codecs.BOM_UTF8:
                # EF BB BF  UTF-8 with BOM
                # BUGFIX: this previously compared against codecs.BOM, which
                # is the 2-byte native-order UTF-16 BOM, so a 3-byte slice
                # could never match and UTF-8 BOMs went undetected.
                self.result = {'encoding': "UTF-8", 'confidence': 1.0}
            elif aBuf[:4] == codecs.BOM_UTF32_LE:
                # FF FE 00 00  UTF-32, little-endian BOM
                self.result = {'encoding': "UTF-32LE", 'confidence': 1.0}
            elif aBuf[:4] == codecs.BOM_UTF32_BE:
                # 00 00 FE FF  UTF-32, big-endian BOM
                self.result = {'encoding': "UTF-32BE", 'confidence': 1.0}
            elif aBuf[:4] == b'\xFE\xFF\x00\x00':
                # FE FF 00 00  UCS-4, unusual octet order BOM (3412)
                self.result = {
                    'encoding': "X-ISO-10646-UCS-4-3412",
                    'confidence': 1.0
                }
            elif aBuf[:4] == b'\x00\x00\xFF\xFE':
                # 00 00 FF FE  UCS-4, unusual octet order BOM (2143)
                self.result = {
                    'encoding': "X-ISO-10646-UCS-4-2143",
                    'confidence': 1.0
                }
            elif aBuf[:2] == codecs.BOM_LE:
                # FF FE  UTF-16, little endian BOM
                self.result = {'encoding': "UTF-16LE", 'confidence': 1.0}
            elif aBuf[:2] == codecs.BOM_BE:
                # FE FF  UTF-16, big endian BOM
                self.result = {'encoding': "UTF-16BE", 'confidence': 1.0}
        self._mGotData = True
        if self.result['encoding'] and (self.result['confidence'] > 0.0):
            self.done = True
            return
        # Escalate the input state: pure ASCII -> escape-sequence encoding
        # or high-byte data; _mLastChar keeps the boundary byte so an escape
        # split across two chunks is still seen.
        if self._mInputState == ePureAscii:
            if self._highBitDetector.search(aBuf):
                self._mInputState = eHighbyte
            elif ((self._mInputState == ePureAscii) and
                    self._escDetector.search(self._mLastChar + aBuf)):
                self._mInputState = eEscAscii
        self._mLastChar = aBuf[-1:]
        if self._mInputState == eEscAscii:
            if not self._mEscCharSetProber:
                self._mEscCharSetProber = EscCharSetProber()
            if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt:
                self.result = {'encoding': self._mEscCharSetProber.get_charset_name(),
                               'confidence': self._mEscCharSetProber.get_confidence()}
                self.done = True
        elif self._mInputState == eHighbyte:
            if not self._mCharSetProbers:
                # Lazily create the prober groups only when high-byte data
                # actually shows up.
                self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(),
                                         Latin1Prober()]
            for prober in self._mCharSetProbers:
                if prober.feed(aBuf) == constants.eFoundIt:
                    self.result = {'encoding': prober.get_charset_name(),
                                   'confidence': prober.get_confidence()}
                    self.done = True
                    break

    def close(self):
        """Finish detection and return self.result (or None if no data)."""
        if self.done:
            return
        if not self._mGotData:
            if constants._debug:
                sys.stderr.write('no data received!\n')
            return
        self.done = True
        if self._mInputState == ePureAscii:
            # Only 7-bit bytes seen: plain ASCII.
            self.result = {'encoding': 'ascii', 'confidence': 1.0}
            return self.result
        if self._mInputState == eHighbyte:
            # Pick the prober with the highest confidence above the floor.
            proberConfidence = None
            maxProberConfidence = 0.0
            maxProber = None
            for prober in self._mCharSetProbers:
                if not prober:
                    continue
                proberConfidence = prober.get_confidence()
                if proberConfidence > maxProberConfidence:
                    maxProberConfidence = proberConfidence
                    maxProber = prober
            if maxProber and (maxProberConfidence > MINIMUM_THRESHOLD):
                self.result = {'encoding': maxProber.get_charset_name(),
                               'confidence': maxProber.get_confidence()}
                return self.result
        if constants._debug:
            sys.stderr.write('no probers hit minimum threshhold\n')
            for prober in self._mCharSetProbers[0].mProbers:
                if not prober:
                    continue
                sys.stderr.write('%s confidence = %s\n' %
                                 (prober.get_charset_name(),
                                  prober.get_confidence()))
|
wandec/grr | refs/heads/master | config/server.py | 1 | #!/usr/bin/env python
"""Configuration parameters for the server side subsystems."""
from grr.lib import config_lib
from grr.lib import rdfvalue
# Note: Each thread adds about 8mb for stack space.
config_lib.DEFINE_integer("Threadpool.size", 50,
"Number of threads in the shared thread pool.")
config_lib.DEFINE_integer("Worker.flow_lease_time", 7200,
"Duration of a flow lease time in seconds.")
config_lib.DEFINE_integer("Worker.well_known_flow_lease_time", 600,
"Duration of a well known flow lease time in "
"seconds.")
config_lib.DEFINE_integer("Worker.compaction_lease_time", 3600,
"Duration of collections lease time for compaction "
"in seconds.")
config_lib.DEFINE_bool("Worker.enable_packed_versioned_collection_journaling",
False, "If True, all Add*() operations and all "
"compactions of PackedVersionedCollections will be "
"journaled so that these collections can be later "
"checked for integrity.")
config_lib.DEFINE_integer("Worker.queue_shards", 5,
"Queue notifications will be sharded across "
"this number of datastore subjects.")
config_lib.DEFINE_integer("Worker.notification_expiry_time", 600,
"The queue manager expires stale notifications "
"after this many seconds.")
config_lib.DEFINE_integer("Worker.notification_retry_interval", 30,
"The queue manager retries to work on requests it "
"could not complete after this many seconds.")
# We write a journal entry for the flow when it's about to be processed.
# If the journal entry is there after this time, the flow will get terminated.
config_lib.DEFINE_integer(
"Worker.stuck_flows_timeout", 60 * 60 * 6,
"Flows who got stuck in the worker for more than this time (in seconds) "
"are forcibly terminated")
config_lib.DEFINE_integer("Frontend.throttle_average_interval", 60,
"Time interval over which average request rate is "
"calculated when throttling is enabled.")
config_lib.DEFINE_list("Frontend.well_known_flows",
["TransferStore", "Stats"],
"Allow these well known flows to run directly on the "
"frontend. Other flows are scheduled as normal.")
# Smtp settings.
config_lib.DEFINE_string("Worker.smtp_server", "localhost",
"The smtp server for sending email alerts.")
config_lib.DEFINE_integer("Worker.smtp_port", 25, "The smtp server port.")
config_lib.DEFINE_bool("Worker.smtp_starttls", False,
"Enable TLS for the smtp connection.")
config_lib.DEFINE_string("Worker.smtp_user", None,
"Username for the smtp connection.")
config_lib.DEFINE_string("Worker.smtp_password", None,
"Password for the smtp connection.")
# Server Cryptographic settings.
config_lib.DEFINE_semantic(
rdfvalue.PEMPrivateKey, "PrivateKeys.ca_key",
description="CA private key. Used to sign for client enrollment.",
)
config_lib.DEFINE_semantic(
rdfvalue.PEMPrivateKey, "PrivateKeys.server_key",
description="Private key for the front end server.")
config_lib.DEFINE_semantic(
rdfvalue.RDFX509Cert, "Frontend.certificate",
description="An X509 certificate for the frontend server.")
config_lib.DEFINE_integer("ACL.cache_age", 600, "The number of seconds "
"approval objects live in the cache.")
config_lib.DEFINE_bool("Cron.active", False,
"Set to true to run a cron thread on this binary.")
config_lib.DEFINE_list("Cron.enabled_system_jobs", [],
"List of system cron jobs that will be "
"automatically scheduled on worker startup. "
"If cron jobs from this list were disabled "
"before, they will be enabled on worker "
"startup. Vice versa, if they were enabled "
"but are not specified in the list, they "
"will be disabled.")
config_lib.DEFINE_integer("ACL.approvers_required", 2,
"The number of approvers required for access.")
config_lib.DEFINE_string("Frontend.bind_address", "::",
"The ip address to bind.")
config_lib.DEFINE_integer("Frontend.bind_port", 8080, "The port to bind.")
config_lib.DEFINE_integer("Frontend.max_queue_size", 500,
"Maximum number of messages to queue for the client.")
config_lib.DEFINE_integer("Frontend.max_retransmission_time", 10,
"Maximum number of times we are allowed to "
"retransmit a request until it fails.")
config_lib.DEFINE_integer("Frontend.message_expiry_time", 600,
"Maximum time messages remain valid within the "
"system.")
# The Admin UI web application.
config_lib.DEFINE_integer("AdminUI.port", 8000, "port to listen on")
config_lib.DEFINE_string("AdminUI.bind", "::", "interface to bind to.")
config_lib.DEFINE_string(
"AdminUI.webauth_manager", "NullWebAuthManager",
"The web auth manager for controlling access to the UI.")
config_lib.DEFINE_bool("AdminUI.django_debug", True,
"Turn on to add django debugging")
config_lib.DEFINE_string(
"AdminUI.django_secret_key", "CHANGE_ME",
"This is a secret key that should be set in the server "
"config. It is used in XSRF and session protection.")
config_lib.DEFINE_list(
"AdminUI.django_allowed_hosts", ["*"],
"Set the django ALLOWED_HOSTS parameter. "
"See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts")
config_lib.DEFINE_bool("AdminUI.enable_ssl", False,
"Turn on SSL. This needs AdminUI.ssl_cert to be set.")
config_lib.DEFINE_string("AdminUI.ssl_cert_file", "",
"The SSL certificate to use.")
config_lib.DEFINE_string(
"AdminUI.ssl_key_file", None,
"The SSL key to use. The key may also be part of the cert file, in which "
"case this can be omitted.")
config_lib.DEFINE_string("AdminUI.url", "http://localhost:8000/",
"The direct external URL for the user interface.")
# BUGFIX: this boolean option was registered with DEFINE_string even though
# its default is False and its help text says "If True"; declare it as the
# bool it is so config parsing/validation treats it correctly.
config_lib.DEFINE_bool("AdminUI.use_precompiled_js", False,
                       "If True - use Closure-compiled JS bundle. This flag "
                       "is experimental and is not properly supported yet.")
config_lib.DEFINE_string("AdminUI.export_command",
"/usr/bin/grr_export",
"Command to show in the fileview for downloading the "
"files from the command line.")
config_lib.DEFINE_string("AdminUI.page_title",
"GRR Admin Console",
"Page title of the Admin UI.")
config_lib.DEFINE_string("AdminUI.heading", "",
"Dashboard heading displayed in the Admin UI.")
config_lib.DEFINE_string("AdminUI.report_url",
"https://github.com/google/grr/issues",
"URL of the 'Report a problem' link.")
config_lib.DEFINE_string("AdminUI.help_url",
"/help/index.html",
"URL of the 'Help' link.")
config_lib.DEFINE_string("AdminUI.github_docs_location",
"https://github.com/google/grr-doc/blob/master",
"Base path for GitHub-hosted GRR documentation. ")
config_lib.DEFINE_string("AdminUI.new_hunt_wizard.default_output_plugin",
None,
"Output plugin that will be added by default in the "
"'New Hunt' wizard output plugins selection page.")
config_lib.DEFINE_string("Server.master_watcher_class", "DefaultMasterWatcher",
"The master watcher class to use.")
config_lib.DEFINE_string(
"Rekall.profile_repository",
"https://github.com/google/rekall-profiles/raw/master",
"The repository to use when downloading Rekall profiles.")
config_lib.DEFINE_string(
"Rekall.profile_cache_urn", "aff4:/rekall_profiles",
"A cache in the aff4 space to store downloaded Rekall profiles.")
config_lib.DEFINE_string(
"Rekall.profile_server", "GRRRekallProfileServer",
"Which Rekall profile server to use.")
config_lib.DEFINE_string(
"StatsHunt.CollectionInterval", "10m",
"How often to collect the StatsHunt information from each client. The "
"minimum bound here is effectively 2 * Client.poll_max, since a new request"
" is only scheduled after results are received in the previous poll.")
config_lib.DEFINE_string(
"Server.username", None,
"System account to run as after initialization for running the server as "
"non-root.")
# Email Template Values
config_lib.DEFINE_string(
"Email.signature", "The GRR Team",
"The default signature block for template emails")
config_lib.DEFINE_string(
"Email.approval_cc_address", None,
"A single email address or comma separated list of addresses to CC on all "
"approval emails. Will be added"
" to all emails and can't be changed or removed by the user.")
config_lib.DEFINE_string(
"Email.approval_optional_cc_address", None,
"A single email address or comma separated list of addresses to CC on all "
"approval emails. The user has the option to"
" remove this CC address .")
# BUGFIX: integer options should take integer defaults, not the strings
# "200"/"0" (DEFINE_integer elsewhere in this file uses int literals).
config_lib.DEFINE_integer(
    "StatsHunt.ClientBatchSize", 200,
    "Batch size for client scheduling. This should be large enough that it "
    "alleviates the performance impact of database roundtrips to open the "
    "clients, but small enough that the threshold will be continuously reached "
    "to keep the hunt running.")
config_lib.DEFINE_integer(
    "StatsHunt.ClientLimit", 0,
    "The number of clients to run the StatsHunt on. This is purely to "
    "allow for testing when the cronjob is enabled, since it can be a "
    "significant amount of traffic. This should be set to 0 once you know"
    " that the server can handle it.")
config_lib.DEFINE_string("StatsStore.process_id", default="",
help="Id used to identify stats data of the current "
"process. This should be different for different GRR "
"processes. I.e. if you have 4 workers, for every "
"worker the subject should be different. For example: "
"worker_1, worker_2, worker_3, worker_4.")
config_lib.DEFINE_integer("StatsStore.write_interval", default=60,
help="Time in seconds between the dumps of stats "
"data into the stats store.")
config_lib.DEFINE_integer("StatsStore.ttl", default=60 * 60 * 24 * 3,
help="Maximum lifetime (in seconds) of data in the "
"stats store. Default is three days.")
config_lib.DEFINE_list("ConfigIncludes", [],
"List of additional config files to include. Files are "
"processed recursively depth-first, later values "
"override earlier ones.")
|
Th3R3p0/Nosql-Exploitation-Framework | refs/heads/master | coreconfigure.py | 1 | #!/usr/bin/python
#Core Configuration File For Commands and Options
import argparse
import dbattacks
from dbattacks import mongoattacks
from dbattacks import couchattacks
from dbattacks import redisattacks
from dbattacks import hbaseattacks
from dbattacks import cassattacks
from dbattacks import utils
import webattacks
import pymongo
import sys
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
logging.getLogger("ctype.runtime").setLevel(logging.ERROR)
from sniff import sniffredis
from sniff import sniffmongo
from sniff import sniffcouch
from termcolor import colored
def Config(args):
    """Dispatch the parsed command-line options to the matching attack module.

    `args` is the argparse result as a dict. Each recognised option (scan,
    dict, enum, mass, webapp, exhaust, remotecheck, filecheck, shodan, sniff,
    clone) triggers the corresponding db/web attack helper. Python 2 code.
    """
    global available
    global target,port # Needed to modify global copy of globvar
    global user,passw
    global mas
    global file_name
    global db_select
    global column_select
    global post_status
    global creds
    global dump
    global specify_params
    global paramcheck
    global select
    global db
    global limit
    global conn
    mas=False
    paramcheck=[]
    specify_params=[]
    # Databases supported by the dictionary/mass-scan modes.
    available=['mongo','couch','redis']
    post_status=False
    target = args['ip']
    port = args['port']
    url=args['webapp']
    seldb=args['enum']
    filename=args['file']
    try:
        # Checks whether Host is up
        if args['ip']:
            utils.host_up(target)
        # Credentials and common flags (fall back to False / defaults).
        screen=args['screen'] if args['screen'] else False
        creds=args['auth'] if args['auth'] else False
        authall=args['authall'] if args['authall'] else False
        mass=args['mass'] if args['mass'] else False
        db=args['db'] if args['db'] else 'admin'
        column_select=args['c'] if args['c'] else False
        dump=True if args['dump'] else False
        post_status=True if args['post'] else False
        limit=int(args['limit']) if args['limit'] else 0
        write=args['write'] if args['write'] else False
        #Scan for General DB Targets
        if args['scan']:
            utils.scan_target(target)
        #Web Attacks
        ## This is argument is not working correctly - Need to fix - th3r3p0
        #if args['url']:
        #    seldb=args['webapp'] if args['webapp'] else False
        #    if seldb == 'mongo':
        #        filename=['payload/js_inject.txt','payload/js_time']
        # Dictionary (password brute-force) attacks; each branch falls back
        # to the engine's default port when none was given.
        if args['dict']:
            seldb=args['dict']
            if args['file']:
                if seldb=='mongo':
                    if args['port'] or args['db']:
                        pass
                    else:
                        port=27017
                        db='admin'
                    #mongoattacks.mongo_web_interface(target,port,creds,screen)
                    mongoattacks.dict_mongo(filename,target,port,db)
                elif seldb=='couch':
                    if args['port']:
                        pass
                    else:
                        port=5984
                    couchattacks.dict_couch(filename,target,port)
                elif seldb=='redis':
                    if args['port']:
                        pass
                    else:
                        port=6379
                    redisattacks.dict_redis(filename,target,port)
            else:
                print colored("[-] Specify File Name",'red')
        #Enumeration Check
        if args['enum']:
            seldb=args['enum']
            if seldb=='mongo':
                if port:
                    pass
                else:
                    port = 27017
                #mongo_web_scan(target)
                try:
                    conn = mongoattacks.mongo_conn(target,port,mass)
                    mongoattacks.mongo_enum(conn,creds,authall,db,column_select,dump,limit,write)
                except Exception as e:
                    print colored(e,'red')
            elif seldb=='couch':
                if port:
                    pass
                else:
                    port = 5984
                try:
                    #print post_status
                    if db=='admin':
                        db=False
                    couch=couchattacks.couch_conn(target,port)
                    couchattacks.couch_enum(couch,target,port,creds,db,column_select,post_status)
                except Exception as e:
                    print str(e)
                    print colored("[-] Enumeration Failed \n",'red')
            elif seldb=='redis':
                if port:
                    pass
                else:
                    port = 6379
                try:
                    r_server=redisattacks.redis_conn(target,port)
                    redisattacks.redis_enum(r_server,creds)
                except Exception as e:
                    print colored(e,'red')
            elif seldb == 'cassandra':
                if port:
                    pass
                else:
                    port = 9160
                creds=False
                if db=='admin':
                    db=False
                cassattacks.cassa_enum(target,port,db,dump)
            elif seldb == 'hbase':
                if port:
                    pass
                else:
                    port = 8080
                hbaseattacks.hbase_enum(port)
            else:
                print colored("[-] No Support for the Specified DB",'red')
        # Mass Scan Settings
        if args['mass'] in available :
            select=args['mass']
            if args['file']:
                mas=True
                mass_scan(args['mass'],args['file'])
            else:
                print colored("[-] Plse specify File name \n",'red')
        #Database Select (Currently available for Mongo,Couch)
        if args['db']:
            db_select = args['db']
            column_select = args['c']
        else:
            db_select=""
        if args['post'] == 'enable':
            post_status=True
        if args['param']:
            paramcheck=args['param']
            specify_params=paramcheck.split(',')
        else:
            specify_params=""
            specify_params=args['param']
        # Scans for WebAPP Attacks
        if args['webapp']:
            webattacks.nosqlweb.attack(url)
        #Redis DOS (2.6+)
        if args['exhaust']:
            if port:
                pass
            else:
                port=6379
            redisattacks.redis_exhaust(target,port)
        #Redis RCE Check
        if args['remotecheck']:
            if port:
                pass
            else:
                port=6379
            redisattacks.redis_rce(target,port)
        #Redis File Enumeration Check
        if args['filecheck']:
            filename=args['filecheck']
            if port:
                pass
            else:
                port=6379
            redisattacks.redis_file_enum(filename,target,port,creds)
        #Shodan IP Grabber
        if args['shodan']:
            utils.shodan_frame(args['shodan'])
        #Sniffing Module
        if args['sniff']=='mongo':
            sniffmongo.sniff_mongo()
        if args['sniff']=='redis':
            sniffredis.sniff_redis()
        if args['sniff']=='couch':
            sniffcouch.sniff_couch()
        #Clone Database Currently Available for Mongo,Couch and Redis
        if args['clone'] == 'couch':
            couchattacks.clone_couch(target)
        if args['clone'] == 'redis':
            redisattacks.clone_redis(target)
    except KeyboardInterrupt:
        # Graceful shutdown on Ctrl+C anywhere inside the dispatch.
        print colored("[-] Cntrl+C Shutting Down",'red')
        sys.exit(0)
|
Kryz/sentry | refs/heads/master | src/sentry/search/models.py | 36 | """
sentry.search.models
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
|
TheUKDave/secret_santa | refs/heads/master | secret_santa/urls.py | 1 | from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('santa.urls'), name='santa'),
]
|
charlesvdv/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/pytest/_pytest/_code/_py2traceback.py | 192 | # copied from python-2.7.3's traceback.py
# CHANGES:
# - some_str is replaced, trying to create unicode strings
#
import types
def format_exception_only(etype, value):
    """Format the exception part of a traceback.

    The arguments are the exception type and value such as given by
    sys.last_type and sys.last_value. The return value is a list of
    strings, each ending in a newline.

    Normally, the list contains a single string; however, for
    SyntaxError exceptions, it contains several lines that (when
    printed) display detailed information about where the syntax
    error occurred.

    The message indicating which exception occurred is always the last
    string in the list.
    """
    # An instance should not have a meaningful value parameter, but
    # sometimes does, particularly for string exceptions, such as
    # >>> raise string1, string2  # deprecated
    #
    # Clear these out first because issubtype(string1, SyntaxError)
    # would throw another exception and mask the original problem.
    #
    # (Python 2 heritage: types.InstanceType covers old-style class
    # instances; a bare string can be an exception type there.)
    if (isinstance(etype, BaseException) or
        isinstance(etype, types.InstanceType) or
        etype is None or type(etype) is str):
        return [_format_final_exc_line(etype, value)]
    stype = etype.__name__
    if not issubclass(etype, SyntaxError):
        return [_format_final_exc_line(stype, value)]
    # It was a syntax error; show exactly where the problem was found.
    lines = []
    try:
        # SyntaxError packs its location info into .args.
        msg, (filename, lineno, offset, badline) = value.args
    except Exception:
        pass
    else:
        filename = filename or "<string>"
        lines.append('  File "%s", line %d\n' % (filename, lineno))
        if badline is not None:
            if isinstance(badline, bytes):  # python 2 only
                badline = badline.decode('utf-8', 'replace')
            lines.append(u'    %s\n' % badline.strip())
            if offset is not None:
                caretspace = badline.rstrip('\n')[:offset].lstrip()
                # non-space whitespace (likes tabs) must be kept for alignment
                caretspace = ((c.isspace() and c or ' ') for c in caretspace)
                # only three spaces to account for offset1 == pos 0
                lines.append('   %s^\n' % ''.join(caretspace))
            value = msg
    lines.append(_format_final_exc_line(stype, value))
    return lines
def _format_final_exc_line(etype, value):
"""Return a list of a single line -- normal case for format_exception_only"""
valuestr = _some_str(value)
if value is None or not valuestr:
line = "%s\n" % etype
else:
line = "%s: %s\n" % (etype, valuestr)
return line
def _some_str(value):
try:
return unicode(value)
except Exception:
try:
return str(value)
except Exception:
pass
return '<unprintable %s object>' % type(value).__name__
|
MediaKraken/MediaKraken_Deployment | refs/heads/master | source/database_async/db_base_media_tv_async.py | 1 | import inspect
from common import common_logging_elasticsearch_httpx
async def db_media_tv_list(self, genre_type=None, list_limit=None,
                           group_collection=False, offset=0, search_value=None,
                           db_connection=None):
    """Fetch one row per TV show that has media attached.

    Each row carries the show name, guid, episode count and the first
    available poster (tvmaze, falling back to thetvdb). When
    ``search_value`` is given, rows are filtered by pg_trgm similarity
    (``name % value``). ``genre_type``/``group_collection`` are accepted
    for interface compatibility but currently unused.
    """
    await common_logging_elasticsearch_httpx.com_es_httpx_post_async(
        message_type='info',
        message_text={'function': inspect.stack()[0][3],
                      'locals': locals(),
                      'caller': inspect.stack()[1][3]})
    # Fall back to the pooled connection when the caller didn't supply one.
    db_conn = self.db_connection if db_connection is None else db_connection
    # Both branches previously duplicated the same ~13-line query; the
    # search branch only adds one predicate and shifts the placeholders.
    query_head = ('select mm_metadata_tvshow_name,'
                  ' mm_metadata_tvshow_guid,'
                  ' count(*) as mm_count,'
                  ' COALESCE(mm_metadata_tvshow_localimage_json'
                  '->\'Images\'->\'tvmaze\'->>\'Poster\','
                  ' mm_metadata_tvshow_localimage_json'
                  '->\'Images\'->\'thetvdb\'->>\'Poster\')'
                  ' from mm_metadata_tvshow,'
                  ' mm_media where mm_media_metadata_guid'
                  ' = mm_metadata_tvshow_guid')
    query_tail = (' group by mm_metadata_tvshow_guid'
                  ' order by LOWER(mm_metadata_tvshow_name)')
    if search_value is not None:
        return await db_conn.fetch(query_head
                                   + ' and mm_metadata_tvshow_name % $1'
                                   + query_tail
                                   + ' offset $2 limit $3',
                                   search_value, offset, list_limit)
    return await db_conn.fetch(query_head + query_tail
                               + ' offset $1 limit $2',
                               offset, list_limit)
async def db_media_tv_list_count(self, genre_type=None, group_collection=False,
                                 search_value=None, db_connection=None):
    """
    Return the total number of media rows joined to TV show metadata.

    NOTE(review): ``genre_type``, ``group_collection`` and ``search_value``
    are accepted but not yet applied to the query — confirm whether this
    count should mirror the filters used by ``db_media_tv_list``.
    """
    # Structured entry log (function name, args, caller) to Elasticsearch.
    await common_logging_elasticsearch_httpx.com_es_httpx_post_async(message_type='info',
                                                                     message_text={
                                                                         'function':
                                                                             inspect.stack()[0][
                                                                                 3],
                                                                         'locals': locals(),
                                                                         'caller':
                                                                             inspect.stack()[1][
                                                                                 3]})
    # Allow callers to supply their own connection; otherwise use the
    # instance-wide one.
    if db_connection is None:
        db_conn = self.db_connection
    else:
        db_conn = db_connection
    # BUGFIX: the previous implementation did fetch(...) and returned
    # len(sql_data); a "select count(*)" always yields exactly one row, so
    # the function returned 1 regardless of the real count.  fetchval()
    # returns the scalar count directly.
    sql_data = await db_conn.fetchval('select count(*) from mm_metadata_tvshow, mm_media'
                                      ' where mm_media_metadata_guid'
                                      ' = mm_metadata_tvshow_guid')
    if sql_data is None:
        return 0
    return sql_data
|
arderyp/scotuswebcites | refs/heads/master | scotuswebcites/__init__.py | 12133432 | |
magic0704/oslo.db | refs/heads/master | oslo_db/sqlalchemy/migration_cli/__init__.py | 12133432 | |
JackDanger/sentry | refs/heads/master | tests/sentry/tasks/test_clear_expired_snoozes.py | 9 | from __future__ import absolute_import
from datetime import timedelta
from django.utils import timezone
from sentry.models import Group, GroupSnooze, GroupStatus
from sentry.tasks.clear_expired_snoozes import clear_expired_snoozes
from sentry.testutils import TestCase
class ClearExpiredSnoozesTest(TestCase):
    """Tests for the ``clear_expired_snoozes`` task."""
    def test_task_persistent_name(self):
        # The task name is persisted by the broker/scheduler; it must not change.
        assert clear_expired_snoozes.name == 'sentry.tasks.clear_expired_snoozes'
    def test_simple(self):
        """An expired snooze unresolves its group; an active one is left ignored."""
        # group1: snooze expired one minute ago -> should be un-ignored.
        group1 = self.create_group(
            status=GroupStatus.IGNORED,
        )
        GroupSnooze.objects.create(
            group=group1,
            until=timezone.now() - timedelta(minutes=1),
        )
        # group2: snooze still active for another minute -> stays ignored.
        group2 = self.create_group(
            status=GroupStatus.IGNORED,
        )
        GroupSnooze.objects.create(
            group=group2,
            until=timezone.now() + timedelta(minutes=1),
        )
        clear_expired_snoozes()
        assert Group.objects.get(
            id=group1.id,
        ).status == GroupStatus.UNRESOLVED
        assert Group.objects.get(
            id=group2.id,
        ).status == GroupStatus.IGNORED
|
pedrobaeza/OpenUpgrade | refs/heads/8.0 | addons/l10n_in_hr_payroll/report/__init__.py | 424 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report_payslip_details
import report_payroll_advice
import report_hr_salary_employee_bymonth
import payment_advice_report
import report_hr_yearly_salary_detail
import payslip_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kazeeki/pipe2py | refs/heads/master | pipe2py/twisted/collections.py | 1 | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
pipe2py.twisted.collections
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides methods for creating asynchronous pipe2py pipes
"""
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
from twisted.internet.defer import inlineCallbacks, returnValue
from pipe2py.modules.pipeforever import asyncPipeForever
from pipe2py.lib.collections import PyPipe, PyCollection
class AsyncPipe(PyPipe):
    """An asynchronous PyPipe object.

    Wraps one pipe2py module's ``asyncPipe<Name>`` coroutine; ``input``
    defaults to the never-ending ``asyncPipeForever`` source.
    """
    def __init__(self, name=None, context=None, **kwargs):
        super(AsyncPipe, self).__init__(name, context)
        # Upstream deferred feeding this pipe (forever-source by default).
        self.pipe_input = kwargs.pop('input', asyncPipeForever())
        # Resolve e.g. name='fetch' -> module.asyncPipeFetch
        # (``self.module`` is provided by the PyPipe base class).
        self.pipeline = getattr(self.module, 'asyncPipe%s' % self.name.title())
        # Remaining kwargs are forwarded to the pipeline when run.
        self.kwargs = kwargs
    @property
    @inlineCallbacks
    def list(self):
        # Deferred property: materialise the pipe's output as a list.
        output = yield self.output
        returnValue(list(output))
    def pipe(self, name, **kwargs):
        # Chain another pipe, feeding it this pipe's output.
        return AsyncPipe(name, self.context, input=self.output, **kwargs)
    def loop(self, name, **kwargs):
        # Run pipe *name* inside a 'loop' pipe as its embedded sub-pipe.
        embed = AsyncPipe(name, self.context).pipeline
        return self.pipe('loop', embed=embed, **kwargs)
class AsyncCollection(PyCollection):
    """An asynchronous PyCollection object"""
    @inlineCallbacks
    def asyncFetchAll(self):
        """Fetch all source urls"""
        # One AsyncPipe per source url (pipe generator provided by PyCollection).
        src_pipes = self.gen_pipes(AsyncPipe)
        first_pipe = src_pipes.next()
        kwargs = self.make_kwargs(src_pipes)
        if kwargs:
            # Several sources: merge the rest into the first via a 'union' pipe.
            kwargs.update({'input': first_pipe})
            result = yield AsyncPipe('union', **kwargs).output
        else:
            # Single source: its output is the result.
            result = yield first_pipe
        returnValue(result)
|
edmorley/treeherder | refs/heads/master | treeherder/services/elasticsearch/utils.py | 2 |
def dict_to_op(d, index_name, doc_type, op_type='index'):
    """
    Create a bulk-indexing operation from the given dictionary.

    Returns a new dict suitable for the Elasticsearch bulk helpers, built
    from ``d`` plus routing metadata: ``_op_type``, ``_index``, ``_type``
    and ``_id`` (taken from ``d['id']``).  ``None`` input is passed through
    unchanged so callers can filter skipped documents.

    Raises Exception if ``op_type`` is unknown or ``d`` has no ``id`` key.
    """
    if d is None:
        return d
    op_types = ('create', 'delete', 'index', 'update')
    if op_type not in op_types:
        msg = 'Unknown operation type "{}", must be one of: {}'
        raise Exception(msg.format(op_type, ', '.join(op_types)))
    if 'id' not in d:
        raise Exception('"id" key not found')
    # BUGFIX: work on a shallow copy so the caller's dict is not mutated
    # (the old code popped 'id' straight out of the argument).
    payload = dict(d)
    operation = {
        '_op_type': op_type,
        '_index': index_name,
        '_type': doc_type,
        '_id': payload.pop('id'),
    }
    operation.update(payload)
    return operation
# Fields copied into the Elasticsearch document (FailureLine model).
# frozenset gives O(1) membership tests and is built once at import time.
_FAILURE_LINE_KEYS = frozenset([
    'id',
    'job_guid',
    'test',
    'subtest',
    'status',
    'expected',
    'message',
    'best_classification',
    'best_is_verified',
])


def to_dict(obj):
    """
    Create a filtered dict from the given object.

    Note: This function is currently specific to the FailureLine model.
    Returns None when ``obj.test`` is not a plain string (old reftest
    tuple indices); callers treat None as "skip this line".
    """
    if not isinstance(obj.test, str):
        # TODO: can we handle this in the DB?
        # Reftests used to use tuple indicies, which we can't support.
        # This is fixed upstream, but we also need to handle it here to allow
        # for older branches.
        return
    all_fields = obj.to_dict()
    return {k: v for k, v in all_fields.items() if k in _FAILURE_LINE_KEYS}
|
Johnetordoff/osf.io | refs/heads/develop | api_tests/brands/__init__.py | 12133432 | |
umitproject/site-status | refs/heads/master | django/contrib/localflavor/sk/__init__.py | 12133432 | |
stephen-soltesz/collectd-mlab | refs/heads/master | site-packages/mlab/disco/__init__.py | 12133432 | |
boomsbloom/dtm-fmri | refs/heads/master | DTM/for_gensim/lib/python2.7/site-packages/pandas/io/auth.py | 7 | from __future__ import print_function
# see LICENSES directory for copyright and license
import os
import sys
import logging
import httplib2
import apiclient.discovery as gapi
import gflags
import oauth2client.file as auth_file
import oauth2client.client as oauth
import oauth2client.tools as tools
OOB_CALLBACK_URN = oauth.OOB_CALLBACK_URN
class AuthenticationConfigError(ValueError):
    """Raised when the OAuth client-secrets configuration file is missing."""
    pass
FLOWS = {}
FLAGS = gflags.FLAGS
DEFAULT_SECRETS = os.path.join(
os.path.dirname(__file__), 'client_secrets.json')
DEFAULT_SCOPE = 'https://www.googleapis.com/auth/analytics.readonly'
DEFAULT_TOKEN_FILE = os.path.join(os.path.dirname(__file__), 'analytics.dat')
MISSING_CLIENT_MSG = """
WARNING: Please configure OAuth 2.0
You need to populate the client_secrets.json file found at:
%s
with information from the APIs Console
<https://console.developers.google.com/iam-admin/projects>.
"""
DOC_URL = ('https://developers.google.com/api-client-library/python/guide/'
'aaa_client_secrets')
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
# Name of file that will store the access and refresh tokens to access
# the API without having to login each time. Make sure this file is in
# a secure place.
def process_flags(flags=None):
    """Uses the command-line flags to set the logging level.

    Args:
        flags: List of command-line argument strings for gflags to parse
            (e.g. ``sys.argv``).  Defaults to an empty list.
    """
    if flags is None:
        flags = []
    # Let the gflags module process the command-line arguments.
    try:
        FLAGS(flags)
    except gflags.FlagsError as e:
        # Bad flags: print usage and abort.
        print('%s\nUsage: %s ARGS\n%s' % (e, str(flags), FLAGS))
        sys.exit(1)
    # Set the logging according to the command-line flag.
    logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
def get_flow(secret, scope, redirect):
    """
    Retrieve an authentication flow object based on the given
    configuration in the secret file name, the authentication scope,
    and a redirect URN

    Flows are memoized in the module-level FLOWS dict, keyed on
    (secret, scope, redirect).

    Raises AuthenticationConfigError if the secrets file does not exist.
    """
    key = (secret, scope, redirect)
    flow = FLOWS.get(key, None)
    if flow is None:
        msg = MISSING_CLIENT_MSG % secret
        if not os.path.exists(secret):
            raise AuthenticationConfigError(msg)
        flow = oauth.flow_from_clientsecrets(secret, scope,
                                             redirect_uri=redirect,
                                             message=msg)
        # Cache for subsequent calls with the same configuration.
        FLOWS[key] = flow
    return flow
def make_token_store(fpath=None):
    """Return a token Storage for *fpath*, defaulting to DEFAULT_TOKEN_FILE."""
    target = DEFAULT_TOKEN_FILE if fpath is None else fpath
    return auth_file.Storage(target)
def authenticate(flow, storage=None):
    """
    Try to retrieve a valid set of credentials from the token store if possible
    Otherwise use the given authentication flow to obtain new credentials
    and return an authenticated http object

    Parameters
    ----------
    flow : authentication workflow
    storage: token storage, default None (falls back to the default token file)
    """
    # BUGFIX: the documented default of None previously crashed on
    # storage.get(); honour it by using the default token store.
    if storage is None:
        storage = make_token_store()
    http = httplib2.Http()
    # Prepare credentials, and authorize HTTP object with them.
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        # No (valid) cached token: run the interactive OAuth flow.
        credentials = tools.run(flow, storage)
    http = credentials.authorize(http)
    return http
def init_service(http):
    """
    Use the given http object to build the analytics service object
    """
    service = gapi.build('analytics', 'v3', http=http)
    return service
def reset_default_token_store():
    """Delete the cached OAuth token file so the next run re-authenticates.

    Raises OSError if the token file does not exist.
    """
    # ``os`` is already imported at module level; the redundant
    # function-local ``import os`` has been removed.
    os.remove(DEFAULT_TOKEN_FILE)
|
edx-solutions/edx-platform | refs/heads/master | lms/djangoapps/verify_student/tests/test_services.py | 4 | # -*- coding: utf-8 -*-
"""
Tests for the service classes in verify_student.
"""
import ddt
from django.conf import settings
from mock import patch
from lms.djangoapps.verify_student.models import ManualVerification, SoftwareSecurePhotoVerification, SSOVerification
from lms.djangoapps.verify_student.services import IDVerificationService
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
FAKE_SETTINGS = {
"DAYS_GOOD_FOR": 10,
}
@patch.dict(settings.VERIFY_STUDENT, FAKE_SETTINGS)
@ddt.ddt
class TestIDVerificationService(ModuleStoreTestCase):
    """
    Tests for IDVerificationService.
    """
    def test_user_is_verified(self):
        """
        Test to make sure we correctly answer whether a user has been verified.
        """
        user = UserFactory.create()
        attempt = SoftwareSecurePhotoVerification(user=user)
        attempt.save()
        # If it's any of these, they're not verified...
        for status in ["created", "ready", "denied", "submitted", "must_retry"]:
            attempt.status = status
            attempt.save()
            self.assertFalse(IDVerificationService.user_is_verified(user), status)
        # ...only an approved attempt counts as verified.
        attempt.status = "approved"
        attempt.save()
        self.assertTrue(IDVerificationService.user_is_verified(user), attempt.status)
    def test_user_has_valid_or_pending(self):
        """
        Determine whether we have to prompt this user to verify, or if they've
        already at least initiated a verification submission.
        """
        user = UserFactory.create()
        attempt = SoftwareSecurePhotoVerification(user=user)
        # If it's any of these statuses, they don't have anything outstanding
        for status in ["created", "ready", "denied"]:
            attempt.status = status
            attempt.save()
            self.assertFalse(IDVerificationService.user_has_valid_or_pending(user), status)
        # Any of these, and we are. Note the benefit of the doubt we're giving
        # -- must_retry, and submitted both count until we hear otherwise
        for status in ["submitted", "must_retry", "approved"]:
            attempt.status = status
            attempt.save()
            self.assertTrue(IDVerificationService.user_has_valid_or_pending(user), status)
    def test_user_status(self):
        """Walk user_status() through photo, SSO and manual verifications."""
        # test for correct status when no error returned
        user = UserFactory.create()
        status = IDVerificationService.user_status(user)
        expected_status = {'status': 'none', 'error': '', 'should_display': True, 'verification_expiry': ''}
        self.assertDictEqual(status, expected_status)
        # test for when photo verification has been created
        SoftwareSecurePhotoVerification.objects.create(user=user, status='approved')
        status = IDVerificationService.user_status(user)
        expected_status = {'status': 'approved', 'error': '', 'should_display': True, 'verification_expiry': ''}
        self.assertDictEqual(status, expected_status)
        # create another photo verification for the same user, make sure the denial
        # is handled properly
        SoftwareSecurePhotoVerification.objects.create(
            user=user, status='denied', error_msg='[{"photoIdReasons": ["Not provided"]}]'
        )
        status = IDVerificationService.user_status(user)
        expected_status = {
            'status': 'must_reverify', 'error': ['id_image_missing'], 'should_display': True, 'verification_expiry': ''
        }
        self.assertDictEqual(status, expected_status)
        # test for when sso verification has been created
        SSOVerification.objects.create(user=user, status='approved')
        status = IDVerificationService.user_status(user)
        expected_status = {'status': 'approved', 'error': '', 'should_display': False, 'verification_expiry': ''}
        self.assertDictEqual(status, expected_status)
        # create another sso verification for the same user, make sure the denial
        # is handled properly
        SSOVerification.objects.create(user=user, status='denied')
        status = IDVerificationService.user_status(user)
        expected_status = {'status': 'must_reverify', 'error': '', 'should_display': False, 'verification_expiry': ''}
        self.assertDictEqual(status, expected_status)
        # test for when manual verification has been created
        ManualVerification.objects.create(user=user, status='approved')
        status = IDVerificationService.user_status(user)
        expected_status = {'status': 'approved', 'error': '', 'should_display': False, 'verification_expiry': ''}
        self.assertDictEqual(status, expected_status)
    @ddt.unpack
    @ddt.data(
        {'enrollment_mode': 'honor', 'status': None, 'output': 'N/A'},
        {'enrollment_mode': 'audit', 'status': None, 'output': 'N/A'},
        {'enrollment_mode': 'verified', 'status': False, 'output': 'Not ID Verified'},
        {'enrollment_mode': 'verified', 'status': True, 'output': 'ID Verified'},
    )
    def test_verification_status_for_user(self, enrollment_mode, status, output):
        """
        Verify verification_status_for_user returns correct status.
        """
        user = UserFactory.create()
        CourseFactory.create()
        # Stub out the underlying verification check so only the
        # mode-to-label mapping is under test.
        with patch(
            'lms.djangoapps.verify_student.services.IDVerificationService.user_is_verified'
        ) as mock_verification:
            mock_verification.return_value = status
            status = IDVerificationService.verification_status_for_user(user, enrollment_mode)
            self.assertEqual(status, output)
    def test_get_verified_user_ids(self):
        """
        Tests for getting users that are verified.
        """
        user_a = UserFactory.create()
        user_b = UserFactory.create()
        user_c = UserFactory.create()
        user_unverified = UserFactory.create()
        user_denied = UserFactory.create()
        # Any approved verification type counts; denied/absent ones do not.
        SoftwareSecurePhotoVerification.objects.create(user=user_a, status='approved')
        ManualVerification.objects.create(user=user_b, status='approved')
        SSOVerification.objects.create(user=user_c, status='approved')
        SSOVerification.objects.create(user=user_denied, status='denied')
        verified_user_ids = set(IDVerificationService.get_verified_user_ids([
            user_a, user_b, user_c, user_unverified, user_denied
        ]))
        expected_user_ids = {user_a.id, user_b.id, user_c.id}
        self.assertEqual(expected_user_ids, verified_user_ids)
|
FrozenPigs/Taigabot | refs/heads/master | plugins/gelbooru.py | 1 | import random
import re
from util import hook, web
from utilities import request
gelbooru_cache = []
gb_lastsearch = ''
def gb_refresh_cache(inp):
    """Refill the module-level cache with up to 20 shuffled gelbooru posts
    matching the given (space-separated) tag search."""
    global gelbooru_cache
    gelbooru_cache = []
    num = 0
    # Normalise user-friendly rating aliases into gelbooru "rating:" tags
    # and join tags with '+' as the API expects.
    search = (
        inp.replace(' ', '+').replace('explicit', 'rating:explicit').replace(
            'nsfw', 'rating:explicit').replace('safe', 'rating:safe').replace('sfw', 'rating:safe'))
    posts = request.get_json(
        u'https://gelbooru.com/index.php?page=dapi&s=post&q=index&limit=20&json=1',
        params={'tags': search})
    # Cache (id, score, file_url, rating, tags) tuples for cheap popping.
    while num < len(posts):
        gelbooru_cache.append((
            posts[num].get('id'),
            posts[num].get('score'),
            posts[num].get('file_url'),
            posts[num].get('rating'),
            posts[num].get('tags'),
        ))
        num += 1
    # Shuffle so successive pops return random posts from the batch.
    random.shuffle(gelbooru_cache)
    return
#@hook.command('sb', autohelp=False)
@hook.command('gb', autohelp=False)
@hook.command('loli', autohelp=False)
@hook.command('shota', autohelp=False)
@hook.command('trap', autohelp=False)
@hook.command('futa', autohelp=False)
@hook.command('futanari', autohelp=False)
@hook.command(autohelp=False)
def gelbooru(inp, reply=None, input=None):
    "gelbooru <tags> -- Gets a random image from gelbooru.com"
    global gb_lastsearch
    global gelbooru_cache
    inp = inp.split(' ')
    # Optional trailing file-extension filter, e.g. ".gb sometag png".
    filetype = inp[-1]
    filetypes = ['png', 'jpg', 'jpeg']
    if filetype not in filetypes:
        filetype = None
    try:
        inp.pop(inp.index(filetype))
    except ValueError:
        pass
    if len(inp) >= 2:
        inp = ' '.join(inp)
    else:
        inp = ''.join(inp)
    # Alias triggers pre-seed the search with their own tag.
    if input.trigger == u'loli':
        search = 'loli' + '+' + inp.lower()
    elif input.trigger == u'shota':
        search = 'shota' + '+' + inp.lower()
    elif input.trigger == u'futa' or input.trigger == u'futanari':
        search = 'futanari' + '+' + inp.lower()
    elif input.trigger == u'trap':
        search = 'trap' + '+' + inp.lower()
    else:
        search = inp.lower()
    search = search.split(' ')
    # gelbooru tags animated gifs as 'animated_gif'.
    for i, n in enumerate(search):
        if n == u'gif':
            search[i] = 'animated_gif'
    if len(search) >= 2:
        search = ' '.join(search)
    else:
        search = ''.join(search)
    # Refresh the cache when the search changed or is nearly exhausted.
    # NOTE(review): ``search in gb_lastsearch`` is a substring test, so a
    # new search contained in the previous one reuses the stale cache —
    # equality may have been intended; confirm.
    if not search in gb_lastsearch or len(gelbooru_cache) < 2:
        gb_refresh_cache(search)
    gb_lastsearch = search
    if len(gelbooru_cache) == 0:
        reply('No results')
        return
    id, score, url, rating, tags = gelbooru_cache.pop()
    if filetype:
        counter = 0
        # Keep popping until a post with the requested extension appears;
        # give up after five cache refreshes.
        while not url.endswith(filetype):
            try:
                if counter == 5:
                    reply('No results')
                    return
                id, score, url, rating, tags = gelbooru_cache.pop()
            except IndexError:
                counter += 1
                gb_refresh_cache(search)
    # Map the single-letter API rating to a colourised IRC label.
    if rating == 'e':
        rating = "\x02\x034NSFW\x03\x02"
    elif rating == 'q':
        rating = "\x02\x037Questionable\x03\x02"
    elif rating == 's':
        rating = "\x02\x033Safe\x03\x02"
    # Prefer an is.gd-shortened url; fall back to the raw url on any error.
    try:
        return u'\x02[{}]\x02 Score: \x02{}\x02 - Rating: {} - {}'.format(
            id, score, rating, web.isgd(url))
    except:
        return u'\x02[{}]\x02 Score: \x02{}\x02 - Rating: {} - {}'.format(id, score, rating, url)
# return u'\x02[{}]\x02 Score: \x02{}\x02 - Rating: {} - {} - {}'.format(id, score, rating, url, tags[:75].strip())
# shows website title, just let urls.py handle it
# gelbooru_list_re = (r'(.+gelbooru.com/.+list&tags.+)', re.I)
# @hook.regex(*gelbooru_list_re)
# def gelbooru_list_url(match):
# soup = http.get_soup(match.group(1))
# return u'{}'.format(soup.find('title').text)
# Matches gelbooru post links and captures the post id.
gelbooru_re = (r'(?:gelbooru.com.*?id=)([-_a-zA-Z0-9]+)', re.I)
@hook.regex(*gelbooru_re)
def gelbooru_url(match):
    """Passive hook: announce id/score/rating/tags for a pasted gelbooru link."""
    posts = request.get_json(
        'https://gelbooru.me/index.php?page=dapi&s=post&q=index&limit=1&id={}&json=1'.format(
            match.group(1)))
    id, score, url, rating, tags = (
        posts[0].get('id'),
        posts[0].get('score'),
        posts[0].get('file_url'),
        posts[0].get('rating'),
        posts[0].get('tags'),
    )
    # Map the single-letter API rating to a colourised IRC label.
    if rating == 'e':
        rating = "\x02\x034NSFW\x03\x02"
    elif rating == 'q':
        rating = "\x02\x037Questionable\x03\x02"
    elif rating == 's':
        rating = "\x02\x033Safe\x03\x02"
    # Tags are truncated to keep the IRC line short.
    return u'\x02[{}]\x02 Score: \x02{}\x02 - Rating: {} - {} - {}'.format(
        id, score, rating, url, tags[:75].strip())
|
indictranstech/biggift-erpnext | refs/heads/develop | erpnext/buying/report/supplier_addresses_and_contacts/__init__.py | 12133432 | |
baffolobill/django-lfs | refs/heads/master | lfs/utils/templatetags/__init__.py | 12133432 | |
allmende/synnefo | refs/heads/develop | snf-cyclades-app/synnefo/helpdesk/models.py | 12133432 | |
porcobosso/spark-ec2 | refs/heads/master | lib/boto-2.34.0/tests/integration/kinesis/__init__.py | 12133432 | |
cloud-ark/cloudark | refs/heads/master | server/server_plugins/aws/resource/__init__.py | 12133432 | |
jinverar/crits | refs/heads/master | crits/objects/__init__.py | 12133432 | |
daenamkim/ansible | refs/heads/devel | lib/ansible/modules/notification/hall.py | 29 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Billy Kimble <basslines@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

# Typo fixes only: "notication" -> "notification", "integation" ->
# "integration", "notifiation" -> "notification", and the functionally
# wrong example host "loclahost" -> "localhost".
DOCUMENTATION = """
module: hall
short_description: Send notification to Hall
description:
    - "The C(hall) module connects to the U(https://hall.com) messaging API and allows you to deliver notification messages to rooms."
version_added: "2.0"
author: Billy Kimble (@bkimble) <basslines@gmail.com>
options:
  room_token:
    description:
      - "Room token provided to you by setting up the Ansible room integration on U(https://hall.com)"
    required: true
  msg:
    description:
      - The message you wish to deliver as a notification
    required: true
  title:
    description:
      - The title of the message
    required: true
  picture:
    description:
      - >
        The full URL to the image you wish to use for the Icon of the message. Defaults to
        U(http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627)
    required: false
"""

EXAMPLES = """
- name: Send Hall notification
  hall:
    room_token: <hall room integration token>
    title: Nginx
    msg: 'Created virtual host file on {{ inventory_hostname }}'
  delegate_to: localhost

- name: Send Hall notification if EC2 servers were created.
  hall:
    room_token: <hall room integration token>
    title: Server Creation
    msg: 'Created instance {{ item.id }} of type {{ item.instance_type }}.\\nInstance can be reached at {{ item.public_ip }} in the {{ item.region }} region.'
  delegate_to: localhost
  when: ec2.instances|length > 0
  with_items: '{{ ec2.instances }}'
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
HALL_API_ENDPOINT = 'https://hall.com/api/1/services/generic/%s'
def send_request_to_hall(module, room_token, payload):
    """POST *payload* as JSON to the Hall generic-service endpoint for *room_token*.

    Fails the module (without leaking the room token in the URL) when the
    service does not answer 200.
    """
    headers = {'Content-Type': 'application/json'}
    body = module.jsonify(payload)
    api_endpoint = HALL_API_ENDPOINT % (room_token,)
    response, info = fetch_url(module, api_endpoint, data=body, headers=headers)
    if info['status'] != 200:
        secure_url = HALL_API_ENDPOINT % ('[redacted]',)
        module.fail_json(msg=" failed to send %s to %s: %s" % (payload, secure_url, info['msg']))
def main():
    """Ansible module entry point: validate parameters and post the notification."""
    module = AnsibleModule(
        argument_spec=dict(
            room_token=dict(type='str', required=True),
            msg=dict(type='str', required=True),
            title=dict(type='str', required=True),
            # Default icon is the Ansible logo hosted on HubSpot.
            picture=dict(type='str',
                         default='http://cdn2.hubspot.net/hub/330046/file-769078210-png/Official_Logos/ansible_logo_black_square_small.png?t=1421076128627'),
        )
    )
    room_token = module.params['room_token']
    message = module.params['msg']
    title = module.params['title']
    picture = module.params['picture']
    payload = {'title': title, 'message': message, 'picture': picture}
    # send_request_to_hall fail_json()s on any non-200 response.
    send_request_to_hall(module, room_token, payload)
    module.exit_json(msg="OK")
|
mbartling/TAMU_senior_design | refs/heads/master | mavlink/cmake/arkcmake/updateArkcmake.py | 24 | #!/usr/bin/python
# Author: Lenna X. Peterson (github.com/lennax)
# Based on bash script by James Goppert (github.com/jgoppert)
#
# script used to update cmake modules from git repo, can't make this
# a submodule otherwise it won't know how to interpret the CMakeLists.txt
# # # # # # subprocess# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import os # for os.path
import subprocess # for check_call()
# Resolve the directory two levels above this script and work from there.
clone_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print clone_path  # NOTE: Python 2 print statement; this script is not Python 3 compatible
os.chdir(clone_path)
# Fetch a fresh copy of arkcmake, then strip its git metadata so it can be
# vendored directly (a submodule would not interpret the CMakeLists.txt).
subprocess.check_call(["git", "clone", "git://github.com/arktools/arkcmake.git","arkcmake_tmp"])
subprocess.check_call(["rm", "-rf", "arkcmake_tmp/.git"])
# Replace any existing vendored copy with the fresh one.
if os.path.isdir("arkcmake"):
    subprocess.check_call(["rm", "-rf", "arkcmake"])
subprocess.check_call(["mv", "arkcmake_tmp", "arkcmake"])
|
ProfessionalIT/professionalit-webiste | refs/heads/master | sdk/google_appengine/lib/django-1.4/django/contrib/sitemaps/management/__init__.py | 12133432 | |
alexsmx/djangoAppengineSrcTemplate | refs/heads/master | __init__.py | 12133432 | |
rohitwaghchaure/alec_frappe5_erpnext | refs/heads/develop | erpnext/accounts/doctype/fiscal_year_company/__init__.py | 12133432 | |
edmorley/django | refs/heads/master | tests/servers/another_app/__init__.py | 12133432 | |
anksp21/Community-Zenpacks | refs/heads/master | ZenPacks.community.Fedora/ZenPacks/community/Fedora/modeler/__init__.py | 12133432 | |
mbauskar/omnitech-demo-erpnext | refs/heads/develop | erpnext/patches/v5_0/update_companywise_payment_account.py | 120 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
    """Patch: migrate Mode of Payment ``default_account`` into the new
    per-company ``accounts`` child table (Mode of Payment Account)."""
    # Make sure the (new) doctypes exist before touching data.
    frappe.reload_doc('accounts', 'doctype', 'mode_of_payment')
    frappe.reload_doc('accounts', 'doctype', 'mode_of_payment_account')
    mode_of_payment_list = frappe.db.sql("""select name, default_account
        from `tabMode of Payment`""", as_dict=1)
    for d in mode_of_payment_list:
        if d.get("default_account"):
            parent_doc = frappe.get_doc("Mode of Payment", d.get("name"))
            # Derive the company from the old account and store the pair
            # in the child table.
            parent_doc.set("accounts",
                [{"company": frappe.db.get_value("Account", d.get("default_account"), "company"),
                "default_account": d.get("default_account")}])
            parent_doc.save()
|
CaptFrank/NetworkDeviceMonitor | refs/heads/master | NetworkMonitor/Base/Node.py | 1 | """
:Node:
==========
:description:
:copyright: (c) 2015-10-23 by francispapineau.
:license: BSD, see LICENSE for more details.
Author: francispapineau
Version: :version: #TODO
Date: 2015-10-23
"""
"""
=============================================
Imports
=============================================
"""
"""
=============================================
Constants
=============================================
"""
# Program Attributes
__author__ = "francispapineau"
__version__ = "" # TODO
__date__ = "2015-10-23"
"""
=============================================
Variables
=============================================
"""
"""
=============================================
Source
=============================================
"""
# Start logger
class Node(object):
    """Skeleton for a network-monitor node.

    NOTE(review): this class looks unfinished — ``__init__`` performs no
    setup (its body is just an example docstring) and ``generate_password``
    calls an API that does not exist in the stdlib.  Confirm intent before
    relying on it.
    """
    def __init__(self):
        # register publisher object
        """
        def main():
            logging.basicConfig(level=logging.DEBUG)
            q = multiprocessing.Queue()
            # Connect to localhost:5672 as guest with the password guest and virtual host "/" (%2F)
            example = NodePublisher('amqp://test:test@localhost:5672/%2F?connection_attempts=3&heartbeat_interval=3600',
                                  q)
            try:
                example.start()
                for i in range(0,100):
                    q.put({str(i) : str(i)})
            except KeyboardInterrupt:
                example.stop()
        if __name__ == '__main__':
            main()
        """
        # Start the Logstash Engine
        return
    def generate_password(self):
        # NOTE(review): the stdlib ``getpass`` module has no ``AskPassword``
        # attribute, so this raises AttributeError at runtime — probably
        # ``getpass.getpass()`` was intended; confirm.
        import getpass
        getpass.AskPassword()
#encode 64 |
jdmonaco/grid-remapping-model | refs/heads/master | src/analysis/scan.py | 1 | #encoding: utf-8
"""
grid.analysis.scan -- AbstractAnalysis subclass for exploring statistics of spatial
map properties by scanning a single parameter with even sampling.
Written by Joe Monaco, 05/14/2008.
Copyright (c) 2008 Columbia University. All rights reserved.
"""
# Library imports
import numpy as N, scipy as S, os
# Package imports
from .. import PlaceNetworkStd, CheckeredRatemap, GridCollection
from ..core.analysis import AbstractAnalysis
from ..tools.string import snake2title
# Traits imports
from enthought.traits.api import Enum, Button
from enthought.traits.ui.api import View, Group, Item, Include
# Chaco imports for custom analysis view
from enthought.chaco.api import (ArrayPlotData, ArrayDataSource, Plot,
GridContainer)
from enthought.enable.component_editor import ComponentEditor
# Tuples of available placemap data
STAGE_DATA = ('sparsity', 'stage_coverage', 'stage_repr', 'peak_rate')
UNIT_DATA = ('max_rate', 'num_fields', 'coverage')
FIELD_DATA = ('area', 'diameter', 'peak', 'average')
DATA = STAGE_DATA + UNIT_DATA + FIELD_DATA
class BaseScan(AbstractAnalysis):
    """Base class for 1D parameter-scan analyses: provides the Chaco plot
    grid; subclasses implement collect_data() to fill self.results."""
    # Display label used by the analysis framework.
    label = 'Stat Scan'
    save_current_plot = Button
    # Traits UI layout: a single resizable window holding the Chaco figure.
    traits_view = \
        View(
            Item('figure', show_label=False, editor=ComponentEditor()),
            title='Network Population Scan',
            kind='live',
            resizable=True,
            width=0.75,
            height=0.75,
            buttons=['Cancel', 'OK'])
    def collect_data(self, ntrials=10, npoints=4, param='J0', bounds=(1, 4),
        **kwargs):
        # Subclasses run the actual scan and populate self.results.
        raise NotImplementedError
    def create_plots(self):
        """Create a simple 2D image plot of the parameter sweep"""
        # Figure is horizontal container for main plot + colorbar
        # NOTE(review): despite the comment above, no colorbar is added —
        # the container is a 3x4 grid of per-statistic plots.
        self.figure = \
            container = GridContainer(fill_padding=True, spacing=(5,5),
                padding=[20, 20, 40, 10], bgcolor='linen', shape=(3,4))
        # Create datasource for means and confidence intervals
        data_dict = {}
        for d in DATA:
            # The means (NaNs zeroed so Chaco can render them)
            data_dict[d] = self.results[d]
            data_dict[d][N.isnan(data_dict[d])] = 0.0
            # The 95% confidence intervals (1.96 * standard error)
            conf_interval = 1.96*self.results[d + '_err']
            conf_interval[N.isnan(conf_interval)] = 0.0
            # Upper band followed by reversed lower band -> closed polygon.
            data_dict[d + '_err'] = \
                N.r_[self.results[d] + conf_interval,
                    (self.results[d] - conf_interval)[::-1]]
        data = ArrayPlotData(
            index=self.results['samples'],
            err_ix=N.r_[self.results['samples'], self.results['samples'][::-1]],
            **data_dict)
        # Create individual plots and add to grid container
        pad_factor = 0.08
        for d in DATA:
            p = Plot(data, padding=[25, 25, 25, 40])
            styles = {'line_width':1.5, 'color':'darkcyan'}
            # Plot the error band, line, and scatter plots
            p.plot(('err_ix', d + '_err'), name=d+'_p', type='polygon',
                edge_color='transparent', face_color='silver')
            p.plot(('index', d), name=d+'_l', type='line', **styles)
            p.plot(('index', d), name=d+'_s', type='scatter', marker='circle',
                marker_size=int(2*styles['line_width']), color=styles['color'],
                line_width=0)
            # Y-axis padding (expand degenerate flat range so bounds differ)
            low = (self.results[d] - self.results[d + '_err']).min()
            high = (self.results[d] + self.results[d + '_err']).max()
            padding = pad_factor * (high - low)
            low -= padding
            high += padding
            if low == high:
                low -= 1
                high += 1
            p.value_range.set_bounds(low, high)
            # X-axis padding
            padding = pad_factor * self.results['samples'].ptp()
            p.index_range.set_bounds(
                self.results['samples'][0] - padding,
                self.results['samples'][-1] + padding)
            # Labels, grids and ticks
            p.title = snake2title(d)
            p.x_axis.title = snake2title(self.results['param'])
            p.y_grid.visible = p.x_grid.visible = False
            p.x_axis.tick_in = p.y_axis.tick_in = 0
            # Add the plot to the grid container
            container.add(p)
class MultiNetworkScan(BaseScan):
"""
Analyze a 1D scan of regularly-spaced distributions of network simulations
across parameter space.
See core.analysis.AbstractAnalysis documentation and collect_data method
signature and docstring for usage.
"""
label = 'Network Scan'
def collect_data(self, ntrials=10, npoints=5, param='J0', bounds=(0.5, 8),
**kwargs):
"""
Store statistics about placemap data from multiple trials along a 1D
parameter scan
Keyword arguments:
ntrials -- number of network trials to run per sample point
npoints -- number of sample points, inclusive of the bounds
param -- string name of PlaceNetwork parameter to scan
bounds -- bounds for the parameter scan
"""
# Store bounds and scan parameter
self.results['bounds'] = N.array(bounds)
self.results['param'] = param
# Load cortex
self.out('Creating grid collection object...')
EC = GridCollection()
os.chdir(self.datadir)
# Set default model parameters
pdict = dict( EC=EC,
growl=False,
desc='scan',
projdir=self.datadir,
refresh_weights=True,
refresh_orientation=False,
refresh_phases=False,
refresh_traj=False,
traj_type='checker',
num_trials=ntrials,
monitoring=True)
pdict.update(kwargs)
# Update with keyword arguments
if param not in PlaceNetworkStd().traits(user=True).keys():
raise ValueError, 'param (%s) is not a user parameter'%param
# Create the list of sample points to scan
self.out('Creating %s scan vector from %.2f to %.2f'%((param,)+bounds))
if bounds[0] > bounds[1]:
bounds = bounds[::-1]
pts = N.linspace(bounds[0], bounds[1], num=npoints)
self.results['samples'] = pts
# Initialize stage map sample data arrays
sparsity = N.empty(npoints, 'd')
sparsity_err = N.empty(npoints, 'd')
stage_coverage = N.empty(npoints, 'd')
stage_coverage_err = N.empty(npoints, 'd')
stage_repr = N.empty(npoints, 'd')
stage_repr_err = N.empty(npoints, 'd')
peak_rate = N.empty(npoints, 'd')
peak_rate_err = N.empty(npoints, 'd')
# Initialize per-unit sample data arrays
max_rate = N.zeros(npoints, 'd')
max_rate_err = N.zeros(npoints, 'd')
num_fields = N.zeros(npoints, 'd')
num_fields_err = N.zeros(npoints, 'd')
coverage = N.zeros(npoints, 'd')
coverage_err = N.zeros(npoints, 'd')
# Initialize per-field sample data arrays
area = N.zeros(npoints, 'd')
area_err = N.zeros(npoints, 'd')
diameter = N.zeros(npoints, 'd')
diameter_err = N.zeros(npoints, 'd')
peak = N.zeros(npoints, 'd')
peak_err = N.zeros(npoints, 'd')
average = N.zeros(npoints, 'd')
average_err = N.zeros(npoints, 'd')
# Error calculation
def error(values):
return N.std(values) / N.sqrt(len(values))
# Per-sample data collection method
def run_sample_point(i, model):
self.out('Running (%d): %s = %.4f'%(i, param, getattr(model, param)))
# Run the model simulation and save the results
model.advance_all()
# Create ratemap objects
ir_list = [None] * ntrials
fdata_list = [None] * ntrials
udata_list = [None] * ntrials
for trial in xrange(ntrials):
ir = CheckeredRatemap(model.post_mortem(trial=trial+1))
ir.compute_coverage()
ir_list[trial] = ir
fdata_list[trial] = ir.get_field_data()
udata_list[trial] = ir.get_unit_data()
# Collate the stage map data
sparsity[i] = N.mean([ir.sparsity for ir in ir_list])
sparsity_err[i] = error([ir.sparsity for ir in ir_list])
stage_coverage[i] = N.mean([ir.stage_coverage for ir in ir_list])
stage_coverage_err[i] = error([ir.stage_coverage for ir in ir_list])
stage_repr[i] = N.mean([ir.stage_repr for ir in ir_list])
stage_repr_err[i] = error([ir.stage_repr for ir in ir_list])
peak_rate[i] = N.mean([ir.peak_rate for ir in ir_list])
peak_rate_err[i] = error([ir.peak_rate for ir in ir_list])
# Collate the per-unit data
_max_rate = N.array([], 'd')
_num_fields = N.array([], 'd')
_coverage = N.array([], 'd')
for udata in udata_list:
_max_rate = N.r_[_max_rate, udata['max_r']]
_num_fields = N.r_[_num_fields, udata['num_fields']]
_coverage = N.r_[_coverage, udata['coverage']]
max_rate[i] = _max_rate.mean()
max_rate_err[i] = error(_max_rate)
num_fields[i] = _num_fields.mean()
num_fields_err[i] = error(_num_fields)
coverage[i] = _coverage.mean()
coverage_err[i] = error(_coverage)
# Collate the per-field data
_area = N.array([], 'd')
_diameter = N.array([], 'd')
_peak = N.array([], 'd')
_average = N.array([], 'd')
for fdata in fdata_list:
_area = N.r_[_area, fdata['area']]
_diameter = N.r_[_diameter, fdata['diameter']]
_peak = N.r_[_peak, fdata['peak']]
_average = N.r_[_average, fdata['average']]
area[i] = _area.mean()
area_err[i] = error(_area)
diameter[i] = _diameter.mean()
diameter_err[i] = error(_diameter)
peak[i] = _peak.mean()
peak_err[i] = error(_peak)
average[i] = _average.mean()
average_err[i] = error(_average)
# Execute data collection process for each sample point
self.out('Beginning data collection process')
for i, p in enumerate(pts):
pdict[param] = p
self.execute(run_sample_point, i, PlaceNetworkStd(**pdict))
# Store the mean data results
self.results['sparsity'] = sparsity
self.results['stage_coverage'] = stage_coverage
self.results['stage_repr'] = stage_repr
self.results['peak_rate'] = peak_rate
self.results['max_rate'] = max_rate
self.results['num_fields'] = num_fields
self.results['coverage'] = coverage
self.results['area'] = area
self.results['diameter'] = diameter
self.results['peak'] = peak
self.results['average'] = average
# ... and the error data
self.results['sparsity_err'] = sparsity_err
self.results['stage_coverage_err'] = stage_coverage_err
self.results['stage_repr_err'] = stage_repr_err
self.results['peak_rate_err'] = peak_rate_err
self.results['max_rate_err'] = max_rate_err
self.results['num_fields_err'] = num_fields_err
self.results['coverage_err'] = coverage_err
self.results['area_err'] = area_err
self.results['diameter_err'] = diameter_err
self.results['peak_err'] = peak_err
self.results['average_err'] = average_err
# Good-bye!
self.out('All done!')
|
pupboss/xndian | refs/heads/master | deploy/site-packages/pip/_vendor/distlib/util.py | 190 | #
# Copyright (C) 2012-2013 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tarfile
import tempfile
try:
import threading
except ImportError:
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, httplib, xmlrpclib, splittype,
HTTPHandler, HTTPSHandler as BaseHTTPSHandler,
BaseConfigurator, valid_ident, Container, configparser,
URLError, match_hostname, CertificateError, ZipFile)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
# All pattern fragments use raw strings so that regex escapes such as \s
# are not interpreted as (invalid) string escape sequences, which raise
# SyntaxWarning/DeprecationWarning on modern Python.  The resulting pattern
# values are identical to before.
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'

RELOP = '([<>=!~]=)|[<>]'

#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
                    RELOP + r')\s*(' + VERSPEC + '))*')

DIRECT_REF = r'(from\s+(?P<diref>.*))'

#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
               r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + r'\s*)')

EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
               CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)

#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
    """Parse a requirement string such as 'foo >= 1.2, < 2.0 [bar, baz]'.

    Returns a Container with attributes ``name``, ``constraints`` (a list of
    (operator, version) tuples or None), ``extras`` (list or None),
    ``requirement`` (a normalised 'name (op ver, ...)' string), ``source``
    (the original string) and ``url`` (for 'from <url>' direct references),
    or None if the string does not match REQUIREMENT_RE.
    """
    def get_constraint(m):
        # Convert one 'op version' regex match into an (op, version) tuple.
        d = m.groupdict()
        return d['op'], d['vn']

    result = None
    m = REQUIREMENT_RE.match(s)
    if m:
        d = m.groupdict()
        name = d['dn']
        # Constraints can be matched either parenthesised (c1) or bare (c2).
        cons = d['c1'] or d['c2']
        if not d['diref']:
            url = None
        else:
            # direct reference: 'from <url>' replaces version constraints
            cons = None
            url = d['diref'].strip()
        if not cons:
            cons = None
            constr = ''  # NOTE(review): 'constr' appears unused - confirm
            rs = d['dn']
        else:
            # A bare leading version with no operator means '~=' (compatible
            # release), per the convention this parser implements.
            if cons[0] not in '<>!=':
                cons = '~=' + cons
            iterator = RELOP_IDENT_RE.finditer(cons)
            cons = [get_constraint(m) for m in iterator]
            rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
        if not d['ex']:
            extras = None
        else:
            extras = COMMA_RE.split(d['ex'])
        result = Container(name=name, constraints=cons, extras=extras,
                           requirement=rs, source=s, url=url)
    return result
def get_resources_dests(resources_root, rules):
    """Find destinations for resources files"""
    def relative_to(base, path):
        # normalizes and returns a lstripped-/-separated path
        base = base.replace(os.path.sep, '/')
        path = path.replace(os.path.sep, '/')
        assert path.startswith(base)
        return path[len(base):].lstrip('/')

    destinations = {}
    for base, suffix, dest in rules:
        prefix = os.path.join(resources_root, base)
        for abs_base in iglob(prefix):
            for abs_path in iglob(os.path.join(abs_base, suffix)):
                resource_file = relative_to(resources_root, abs_path)
                if dest is None:
                    # a None destination removes any earlier mapping
                    destinations.pop(resource_file, None)
                else:
                    rel_path = relative_to(abs_base, abs_path)
                    rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
                    destinations[resource_file] = rel_dest + '/' + rel_path
    return destinations
def in_venv():
    """Return True when running inside a virtual environment."""
    if hasattr(sys, 'real_prefix'):
        # virtualenv venvs
        return True
    # PEP 405 venvs
    return sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
def get_executable():
    """Return the path of the Python executable to use."""
    if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
                                     in os.environ):
        # The OS X venv launcher records the launching executable here.
        return os.environ['__PYVENV_LAUNCHER__']
    return sys.executable
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
    """Prompt the user until the first character of the reply (lower-cased)
    is one of *allowed_chars*, then return that character.  An empty reply
    falls back to *default*; *error_prompt* is shown after invalid input.
    """
    current = prompt
    while True:
        reply = raw_input(current)
        current = prompt
        if not reply and default:
            reply = default
        if reply:
            answer = reply[0].lower()
            if answer in allowed_chars:
                return answer
            if error_prompt:
                current = '%c: %s\n%s' % (answer, error_prompt, prompt)
def extract_by_key(d, keys):
    """Return a new dict containing only *keys* copied from *d*.

    *keys* may be a sequence or a whitespace-separated string; keys absent
    from *d* are silently skipped.
    """
    if isinstance(keys, string_types):
        keys = keys.split()
    return dict((key, d[key]) for key in keys if key in d)
def read_exports(stream):
    """Read an exports mapping ``{group: {name: ExportEntry}}`` from *stream*.

    The stream may contain either JSON with a top-level 'exports' key, or
    the legacy INI-style format; JSON is tried first and any parse failure
    falls back to the INI parser.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getreader('utf-8')(stream)
    # Try to load as JSON, falling back on legacy format
    data = stream.read()
    stream = StringIO(data)
    try:
        data = json.load(stream)
        result = data['exports']
        for group, entries in result.items():
            for k, v in entries.items():
                # Re-parse 'name = prefix:suffix [flags]' into an ExportEntry.
                s = '%s = %s' % (k, v)
                entry = get_export_entry(s)
                assert entry is not None
                entries[k] = entry
        return result
    except Exception:
        # Not JSON - rewind and parse as INI sections of name/value pairs.
        stream.seek(0, 0)
        cp = configparser.ConfigParser()
        if hasattr(cp, 'read_file'):
            cp.read_file(stream)
        else:
            # Python 2 fallback (read_file added in Python 3.2)
            cp.readfp(stream)
        result = {}
        for key in cp.sections():
            result[key] = entries = {}
            for name, value in cp.items(key):
                s = '%s = %s' % (name, value)
                entry = get_export_entry(s)
                assert entry is not None
                #entry.dist = self
                entries[name] = entry
        return result
def write_exports(exports, stream):
    """Write an exports mapping ``{group: {name: ExportEntry}}`` to *stream*
    (a binary stream on Python 3) in INI format.
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for group, entries in exports.items():
        # TODO check group, entries for valid values
        cp.add_section(group)
        for entry in entries.values():
            if entry.suffix is None:
                value = entry.prefix
            else:
                value = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                value = '%s [%s]' % (value, ', '.join(entry.flags))
            cp.set(group, entry.name, value)
    cp.write(stream)
@contextlib.contextmanager
def tempdir():
    """Context manager yielding a temporary directory, removed on exit."""
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
@contextlib.contextmanager
def chdir(d):
    """Context manager that changes into *d*, restoring the old cwd on exit."""
    saved = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        os.chdir(saved)
@contextlib.contextmanager
def socket_timeout(seconds=15):
    """Context manager temporarily setting the default socket timeout."""
    saved = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(seconds)
        yield
    finally:
        socket.setdefaulttimeout(saved)
class cached_property(object):
    """Non-data descriptor that caches the wrapped getter's result on the
    instance after the first access, shadowing the descriptor thereafter.
    """
    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls=None):
        if obj is None:
            # Accessed on the class: return the descriptor itself.
            return self
        result = self.func(obj)
        # Store via object.__setattr__ so a __setattr__ override on the
        # instance's class cannot interfere with caching.
        object.__setattr__(obj, self.func.__name__, result)
        return result
def convert_path(pathname):
    """Return 'pathname' as a name that will work on the native filesystem.

    The path is split on '/' and put back together again using the current
    directory separator.  Needed because filenames in the setup script are
    always supplied in Unix style, and have to be converted to the local
    convention before we can actually use them in the filesystem.  Raises
    ValueError on non-Unix-ish systems if 'pathname' either starts or
    ends with a slash.
    """
    if os.sep == '/' or not pathname:
        return pathname
    if pathname.startswith('/'):
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname.endswith('/'):
        raise ValueError("path '%s' cannot end with '/'" % pathname)
    # Drop any '.' components before rejoining with the native separator.
    parts = [p for p in pathname.split('/') if p != os.curdir]
    if not parts:
        return os.curdir
    return os.path.join(*parts)
class FileOperator(object):
    """Perform file system operations with optional dry-run and recording.

    When ``dry_run`` is true, intended actions are logged but the filesystem
    is not touched.  When ``self.record`` is set to True by a caller, files
    written and directories created are tracked so the changes can be
    reported via :meth:`commit` or undone via :meth:`rollback`.
    """
    def __init__(self, dry_run=False):
        # dry_run: log intended actions without making changes.
        self.dry_run = dry_run
        self.ensured = set()  # directories already known/ensured to exist
        self._init_record()

    def _init_record(self):
        # Recording is off until a caller sets self.record = True.
        self.record = False
        self.files_written = set()
        self.dirs_created = set()

    def record_as_written(self, path):
        # Track a written file so rollback() can remove it later.
        if self.record:
            self.files_written.add(path)

    def newer(self, source, target):
        """Tell if the target is newer than the source.

        Returns true if 'source' exists and is more recently modified than
        'target', or if 'source' exists and 'target' doesn't.

        Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise DistlibException if 'source' does not exist.

        Note that this test is not very accurate: files created in the same
        second will have the same "age".
        """
        if not os.path.exists(source):
            raise DistlibException("file '%r' does not exist" %
                                   os.path.abspath(source))
        if not os.path.exists(target):
            return True

        return os.stat(source).st_mtime > os.stat(target).st_mtime

    def copy_file(self, infile, outfile, check=True):
        """Copy a file respecting dry-run and force flags.

        When *check* is true, refuse to overwrite a symlink or any
        non-regular file at *outfile*.
        """
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying %s to %s', infile, outfile)
        if not self.dry_run:
            msg = None
            if check:
                if os.path.islink(outfile):
                    msg = '%s is a symlink' % outfile
                elif os.path.exists(outfile) and not os.path.isfile(outfile):
                    msg = '%s is a non-regular file' % outfile
            if msg:
                raise ValueError(msg + ' which would be overwritten')
            shutil.copyfile(infile, outfile)
        # Recorded even in dry-run mode, mirroring what a real run would do.
        self.record_as_written(outfile)

    def copy_stream(self, instream, outfile, encoding=None):
        # Copy a stream's contents to a file; a non-None *encoding* makes
        # it a text copy, otherwise bytes are copied verbatim.
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                outstream = open(outfile, 'wb')
            else:
                outstream = codecs.open(outfile, 'w', encoding=encoding)
            try:
                shutil.copyfileobj(instream, outstream)
            finally:
                outstream.close()
        self.record_as_written(outfile)

    def write_binary_file(self, path, data):
        # Write *data* (bytes) to *path*, creating parent dirs as needed.
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data)
        self.record_as_written(path)

    def write_text_file(self, path, data, encoding):
        # Write text *data* to *path*, encoded with *encoding*.
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data.encode(encoding))
        self.record_as_written(path)

    def set_mode(self, bits, mask, files):
        """OR *bits* into each file's mode, then AND with *mask* (POSIX only)."""
        if os.name == 'posix':
            # Set the executable bits (owner, group, and world) on
            # all the files specified.
            for f in files:
                if self.dry_run:
                    logger.info("changing mode of %s", f)
                else:
                    mode = (os.stat(f).st_mode | bits) & mask
                    logger.info("changing mode of %s to %o", f, mode)
                    os.chmod(f, mode)
    # Convenience: mark files r-xr-xr-x while preserving other mode bits.
    set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)

    def ensure_dir(self, path):
        # Recursively create *path* if needed; caches already-ensured paths
        # in self.ensured to avoid repeated stat calls.
        path = os.path.abspath(path)
        if path not in self.ensured and not os.path.exists(path):
            self.ensured.add(path)
            d, f = os.path.split(path)
            self.ensure_dir(d)
            logger.info('Creating %s' % path)
            if not self.dry_run:
                os.mkdir(path)
            if self.record:
                self.dirs_created.add(path)

    def byte_compile(self, path, optimize=False, force=False, prefix=None):
        """Byte-compile *path*, returning the cache (.pyc) path.

        *prefix*, when given, is stripped from the path used in error
        diagnostics; *force* recompiles even when the cache is up to date.
        """
        # Second argument is debug_override: True selects the
        # non-optimized bytecode cache location.
        dpath = cache_from_source(path, not optimize)
        logger.info('Byte-compiling %s to %s', path, dpath)
        if not self.dry_run:
            if force or self.newer(path, dpath):
                if not prefix:
                    diagpath = None
                else:
                    assert path.startswith(prefix)
                    diagpath = path[len(prefix):]
            py_compile.compile(path, dpath, diagpath, True)     # raise error
        self.record_as_written(dpath)
        return dpath

    def ensure_removed(self, path):
        # Remove a file, link or directory tree if it exists, updating the
        # recording sets so commit/rollback stay accurate.
        if os.path.exists(path):
            if os.path.isdir(path) and not os.path.islink(path):
                logger.debug('Removing directory tree at %s', path)
                if not self.dry_run:
                    shutil.rmtree(path)
                if self.record:
                    if path in self.dirs_created:
                        self.dirs_created.remove(path)
            else:
                if os.path.islink(path):
                    s = 'link'
                else:
                    s = 'file'
                logger.debug('Removing %s %s', s, path)
                if not self.dry_run:
                    os.remove(path)
                if self.record:
                    if path in self.files_written:
                        self.files_written.remove(path)

    def is_writable(self, path):
        # Walk up from *path* to the nearest existing ancestor and report
        # whether that ancestor is writable.
        result = False
        while not result:
            if os.path.exists(path):
                result = os.access(path, os.W_OK)
                break
            parent = os.path.dirname(path)
            if parent == path:
                break
            path = parent
        return result

    def commit(self):
        """
        Commit recorded changes, turn off recording, return
        changes.
        """
        assert self.record
        result = self.files_written, self.dirs_created
        self._init_record()
        return result

    def rollback(self):
        # Undo recorded changes: delete written files, then remove created
        # directories (deepest first).  Only meaningful outside dry-run.
        if not self.dry_run:
            for f in list(self.files_written):
                if os.path.exists(f):
                    os.remove(f)
            # dirs should all be empty now, except perhaps for
            # __pycache__ subdirs
            # reverse so that subdirs appear before their parents
            dirs = sorted(self.dirs_created, reverse=True)
            for d in dirs:
                flist = os.listdir(d)
                if flist:
                    assert flist == ['__pycache__']
                    sd = os.path.join(d, flist[0])
                    os.rmdir(sd)
                os.rmdir(d)     # should fail if non-empty
        self._init_record()
def resolve(module_name, dotted_path):
    """Import *module_name* and return the object named by *dotted_path*
    within it (or the module itself when *dotted_path* is None).

    :param module_name: Possibly-dotted module name, e.g. 'logging.config'.
    :param dotted_path: Attribute path inside the module, e.g. 'dictConfig',
                        or None for the module object.
    :raises ImportError: if the module cannot be imported.
    :raises AttributeError: if any attribute in the path is missing.
    """
    if module_name in sys.modules:
        mod = sys.modules[module_name]
    else:
        mod = __import__(module_name)
        # __import__ returns the top-level package for a dotted name
        # (e.g. 'a' for 'a.b'); walk down to the requested submodule so
        # the attribute lookup below starts from the right module.
        for comp in module_name.split('.')[1:]:
            mod = getattr(mod, comp)
    if dotted_path is None:
        result = mod
    else:
        parts = dotted_path.split('.')
        result = getattr(mod, parts.pop(0))
        for p in parts:
            result = getattr(result, p)
    return result
class ExportEntry(object):
    """A parsed export specification: ``name = prefix:suffix [flags]``."""

    def __init__(self, name, prefix, suffix, flags):
        self.name = name
        self.prefix = prefix
        self.suffix = suffix
        self.flags = flags

    @cached_property
    def value(self):
        """The exported object, imported lazily on first access."""
        return resolve(self.prefix, self.suffix)

    def __repr__(self):
        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
                                                self.suffix, self.flags)

    def __eq__(self, other):
        return (isinstance(other, ExportEntry) and
                self.name == other.name and
                self.prefix == other.prefix and
                self.suffix == other.suffix and
                self.flags == other.flags)

    __hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+)
                      \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
                      \s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
                      ''', re.VERBOSE)

def get_export_entry(specification):
    """Parse 'name = prefix:suffix [flags]' into an ExportEntry.

    Returns None when *specification* is not an export entry at all;
    raises DistlibException when it looks malformed.
    """
    m = ENTRY_RE.search(specification)
    if not m:
        if '[' in specification or ']' in specification:
            raise DistlibException('Invalid specification '
                                   '%r' % specification)
        return None
    d = m.groupdict()
    path = d['callable']
    colons = path.count(':')
    if colons > 1:
        # at most one 'module:attribute' separator is allowed
        raise DistlibException('Invalid specification '
                               '%r' % specification)
    if colons == 0:
        prefix, suffix = path, None
    else:
        prefix, suffix = path.split(':')
    flags = d['flags']
    if flags is None:
        if '[' in specification or ']' in specification:
            raise DistlibException('Invalid specification '
                                   '%r' % specification)
        flags = []
    else:
        flags = [f.strip() for f in flags.split(',')]
    return ExportEntry(d['name'], prefix, suffix, flags)
def get_cache_base(suffix=None):
    """
    Return the default base location for distlib caches. If the directory does
    not exist, it is created. Use the suffix provided for the base directory,
    and default to '.distlib' if it isn't provided.

    On Windows, if LOCALAPPDATA is defined in the environment, then it is
    assumed to be a directory, and will be the parent directory of the result.
    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.expanduser('~') - will be the parent directory of
    the result.

    The result is just the directory '.distlib' in the parent directory as
    determined above, or with the name specified with ``suffix``.
    """
    suffix = '.distlib' if suffix is None else suffix
    if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
        parent = os.path.expandvars('$localappdata')
    else:
        # Assume posix, or old Windows
        parent = os.path.expanduser('~')
    # we use 'isdir' instead of 'exists', because we want to
    # fail if there's a file with that name
    usable = False
    if os.path.isdir(parent):
        usable = os.access(parent, os.W_OK)
        if not usable:
            logger.warning('Directory exists but is not writable: %s', parent)
    else:
        try:
            os.makedirs(parent)
            usable = True
        except OSError:
            logger.warning('Unable to create %s', parent, exc_info=True)
    if not usable:
        # fall back to a fresh temporary directory
        parent = tempfile.mkdtemp()
        logger.warning('Default location unusable, using %s', parent)
    return os.path.join(parent, suffix)
def path_to_cache_dir(path):
    """
    Convert an absolute path to a directory name for use in a cache.

    The algorithm used is:

    #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
    #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
    #. ``'.cache'`` is appended.
    """
    drive, rest = os.path.splitdrive(os.path.abspath(path))
    if drive:
        drive = drive.replace(':', '---')
    return '%s%s.cache' % (drive, rest.replace(os.sep, '--'))
def ensure_slash(s):
    """Return *s*, appending a '/' if it does not already end with one."""
    return s if s.endswith('/') else s + '/'
def parse_credentials(netloc):
    """Split 'user:password@host' into (user, password, host).

    Missing username/password are returned as None; only the first '@'
    and the first ':' before it are significant.
    """
    username = password = None
    if '@' in netloc:
        creds, netloc = netloc.split('@', 1)
        if ':' in creds:
            username, password = creds.split(':', 1)
        else:
            username = creds
    return username, password, netloc
def get_process_umask():
    """Return the current process umask without permanently changing it."""
    # There is no read-only umask API: set a dummy value, then restore.
    current = os.umask(0o22)
    os.umask(current)
    return current
def is_string_sequence(seq):
    """Return True if every element of *seq* is a string.

    NOTE(review): an empty *seq* trips the assert below and raises
    AssertionError - presumably deliberate; confirm before relying on it.
    """
    index = None
    for index, item in enumerate(seq):
        if not isinstance(item, string_types):
            return False
    assert index is not None
    return True
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
                                      '([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')

def split_filename(filename, project_name=None):
    """
    Extract name, version, python version from a filename (no extension)

    Return name, version, pyver or None
    """
    pyver = None
    # Strip a trailing '-pyX.Y' marker first, if present.
    m = PYTHON_VERSION.search(filename)
    if m:
        pyver = m.group(1)
        filename = filename[:m.start()]
    if project_name and len(filename) > len(project_name) + 1:
        # The caller supplied the project name: split right after it.
        m = re.match(re.escape(project_name) + r'\b', filename)
        if m:
            n = m.end()
            return filename[:n], filename[n + 1:], pyver
    # Fall back to the generic 'name-version' pattern.
    m = PROJECT_NAME_AND_VERSION.match(filename)
    if m:
        return m.group(1), m.group(3), pyver
    return None
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')

def parse_name_and_version(p):
    """
    A utility method used to get name and version from a string.
    From e.g. a Provides-Dist value.

    :param p: A value in a form 'foo (1.0)'
    :return: The name and version as a tuple.
    """
    m = NAME_VERSION_RE.match(p)
    if not m:
        raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
    groups = m.groupdict()
    return groups['name'].strip().lower(), groups['ver']
def get_extras(requested, available):
    """Resolve a set of requested extras against those declared available.

    '*' selects every available extra; '-name' deselects one.  Undeclared
    extras are logged as warnings; a literal '-' is passed straight through.
    """
    result = set()
    requested = set(requested or [])
    available = set(available or [])
    if '*' in requested:
        requested.discard('*')
        result.update(available)
    for r in requested:
        if r == '-':
            result.add(r)
        elif r.startswith('-'):
            unwanted = r[1:]
            if unwanted not in available:
                logger.warning('undeclared extra: %s' % unwanted)
            result.discard(unwanted)
        else:
            if r not in available:
                logger.warning('undeclared extra: %s' % r)
            result.add(r)
    return result
#
# Extended metadata functionality
#
def _get_external_data(url):
    """Fetch and decode JSON metadata from *url*; return {} on any failure."""
    result = {}
    try:
        # urlopen might fail if it runs into redirections,
        # because of Python issue #13696. Fixed in locators
        # using a custom redirect handler.
        resp = urlopen(url)
        headers = resp.info()
        if headers.get('Content-Type') != 'application/json':
            logger.debug('Unexpected response for JSON request')
        else:
            reader = codecs.getreader('utf-8')(resp)
            result = json.load(reader)
    except Exception as e:
        logger.exception('Failed to get external data for %s: %s', url, e)
    return result
def get_project_data(name):
    """Fetch the project-level JSON metadata for *name*."""
    url = ('https://www.red-dove.com/pypi/projects/'
           '%s/%s/project.json' % (name[0].upper(), name))
    return _get_external_data(url)

def get_package_data(name, version):
    """Fetch the JSON metadata for a specific *version* of *name*."""
    url = ('https://www.red-dove.com/pypi/projects/'
           '%s/%s/package-%s.json' % (name[0].upper(), name, version))
    return _get_external_data(url)
class Cache(object):
    """
    A class implementing a cache for resources that need to live in the file system
    e.g. shared libraries. This class was moved from resources to here because it
    could be used by other modules, e.g. the wheel module.
    """

    def __init__(self, base):
        """
        Initialise an instance.

        :param base: The base directory where the cache should be located.
        """
        # we use 'isdir' instead of 'exists', because we want to
        # fail if there's a file with that name
        if not os.path.isdir(base):
            os.makedirs(base)
        if (os.stat(base).st_mode & 0o77) != 0:
            logger.warning('Directory \'%s\' is not private', base)
        self.base = os.path.abspath(os.path.normpath(base))

    def prefix_to_dir(self, prefix):
        """
        Converts a resource prefix to a directory name in the cache.
        """
        return path_to_cache_dir(prefix)

    def clear(self):
        """
        Clear the cache, returning the entries that could not be removed.
        """
        failures = []
        for name in os.listdir(self.base):
            full = os.path.join(self.base, name)
            try:
                if os.path.islink(full) or os.path.isfile(full):
                    os.remove(full)
                elif os.path.isdir(full):
                    shutil.rmtree(full)
            except Exception:
                failures.append(full)
        return failures
class EventMixin(object):
    """
    A very simple publish/subscribe system.
    """
    def __init__(self):
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        """
        Add a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be added (and called when the
                           event is published).
        :param append: Whether to append or prepend the subscriber to an
                       existing subscriber list for the event.
        """
        queue = self._subscribers.setdefault(event, deque())
        if append:
            queue.append(subscriber)
        else:
            queue.appendleft(subscriber)

    def remove(self, event, subscriber):
        """
        Remove a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be removed.
        """
        if event not in self._subscribers:
            raise ValueError('No subscribers: %r' % event)
        self._subscribers[event].remove(subscriber)

    def get_subscribers(self, event):
        """
        Return an iterator for the subscribers for an event.
        :param event: The event to return subscribers for.
        """
        return iter(self._subscribers.get(event, ()))

    def publish(self, event, *args, **kwargs):
        """
        Publish a event and return a list of values returned by its
        subscribers.

        :param event: The event to publish.
        :param args: The positional arguments to pass to the event's
                     subscribers.
        :param kwargs: The keyword arguments to pass to the event's
                       subscribers.
        """
        result = []
        for subscriber in self.get_subscribers(event):
            try:
                value = subscriber(event, *args, **kwargs)
            except Exception:
                logger.exception('Exception during event publication')
                value = None
            result.append(value)
        logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
                     event, args, kwargs, result)
        return result
#
# Simple sequencing
#
class Sequencer(object):
    """Order steps connected by predecessor/successor constraints.

    :meth:`get_steps` yields the steps needed to reach a given final step in
    a valid execution order; :attr:`strong_connections` exposes Tarjan's
    strongly-connected-components algorithm for cycle detection, and
    :attr:`dot` renders the graph in Graphviz format.
    """
    def __init__(self):
        self._preds = {}        # step -> set of its predecessors
        self._succs = {}        # step -> set of its successors
        self._nodes = set()     # nodes with no preds/succs

    def add_node(self, node):
        # Register a standalone node that participates in no edges.
        self._nodes.add(node)

    def remove_node(self, node, edges=False):
        # Remove a standalone node; with edges=True, also remove every edge
        # in which it participates, pruning emptied adjacency entries.
        if node in self._nodes:
            self._nodes.remove(node)
        if edges:
            for p in set(self._preds.get(node, ())):
                self.remove(p, node)
            for s in set(self._succs.get(node, ())):
                self.remove(node, s)
            # Remove empties
            for k, v in list(self._preds.items()):
                if not v:
                    del self._preds[k]
            for k, v in list(self._succs.items()):
                if not v:
                    del self._succs[k]

    def add(self, pred, succ):
        # Record that *pred* must come before *succ*.
        assert pred != succ
        self._preds.setdefault(succ, set()).add(pred)
        self._succs.setdefault(pred, set()).add(succ)

    def remove(self, pred, succ):
        # Remove the pred -> succ edge; ValueError if it does not exist.
        assert pred != succ
        try:
            preds = self._preds[succ]
            succs = self._succs[pred]
        except KeyError:
            raise ValueError('%r not a successor of anything' % succ)
        try:
            preds.remove(pred)
            succs.remove(succ)
        except KeyError:
            raise ValueError('%r not a successor of %r' % (succ, pred))

    def is_step(self, step):
        # A step is known if it appears in any edge or as a lone node.
        return (step in self._preds or step in self._succs or
                step in self._nodes)

    def get_steps(self, final):
        """Return the steps needed to reach *final*, in execution order
        (predecessors first), as a reversed iterator over the BFS result.
        """
        if not self.is_step(final):
            raise ValueError('Unknown: %r' % final)
        result = []
        todo = []
        seen = set()
        todo.append(final)
        while todo:
            step = todo.pop(0)
            if step in seen:
                # if a step was already seen,
                # move it to the end (so it will appear earlier
                # when reversed on return) ... but not for the
                # final step, as that would be confusing for
                # users
                if step != final:
                    result.remove(step)
                    result.append(step)
            else:
                seen.add(step)
                result.append(step)
                preds = self._preds.get(step, ())
                todo.extend(preds)
        return reversed(result)

    @property
    def strong_connections(self):
        # Tarjan's strongly-connected-components algorithm; each returned
        # tuple is one SCC (a singleton unless the graph has a cycle).
        #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
        index_counter = [0]
        stack = []
        lowlinks = {}
        index = {}
        result = []

        graph = self._succs

        def strongconnect(node):
            # set the depth index for this node to the smallest unused index
            index[node] = index_counter[0]
            lowlinks[node] = index_counter[0]
            index_counter[0] += 1
            stack.append(node)

            # Consider successors
            try:
                successors = graph[node]
            except Exception:
                successors = []
            for successor in successors:
                if successor not in lowlinks:
                    # Successor has not yet been visited
                    strongconnect(successor)
                    lowlinks[node] = min(lowlinks[node],lowlinks[successor])
                elif successor in stack:
                    # the successor is in the stack and hence in the current
                    # strongly connected component (SCC)
                    lowlinks[node] = min(lowlinks[node],index[successor])

            # If `node` is a root node, pop the stack and generate an SCC
            if lowlinks[node] == index[node]:
                connected_component = []

                while True:
                    successor = stack.pop()
                    connected_component.append(successor)
                    if successor == node: break
                component = tuple(connected_component)
                # storing the result
                result.append(component)

        for node in graph:
            if node not in lowlinks:
                strongconnect(node)

        return result

    @property
    def dot(self):
        # Render the dependency graph in Graphviz 'dot' format.
        result = ['digraph G {']
        for succ in self._preds:
            preds = self._preds[succ]
            for pred in preds:
                result.append('  %s -> %s;' % (pred, succ))
        for node in self._nodes:
            result.append('  %s;' % node)
        result.append('}')
        return '\n'.join(result)
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
                      '.tgz', '.tbz', '.whl')

def unarchive(archive_filename, dest_dir, format=None, check=True):
    """Unpack an archive (zip/whl or tar/tgz/tbz) into *dest_dir*.

    :param archive_filename: Path of the archive to unpack.
    :param dest_dir: Directory to extract into (created members go here).
    :param format: One of 'zip', 'tgz', 'tbz' or 'tar'; when None, the
                   format is inferred from the filename extension.
    :param check: When True, verify no member would extract outside
                  *dest_dir* before extracting anything.
    :raises ValueError: for an unknown format or an unsafe member path.
    """
    def check_path(path):
        # Reject archive members that would escape dest_dir (path traversal).
        if not isinstance(path, text_type):
            path = path.decode('utf-8')
        p = os.path.abspath(os.path.join(dest_dir, path))
        # Allow dest_dir itself (e.g. a './' member); anything else must be
        # strictly inside it.  The previous index-based check (p[plen])
        # raised IndexError when p resolved to dest_dir exactly.
        if p != dest_dir and not p.startswith(dest_dir + os.sep):
            raise ValueError('path outside destination: %r' % p)

    dest_dir = os.path.abspath(dest_dir)
    archive = None
    if format is None:
        if archive_filename.endswith(('.zip', '.whl')):
            format = 'zip'
        elif archive_filename.endswith(('.tar.gz', '.tgz')):
            format = 'tgz'
        elif archive_filename.endswith(('.tar.bz2', '.tbz')):
            format = 'tbz'
        elif archive_filename.endswith('.tar'):
            format = 'tar'
        else:
            raise ValueError('Unknown format for %r' % archive_filename)
    try:
        if format == 'zip':
            archive = ZipFile(archive_filename, 'r')
            if check:
                names = archive.namelist()
                for name in names:
                    check_path(name)
        else:
            # Derive the tarfile mode from the format.  Previously 'mode'
            # was only assigned during extension sniffing, so passing a
            # tar-based format explicitly raised NameError here.
            try:
                mode = {'tgz': 'r:gz', 'tbz': 'r:bz2', 'tar': 'r'}[format]
            except KeyError:
                raise ValueError('Unknown format for %r' % archive_filename)
            archive = tarfile.open(archive_filename, mode)
            if check:
                names = archive.getnames()
                for name in names:
                    check_path(name)
        if format != 'zip' and sys.version_info[0] < 3:
            # See Python issue 17153. If the dest path contains Unicode,
            # tarfile extraction fails on Python 2.x if a member path name
            # contains non-ASCII characters - it leads to an implicit
            # bytes -> unicode conversion using ASCII to decode.
            for tarinfo in archive.getmembers():
                if not isinstance(tarinfo.name, text_type):
                    tarinfo.name = tarinfo.name.decode('utf-8')
        archive.extractall(dest_dir)
    finally:
        if archive:
            archive.close()
def zip_dir(directory):
    """zip a directory tree into a BytesIO object"""
    buf = io.BytesIO()
    skip = len(directory)
    with ZipFile(buf, "w") as zf:
        for root, dirs, files in os.walk(directory):
            for fname in files:
                source = os.path.join(root, fname)
                # archive name is the path relative to *directory*
                target = os.path.join(root[skip:], fname)
                zf.write(source, target)
    return buf
#
# Simple progress bar
#
# Scaling suffixes for the speed read-out (K = 1000, M = 1000**2, ...).
UNITS = ('', 'K', 'M', 'G','T','P')


class Progress(object):
    """Track progress of an operation between *minval* and *maxval*,
    offering percentage, ETA and transfer-speed read-outs.

    A *maxval* of None means the total is unknown; percentage and ETA then
    show placeholder values until :meth:`stop` is called.
    """
    unknown = 'UNKNOWN'     # displayed by maximum when max is None

    def __init__(self, minval=0, maxval=100):
        assert maxval is None or maxval >= minval
        self.min = self.cur = minval
        self.max = maxval
        self.started = None     # wall-clock time of the first update()
        self.elapsed = 0        # seconds elapsed since the first update()
        self.done = False

    def update(self, curval):
        # Record the current value and refresh the elapsed-time clock.
        assert self.min <= curval
        assert self.max is None or curval <= self.max
        self.cur = curval
        now = time.time()
        if self.started is None:
            self.started = now
        else:
            self.elapsed = now - self.started

    def increment(self, incr):
        # Advance progress by a non-negative amount.
        assert incr >= 0
        self.update(self.cur + incr)

    def start(self):
        # Initialise the clock; returns self for chaining.
        self.update(self.min)
        return self

    def stop(self):
        # Mark the operation finished (snapping to max when it is known).
        if self.max is not None:
            self.update(self.max)
        self.done = True

    @property
    def maximum(self):
        # The known maximum, or the 'UNKNOWN' placeholder.
        return self.unknown if self.max is None else self.max

    @property
    def percentage(self):
        # Formatted like ' 42 %'; ' ?? %' while the total is unknown.
        if self.done:
            result = '100 %'
        elif self.max is None:
            result = ' ?? %'
        else:
            v = 100.0 * (self.cur - self.min) / (self.max - self.min)
            result = '%3d %%' % v
        return result

    def format_duration(self, duration):
        # NOTE(review): 'and' binds tighter than 'or', so this condition is
        # ((duration <= 0) and self.max is None) or self.cur == self.min;
        # confirm that grouping is intended.
        if (duration <= 0) and self.max is None or self.cur == self.min:
            result = '??:??:??'
        #elif duration < 1:
        #    result = '--:--:--'
        else:
            result = time.strftime('%H:%M:%S', time.gmtime(duration))
        return result

    @property
    def ETA(self):
        # 'Done: <elapsed>' once finished, otherwise 'ETA : <estimate>'.
        if self.done:
            prefix = 'Done'
            t = self.elapsed
            #import pdb; pdb.set_trace()
        else:
            prefix = 'ETA '
            if self.max is None:
                t = -1

            elif self.elapsed == 0 or (self.cur == self.min):
                t = 0
            else:
                #import pdb; pdb.set_trace()
                # Linear extrapolation from the progress made so far.
                t = float(self.max - self.min)
                t /= self.cur - self.min
                t = (t - 1) * self.elapsed
        return '%s: %s' % (prefix, self.format_duration(t))

    @property
    def speed(self):
        # Units processed per second, scaled with the UNITS suffixes.
        if self.elapsed == 0:
            result = 0.0
        else:
            result = (self.cur - self.min) / self.elapsed
        for unit in UNITS:
            if result < 1000:
                break
            result /= 1000.0
        return '%d %sB/s' % (result, unit)
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
    """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
    # Validate the pattern up front, then delegate expansion to _iglob.
    if _CHECK_RECURSIVE_GLOB.search(path_glob):
        raise ValueError(
            """invalid glob %r: recursive glob "**" must be used alone""" % path_glob)
    if _CHECK_MISMATCH_SET.search(path_glob):
        raise ValueError(
            """invalid glob %r: mismatching set marker '{' or '}'""" % path_glob)
    return _iglob(path_glob)
def _iglob(path_glob):
    """Recursively expand *path_glob*, yielding matching paths.

    Handles one ``{a,b,c}`` alternation per call (recursing for the rest)
    and the recursive ``**`` wildcard via os.walk. Fix: the previous
    version shadowed the builtins ``set`` and ``dir`` with local names.
    """
    rich_path_glob = RICH_GLOB.split(path_glob, 1)
    if len(rich_path_glob) > 1:
        # A single {..,..} alternation was split out: prefix, options, suffix.
        assert len(rich_path_glob) == 3, rich_path_glob
        prefix, options, suffix = rich_path_glob
        for item in options.split(','):
            for path in _iglob(''.join((prefix, item, suffix))):
                yield path
    else:
        if '**' not in path_glob:
            # No recursive component: plain glob is sufficient.
            for item in std_iglob(path_glob):
                yield item
        else:
            prefix, radical = path_glob.split('**', 1)
            if prefix == '':
                prefix = '.'
            if radical == '':
                radical = '*'
            else:
                # we support both '**/suffix' and '**\\suffix'
                radical = radical.lstrip('/')
                radical = radical.lstrip('\\')
            for path, subdirs, files in os.walk(prefix):
                path = os.path.normpath(path)
                for fn in _iglob(os.path.join(path, radical)):
                    yield fn
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
    """HTTPSConnection which can verify the server certificate against a CA
    bundle (``ca_certs``) and check that the certificate matches the
    requested host (``check_domain``).
    """
    ca_certs = None # set this to the path to the certs file (.pem)
    check_domain = True # only used if ca_certs is not None
    # noinspection PyPropertyAccess
    def connect(self):
        """Open the TCP connection, wrap it in TLS and verify the peer."""
        sock = socket.create_connection((self.host, self.port), self.timeout)
        if getattr(self, '_tunnel_host', False):
            # Going through a proxy: establish the CONNECT tunnel first.
            self.sock = sock
            self._tunnel()
        if not hasattr(ssl, 'SSLContext'):
            # For 2.x
            if self.ca_certs:
                cert_reqs = ssl.CERT_REQUIRED
            else:
                cert_reqs = ssl.CERT_NONE
            self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                        cert_reqs=cert_reqs,
                                        ssl_version=ssl.PROTOCOL_SSLv23,
                                        ca_certs=self.ca_certs)
        else:
            # PROTOCOL_SSLv23 negotiates the highest mutually supported
            # protocol; SSLv2 is explicitly disabled below.
            context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
            context.options |= ssl.OP_NO_SSLv2
            if self.cert_file:
                context.load_cert_chain(self.cert_file, self.key_file)
            kwargs = {}
            if self.ca_certs:
                context.verify_mode = ssl.CERT_REQUIRED
                context.load_verify_locations(cafile=self.ca_certs)
            if getattr(ssl, 'HAS_SNI', False):
                # Send the hostname for SNI when the ssl module supports it.
                kwargs['server_hostname'] = self.host
            self.sock = context.wrap_socket(sock, **kwargs)
        if self.ca_certs and self.check_domain:
            try:
                match_hostname(self.sock.getpeercert(), self.host)
                logger.debug('Host verified: %s', self.host)
            except CertificateError:
                # Don't leave a half-open TLS socket around on failure.
                self.sock.shutdown(socket.SHUT_RDWR)
                self.sock.close()
                raise
class HTTPSHandler(BaseHTTPSHandler):
    """urllib HTTPS handler which creates :class:`HTTPSConnection`
    instances configured for certificate verification.
    """
    def __init__(self, ca_certs, check_domain=True):
        BaseHTTPSHandler.__init__(self)
        self.ca_certs = ca_certs
        self.check_domain = check_domain
    def _conn_maker(self, *args, **kwargs):
        """
        This is called to create a connection instance. Normally you'd
        pass a connection class to do_open, but it doesn't actually check for
        a class, and just expects a callable. As long as we behave just as a
        constructor would have, we should be OK. If it ever changes so that
        we *must* pass a class, we'll create an UnsafeHTTPSConnection class
        which just sets check_domain to False in the class definition, and
        choose which one to pass to do_open.
        """
        result = HTTPSConnection(*args, **kwargs)
        if self.ca_certs:
            # Propagate verification settings onto the connection.
            result.ca_certs = self.ca_certs
            result.check_domain = self.check_domain
        return result
    def https_open(self, req):
        """Open an HTTPS request, translating TLS verification failures
        into a clearer CertificateError."""
        try:
            return self.do_open(self._conn_maker, req)
        except URLError as e:
            if 'certificate verify failed' in str(e.reason):
                raise CertificateError('Unable to verify server certificate '
                                       'for %s' % req.host)
            else:
                raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
    """Handler that refuses plain HTTP outright; only HTTPS is allowed."""
    def http_open(self, req):
        raise URLError('Unexpected HTTP request on what should be a secure '
                       'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]

if _ver_info == (2, 6):
    # On 2.6, httplib.HTTP/HTTPS don't accept a timeout, so recreate the
    # underlying connection with any extra keyword arguments passed through.
    class HTTP(httplib.HTTP):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0:   # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))

    class HTTPS(httplib.HTTPS):
        def __init__(self, host='', port=None, **kwargs):
            if port == 0:   # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))


class Transport(xmlrpclib.Transport):
    """XML-RPC transport for http:// URIs which applies a socket timeout."""
    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.Transport.__init__(self, use_datetime)

    def make_connection(self, host):
        """Return a (possibly cached) connection to *host* with the
        configured timeout applied."""
        h, eh, x509 = self.get_host_info(host)
        if _ver_info == (2, 6):
            result = HTTP(h, timeout=self.timeout)
        else:
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                # Bug fix: the timeout was previously not passed to the
                # HTTPConnection, defeating the purpose of this class.
                self._connection = host, httplib.HTTPConnection(
                    h, timeout=self.timeout)
            result = self._connection[1]
        return result
class SafeTransport(xmlrpclib.SafeTransport):
    """XML-RPC transport for https:// URIs which applies a socket timeout."""
    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.SafeTransport.__init__(self, use_datetime)
    def make_connection(self, host):
        """Return a (possibly cached) HTTPS connection to *host*, injecting
        the configured timeout into the connection keyword arguments."""
        h, eh, kwargs = self.get_host_info(host)
        if not kwargs:
            kwargs = {}
        kwargs['timeout'] = self.timeout
        if _ver_info == (2, 6):
            # NOTE(review): this passes the raw 'host' rather than the parsed
            # 'h' used in the branch below -- confirm this is intentional.
            result = HTTPS(host, None, **kwargs)
        else:
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                self._connection = host, httplib.HTTPSConnection(h, None,
                                                                 **kwargs)
            result = self._connection[1]
        return result
class ServerProxy(xmlrpclib.ServerProxy):
    """ServerProxy which honours an optional ``timeout`` keyword argument."""
    def __init__(self, uri, **kwargs):
        self.timeout = timeout = kwargs.pop('timeout', None)
        # The timeout-aware transports only come into play when a timeout
        # was actually requested.
        if timeout is not None:
            scheme, _ = splittype(uri)
            use_datetime = kwargs.get('use_datetime', 0)
            tcls = SafeTransport if scheme == 'https' else Transport
            t = tcls(timeout, use_datetime=use_datetime)
            kwargs['transport'] = t
            self.transport = t
        xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
    """Shared csv dialect options plus context-manager support that closes
    the underlying stream on exit."""
    defaults = {
        'delimiter': str(','), # The strs are used because we need native
        'quotechar': str('"'), # str in the csv API (2.x won't take
        'lineterminator': str('\n') # Unicode)
    }
    def __enter__(self):
        return self
    def __exit__(self, *exc_info):
        # Close the stream whether or not an exception occurred.
        self.stream.close()
class CSVReader(CSVBase):
    """Iterate rows from a CSV byte stream or file path, yielding text cells."""
    def __init__(self, **kwargs):
        if 'stream' in kwargs:
            stream = kwargs['stream']
            if sys.version_info[0] >= 3:
                # The csv module needs a text stream on 3.x.
                stream = codecs.getreader('utf-8')(stream)
            self.stream = stream
        else:
            self.stream = _csv_open(kwargs['path'], 'r')
        self.reader = csv.reader(self.stream, **self.defaults)

    def __iter__(self):
        return self

    def next(self):
        row = next(self.reader)
        if sys.version_info[0] < 3:
            # On 2.x, decode any byte cells to unicode.
            row = [cell if isinstance(cell, text_type) else cell.decode('utf-8')
                   for cell in row]
        return row

    __next__ = next
class CSVWriter(CSVBase):
    """Write rows to a CSV file, encoding text cells to UTF-8 on 2.x."""
    def __init__(self, fn, **kwargs):
        self.stream = _csv_open(fn, 'w')
        self.writer = csv.writer(self.stream, **self.defaults)

    def writerow(self, row):
        if sys.version_info[0] < 3:
            # The 2.x csv module needs native (byte) strings.
            row = [cell.encode('utf-8') if isinstance(cell, text_type) else cell
                   for cell in row]
        self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
    """Configurator which adds an ``inc://`` converter for including JSON
    files and supports instantiating custom objects described in config.
    """
    value_converters = dict(BaseConfigurator.value_converters)
    value_converters['inc'] = 'inc_convert'
    def __init__(self, config, base=None):
        super(Configurator, self).__init__(config)
        # Base directory used to resolve relative inc:// paths.
        self.base = base or os.getcwd()
    def configure_custom(self, config):
        """Instantiate an object described by *config*: '()' names the
        callable, '[]' lists positional args, '.' maps attributes to set on
        the result, and remaining keys become keyword arguments."""
        def convert(o):
            # Recursively convert nested containers, instantiating any
            # embedded custom-object descriptions along the way.
            if isinstance(o, (list, tuple)):
                result = type(o)([convert(i) for i in o])
            elif isinstance(o, dict):
                if '()' in o:
                    result = self.configure_custom(o)
                else:
                    result = {}
                    for k in o:
                        result[k] = convert(o[k])
            else:
                result = self.convert(o)
            return result
        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        args = config.pop('[]', ())
        if args:
            args = tuple([convert(o) for o in args])
        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
        kwargs = dict(items)
        result = c(*args, **kwargs)
        if props:
            # Set post-construction attributes requested via '.'.
            for n, v in props.items():
                setattr(result, n, convert(v))
        return result
    def __getitem__(self, key):
        result = self.config[key]
        if isinstance(result, dict) and '()' in result:
            # Instantiate lazily on first access and cache the instance.
            self.config[key] = result = self.configure_custom(result)
        return result
    def inc_convert(self, value):
        """Default converter for the inc:// protocol."""
        if not os.path.isabs(value):
            value = os.path.join(self.base, value)
        with codecs.open(value, 'r', encoding='utf-8') as f:
            result = json.load(f)
        return result
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
    """Mixin which runs subprocesses while streaming their output either to
    a progress callable or to sys.stderr."""

    def __init__(self, verbose=False, progress=None):
        self.verbose = verbose
        self.progress = progress

    def reader(self, stream, context):
        """
        Read lines from a subprocess' output stream and either pass to a progress
        callable (if specified) or write progress information to sys.stderr.
        """
        progress = self.progress
        verbose = self.verbose
        while True:
            line = stream.readline()
            if not line:
                break
            if progress is not None:
                progress(line, context)
            elif verbose:
                sys.stderr.write(line.decode('utf-8'))
            else:
                sys.stderr.write('.')
            sys.stderr.flush()
        stream.close()

    def run_command(self, cmd, **kwargs):
        """Run *cmd*, draining stdout/stderr on reader threads; returns the
        completed Popen instance."""
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, **kwargs)
        threads = [
            threading.Thread(target=self.reader, args=(p.stdout, 'stdout')),
            threading.Thread(target=self.reader, args=(p.stderr, 'stderr')),
        ]
        for t in threads:
            t.start()
        p.wait()
        for t in threads:
            t.join()
        if self.progress is not None:
            self.progress('done.', 'main')
        elif self.verbose:
            sys.stderr.write('done.\n')
        return p
|
dutradda/myreco | refs/heads/master | myreco/external_variables/__init__.py | 12133432 | |
devs1991/test_edx_docmode | refs/heads/master | venv/lib/python2.7/site-packages/dealer/contrib/__init__.py | 12133432 | |
jordonbiondo/cldoc | refs/heads/master | cldoc/__init__.py | 4 | # This file is part of cldoc. cldoc is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import absolute_import
import sys
def run_inspect(args):
    """Dispatch to the 'inspect' subcommand (module imported lazily)."""
    from . import cmdinspect
    cmdinspect.run(args)
def run_serve(args):
    """Dispatch to the 'serve' subcommand (module imported lazily)."""
    from . import cmdserve
    cmdserve.run(args)
def run_generate(args):
    """Dispatch to the 'generate' subcommand (module imported lazily)."""
    from . import cmdgenerate
    cmdgenerate.run(args)
def run_gir(args):
    """Dispatch to the 'gir' subcommand (module imported lazily)."""
    from . import cmdgir
    cmdgir.run(args)
def print_available_commands():
    """Write the list of supported subcommands to stderr."""
    sys.stderr.write('Available commands:\n')
    for name in ('inspect', 'serve', 'generate', 'gir'):
        sys.stderr.write(' ' + name + '\n')
    sys.stderr.write('\n')
def run():
    """Command-line entry point: dispatch ``cldoc <command>`` to a handler."""
    if len(sys.argv) <= 1:
        sys.stderr.write('Please use: cldoc [command] [OPTIONS] [FILES...]\n\n')
        print_available_commands()
        sys.exit(1)
    cmd = sys.argv[1]
    rest = sys.argv[2:]
    if cmd == '--help' or cmd == '-h':
        sys.stderr.write('Please use: cldoc [command] --help\n\n')
        print_available_commands()
        sys.exit(1)
    elif cmd == 'inspect':
        run_inspect(rest)
    elif cmd == 'serve':
        run_serve(rest)
    elif cmd == 'generate':
        run_generate(rest)
    elif cmd == 'gir':
        run_gir(rest)
    else:
        sys.stderr.write('Unknown command `{0}\'\n'.format(cmd))
        sys.exit(1)
if __name__ == '__main__':
    run()
# vi:ts=4:et
|
amenonsen/ansible | refs/heads/devel | lib/ansible/modules/cloud/digital_ocean/digital_ocean_domain_info.py | 25 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: digital_ocean_domain_info
short_description: Gather information about DigitalOcean Domains
description:
- This module can be used to gather information about DigitalOcean provided Domains.
- This module was called C(digital_ocean_domain_facts) before Ansible 2.9. The usage did not change.
author: "Abhijeet Kasurde (@Akasurde)"
version_added: "2.6"
options:
domain_name:
description:
- Name of the domain to gather information for.
required: false
requirements:
- "python >= 2.6"
extends_documentation_fragment: digital_ocean.documentation
'''
EXAMPLES = '''
- name: Gather information about all domains
digital_ocean_domain_info:
oauth_token: "{{ oauth_token }}"
- name: Gather information about domain with given name
digital_ocean_domain_info:
oauth_token: "{{ oauth_token }}"
domain_name: "example.com"
- name: Get ttl from domain
digital_ocean_domain_info:
register: resp_out
- set_fact:
domain_ttl: "{{ item.ttl }}"
loop: "{{ resp_out.data|json_query(name) }}"
vars:
name: "[?name=='example.com']"
- debug: var=domain_ttl
'''
RETURN = '''
data:
description: DigitalOcean Domain information
returned: success
type: list
sample: [
{
"domain_records": [
{
"data": "ns1.digitalocean.com",
"flags": null,
"id": 37826823,
"name": "@",
"port": null,
"priority": null,
"tag": null,
"ttl": 1800,
"type": "NS",
"weight": null
},
],
"name": "myexample123.com",
"ttl": 1800,
"zone_file": "myexample123.com. IN SOA ns1.digitalocean.com. hostmaster.myexample123.com. 1520702984 10800 3600 604800 1800\n",
},
]
'''
from traceback import format_exc
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
from ansible.module_utils._text import to_native
def core(module):
    """Gather DigitalOcean domain information and exit the module.

    If ``domain_name`` is given, fetch that single domain (failing the
    module on a non-200 response); otherwise fetch all domains. Each
    domain's DNS records are fetched as well.
    """
    domain_name = module.params.get('domain_name', None)
    rest = DigitalOceanHelper(module)
    domain_results = []
    if domain_name is not None:
        response = rest.get("domains/%s" % domain_name)
        status_code = response.status_code
        if status_code != 200:
            module.fail_json(msg="Failed to retrieve domain for DigitalOcean")
        resp_json = response.json
        domains = [resp_json['domain']]
    else:
        domains = rest.get_paginated_data(base_url="domains?", data_key_name='domains')
    for temp_domain in domains:
        temp_domain_dict = {
            "name": temp_domain['name'],
            "ttl": temp_domain['ttl'],
            "zone_file": temp_domain['zone_file'],
            "domain_records": list(),
        }
        # Fetch every DNS record for this domain (paginated API).
        base_url = "domains/%s/records?" % temp_domain['name']
        temp_domain_dict["domain_records"] = rest.get_paginated_data(base_url=base_url, data_key_name='domain_records')
        domain_results.append(temp_domain_dict)
    module.exit_json(changed=False, data=domain_results)
def main():
    """Module entry point: build the argument spec, warn on the deprecated
    module alias, and run core() with top-level error handling."""
    argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
    argument_spec.update(
        domain_name=dict(type='str', required=False),
    )
    module = AnsibleModule(argument_spec=argument_spec)
    if module._name == 'digital_ocean_domain_facts':
        module.deprecate("The 'digital_ocean_domain_facts' module has been renamed to 'digital_ocean_domain_info'", version='2.13')
    try:
        core(module)
    except Exception as e:
        # Surface unexpected failures, with traceback, through fail_json.
        module.fail_json(msg=to_native(e), exception=format_exc())
if __name__ == '__main__':
    main()
|
dwaynebailey/pootle | refs/heads/master | tests/core/display.py | 10 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import pytest
from pootle.core.display import Display, ItemDisplay, SectionDisplay
# context can be any dict-like object whose values are non-string iterables
# in this case we wrap the data dict, but we could just provide that dict
# as the context
class DummyDisplayContext(object):
    """Minimal dict-like display context: iteration yields section names,
    indexing returns the section's data."""

    data = {
        "section1": ["item1", "item2"],
        "section2": None,
        "section3": [],
        "section4": ["some", "more", "iterable", "values"]}

    def __iter__(self):
        return iter(self.data)

    def __getitem__(self, key):
        return self.data[key]
def test_display_instance():
    """Display exposes only non-empty sections and renders them in order."""
    context = DummyDisplayContext()
    display = Display(context)
    assert sorted(display.sections) == ["section1", "section4"]
    assert display.context is context
    assert display.section_class == SectionDisplay
    assert display.no_results_msg == ""
    for name in display.sections:
        section = display.section(name)
        assert isinstance(section, SectionDisplay)
        assert section.context is display
        assert section.name == name
    result = ""
    for section in display.sections:
        result += str(display.section(section))
    assert str(display) == "%s\n" % result
def test_display_no_results():
    """An empty context renders as the no_results_msg plus a newline."""
    class DisplayNoResults(Display):
        no_results_msg = "Nothing to see here, move along"
    display = DisplayNoResults({})
    assert str(display) == "%s\n" % DisplayNoResults.no_results_msg
def test_display_section_instance():
    """SectionDisplay defaults: title is 'name (count)', items wrap data."""
    context = DummyDisplayContext()
    display = Display(context)
    section = SectionDisplay(display, "section4")
    assert section.context is display
    assert section.name == "section4"
    assert section.info == dict(title=section.name)
    assert section.data == context[section.name]
    assert section.description == ""
    assert section.title == (
        "%s (%s)"
        % (section.name, len(section.data)))
    assert len(section.items) == len(section.data)
    for i, item_display in enumerate(section.items):
        assert isinstance(item_display, section.item_class)
        assert item_display.item == section.data[i]
    result = (
        "%s\n%s\n\n"
        % (section.title,
           "-" * len(section.title)))
    for item in section:
        result += str(item)
    assert str(section) == "%s\n" % result
def test_display_section_info():
    """context_info entries override a section's title and description."""
    context = DummyDisplayContext()
    class DisplayWithInfo(Display):
        context_info = dict(
            section4=dict(
                title="Section 4 title",
                description="Section 4 description"))
    display = DisplayWithInfo(context)
    section = display.section("section4")
    assert section.info == display.context_info["section4"]
    assert section.description == section.info["description"]
    assert section.title == (
        "%s (%s)"
        % (section.info["title"], len(section.data)))
    result = (
        "%s\n%s\n%s\n\n"
        % (section.title,
           "-" * len(section.title),
           section.description))
    for item in section:
        result += str(item)
    assert str(section) == "%s\n" % result
def test_display_section_no_info():
    """A section absent from context_info falls back to its name as title."""
    context = DummyDisplayContext()
    class DisplayWithoutInfo(Display):
        context_info = dict(
            section2=dict(
                title="Section 4 title",
                description="Section 4 description"))
    display = DisplayWithoutInfo(context)
    section = display.section("section4")
    assert section.info == dict(title="section4")
    assert section.description == ""
    assert section.title == (
        "%s (%s)"
        % (section.name, len(section.data)))
    result = (
        "%s\n%s\n\n"
        % (section.title,
           "-" * len(section.title)))
    for item in section:
        result += str(item)
    assert str(section) == "%s\n" % result
def test_display_section_bad_items_none():
    """Accessing items on a section whose data is None raises TypeError."""
    display = Display(DummyDisplayContext())
    section = SectionDisplay(display, "section2")
    # in the example section2 would normally be ignored by
    # the Display class, but if a section were created from it
    # it would raise a TypeError
    with pytest.raises(TypeError):
        assert section.items
def test_display_section_bad_items_str():
    """String data is rejected: items needs a non-string iterable."""
    display = Display(dict(section1="FOO"))
    section = SectionDisplay(display, "section1")
    assert section.data == "FOO"
    with pytest.raises(TypeError):
        assert section.items
def test_display_item_instance():
    """ItemDisplay stores its section/item and renders as item + newline."""
    display = Display(DummyDisplayContext())
    section = SectionDisplay(display, "section1")
    item_display = ItemDisplay(section, section.data[0])
    assert item_display.section is section
    assert item_display.item == section.data[0]
    assert str(item_display) == "%s\n" % section.data[0]
|
zbqf109/goodo | refs/heads/master | openerp/report/render/rml.py | 49 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import render
import rml2pdf
import rml2html as htmlizer
import rml2txt as txtizer
import odt2odt as odt
import html2html as html
import makohtml2html as makohtml
class rml(render.render):
    """Render an RML document to PDF."""
    def __init__(self, rml, localcontext = None, datas=None, path='.', title=None):
        render.render.__init__(self, datas, path)
        self.localcontext = localcontext
        self.rml = rml
        self.output_type = 'pdf'
        self.title=title
    def _render(self):
        return rml2pdf.parseNode(self.rml, self.localcontext, images=self.bin_datas, path=self.path,title=self.title)
class rml2html(render.render):
    """Render an RML document to HTML."""
    def __init__(self, rml,localcontext = None, datas=None):
        super(rml2html, self).__init__(datas)
        self.rml = rml
        self.localcontext = localcontext
        self.output_type = 'html'
    def _render(self):
        return htmlizer.parseString(self.rml,self.localcontext)
class rml2txt(render.render):
    """Render an RML document to plain text."""
    def __init__(self, rml, localcontext= None, datas=None):
        super(rml2txt, self).__init__(datas)
        self.rml = rml
        self.localcontext = localcontext
        self.output_type = 'txt'
    def _render(self):
        return txtizer.parseString(self.rml, self.localcontext)
class odt2odt(render.render):
    """Render an ODT template (already-parsed DOM) to ODT."""
    def __init__(self, rml, localcontext=None, datas=None):
        render.render.__init__(self, datas)
        self.rml_dom = rml
        self.localcontext = localcontext
        self.output_type = 'odt'
    def _render(self):
        return odt.parseNode(self.rml_dom,self.localcontext)
class html2html(render.render):
    """Render an HTML document (already-parsed DOM) to HTML."""
    def __init__(self, rml, localcontext=None, datas=None):
        render.render.__init__(self, datas)
        self.rml_dom = rml
        self.localcontext = localcontext
        self.output_type = 'html'
    def _render(self):
        return html.parseString(self.rml_dom,self.localcontext)
class makohtml2html(render.render):
    """Render a Mako HTML template to HTML."""
    def __init__(self, html, localcontext = None):
        render.render.__init__(self)
        self.html = html
        self.localcontext = localcontext
        self.output_type = 'html'
    def _render(self):
        return makohtml.parseNode(self.html,self.localcontext)
biswajitsahu/kuma | refs/heads/master | kuma/contentflagging/migrations/__init__.py | 12133432 | |
redhat-cip/tempest | refs/heads/master | tempest/thirdparty/boto/utils/__init__.py | 12133432 | |
DPaaS-Raksha/horizon | refs/heads/master | openstack_dashboard/dashboards/admin/images/forms.py | 16 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.dashboards.project.images_and_snapshots \
.images import forms
class AdminCreateImageForm(forms.CreateImageForm):
    """Admin-dashboard variant of the project image-creation form."""
    pass
class AdminUpdateImageForm(forms.UpdateImageForm):
    """Admin-dashboard variant of the project image-update form."""
    pass
|
xhb/profitpy | refs/heads/master | examples/accountkeysdialog.py | 18 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase <troy@gci.net>
# Distributed under the terms of the GNU General Public License v2
from PyQt4.QtCore import Qt
from PyQt4.QtGui import QDialog, QStandardItem, QStandardItemModel
from .ui_accountkeysdialog import Ui_AccountKeysDialog
class AccountKeysModel(QStandardItemModel):
    """Item model with Display/Item/Currency columns for account keys."""
    def __init__(self, parent=None):
        QStandardItemModel.__init__(self, parent)
        self.setHorizontalHeaderLabels(['Display', 'Item', 'Currency'])
    def keys(self):
        """Yield ((item, currency), checked) tuples for every model row."""
        item = self.item
        rows = self.rowCount(self.indexFromItem(self.invisibleRootItem()))
        for r in range(rows):
            yield (str(item(r, 1).text()), str(item(r, 2).text())), \
                item(r, 0).checkState()==Qt.Checked
class AccountKeysItem(QStandardItem):
    """Non-editable item, optionally checkable with an initial check state."""
    def __init__(self, text='', checked=0, checkable=False):
        QStandardItem.__init__(self, text)
        self.setEditable(False)
        self.setCheckable(checkable)
        if checkable:
            # 2 == Qt.Checked, 0 == Qt.Unchecked
            self.setCheckState(Qt.CheckState(2 if checked else 0))
class AccountKeysDialog(QDialog, Ui_AccountKeysDialog):
    """Dialog showing account keys in a table with per-row check boxes."""
    def __init__(self, parent=None):
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self.dataModel = AccountKeysModel()
        self.tableView.setModel(self.dataModel)
        self.tableView.verticalHeader().hide()
    def setupKeys(self, mapping):
        """Populate the table from ((key, currency), checked) pairs."""
        model = self.dataModel
        for (key, currency), checked in sorted(mapping):
            model.appendRow([
                AccountKeysItem(checked=checked, checkable=True),
                AccountKeysItem(key),
                AccountKeysItem(currency)
            ])
        view = self.tableView
        view.resizeRowsToContents()
        view.resizeColumnsToContents()
    def keys(self):
        """Return the model's ((item, currency), checked) pairs."""
        return self.dataModel.keys()
|
jeanlinux/calibre | refs/heads/master | src/calibre/gui2/tweak_book/spell.py | 8 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import cPickle, os, sys
from collections import defaultdict, OrderedDict
from threading import Thread
from functools import partial
from PyQt5.Qt import (
QGridLayout, QApplication, QTreeWidget, QTreeWidgetItem, Qt, QFont, QSize,
QStackedLayout, QLabel, QVBoxLayout, QWidget, QPushButton, QIcon, QMenu,
QDialogButtonBox, QLineEdit, QDialog, QToolButton, QFormLayout, QHBoxLayout,
pyqtSignal, QAbstractTableModel, QModelIndex, QTimer, QTableView, QCheckBox,
QComboBox, QListWidget, QListWidgetItem, QInputDialog, QPlainTextEdit, QKeySequence,
QT_VERSION_STR)
from calibre.constants import __appname__, plugins
from calibre.ebooks.oeb.polish.spell import replace_word, get_all_words, merge_locations, get_checkable_file_names
from calibre.gui2 import choose_files, error_dialog
from calibre.gui2.complete2 import LineEdit
from calibre.gui2.languages import LanguagesEdit
from calibre.gui2.progress_indicator import ProgressIndicator
from calibre.gui2.tweak_book import dictionaries, current_container, set_book_locale, tprefs, editors
from calibre.gui2.tweak_book.widgets import Dialog
from calibre.spell import DictionaryLocale
from calibre.spell.dictionary import (
builtin_dictionaries, custom_dictionaries, best_locale_for_language,
get_dictionary, dprefs, remove_dictionary, rename_dictionary)
from calibre.spell.import_from import import_from_oxt
from calibre.spell.break_iterator import split_into_words
from calibre.utils.localization import calibre_langcode_to_name, get_language, get_lang, canonicalize_lang
from calibre.utils.icu import sort_key, primary_sort_key, primary_contains, contains
# Indices into (language, country, dictionary) locale tuples.
LANG = 0
COUNTRY = 1
DICTIONARY = 2
# Lazily-loaded country mapping, shared process-wide.
_country_map = None
def country_map():
    """Return the ISO-3166 country map, loading and caching it on first use."""
    global _country_map
    if _country_map is None:
        _country_map = cPickle.loads(P('localization/iso3166.pickle', data=True, allow_user_override=False))
    return _country_map
class AddDictionary(QDialog): # {{{
    """Dialog to import a LibreOffice .oxt dictionary under a nickname."""
    def __init__(self, parent=None):
        QDialog.__init__(self, parent)
        self.setWindowTitle(_('Add a dictionary'))
        self.l = l = QFormLayout(self)
        self.setLayout(l)
        self.la = la = QLabel('<p>' + _(
            '''{0} supports the use of LibreOffice dictionaries for spell checking. You can
            download more dictionaries from <a href="{1}">the LibreOffice extensions repository</a>.
            The dictionary will download as an .oxt file. Simply specify the path to the
            downloaded .oxt file here to add the dictionary to {0}.'''.format(
                __appname__, 'http://extensions.libreoffice.org/extension-center?getCategories=Dictionary&getCompatibility=any&sort_on=positive_ratings'))+'<p>') # noqa
        la.setWordWrap(True)
        la.setOpenExternalLinks(True)
        la.setMinimumWidth(450)
        l.addRow(la)
        self.h = h = QHBoxLayout()
        self.path = p = QLineEdit(self)
        p.setPlaceholderText(_('Path to OXT file'))
        h.addWidget(p)
        self.b = b = QToolButton(self)
        b.setIcon(QIcon(I('document_open.png')))
        b.setToolTip(_('Browse for an OXT file'))
        b.clicked.connect(self.choose_file)
        h.addWidget(b)
        l.addRow(_('&Path to OXT file:'), h)
        l.labelForField(h).setBuddy(p)
        self.nick = n = QLineEdit(self)
        n.setPlaceholderText(_('Choose a nickname for this dictionary'))
        l.addRow(_('&Nickname:'), n)
        self.bb = bb = QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
        bb.accepted.connect(self.accept)
        bb.rejected.connect(self.reject)
        l.addRow(bb)
        b.setFocus(Qt.OtherFocusReason)
    def choose_file(self):
        """Let the user pick an .oxt file; prefill the path and nickname."""
        path = choose_files(self, 'choose-dict-for-import', _('Choose OXT Dictionary'), filters=[
            (_('Dictionaries'), ['oxt'])], all_files=False, select_only_single_file=True)
        if path is not None:
            self.path.setText(path[0])
            if not self.nickname:
                # Default the nickname to the file name without extension.
                n = os.path.basename(path[0])
                self.nick.setText(n.rpartition('.')[0])
    @property
    def nickname(self):
        # The user-entered nickname, stripped of surrounding whitespace.
        return unicode(self.nick.text()).strip()
    def accept(self):
        """Validate the nickname, import the dictionaries, then close."""
        nick = self.nickname
        if not nick:
            return error_dialog(self, _('Must specify nickname'), _(
                'You must specify a nickname for this dictionary'), show=True)
        if nick in {d.name for d in custom_dictionaries()}:
            return error_dialog(self, _('Nickname already used'), _(
                'A dictionary with the nick name "%s" already exists.') % nick, show=True)
        oxt = unicode(self.path.text())
        try:
            num = import_from_oxt(oxt, nick)
        except:
            import traceback
            return error_dialog(self, _('Failed to import dictionaries'), _(
                'Failed to import dictionaries from %s. Click "Show Details" for more information') % oxt,
                det_msg=traceback.format_exc(), show=True)
        if num == 0:
            return error_dialog(self, _('No dictionaries'), _(
                'No dictionaries were found in %s') % oxt, show=True)
        QDialog.accept(self)
# }}}
# User Dictionaries {{{
class UserWordList(QListWidget):
    """List widget for user-dictionary words with copy/select-all support."""
    def __init__(self, parent=None):
        QListWidget.__init__(self, parent)
    def contextMenuEvent(self, ev):
        # Right-click menu: copy current selection or select all words.
        m = QMenu(self)
        m.addAction(_('Copy selected words to clipboard'), self.copy_to_clipboard)
        m.addAction(_('Select all words'), self.select_all)
        m.exec_(ev.globalPos())
    def select_all(self):
        """Select every word in the list."""
        for item in (self.item(i) for i in xrange(self.count())):
            item.setSelected(True)
    def copy_to_clipboard(self):
        """Copy the selected words, one per line, to the clipboard."""
        words = []
        for item in (self.item(i) for i in xrange(self.count())):
            if item.isSelected():
                # Each item's UserRole data holds the word as element 0.
                words.append(item.data(Qt.UserRole)[0])
        if words:
            QApplication.clipboard().setText('\n'.join(words))
    def keyPressEvent(self, ev):
        # Handle the platform Copy shortcut; defer everything else.
        if ev == QKeySequence.Copy:
            self.copy_to_clipboard()
            ev.accept()
            return
        return QListWidget.keyPressEvent(self, ev)
class ManageUserDictionaries(Dialog):
    def __init__(self, parent=None):
        # Tracks whether the user made changes callers must react to.
        self.dictionaries_changed = False
        Dialog.__init__(self, _('Manage user dictionaries'), 'manage-user-dictionaries', parent=parent)
    def setup_ui(self):
        """Build the dialog: dictionary list on the left; details, word list
        and word-management controls on the right."""
        self.l = l = QVBoxLayout(self)
        self.h = h = QHBoxLayout()
        l.addLayout(h)
        l.addWidget(self.bb)
        self.bb.clear(), self.bb.addButton(self.bb.Close)
        b = self.bb.addButton(_('&New dictionary'), self.bb.ActionRole)
        b.setIcon(QIcon(I('spell-check.png')))
        b.clicked.connect(self.new_dictionary)
        self.dictionaries = d = QListWidget(self)
        # Bold font used to emphasise entries (e.g. the active dictionary).
        self.emph_font = f = QFont(self.font())
        f.setBold(True)
        self.build_dictionaries()
        d.currentItemChanged.connect(self.show_current_dictionary)
        h.addWidget(d)
        l = QVBoxLayout()
        h.addLayout(l)
        h = QHBoxLayout()
        self.remove_button = b = QPushButton(QIcon(I('trash.png')), _('&Remove dictionary'), self)
        b.clicked.connect(self.remove_dictionary)
        h.addWidget(b)
        self.rename_button = b = QPushButton(QIcon(I('modified.png')), _('Re&name dictionary'), self)
        b.clicked.connect(self.rename_dictionary)
        h.addWidget(b)
        self.dlabel = la = QLabel('')
        l.addWidget(la)
        l.addLayout(h)
        self.is_active = a = QCheckBox(_('Mark this dictionary as active'))
        self.is_active.stateChanged.connect(self.active_toggled)
        l.addWidget(a)
        self.la = la = QLabel(_('Words in this dictionary:'))
        l.addWidget(la)
        self.words = w = UserWordList(self)
        w.setSelectionMode(w.ExtendedSelection)
        l.addWidget(w)
        self.add_word_button = b = QPushButton(_('&Add word'), self)
        b.clicked.connect(self.add_word)
        b.setIcon(QIcon(I('plus.png')))
        l.h = h = QHBoxLayout()
        l.addLayout(h)
        h.addWidget(b)
        self.remove_word_button = b = QPushButton(_('&Remove selected words'), self)
        b.clicked.connect(self.remove_word)
        b.setIcon(QIcon(I('minus.png')))
        h.addWidget(b)
        self.import_words_button = b = QPushButton(_('&Import list of words'), self)
        b.clicked.connect(self.import_words)
        l.addWidget(b)
        self.show_current_dictionary()
    def sizeHint(self):
        # Slightly larger than the default Dialog size hint.
        return Dialog.sizeHint(self) + QSize(30, 100)
def build_dictionaries(self, current=None):
self.dictionaries.clear()
for dic in sorted(dictionaries.all_user_dictionaries, key=lambda d:sort_key(d.name)):
i = QListWidgetItem(dic.name, self.dictionaries)
i.setData(Qt.UserRole, dic)
if dic.is_active:
i.setData(Qt.FontRole, self.emph_font)
if current == dic.name:
self.dictionaries.setCurrentItem(i)
if current is None and self.dictionaries.count() > 0:
self.dictionaries.setCurrentRow(0)
def new_dictionary(self):
name, ok = QInputDialog.getText(self, _('New dictionary'), _(
'Name of the new dictionary'))
if ok:
name = unicode(name)
if name in {d.name for d in dictionaries.all_user_dictionaries}:
return error_dialog(self, _('Already used'), _(
'A dictionary with the name %s already exists') % name, show=True)
dictionaries.create_user_dictionary(name)
self.dictionaries_changed = True
self.build_dictionaries(name)
self.show_current_dictionary()
def remove_dictionary(self):
d = self.current_dictionary
if d is None:
return
if dictionaries.remove_user_dictionary(d.name):
self.build_dictionaries()
self.dictionaries_changed = True
self.show_current_dictionary()
def rename_dictionary(self):
d = self.current_dictionary
if d is None:
return
name, ok = QInputDialog.getText(self, _('New name'), _(
'New name for the dictionary'))
if ok:
name = unicode(name)
if name == d.name:
return
if name in {d.name for d in dictionaries.all_user_dictionaries}:
return error_dialog(self, _('Already used'), _(
'A dictionary with the name %s already exists') % name, show=True)
if dictionaries.rename_user_dictionary(d.name, name):
self.build_dictionaries(name)
self.dictionaries_changed = True
self.show_current_dictionary()
@property
def current_dictionary(self):
d = self.dictionaries.currentItem()
if d is None:
return
return d.data(Qt.UserRole)
def active_toggled(self):
d = self.current_dictionary
if d is not None:
dictionaries.mark_user_dictionary_as_active(d.name, self.is_active.isChecked())
self.dictionaries_changed = True
for item in (self.dictionaries.item(i) for i in xrange(self.dictionaries.count())):
d = item.data(Qt.UserRole)
item.setData(Qt.FontRole, self.emph_font if d.is_active else None)
def show_current_dictionary(self, *args):
d = self.current_dictionary
if d is None:
return
self.dlabel.setText(_('Configure the dictionary: <b>%s') % d.name)
self.is_active.blockSignals(True)
self.is_active.setChecked(d.is_active)
self.is_active.blockSignals(False)
self.words.clear()
for word, lang in sorted(d.words, key=lambda x:sort_key(x[0])):
i = QListWidgetItem('%s [%s]' % (word, get_language(lang)), self.words)
i.setData(Qt.UserRole, (word, lang))
def add_word(self):
d = QDialog(self)
d.l = l = QFormLayout(d)
d.setWindowTitle(_('Add a word'))
d.w = w = QLineEdit(d)
w.setPlaceholderText(_('Word to add'))
l.addRow(_('&Word:'), w)
d.loc = loc = LanguagesEdit(parent=d)
l.addRow(_('&Language:'), d.loc)
loc.lang_codes = [canonicalize_lang(get_lang())]
d.bb = bb = QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
bb.accepted.connect(d.accept), bb.rejected.connect(d.reject)
l.addRow(bb)
if d.exec_() != d.Accepted:
return
word = unicode(w.text())
lang = (loc.lang_codes or [canonicalize_lang(get_lang())])[0]
if not word:
return
if (word, lang) not in self.current_dictionary.words:
dictionaries.add_to_user_dictionary(self.current_dictionary.name, word, DictionaryLocale(lang, None))
dictionaries.clear_caches()
self.show_current_dictionary()
self.dictionaries_changed = True
idx = self.find_word(word, lang)
if idx > -1:
self.words.scrollToItem(self.words.item(idx))
def import_words(self):
d = QDialog(self)
d.l = l = QFormLayout(d)
d.setWindowTitle(_('Import list of words'))
d.w = w = QPlainTextEdit(d)
l.addRow(QLabel(_('Enter a list of words, one per line')))
l.addRow(w)
d.b = b = QPushButton(_('Paste from clipboard'))
l.addRow(b)
b.clicked.connect(w.paste)
d.la = la = QLabel(_('Words in the user dictionary must have an associated language. Choose the language below:'))
la.setWordWrap(True)
l.addRow(la)
d.le = le = LanguagesEdit(d)
lc = canonicalize_lang(get_lang())
if lc:
le.lang_codes = [lc]
l.addRow(_('&Language:'), le)
d.bb = bb = QDialogButtonBox(QDialogButtonBox.Ok|QDialogButtonBox.Cancel)
l.addRow(bb)
bb.accepted.connect(d.accept), bb.rejected.connect(d.reject)
if d.exec_() != d.Accepted:
return
lc = le.lang_codes
if not lc:
return error_dialog(self, _('Must specify language'), _(
'You must specify a language to import words'), show=True)
words = set(filter(None, [x.strip() for x in unicode(w.toPlainText()).splitlines()]))
lang = lc[0]
words = {(w, lang) for w in words} - self.current_dictionary.words
if dictionaries.add_to_user_dictionary(self.current_dictionary.name, words, None):
dictionaries.clear_caches()
self.show_current_dictionary()
self.dictionaries_changed = True
def remove_word(self):
words = {i.data(Qt.UserRole) for i in self.words.selectedItems()}
if words:
kwords = [(w, DictionaryLocale(l, None)) for w, l in words]
d = self.current_dictionary
if dictionaries.remove_from_user_dictionary(d.name, kwords):
dictionaries.clear_caches()
self.show_current_dictionary()
self.dictionaries_changed = True
def find_word(self, word, lang):
key = (word, lang)
for i in xrange(self.words.count()):
if self.words.item(i).data(Qt.UserRole) == key:
return i
return -1
@classmethod
def test(cls):
d = cls()
d.exec_()
# }}}
class ManageDictionaries(Dialog):  # {{{

    """Dialog showing a tree of languages -> country variants ->
    dictionaries, allowing the user to choose preferred variants and
    preferred dictionaries, and to add/remove custom dictionaries."""

    def __init__(self, parent=None):
        Dialog.__init__(self, _('Manage dictionaries'), 'manage-dictionaries', parent=parent)

    def sizeHint(self):
        ans = Dialog.sizeHint(self)
        ans.setWidth(ans.width() + 250)
        ans.setHeight(ans.height() + 200)
        return ans

    def setup_ui(self):
        """Build the UI: dictionary tree on the left, a stacked panel on
        the right whose page depends on the selected tree item type."""
        self.l = l = QGridLayout(self)
        self.setLayout(l)
        self.stack = s = QStackedLayout()
        self.helpl = la = QLabel('<p>')
        la.setWordWrap(True)
        self.pcb = pc = QPushButton(self)
        pc.clicked.connect(self.set_preferred_country)
        self.lw = w = QWidget(self)
        self.ll = ll = QVBoxLayout(w)
        ll.addWidget(pc)
        self.dw = w = QWidget(self)
        self.dl = dl = QVBoxLayout(w)
        self.fb = b = QPushButton(self)
        b.clicked.connect(self.set_favorite)
        self.remove_dictionary_button = rd = QPushButton(_('&Remove this dictionary'), w)
        rd.clicked.connect(self.remove_dictionary)
        dl.addWidget(b), dl.addWidget(rd)
        w.setLayout(dl)
        # Stack pages are indexed by item type: LANG, COUNTRY, DICTIONARY
        s.addWidget(la)
        s.addWidget(self.lw)
        s.addWidget(w)
        self.dictionaries = d = QTreeWidget(self)
        d.itemChanged.connect(self.data_changed, type=Qt.QueuedConnection)
        self.build_dictionaries()
        d.setCurrentIndex(d.model().index(0, 0))
        d.header().close()
        d.currentItemChanged.connect(self.current_item_changed)
        self.current_item_changed()
        l.addWidget(d)
        l.addLayout(s, 0, 1)

        self.bb.clear()
        self.bb.addButton(self.bb.Close)
        b = self.bb.addButton(_('Manage &user dictionaries'), self.bb.ActionRole)
        b.setIcon(QIcon(I('user_profile.png')))
        # Fixed typo in the tooltip: 'Mange' -> 'Manage'
        b.setToolTip(_(
            'Manage the list of user dictionaries (dictionaries to which you can add words)'))
        b.clicked.connect(self.manage_user_dictionaries)
        b = self.bb.addButton(_('&Add dictionary'), self.bb.ActionRole)
        b.setToolTip(_(
            'Add a new dictionary that you downloaded from the internet'))
        b.setIcon(QIcon(I('plus.png')))
        b.clicked.connect(self.add_dictionary)
        l.addWidget(self.bb, l.rowCount(), 0, 1, l.columnCount())

    def manage_user_dictionaries(self):
        """Open the user-dictionary manager and propagate its changed flag."""
        d = ManageUserDictionaries(self)
        d.exec_()
        if d.dictionaries_changed:
            self.dictionaries_changed = True

    def data_changed(self, item, column):
        """Handle in-place renames of (non-builtin) dictionary items."""
        if column == 0 and item.type() == DICTIONARY:
            d = item.data(0, Qt.UserRole)
            if not d.builtin and unicode(item.text(0)) != d.name:
                rename_dictionary(d, unicode(item.text(0)))

    def build_dictionaries(self, reread=False):
        """Re-populate the tree: language -> country variant -> dictionary.
        The preferred country is shown bold, the preferred dictionary
        italic."""
        all_dictionaries = builtin_dictionaries() | custom_dictionaries(reread=reread)
        languages = defaultdict(lambda : defaultdict(set))
        for d in all_dictionaries:
            for locale in d.locales | {d.primary_locale}:
                languages[locale.langcode][locale.countrycode].add(d)
        bf = QFont(self.dictionaries.font())
        bf.setBold(True)
        itf = QFont(self.dictionaries.font())
        itf.setItalic(True)
        self.dictionaries.clear()
        for lc in sorted(languages, key=lambda x:sort_key(calibre_langcode_to_name(x))):
            i = QTreeWidgetItem(self.dictionaries, LANG)
            i.setText(0, calibre_langcode_to_name(lc))
            i.setData(0, Qt.UserRole, lc)
            best_country = getattr(best_locale_for_language(lc), 'countrycode', None)
            for countrycode in sorted(languages[lc], key=lambda x: country_map()['names'].get(x, x)):
                j = QTreeWidgetItem(i, COUNTRY)
                j.setText(0, country_map()['names'].get(countrycode, countrycode))
                j.setData(0, Qt.UserRole, countrycode)
                if countrycode == best_country:
                    j.setData(0, Qt.FontRole, bf)
                pd = get_dictionary(DictionaryLocale(lc, countrycode))
                for dictionary in sorted(languages[lc][countrycode], key=lambda d:d.name):
                    k = QTreeWidgetItem(j, DICTIONARY)
                    pl = calibre_langcode_to_name(dictionary.primary_locale.langcode)
                    if dictionary.primary_locale.countrycode:
                        pl += '-' + dictionary.primary_locale.countrycode.upper()
                    k.setText(0, dictionary.name or (_('<Builtin dictionary for {0}>').format(pl)))
                    k.setData(0, Qt.UserRole, dictionary)
                    if dictionary.name:
                        # Custom dictionaries can be renamed in place
                        k.setFlags(k.flags() | Qt.ItemIsEditable)
                    if pd == dictionary:
                        k.setData(0, Qt.FontRole, itf)
        self.dictionaries.expandAll()

    def add_dictionary(self):
        d = AddDictionary(self)
        if d.exec_() == d.Accepted:
            self.build_dictionaries(reread=True)

    def remove_dictionary(self):
        """Remove the selected dictionary (builtins cannot be removed)."""
        item = self.dictionaries.currentItem()
        if item is not None and item.type() == DICTIONARY:
            dic = item.data(0, Qt.UserRole)
            if not dic.builtin:
                remove_dictionary(dic)
                self.build_dictionaries(reread=True)

    def current_item_changed(self):
        """Switch the right-hand stacked panel to match the selected item
        type and initialize it."""
        item = self.dictionaries.currentItem()
        if item is not None:
            self.stack.setCurrentIndex(item.type())
            if item.type() == LANG:
                self.init_language(item)
            elif item.type() == COUNTRY:
                self.init_country(item)
            elif item.type() == DICTIONARY:
                self.init_dictionary(item)

    def init_language(self, item):
        self.helpl.setText(_(
            '''<p>You can change the dictionaries used for any specified language.</p>
            <p>A language can have many country specific variants. Each of these variants
            can have one or more dictionaries assigned to it. The default variant for each language
            is shown in bold to the left.</p>
            <p>You can change the default country variant as well as changing the dictionaries used for
            every variant.</p>
            <p>When a book specifies its language as a plain language, without any country variant,
            the default variant you choose here will be used.</p>
            '''))

    def init_country(self, item):
        """Configure the 'preferred variant' button for a country item."""
        pc = self.pcb
        # The preferred variant is marked by a bold font in the tree
        font = item.data(0, Qt.FontRole)
        preferred = bool(font and font.bold())
        pc.setText((_(
            'This is already the preferred variant for the {1} language') if preferred else _(
            'Use this as the preferred variant for the {1} language')).format(
            unicode(item.text(0)), unicode(item.parent().text(0))))
        pc.setEnabled(not preferred)

    def set_preferred_country(self):
        """Make the selected country variant the preferred one for its
        language and persist the choice."""
        item = self.dictionaries.currentItem()
        bf = QFont(self.dictionaries.font())
        bf.setBold(True)
        for x in (item.parent().child(i) for i in xrange(item.parent().childCount())):
            x.setData(0, Qt.FontRole, bf if x is item else None)
        lc = unicode(item.parent().data(0, Qt.UserRole))
        pl = dprefs['preferred_locales']
        pl[lc] = '%s-%s' % (lc, unicode(item.data(0, Qt.UserRole)))
        dprefs['preferred_locales'] = pl

    def init_dictionary(self, item):
        """Configure the 'preferred dictionary' and remove buttons for a
        dictionary item."""
        saf = self.fb
        # The preferred dictionary is marked by an italic font in the tree
        font = item.data(0, Qt.FontRole)
        preferred = bool(font and font.italic())
        saf.setText((_(
            'This is already the preferred dictionary') if preferred else
            _('Use this as the preferred dictionary')))
        saf.setEnabled(not preferred)
        self.remove_dictionary_button.setEnabled(not item.data(0, Qt.UserRole).builtin)

    def set_favorite(self):
        """Make the selected dictionary the preferred one for its locale
        and persist the choice."""
        item = self.dictionaries.currentItem()
        bf = QFont(self.dictionaries.font())
        bf.setItalic(True)
        for x in (item.parent().child(i) for i in xrange(item.parent().childCount())):
            x.setData(0, Qt.FontRole, bf if x is item else None)
        cc = unicode(item.parent().data(0, Qt.UserRole))
        lc = unicode(item.parent().parent().data(0, Qt.UserRole))
        d = item.data(0, Qt.UserRole)
        locale = '%s-%s' % (lc, cc)
        pl = dprefs['preferred_dictionaries']
        pl[locale] = d.id
        dprefs['preferred_dictionaries'] = pl

    @classmethod
    def test(cls):
        # Manual test helper: show the dialog standalone
        d = cls()
        d.exec_()
# }}}
# Spell Check Dialog {{{
class WordsModel(QAbstractTableModel):

    """Table model for the spell-check dialog: one row per (word, locale)
    pair found in the book, with columns Word/Count/Language/Misspelled."""

    # Emitted with (word, locale) whenever a word's ignored state changes
    word_ignored = pyqtSignal(object, object)

    def __init__(self, parent=None):
        QAbstractTableModel.__init__(self, parent)
        self.counts = (0, 0)  # (misspelled words, total words)
        self.words = {}  # Map of (word, locale) to location data for the word
        self.spell_map = {}  # Map of (word, locale) to dictionaries.recognized(word, locale)
        self.sort_on = (0, False)  # (column, reverse)
        self.items = []  # The currently displayed items
        self.filter_expression = None
        self.show_only_misspelt = True
        self.headers = (_('Word'), _('Count'), _('Language'), _('Misspelled?'))

    def rowCount(self, parent=QModelIndex()):
        return len(self.items)

    def columnCount(self, parent=QModelIndex()):
        return len(self.headers)

    def clear(self):
        """Empty the model, with the proper Qt reset notifications."""
        self.beginResetModel()
        self.words = {}
        self.spell_map = {}
        self.items =[]
        self.endResetModel()

    def headerData(self, section, orientation, role=Qt.DisplayRole):
        if orientation == Qt.Horizontal:
            if role == Qt.DisplayRole:
                try:
                    return self.headers[section]
                except IndexError:
                    pass
            elif role == Qt.InitialSortOrderRole:
                # Count column sorts descending by default
                return Qt.DescendingOrder if section == 1 else Qt.AscendingOrder

    def misspelled_text(self, w):
        # Text for the Misspelled? column: check-mark if misspelled,
        # 'Ignored' if recognized only because it is ignored, else empty
        if self.spell_map[w]:
            return _('Ignored') if dictionaries.is_word_ignored(*w) else ''
        return '✓'

    def data(self, index, role=Qt.DisplayRole):
        try:
            word, locale = self.items[index.row()]
        except IndexError:
            return
        if role == Qt.DisplayRole:
            col = index.column()
            if col == 0:
                return word
            if col == 1:
                return '%d' % len(self.words[(word, locale)])
            if col == 2:
                pl = calibre_langcode_to_name(locale.langcode)
                countrycode = locale.countrycode
                if countrycode:
                    pl = '%s (%s)' % (pl, countrycode)
                return pl
            if col == 3:
                return self.misspelled_text((word, locale))
        if role == Qt.TextAlignmentRole:
            return Qt.AlignVCenter | (Qt.AlignLeft if index.column() == 0 else Qt.AlignHCenter)

    def sort(self, column, order=Qt.AscendingOrder):
        reverse = order != Qt.AscendingOrder
        self.sort_on = (column, reverse)
        self.beginResetModel()
        self.do_sort()
        self.endResetModel()

    def filter(self, filter_text):
        """Set the filter expression, then re-filter and re-sort."""
        self.filter_expression = filter_text or None
        self.beginResetModel()
        self.do_filter()
        self.do_sort()
        self.endResetModel()

    def sort_key(self, col):
        """Return the key function used to sort by the given column."""
        if col == 0:
            f = (lambda x: x) if tprefs['spell_check_case_sensitive_sort'] else primary_sort_key
            def key(w):
                return f(w[0])
        elif col == 1:
            def key(w):
                return len(self.words[w])
        elif col == 2:
            def key(w):
                locale = w[1]
                return (calibre_langcode_to_name(locale.langcode), locale.countrycode)
        else:
            key = self.misspelled_text
        return key

    def do_sort(self):
        col, reverse = self.sort_on
        self.items.sort(key=self.sort_key(col), reverse=reverse)

    def set_data(self, words, spell_map):
        """Replace the model contents wholesale (after a book re-scan)."""
        self.words, self.spell_map = words, spell_map
        self.beginResetModel()
        self.do_filter()
        self.do_sort()
        self.counts = (len([None for w, recognized in spell_map.iteritems() if not recognized]), len(self.words))
        self.endResetModel()

    def filter_item(self, x):
        """Return True if the (word, locale) pair x should be displayed."""
        if self.show_only_misspelt and self.spell_map[x]:
            return False
        func = contains if tprefs['spell_check_case_sensitive_search'] else primary_contains
        if self.filter_expression is not None and not func(self.filter_expression, x[0]):
            return False
        return True

    def do_filter(self):
        self.items = filter(self.filter_item, self.words)

    def toggle_ignored(self, row):
        """Flip the ignored state of the word at *row*."""
        w = self.word_for_row(row)
        if w is not None:
            ignored = dictionaries.is_word_ignored(*w)
            (dictionaries.unignore_word if ignored else dictionaries.ignore_word)(*w)
            self.spell_map[w] = dictionaries.recognized(*w)
            self.update_word(w)
            self.word_ignored.emit(*w)

    def ignore_words(self, rows):
        """Toggle the ignored state of every word at the given rows."""
        words = {self.word_for_row(r) for r in rows}
        words.discard(None)
        for w in words:
            ignored = dictionaries.is_word_ignored(*w)
            (dictionaries.unignore_word if ignored else dictionaries.ignore_word)(*w)
            self.spell_map[w] = dictionaries.recognized(*w)
            self.update_word(w)
            self.word_ignored.emit(*w)

    def add_word(self, row, udname):
        """Add the word at *row* to the user dictionary *udname*."""
        w = self.word_for_row(row)
        if w is not None:
            if dictionaries.add_to_user_dictionary(udname, *w):
                self.spell_map[w] = dictionaries.recognized(*w)
                self.update_word(w)
                self.word_ignored.emit(*w)

    def add_words(self, dicname, rows):
        """Add the words at *rows* to *dicname*; words that could not be
        added are removed instead (backs the Add/Remove context-menu
        action)."""
        words = {self.word_for_row(r) for r in rows}
        words.discard(None)
        for w in words:
            if not dictionaries.add_to_user_dictionary(dicname, *w):
                dictionaries.remove_from_user_dictionary(dicname, [w])
            self.spell_map[w] = dictionaries.recognized(*w)
            self.update_word(w)
            self.word_ignored.emit(*w)

    def remove_word(self, row):
        """Remove the word at *row* from all user dictionaries."""
        w = self.word_for_row(row)
        if w is not None:
            if dictionaries.remove_from_user_dictionaries(*w):
                self.spell_map[w] = dictionaries.recognized(*w)
                self.update_word(w)

    def replace_word(self, w, new_word):
        """Replace the word *w* with *new_word* at all its locations,
        merging into an existing entry if one exists. Returns the key of
        the (possibly merged) replacement entry."""
        # Hack to deal with replacement words that are actually multiple words,
        # ignore all words except the first
        new_word = split_into_words(new_word)[0]
        for location in self.words[w]:
            location.replace(new_word)
        if w[0] == new_word:
            return w
        new_key = (new_word, w[1])
        if new_key in self.words:
            self.words[new_key] = merge_locations(self.words[new_key], self.words[w])
            row = self.row_for_word(w)
            # Only the count column of the merged-into entry changed
            self.dataChanged.emit(self.index(row, 1), self.index(row, 1))
        else:
            self.words[new_key] = self.words[w]
            self.spell_map[new_key] = dictionaries.recognized(*new_key)
            self.update_word(new_key)
        row = self.row_for_word(w)
        if row > -1:
            self.beginRemoveRows(QModelIndex(), row, row)
            del self.items[row]
            self.endRemoveRows()
        self.words.pop(w, None)
        return new_key

    def update_word(self, w):
        """Insert, remove or refresh the display row for *w* after its
        spelled/ignored state changed."""
        should_be_filtered = not self.filter_item(w)
        row = self.row_for_word(w)
        if should_be_filtered and row != -1:
            self.beginRemoveRows(QModelIndex(), row, row)
            del self.items[row]
            self.endRemoveRows()
        elif not should_be_filtered and row == -1:
            self.items.append(w)
            self.do_sort()
            row = self.row_for_word(w)
            self.beginInsertRows(QModelIndex(), row, row)
            self.endInsertRows()
        self.dataChanged.emit(self.index(row, 3), self.index(row, 3))

    def word_for_row(self, row):
        """The (word, locale) key displayed at *row*, or None."""
        try:
            return self.items[row]
        except IndexError:
            pass

    def row_for_word(self, word):
        """The display row of *word*, or -1 if it is filtered out."""
        try:
            return self.items.index(word)
        except ValueError:
            return -1
class WordsView(QTableView):

    """Table view for the spell-check word list: row selection, copy
    support, and a context menu for ignore/add/replace actions."""

    ignore_all = pyqtSignal()
    add_all = pyqtSignal(object)
    change_to = pyqtSignal(object, object)

    def __init__(self, parent=None):
        QTableView.__init__(self, parent)
        self.setSortingEnabled(True)
        self.setShowGrid(False)
        self.setAlternatingRowColors(True)
        self.setSelectionBehavior(self.SelectRows)
        self.setTabKeyNavigation(False)
        self.verticalHeader().close()

    def keyPressEvent(self, ev):
        # Handle the standard Copy shortcut ourselves
        if ev == QKeySequence.Copy:
            self.copy_to_clipboard()
            ev.accept()
            return
        ret = QTableView.keyPressEvent(self, ev)
        # Keep the current row visible after keyboard navigation
        if ev.key() in (Qt.Key_PageUp, Qt.Key_PageDown, Qt.Key_Up, Qt.Key_Down):
            current = self.currentIndex()
            if current.isValid():
                self.scrollTo(current)
        return ret

    def highlight_row(self, row):
        """Select *row* and scroll it into view."""
        index = self.model().index(row, 0)
        if index.isValid():
            self.selectRow(row)
            self.setCurrentIndex(index)
            self.scrollTo(index)

    def contextMenuEvent(self, ev):
        menu = QMenu(self)
        word = self.model().word_for_row(self.currentIndex().row())
        if word is not None:
            # Submenu of replacement suggestions for the current word
            change_action = menu.addAction(_('Change %s to') % word[0])
            change_menu = QMenu()
            change_action.setMenu(change_menu)
            change_menu.addAction(_('Specify replacement manually'), partial(self.change_to.emit, word, None))
            change_menu.addSeparator()
            for suggestion in dictionaries.suggestions(*word):
                change_menu.addAction(suggestion, partial(self.change_to.emit, word, suggestion))
        menu.addAction(_('Ignore/Unignore all selected words'), self.ignore_all)
        add_action = menu.addAction(_('Add/Remove all selected words'))
        add_menu = QMenu()
        add_action.setMenu(add_menu)
        for dic in sorted(dictionaries.active_user_dictionaries, key=lambda x:sort_key(x.name)):
            add_menu.addAction(dic.name, partial(self.add_all.emit, dic.name))
        menu.addSeparator()
        menu.addAction(_('Copy selected words to clipboard'), self.copy_to_clipboard)
        menu.exec_(ev.globalPos())

    def copy_to_clipboard(self):
        """Copy the distinct selected words, sorted, to the clipboard."""
        selected_rows = {index.row() for index in self.selectedIndexes()}
        entries = {self.model().word_for_row(r) for r in selected_rows}
        entries.discard(None)
        words = sorted({entry[0] for entry in entries}, key=sort_key)
        if words:
            QApplication.clipboard().setText('\n'.join(words))
class SpellCheck(Dialog):

    """The spell-check dialog for the book editor."""

    # Emitted by the background scan thread: (words, spell_map, change_request)
    work_finished = pyqtSignal(object, object, object)
    # Ask the editor to show a word: (word_key, locations)
    find_word = pyqtSignal(object, object)
    refresh_requested = pyqtSignal()
    # Emitted with the set of files changed by a replacement
    word_replaced = pyqtSignal(object)
    # Re-emitted from the model when a word's ignored state changes
    word_ignored = pyqtSignal(object, object)
    # Request replacing a word: (word_key, new_word)
    change_requested = pyqtSignal(object, object)
    def __init__(self, parent=None):
        # Selected word remembered across model resets (see __enter__/__exit__)
        self.__current_word = None
        self.thread = None  # background word-scanning thread
        self.cancel = False  # set to abort an in-flight scan
        # Done before the UI is built; presumably setup_ui (called from
        # Dialog.__init__) needs the dictionaries ready — TODO confirm
        dictionaries.initialize()
        # Debounce timer for reacting to selection changes in the word table
        self.current_word_changed_timer = t = QTimer()
        t.timeout.connect(self.do_current_word_changed)
        t.setSingleShot(True), t.setInterval(100)
        Dialog.__init__(self, _('Check spelling'), 'spell-check', parent)
        # Queued: work_finished is emitted from the worker thread
        self.work_finished.connect(self.work_done, type=Qt.QueuedConnection)
        self.setAttribute(Qt.WA_DeleteOnClose, False)
    def setup_ui(self):
        """Build the dialog UI: a stacked layout with a progress page
        (shown while scanning the book) and the main page containing the
        word table plus action buttons."""
        self.state_name = 'spell-check-table-state-' + QT_VERSION_STR.partition('.')[0]
        set_no_activate_on_click = plugins['progress_indicator'][0].set_no_activate_on_click
        self.setWindowIcon(QIcon(I('spell-check.png')))
        self.l = l = QVBoxLayout(self)
        self.setLayout(l)
        self.stack = s = QStackedLayout()
        l.addLayout(s)
        l.addWidget(self.bb)
        self.bb.clear()
        self.bb.addButton(self.bb.Close)
        b = self.bb.addButton(_('&Refresh'), self.bb.ActionRole)
        b.setToolTip('<p>' + _('Re-scan the book for words, useful if you have edited the book since opening this dialog'))
        b.setIcon(QIcon(I('view-refresh.png')))
        b.clicked.connect(partial(self.refresh, change_request=None))

        # Stack page 0: progress indicator
        self.progress = p = QWidget(self)
        s.addWidget(p)
        p.l = l = QVBoxLayout(p)
        l.setAlignment(Qt.AlignCenter)
        self.progress_indicator = pi = ProgressIndicator(self, 256)
        l.addWidget(pi, alignment=Qt.AlignHCenter), l.addSpacing(10)
        p.la = la = QLabel(_('Checking, please wait...'))
        la.setStyleSheet('QLabel { font-size: 30pt; font-weight: bold }')
        l.addWidget(la, alignment=Qt.AlignHCenter)

        # Stack page 1: the main spell-check UI
        self.main = m = QWidget(self)
        s.addWidget(m)
        m.l = l = QVBoxLayout(m)
        m.h1 = h = QHBoxLayout()
        l.addLayout(h)
        self.filter_text = t = QLineEdit(self)
        t.setPlaceholderText(_('Filter the list of words'))
        t.textChanged.connect(self.do_filter)
        m.fc = b = QToolButton(m)
        b.setIcon(QIcon(I('clear_left.png'))), b.setToolTip(_('Clear filter'))
        b.clicked.connect(t.clear)
        h.addWidget(t), h.addWidget(b)
        m.h2 = h = QHBoxLayout()
        l.addLayout(h)
        self.words_view = w = WordsView(m)
        set_no_activate_on_click(w)
        w.ignore_all.connect(self.ignore_all)
        w.add_all.connect(self.add_all)
        w.activated.connect(self.word_activated)
        w.change_to.connect(self.change_to)
        w.currentChanged = self.current_word_changed
        state = tprefs.get(self.state_name, None)
        hh = self.words_view.horizontalHeader()
        h.addWidget(w)
        self.words_model = m = WordsModel(self)
        w.setModel(m)
        m.dataChanged.connect(self.current_word_changed)
        m.modelReset.connect(self.current_word_changed)
        m.word_ignored.connect(self.word_ignored)
        if state is not None:
            hh.restoreState(state)
            # Sort by the restored state, if any
            w.sortByColumn(hh.sortIndicatorSection(), hh.sortIndicatorOrder())
            m.show_only_misspelt = hh.isSectionHidden(3)

        self.ignore_button = b = QPushButton(_('&Ignore'))
        # The button label/tooltip toggles between ignore and unignore
        b.ign_text, b.unign_text = unicode(b.text()), _('Un&ignore')
        b.ign_tt = _('Ignore the current word for the rest of this session')
        b.unign_tt = _('Stop ignoring the current word')
        b.clicked.connect(self.toggle_ignore)
        l = QVBoxLayout()
        h.addLayout(l)
        h.setStretch(0, 1)
        l.addWidget(b), l.addSpacing(20)
        self.add_button = b = QPushButton(_('Add word to &dictionary:'))
        b.add_text, b.remove_text = unicode(b.text()), _('Remove from &dictionaries')
        b.add_tt = _('Add the current word to the specified user dictionary')
        b.remove_tt = _('Remove the current word from all active user dictionaries')
        b.clicked.connect(self.add_remove)
        self.user_dictionaries = d = QComboBox(self)
        self.user_dictionaries_missing_label = la = QLabel(_(
            'You have no active user dictionaries. You must'
            ' choose at least one active user dictionary via'
            ' Preferences->Editor->Manage spelling dictionaries'))
        la.setWordWrap(True)
        self.initialize_user_dictionaries()
        d.setMinimumContentsLength(25)
        l.addWidget(b), l.addWidget(d), l.addWidget(la)
        self.next_occurrence = b = QPushButton(_('Show &next occurrence'), self)
        b.setToolTip('<p>' + _(
            'Show the next occurrence of the selected word in the editor, so you can edit it manually'))
        b.clicked.connect(self.show_next_occurrence)
        l.addSpacing(20), l.addWidget(b)
        l.addStretch(1)
        self.change_button = b = QPushButton(_('&Change selected word to:'), self)
        b.clicked.connect(self.change_word)
        l.addWidget(b)
        self.suggested_word = sw = LineEdit(self)
        sw.set_separator(None)
        sw.setPlaceholderText(_('The replacement word'))
        sw.returnPressed.connect(self.change_word)
        l.addWidget(sw)
        self.suggested_list = sl = QListWidget(self)
        sl.currentItemChanged.connect(self.current_suggestion_changed)
        sl.itemActivated.connect(self.change_word)
        set_no_activate_on_click(sl)
        l.addWidget(sl)

        hh.setSectionHidden(3, m.show_only_misspelt)
        self.show_only_misspelled = om = QCheckBox(_('Show &only misspelled words'))
        om.setChecked(m.show_only_misspelt)
        om.stateChanged.connect(self.update_show_only_misspelt)
        self.case_sensitive_sort = cs = QCheckBox(_('Case &sensitive sort'))
        cs.setChecked(tprefs['spell_check_case_sensitive_sort'])
        cs.setToolTip(_('When sorting the list of words, be case sensitive'))
        cs.stateChanged.connect(self.sort_type_changed)
        self.case_sensitive_search = cs2 = QCheckBox(_('Case sensitive sea&rch'))
        cs2.setToolTip(_('When filtering the list of words, be case sensitive'))
        cs2.setChecked(tprefs['spell_check_case_sensitive_search'])
        cs2.stateChanged.connect(self.search_type_changed)
        self.hb = h = QHBoxLayout()
        self.summary = s = QLabel('')
        self.main.l.addLayout(h), h.addWidget(s), h.addWidget(om), h.addWidget(cs), h.addWidget(cs2), h.addStretch(1)
def keyPressEvent(self, ev):
if ev.key() in (Qt.Key_Enter, Qt.Key_Return):
ev.accept()
return
return Dialog.keyPressEvent(self, ev)
def sort_type_changed(self):
tprefs['spell_check_case_sensitive_sort'] = bool(self.case_sensitive_sort.isChecked())
if self.words_model.sort_on[0] == 0:
with self:
hh = self.words_view.horizontalHeader()
self.words_view.sortByColumn(hh.sortIndicatorSection(), hh.sortIndicatorOrder())
def search_type_changed(self):
tprefs['spell_check_case_sensitive_search'] = bool(self.case_sensitive_search.isChecked())
if unicode(self.filter_text.text()).strip():
self.do_filter()
def show_next_occurrence(self):
self.word_activated(self.words_view.currentIndex())
def word_activated(self, index):
w = self.words_model.word_for_row(index.row())
if w is None:
return
self.find_word.emit(w, self.words_model.words[w])
def initialize_user_dictionaries(self):
ct = unicode(self.user_dictionaries.currentText())
self.user_dictionaries.clear()
self.user_dictionaries.addItems([d.name for d in dictionaries.active_user_dictionaries])
if ct:
idx = self.user_dictionaries.findText(ct)
if idx > -1:
self.user_dictionaries.setCurrentIndex(idx)
self.user_dictionaries.setVisible(self.user_dictionaries.count() > 0)
self.user_dictionaries_missing_label.setVisible(not self.user_dictionaries.isVisible())
def current_word_changed(self, *args):
self.current_word_changed_timer.start(self.current_word_changed_timer.interval())
    def do_current_word_changed(self):
        """Update the suggestion list and the Ignore/Add buttons for the
        newly selected word (invoked via the debounce timer)."""
        try:
            b = self.ignore_button
        except AttributeError:
            # UI not fully constructed yet
            return
        ignored = recognized = in_user_dictionary = False
        current = self.words_view.currentIndex()
        current_word = ''
        if current.isValid():
            row = current.row()
            w = self.words_model.word_for_row(row)
            if w is not None:
                ignored = dictionaries.is_word_ignored(*w)
                recognized = self.words_model.spell_map[w]
                current_word = w[0]
                if recognized:
                    in_user_dictionary = dictionaries.word_in_user_dictionary(*w)
                suggestions = dictionaries.suggestions(*w)
                self.suggested_list.clear()
                for i, s in enumerate(suggestions):
                    item = QListWidgetItem(s, self.suggested_list)
                    if i == 0:
                        # Pre-select the first (best) suggestion
                        self.suggested_list.setCurrentItem(item)
                        self.suggested_word.setText(s)

        prefix = b.unign_text if ignored else b.ign_text
        b.setText(prefix + ' ' + current_word)
        b.setToolTip(b.unign_tt if ignored else b.ign_tt)
        # Ignore only makes sense for misspelled or already-ignored words
        b.setEnabled(current.isValid() and (ignored or not recognized))
        if not self.user_dictionaries_missing_label.isVisible():
            b = self.add_button
            b.setText(b.remove_text if in_user_dictionary else b.add_text)
            b.setToolTip(b.remove_tt if in_user_dictionary else b.add_tt)
            # Hide the combo when the action is 'remove from dictionaries'
            self.user_dictionaries.setVisible(not in_user_dictionary)
def current_suggestion_changed(self, item):
try:
self.suggested_word.setText(item.text())
except AttributeError:
pass # item is None
def change_word(self):
current = self.words_view.currentIndex()
if not current.isValid():
return
row = current.row()
w = self.words_model.word_for_row(row)
if w is None:
return
new_word = unicode(self.suggested_word.text())
self.change_requested.emit(w, new_word)
def change_word_after_update(self, w, new_word):
self.refresh(change_request=(w, new_word))
def change_to(self, w, new_word):
if new_word is None:
self.suggested_word.setFocus(Qt.OtherFocusReason)
self.suggested_word.clear()
return
self.change_requested.emit(w, new_word)
def do_change_word(self, w, new_word):
changed_files = replace_word(current_container(), new_word, self.words_model.words[w], w[1])
if changed_files:
self.word_replaced.emit(changed_files)
w = self.words_model.replace_word(w, new_word)
row = self.words_model.row_for_word(w)
if row > -1:
self.words_view.highlight_row(row)
def toggle_ignore(self):
current = self.words_view.currentIndex()
if current.isValid():
self.words_model.toggle_ignored(current.row())
def ignore_all(self):
rows = {i.row() for i in self.words_view.selectionModel().selectedRows()}
rows.discard(-1)
if rows:
self.words_model.ignore_words(rows)
def add_all(self, dicname):
rows = {i.row() for i in self.words_view.selectionModel().selectedRows()}
rows.discard(-1)
if rows:
self.words_model.add_words(dicname, rows)
def add_remove(self):
current = self.words_view.currentIndex()
if current.isValid():
if self.user_dictionaries.isVisible(): # add
udname = unicode(self.user_dictionaries.currentText())
self.words_model.add_word(current.row(), udname)
else:
self.words_model.remove_word(current.row())
def update_show_only_misspelt(self):
m = self.words_model
m.show_only_misspelt = self.show_only_misspelled.isChecked()
self.words_view.horizontalHeader().setSectionHidden(3, m.show_only_misspelt)
self.do_filter()
def __enter__(self):
idx = self.words_view.currentIndex().row()
self.__current_word = self.words_model.word_for_row(idx)
    def __exit__(self, *args):
        # Restore the selection remembered by __enter__, clamping to row 0
        # if the word is gone (row_for_word may return -1).
        if self.__current_word is not None:
            row = self.words_model.row_for_word(self.__current_word)
            self.words_view.highlight_row(max(0, row))
        self.__current_word = None
    def do_filter(self):
        """Filter the word list by the entry text, preserving the current
        selection via the __enter__/__exit__ context manager on self."""
        text = unicode(self.filter_text.text()).strip()
        with self:
            self.words_model.filter(text)
    def refresh(self, change_request=None):
        """Re-scan the book for words on a background thread.

        If *change_request* is a (word, new_word) pair, it is applied after
        the scan completes (see work_done()).
        """
        if not self.isVisible():
            return
        # Signal any running worker to discard its result, then wait for it.
        self.cancel = True
        if self.thread is not None:
            self.thread.join()
        self.stack.setCurrentIndex(0)
        self.progress_indicator.startAnimation()
        self.refresh_requested.emit()
        self.thread = Thread(target=partial(self.get_words, change_request=change_request))
        self.thread.daemon = True
        self.cancel = False
        self.thread.start()
    def get_words(self, change_request=None):
        """Worker-thread entry point: collect all words in the book and
        their recognized/misspelled status, then hand back to the GUI."""
        try:
            words = get_all_words(current_container(), dictionaries.default_locale)
            spell_map = {w:dictionaries.recognized(*w) for w in words}
        except:
            # NOTE(review): bare except -- deliberately converts any failure
            # into a traceback string; work_done() detects the non-dict
            # result and shows it in an error dialog.
            import traceback
            traceback.print_exc()
            words = traceback.format_exc()
            spell_map = {}
        if self.cancel:
            self.end_work()
        else:
            self.work_finished.emit(words, spell_map, change_request)
    def end_work(self):
        # Return the UI to its idle state and drop any stale word data.
        self.stack.setCurrentIndex(1)
        self.progress_indicator.stopAnimation()
        self.words_model.clear()
    def work_done(self, words, spell_map, change_request):
        """GUI-thread handler for a finished scan.

        *words* is either the word dict or (on failure) a traceback string;
        a pending *change_request* from refresh() is applied at the end.
        """
        self.end_work()
        if not isinstance(words, dict):
            # get_words() put a formatted traceback in `words` on failure.
            return error_dialog(self, _('Failed to check spelling'), _(
                    'Failed to check spelling, click "Show details" for the full error information.'),
                                det_msg=words, show=True)
        if not self.isVisible():
            return
        self.words_model.set_data(words, spell_map)
        col, reverse = self.words_model.sort_on
        self.words_view.horizontalHeader().setSortIndicator(
            col, Qt.DescendingOrder if reverse else Qt.AscendingOrder)
        self.words_view.highlight_row(0)
        self.update_summary()
        self.initialize_user_dictionaries()
        if self.words_model.rowCount() > 0:
            # Use the first row's height as the uniform row height.
            self.words_view.resizeRowToContents(0)
            self.words_view.verticalHeader().setDefaultSectionSize(self.words_view.rowHeight(0))
        if change_request is not None:
            w, new_word = change_request
            if w in self.words_model.words:
                self.do_change_word(w, new_word)
            else:
                error_dialog(self, _('Files edited'), _(
                    'The files in the editor were edited outside the spell check dialog,'
                    ' and the word %s no longer exists.') % w[0], show=True)
    def update_summary(self):
        """Update the misspelled/total word counts shown in the summary label."""
        self.summary.setText(_('Misspelled words: {0} Total words: {1}').format(*self.words_model.counts))
    def sizeHint(self):
        # Preferred initial dialog size.
        return QSize(1000, 650)
    def show(self):
        # Kick off a scan as soon as the event loop shows the dialog.
        Dialog.show(self)
        QTimer.singleShot(0, self.refresh)
    def accept(self):
        # Persist the column layout before closing (mirrors reject()).
        tprefs[self.state_name] = bytearray(self.words_view.horizontalHeader().saveState())
        Dialog.accept(self)
    def reject(self):
        # Persist the column layout before closing (mirrors accept()).
        tprefs[self.state_name] = bytearray(self.words_view.horizontalHeader().saveState())
        Dialog.reject(self)
    @classmethod
    def test(cls):
        """Manual test entry point: open this dialog on the book whose path
        is the last command-line argument."""
        from calibre.ebooks.oeb.polish.container import get_container
        from calibre.gui2.tweak_book import set_current_container
        set_current_container(get_container(sys.argv[-1], tweak_mode=True))
        set_book_locale(current_container().mi.language)
        d = cls()
        QTimer.singleShot(0, d.refresh)
        d.exec_()
# }}}
# Find next occurrence {{{
def find_next(word, locations, current_editor, current_editor_name,
              gui_parent, show_editor, edit_file):
    """Find the next occurrence of *word* among *locations*.

    The current editor is searched first (from its cursor), then the other
    files in order, wrapping back to the current file. Returns True if an
    occurrence was found and its editor shown.
    """
    # Group locations by the file they occur in, preserving order.
    files = OrderedDict()
    for l in locations:
        try:
            files[l.file_name].append(l)
        except KeyError:
            files[l.file_name] = [l]
    if current_editor_name not in files:
        current_editor_name = None
        # NOTE: Python 2 API (dict.iteritems).
        locations = [(fname, {l.original_word for l in _locations}, False) for fname, _locations in files.iteritems()]
    else:
        # Re-order the list of locations to search so that we search in the
        # current editor first
        lfiles = list(files)
        idx = lfiles.index(current_editor_name)
        before, after = lfiles[:idx], lfiles[idx+1:]
        # Current file is appended again at the end so the search wraps.
        lfiles = after + before + [current_editor_name]
        locations = [(current_editor_name, {l.original_word for l in files[current_editor_name]}, True)]
        for fname in lfiles:
            locations.append((fname, {l.original_word for l in files[fname]}, False))
    for file_name, original_words, from_cursor in locations:
        ed = editors.get(file_name, None)
        if ed is None:
            # Open the file in an editor on demand.
            edit_file(file_name)
            ed = editors[file_name]
        if ed.find_spell_word(original_words, word[1].langcode, from_cursor=from_cursor):
            show_editor(file_name)
            return True
    return False
def find_next_error(current_editor, current_editor_name, gui_parent, show_editor, edit_file):
    """Jump to the next spelling error in the book.

    Searches the current editor first (from its cursor), then the other
    checkable files, then the current file again from the top (wrap-around).
    Returns True if an error was found and its editor shown.
    """
    files = get_checkable_file_names(current_container())[0]
    if current_editor_name not in files:
        current_editor_name = None
    else:
        idx = files.index(current_editor_name)
        before, after = files[:idx], files[idx+1:]
        # The current file appears twice on purpose: first visit searches
        # from the cursor, the final visit searches from the start.
        files = [current_editor_name] + after + before + [current_editor_name]
    for file_name in files:
        from_cursor = False
        if file_name == current_editor_name:
            from_cursor = True
            current_editor_name = None  # only the first visit uses the cursor
        ed = editors.get(file_name, None)
        if ed is None:
            edit_file(file_name)
            ed = editors[file_name]
        if ed.editor.find_next_spell_error(from_cursor=from_cursor):
            show_editor(file_name)
            return True
    return False
# }}}
if __name__ == '__main__':
    app = QApplication([])
    dictionaries.initialize()
    # NOTE(review): this runs ManageUserDictionaries.test(), not the
    # SpellCheck dialog defined above -- confirm this is intentional.
    ManageUserDictionaries.test()
    del app
|
kristianfzr/AutomateTheBoringStuff | refs/heads/master | trafficLightsimualtion.py | 12133432 | |
pkimber/mail | refs/heads/master | example_mail/management/commands/__init__.py | 12133432 | |
Averroes/urssus | refs/heads/master | urssus/util/__init__.py | 12133432 | |
akarambir/askcoding | refs/heads/develop | settings/__init__.py | 12133432 | |
fixator/aprinter | refs/heads/master | host_stuff/littlevent/__init__.py | 12133432 | |
T2DREAM/t2dream-portal | refs/heads/master | src/encoded/tests/test_upgrade_page.py | 1 | import pytest
@pytest.fixture
def page():
    """Minimal page item used as the base of the upgrade fixtures."""
    return {'name': 'Fake Page'}
@pytest.fixture
def page_1(page):
    """Schema v1 page whose news_keywords mix kept and dropped terms."""
    item = page.copy()
    item['schema_version'] = '1'
    item['news_keywords'] = [
        'RNA binding', 'Experiment', 'DNA methylation',
        'promoter-like regions', 'Conferences',
    ]
    return item
@pytest.fixture
def page_2(page):
    """Schema v1 page whose news_keywords are all dropped by the upgrade."""
    item = page.copy()
    item['schema_version'] = '1'
    item['news_keywords'] = ['Experiment', 'promoter-like regions']
    return item
@pytest.fixture
def page_3(page):
    """Schema v1 page with no news_keywords at all."""
    item = page.copy()
    item['schema_version'] = '1'
    return item
def test_page_upgrade_keep(upgrader, page_1):
    """Upgrading v1 -> v2 keeps only the allowed news_keywords."""
    value = upgrader.upgrade('page', page_1, target_version='2')
    assert value['schema_version'] == '2'
    assert value['news_keywords'] == ['RNA binding', 'DNA methylation', 'Conferences']
def test_page_upgrade_empty(upgrader, page_2):
    """Upgrading drops every keyword when none are allowed."""
    value = upgrader.upgrade('page', page_2, target_version='2')
    assert value['schema_version'] == '2'
    assert value['news_keywords'] == []
def test_page_upgrade_none(upgrader, page_3):
    """Upgrading a page without news_keywords does not add the property."""
    value = upgrader.upgrade('page', page_3, target_version='2')
    assert value['schema_version'] == '2'
    assert 'news_keywords' not in value
|
TamiaLab/carnetdumaker | refs/heads/master | apps/snippets/search_indexes.py | 1 | """
Search indexes for the snippets app.
"""
from haystack import indexes
from .models import (CodeSnippet,
CodeSnippetBundle)
class CodeSnippetIndex(indexes.SearchIndex, indexes.Indexable):
    """
    Haystack search index for the ``CodeSnippet`` model.
    """

    # Primary document field, rendered from a search template.
    text = indexes.CharField(document=True, use_template=True)
    license = indexes.CharField(model_attr='license', null=True)
    author = indexes.CharField(model_attr='author', null=True)

    def get_model(self):
        """Model class indexed by this index."""
        return CodeSnippet

    def index_queryset(self, using=None):
        """Queryset used when rebuilding the whole index."""
        qs = self.get_model().objects.all()
        return qs.select_related('author', 'license')

    def get_updated_field(self):
        """Model field used to detect recently modified objects."""
        return 'last_modification_date'
class CodeSnippetBundleIndex(indexes.SearchIndex, indexes.Indexable):
    """
    Haystack search index for the ``CodeSnippetBundle`` model.
    """

    # Primary document field, rendered from a search template.
    text = indexes.CharField(document=True, use_template=True)
    author = indexes.CharField(model_attr='author', null=True)

    def get_model(self):
        """Model class indexed by this index."""
        return CodeSnippetBundle

    def index_queryset(self, using=None):
        """Queryset used when rebuilding the whole index."""
        qs = self.get_model().objects.all()
        return qs.select_related('author')

    def get_updated_field(self):
        """Model field used to detect recently modified objects."""
        return 'last_modification_date'
|
rolepoint/flump | refs/heads/master | test/methods/test_get_single.py | 1 | from mock import ANY
from ..helpers import create_user, get_user
def test_get(flask_client):
    """GET of an existing user returns 200 and the full JSON-API document."""
    user = create_user(flask_client)
    response = get_user(flask_client, user.json['data']['id'])
    assert response.status_code == 200
    # etag is server-generated, so only its presence is asserted (ANY).
    assert response.json == {
        'data': {
            'attributes': {'name': 'Carl', 'age': 26},
            'id': '1', 'type': 'user', 'meta': {'etag': ANY}
        },
        'links': {'self': 'http://localhost/tester/user/1'}
    }
def test_get_returns_not_modified_for_same_etag(flask_client):
    """A conditional GET with the current Etag returns 304 with no body."""
    user = create_user(flask_client)
    response = get_user(flask_client, user.json['data']['id'])
    response = get_user(
        flask_client, user.json['data']['id'], etag=response.headers['Etag']  # noqa
    )
    assert response.status_code == 304
    assert not response.data
def test_get_fails_if_entity_does_not_exist(flask_client):
    """GET of a nonexistent user id returns 404."""
    response = get_user(flask_client, '1')
    assert response.status_code == 404
|
sztanko/hadoop-common | refs/heads/HADOOP-3628 | src/contrib/hod/testing/testHodRing.py | 118 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import unittest, os, sys, re, threading, time
myDirectory = os.path.realpath(sys.argv[0])
rootDirectory = re.sub("/testing/.*", "", myDirectory)
sys.path.append(rootDirectory)
from testing.lib import BaseTestSuite
excludes = []
import tempfile, getpass, logging
from xml.dom import minidom
from hodlib.Hod.hadoop import hadoopConfig
from hodlib.HodRing.hodRing import CommandDesc, HadoopCommand
# All test-case classes should have the naming convention test_.*
# All test-case classes should have the naming convention test_.*
class test_HadoopCommand(unittest.TestCase):
    """Tests for hodlib.HodRing.hodRing.HadoopCommand."""

    def setUp(self):
        """Build a HadoopCommand with dummy attrs under /tmp/hod-<user>."""
        self.rootDir = '/tmp/hod-%s' % getpass.getuser()
        self.id = 0
        self.desc = None
        self.tempDir = os.path.join(self.rootDir,'test_HadoopCommand_tempDir')
        self.pkgDir = os.path.join(self.rootDir,'test_HadoopCommand_pkgDir')
        self.log = logging.getLogger() # TODO Use MockLogger
        self.javaHome = '/usr/java/bin/'
        self.mrSysDir = '/user/' + getpass.getuser() + '/mapredsystem'

        self.attrs = {}
        self.finalAttrs = {
                            'fs.default.name': 'nohost.apache.com:56366',
                            'mapred.child.java.opts' : '-Xmx1024m',
                            'mapred.compress.map.output' : 'false',
                          }
        self.attrs = {
                        'mapred.userlog.limit' : '200',
                        'mapred.userlog.retain.hours' : '10',
                        'mapred.reduce.parallel.copies' : '20',
                     }
        self.desc = CommandDesc(
                                {
                                  'name' : 'dummyHadoop',
                                  'program' : 'bin/hadoop',
                                  'pkgdirs' : self.pkgDir,
                                  'final-attrs' : self.finalAttrs,
                                  'attrs' : self.attrs,
                                }, self.log
                               )
        # TODO
        # 4th arg to HadoopCommand 'tardir' is not used at all. Instead pkgdir is
        # specified through HadoopCommand.run(pkgdir). This could be changed so
        # that pkgdir is specified at the time of object creation.
        # END OF TODO
        self.hadoopCommand = HadoopCommand(self.id, self.desc, self.tempDir,
                                self.pkgDir, (50000, 60000), self.log, self.javaHome,
                                self.mrSysDir, restart=True)
        self.hadoopSite = os.path.join( self.hadoopCommand.confdir,
                                        'hadoop-site.xml')
        pass

    def test_createHadoopSiteXml(self):
        """Generate hadoop-site.xml and verify fs.default.name is hdfs://."""
        self.hadoopCommand._createHadoopSiteXml()

        xmldoc = minidom.parse(self.hadoopSite)
        xmldoc = xmldoc.childNodes[0] # leave out xml spec
        properties = xmldoc.childNodes # children of tag configuration
        keyvals = {}
        for prop in properties:
            if not isinstance(prop,minidom.Comment):
                #           ---------- tag -------------------- -value elem-- data --
                name = prop.getElementsByTagName('name')[0].childNodes[0].data
                value = prop.getElementsByTagName('value')[0].childNodes[0].data
                keyvals[name] = value

        # fs.default.name should start with hdfs://
        assert(keyvals['fs.default.name'].startswith('hdfs://'))

        # TODO other tests
        pass

    def tearDown(self):
        pass
class HodRingTestSuite(BaseTestSuite):
    """Test suite collecting all test_* classes in this module."""

    def __init__(self):
        # suite setup
        BaseTestSuite.__init__(self, __name__, excludes)
        pass

    def cleanUp(self):
        # suite tearDown
        pass
def RunHodRingTests():
    """Run the HodRing test suite and return the aggregated result."""
    # modulename_suite
    testSuite = HodRingTestSuite()
    result = testSuite.runTests()
    testSuite.cleanUp()
    return result
if __name__ == "__main__":
    # Run the suite when executed directly.
    RunHodRingTests()
|
jcmarks/jcmarks-mobile | refs/heads/master | lib/flask/globals.py | 783 | # -*- coding: utf-8 -*-
"""
flask.globals
~~~~~~~~~~~~~
Defines all the global objects that are proxies to the current
active context.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import partial
from werkzeug.local import LocalStack, LocalProxy
def _lookup_req_object(name):
    """Return attribute *name* from the topmost request context.

    Raises RuntimeError when no request context has been pushed.
    """
    ctx = _request_ctx_stack.top
    if ctx is None:
        raise RuntimeError('working outside of request context')
    return getattr(ctx, name)
def _lookup_app_object(name):
    """Return attribute *name* from the topmost application context.

    Raises RuntimeError when no application context has been pushed.
    """
    ctx = _app_ctx_stack.top
    if ctx is None:
        raise RuntimeError('working outside of application context')
    return getattr(ctx, name)
def _find_app():
    """Return the application bound to the current application context.

    Raises RuntimeError when no application context has been pushed.
    """
    ctx = _app_ctx_stack.top
    if ctx is None:
        raise RuntimeError('working outside of application context')
    return ctx.app
# context locals
# The LocalStack instances hold per-context state; the LocalProxy objects
# below forward attribute access to the appropriate attribute of the
# current top of the matching stack at every use.
_request_ctx_stack = LocalStack()
_app_ctx_stack = LocalStack()
current_app = LocalProxy(_find_app)
request = LocalProxy(partial(_lookup_req_object, 'request'))
session = LocalProxy(partial(_lookup_req_object, 'session'))
g = LocalProxy(partial(_lookup_app_object, 'g'))
|
poljeff/odoo | refs/heads/8.0 | addons/report_intrastat/report_intrastat.py | 201 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.sql import drop_view_if_exists
from openerp.addons.decimal_precision import decimal_precision as dp
class res_country(osv.osv):
    """Extends res.country with an Intrastat-membership flag."""
    _name = 'res.country'
    _inherit = 'res.country'
    _columns = {
        # True when the country participates in Intrastat reporting.
        'intrastat': fields.boolean('Intrastat member'),
    }
    _defaults = {
        'intrastat': lambda *a: False,
    }
class report_intrastat_code(osv.osv):
    """Catalog of Intrastat commodity codes."""
    _name = "report.intrastat.code"
    _description = "Intrastat code"
    _translate = False
    _columns = {
        'name': fields.char('Intrastat Code'),
        'description': fields.char('Description'),
    }
class product_template(osv.osv):
    """Extends product.template with its Intrastat commodity code."""
    _name = "product.template"
    _inherit = "product.template"
    _columns = {
        'intrastat_id': fields.many2one('report.intrastat.code', 'Intrastat code'),
    }
class report_intrastat(osv.osv):
    """Read-only Intrastat report backed by a SQL view (_auto = False)."""
    _name = "report.intrastat"
    _description = "Intrastat report"
    _auto = False
    _columns = {
        'name': fields.char('Year', required=False, readonly=True),
        'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
                                  ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
        'supply_units':fields.float('Supply Units', readonly=True),
        'ref':fields.char('Source document', readonly=True),
        'code': fields.char('Country code', size=2, readonly=True),
        'intrastat_id': fields.many2one('report.intrastat.code', 'Intrastat code', readonly=True),
        'weight': fields.float('Weight', readonly=True),
        'value': fields.float('Value', readonly=True, digits_compute=dp.get_precision('Account')),
        'type': fields.selection([('import', 'Import'), ('export', 'Export')], 'Type'),
        'currency_id': fields.many2one('res.currency', "Currency", readonly=True),
    }

    def init(self, cr):
        # (Re)create the backing view: aggregates open/paid invoice lines by
        # year, month, intrastat code, country, invoice and currency. Weight
        # and units are normalized via the UoM factor only when the line UoM
        # shares a category with the product's default UoM.
        drop_view_if_exists(cr, 'report_intrastat')
        cr.execute("""
            create or replace view report_intrastat as (
                select
                    to_char(inv.create_date, 'YYYY') as name,
                    to_char(inv.create_date, 'MM') as month,
                    min(inv_line.id) as id,
                    intrastat.id as intrastat_id,
                    upper(inv_country.code) as code,
                    sum(case when inv_line.price_unit is not null
                            then inv_line.price_unit * inv_line.quantity
                            else 0
                        end) as value,
                    sum(
                        case when uom.category_id != puom.category_id then (pt.weight_net * inv_line.quantity)
                        else (pt.weight_net * inv_line.quantity * uom.factor) end
                    ) as weight,
                    sum(
                        case when uom.category_id != puom.category_id then inv_line.quantity
                        else (inv_line.quantity * uom.factor) end
                    ) as supply_units,
                    inv.currency_id as currency_id,
                    inv.number as ref,
                    case when inv.type in ('out_invoice','in_refund')
                        then 'export'
                        else 'import'
                        end as type
                from
                    account_invoice inv
                    left join account_invoice_line inv_line on inv_line.invoice_id=inv.id
                    left join (product_template pt
                        left join product_product pp on (pp.product_tmpl_id = pt.id))
                    on (inv_line.product_id = pp.id)
                    left join product_uom uom on uom.id=inv_line.uos_id
                    left join product_uom puom on puom.id = pt.uom_id
                    left join report_intrastat_code intrastat on pt.intrastat_id = intrastat.id
                    left join (res_partner inv_address
                        left join res_country inv_country on (inv_country.id = inv_address.country_id))
                    on (inv_address.id = inv.partner_id)
                where
                    inv.state in ('open','paid')
                    and inv_line.product_id is not null
                    and inv_country.intrastat=true
                group by to_char(inv.create_date, 'YYYY'), to_char(inv.create_date, 'MM'),intrastat.id,inv.type,pt.intrastat_id, inv_country.code,inv.number,  inv.currency_id
            )""")
|
jdsika/TUM_HOly | refs/heads/master | openrave/sympy/polys/domains/modularinteger.py | 5 | """Implementation of :class:`ModularInteger` class. """
class ModularInteger(object):
    """An integer reduced modulo ``mod``.

    ``mod``, ``dom`` and ``sym`` are class attributes supplied by concrete
    subclasses (see :func:`ModularIntegerFactory`).
    """

    mod, dom, sym = None, None, None

    __slots__ = ['val']

    def __init__(self, val):
        self.val = val % self.mod

    def __hash__(self):
        return hash((self.val, self.mod))

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self.val)

    def __str__(self):
        return "%s mod %s" % (self.val, self.mod)

    def __int__(self):
        return int(self.to_int())

    def to_int(self):
        """Return the canonical integer; with ``sym`` set, values above
        ``mod // 2`` map to their negative representative."""
        if not self.sym:
            return self.val
        return self.val if self.val <= self.mod // 2 else self.val - self.mod

    def __pos__(self):
        return self

    def __neg__(self):
        return self.__class__(-self.val)

    def __add__(self, other):
        return self.__class__(self.val + other.val)

    def __sub__(self, other):
        return self.__class__(self.val - other.val)

    def __mul__(self, other):
        return self.__class__(self.val * other.val)

    def __div__(self, other):
        # Division is multiplication by the modular inverse.
        return self.__class__(self.val * self.dom.invert(other.val, self.mod))

    __truediv__ = __div__

    def __mod__(self, other):
        return self.__class__(self.val % other.val)

    def __pow__(self, exp):
        if not exp:
            return self.__class__(self.dom.one)

        if exp < 0:
            base, exp = self.dom.invert(self.val, self.mod), -exp
        else:
            base = self.val

        return self.__class__(base**exp)

    def __eq__(self, other):
        return isinstance(other, ModularInteger) and self.val == other.val

    def __ne__(self, other):
        return not self.__eq__(other)

    def __nonzero__(self):
        # Python 2 truthiness hook.
        return bool(self.val)
def ModularIntegerFactory(_mod, _dom, _sym):
    """Build a :class:`ModularInteger` subclass bound to modulus *_mod*."""
    class cls(ModularInteger):
        mod, dom, sym = _dom.convert(_mod), _dom, _sym

    prefix = "SymmetricModularInteger" if _sym else "ModularInteger"
    cls.__name__ = "%s%s" % (prefix, _mod)

    return cls
|
ytjia/leetcode | refs/heads/master | algorithms/python/leetcode/tests/test_FindAllAnagramsinaString.py | 2 | # -*- coding: utf-8 -*-
# Authors: Y. Jia <ytjia.zju@gmail.com>
import unittest
from .. import FindAllAnagramsinaString
class test_FindAllAnagramsinaString(unittest.TestCase):
    """Unit tests for the FindAllAnagramsinaString solution."""

    # Shared solver instance; safe as a class attribute as long as the
    # solver is stateless.
    solution = FindAllAnagramsinaString.Solution()

    def test_findAnagrams(self):
        # Expected values are the start indices of each anagram of p in s.
        self.assertEqual(self.solution.findAnagrams("cbaebabacd", "abc"), [0, 6])
        self.assertEqual(self.solution.findAnagrams("", "abc"), [])
        self.assertEqual(self.solution.findAnagrams("dfasdf", "abc"), [])
        self.assertEqual(self.solution.findAnagrams("abab", "ab"), [0, 1, 2])
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
ssarangi/numba | refs/heads/master | numba/hsa/hlc/config.py | 6 | from __future__ import print_function, absolute_import
import sys
import os
# Set a default for HSAILBIN if it is not defined.
# This is only used for cmdline HLC
os.environ['HSAILBIN'] = os.environ.get('HSAILBIN', '/opt/amd/bin')
# The default location of the HSAIL builtins library
DEFAULT_BUILTIN_PATH = os.path.join(sys.prefix, 'lib', 'builtins-hsail.opt.bc')
# The path where numba will look for the HSAIL builtins library.
# Use user specified path if it is defined.
BUILTIN_PATH = os.environ.get("NUMBA_HSAIL_BUILTINS_BC", DEFAULT_BUILTIN_PATH)
|
sch3m4/intelmq | refs/heads/master | intelmq/bots/parsers/dshield/__init__.py | 12133432 | |
unixxxx/simplecms | refs/heads/master | controllers/__init__.py | 12133432 | |
dpassante/ansible | refs/heads/devel | lib/ansible/module_utils/facts/network/sunos.py | 170 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.module_utils.facts.network.base import NetworkCollector
from ansible.module_utils.facts.network.generic_bsd import GenericBsdIfconfigNetwork
class SunOSNetwork(GenericBsdIfconfigNetwork):
    """
    This is the SunOS Network Class.
    It uses the GenericBsdIfconfigNetwork.

    Solaris can have different FLAGS and MTU for IPv4 and IPv6 on the same interface
    so these facts have been moved inside the 'ipv4' and 'ipv6' lists.
    """
    platform = 'SunOS'

    # Solaris 'ifconfig -a' will print interfaces twice, once for IPv4 and again for IPv6.
    # MTU and FLAGS also may differ between IPv4 and IPv6 on the same interface.
    # 'parse_interface_line()' checks for previously seen interfaces before defining
    # 'current_if' so that IPv6 facts don't clobber IPv4 facts (or vice versa).
    def get_interfaces_info(self, ifconfig_path):
        """Parse `ifconfig -a` output into (interfaces dict, ips dict)."""
        interfaces = {}
        current_if = {}
        ips = dict(
            all_ipv4_addresses=[],
            all_ipv6_addresses=[],
        )
        rc, out, err = self.module.run_command([ifconfig_path, '-a'])

        for line in out.splitlines():

            if line:
                words = line.split()

                # A line starting in column 0 opens a new interface stanza.
                if re.match(r'^\S', line) and len(words) > 3:
                    current_if = self.parse_interface_line(words, current_if, interfaces)
                    interfaces[current_if['device']] = current_if
                elif words[0].startswith('options='):
                    self.parse_options_line(words, current_if, ips)
                elif words[0] == 'nd6':
                    self.parse_nd6_line(words, current_if, ips)
                elif words[0] == 'ether':
                    self.parse_ether_line(words, current_if, ips)
                elif words[0] == 'media:':
                    self.parse_media_line(words, current_if, ips)
                elif words[0] == 'status:':
                    self.parse_status_line(words, current_if, ips)
                elif words[0] == 'lladdr':
                    self.parse_lladdr_line(words, current_if, ips)
                elif words[0] == 'inet':
                    self.parse_inet_line(words, current_if, ips)
                elif words[0] == 'inet6':
                    self.parse_inet6_line(words, current_if, ips)
                else:
                    self.parse_unknown_line(words, current_if, ips)

        # 'parse_interface_line' and 'parse_inet*_line' leave two dicts in the
        # ipv4/ipv6 lists which is ugly and hard to read.
        # This quick hack merges the dictionaries. Purely cosmetic.
        for iface in interfaces:
            for v in 'ipv4', 'ipv6':
                combined_facts = {}
                for facts in interfaces[iface][v]:
                    combined_facts.update(facts)
                if len(combined_facts.keys()) > 0:
                    interfaces[iface][v] = [combined_facts]

        return interfaces, ips

    def parse_interface_line(self, words, current_if, interfaces):
        """Start (or continue) the fact dict for the interface on this line."""
        device = words[0][0:-1]
        if device not in interfaces:
            current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
        else:
            current_if = interfaces[device]
        flags = self.get_options(words[1])
        v = 'ipv4'
        if 'IPv6' in flags:
            v = 'ipv6'
        if 'LOOPBACK' in flags:
            current_if['type'] = 'loopback'
        current_if[v].append({'flags': flags, 'mtu': words[3]})
        current_if['macaddress'] = 'unknown'    # will be overwritten later
        return current_if

    # Solaris displays single digit octets in MAC addresses e.g. 0:1:2:d:e:f
    # Add leading zero to each octet where needed.
    def parse_ether_line(self, words, current_if, ips):
        """Record the MAC address, zero-padding each octet to two digits."""
        macaddress = ''
        for octet in words[1].split(':'):
            octet = ('0' + octet)[-2:None]
            macaddress += (octet + ':')
        current_if['macaddress'] = macaddress[0:-1]
class SunOSNetworkCollector(NetworkCollector):
    """Registers SunOSNetwork as the network fact collector on SunOS."""
    _fact_class = SunOSNetwork
    _platform = 'SunOS'
|
poojavade/Genomics_Docker | refs/heads/master | Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/scipy/weave/examples/binary_search.py | 97 | # Offers example of inline C for binary search algorithm.
# Borrowed from Kalle Svensson in the Python Cookbook.
# The results are nearly in the "not worth it" category.
#
# C:\home\ej\wrk\scipy\compiler\examples>python binary_search.py
# Binary search for 3000 items in 100000 length list of integers:
# speed in python: 0.139999985695
# speed in c: 0.0900000333786
# speed up: 1.41
# search(a,3450) 3450 3450
# search(a,-1) -1 -1
# search(a,10001) 10001 10001
#
# Note -- really need to differentiate between conversion errors and
# run time errors. This would reduce useless compiles and provide a
# more intelligent control of things.
from __future__ import absolute_import, print_function
import sys
sys.path.insert(0,'..')
# from compiler import inline_tools
import scipy.weave.inline_tools as inline_tools
from bisect import bisect_left as bisect
import types
def c_int_search(seq,t,chk=1):
    """Binary search in list *seq* for *t* via weave-inlined C.

    Returns the index of *t*, or -1 when absent. *chk* is accepted for API
    symmetry with c_int_search_scxx but unused here (the Python-level
    checks below are commented out; the C code type-checks instead).
    """
    # do partial type checking in Python.
    # checking that list items are ints should happen in py_to_scalar<int>
    # if chk:
    #     assert(type(t) is int)
    #     assert(type(seq) is list)
    code = """
           #line 33 "binary_search.py"
           if (!PyList_Check(py_seq))
               py::fail(PyExc_TypeError, "seq must be a list");
           if (!PyInt_Check(py_t))
               py::fail(PyExc_TypeError, "t must be an integer");
           int val, m, min = 0;
           int max = seq.len()- 1;
           for(;;)
           {
               if (max < min )
               {
                   return_val = -1;
                   break;
               }
               m = (min + max) / 2;
               val = py_to_int(PyList_GET_ITEM(py_seq,m),"val");
               if (val < t)
                   min = m + 1;
               else if (val > t)
                   max = m - 1;
               else
               {
                   return_val = m;
                   break;
               }
           }
           """
    # return inline_tools.inline(code,['seq','t'],compiler='msvc')
    return inline_tools.inline(code,['seq','t'],verbose=2)
def c_int_search_scxx(seq,t,chk=1):
    """Binary search like c_int_search, but using the scxx C++ wrappers.

    When *chk* is true, argument types are asserted in Python first.
    """
    # do partial type checking in Python.
    # checking that list items are ints should happen in py_to_scalar<int>
    if chk:
        assert(type(t) is int)
        assert(type(seq) is list)
    code = """
           #line 67 "binary_search.py"
           int val, m, min = 0;
           int max = seq.len()- 1;
           for(;;)
           {
               if (max < min )
               {
                   return_val = -1;
                   break;
               }
               m = (min + max) / 2;
               val = seq[m];
               if (val < t)
                   min = m + 1;
               else if (val > t)
                   max = m - 1;
               else
               {
                   return_val = m;
                   break;
               }
           }
           """
    # return inline_tools.inline(code,['seq','t'],compiler='msvc')
    return inline_tools.inline(code,['seq','t'],verbose=2)
# Optionally define a numpy-array variant; silently skipped when numpy
# (or the weave toolchain options used below) is unavailable.
try:
    from numpy import *

    def c_array_int_search(seq,t):
        """Binary search in a numpy int array *seq* for *t* (C inline)."""
        code = """
               #line 62 "binary_search.py"
               int val, m, min = 0;
               int max = Nseq[0] - 1;
               PyObject *py_val;
               for(;;)
               {
                   if (max < min )
                   {
                       return_val = -1;
                       break;
                   }
                   m = (min + max) / 2;
                   val = seq[m];
                   if (val < t)
                       min = m + 1;
                   else if (val > t)
                       max = m - 1;
                   else
                   {
                       return_val = m;
                       break;
                   }
               }
               """
        # return inline_tools.inline(code,['seq','t'],compiler='msvc')
        return inline_tools.inline(code,['seq','t'],verbose=2,
                                   extra_compile_args=['-O2','-G6'])
except:
    # NOTE(review): bare except is intentional best-effort here, but it also
    # hides errors other than a missing numpy.
    pass
def py_int_search(seq, t):
    """Pure-Python binary search.

    Returns the index of *t* in the sorted sequence *seq*, or -1 when *t*
    is not present.
    """
    lo = 0
    hi = len(seq) - 1
    while True:
        if hi < lo:
            return -1
        # Floor division keeps the midpoint an int on Python 3 as well;
        # plain `/` yields a float there and breaks indexing. (The local
        # names also no longer shadow the builtins min/max.)
        m = (lo + hi) // 2
        if seq[m] < t:
            lo = m + 1
        elif seq[m] > t:
            hi = m - 1
        else:
            return m
import time
def search_compare(a,n):
    """Benchmark the Python, bisect and weave binary searches over *a*.

    NOTE(review): the header print reads the global `m` (set in the
    __main__ block) rather than len(a) -- confirm this is intended.
    """
    print('Binary search for %d items in %d length list of integers:' % (n,m))
    t1 = time.time()
    for i in range(n):
        py_int_search(a,i)
    t2 = time.time()
    py = (t2-t1)
    print(' speed in python:', (t2 - t1))

    # bisect
    t1 = time.time()
    for i in range(n):
        bisect(a,i)
    t2 = time.time()
    bi = (t2-t1) + 1e-20  # protect against div by zero
    print(' speed of bisect:', bi)
    print(' speed up: %3.2f' % (py/bi))

    # get it in cache
    c_int_search(a,i)
    t1 = time.time()
    for i in range(n):
        c_int_search(a,i,chk=1)
    t2 = time.time()
    sp = (t2-t1)+1e-20  # protect against div by zero
    print(' speed in c:',sp)
    print(' speed up: %3.2f' % (py/sp))

    # get it in cache
    c_int_search(a,i)
    t1 = time.time()
    for i in range(n):
        c_int_search(a,i,chk=0)
    t2 = time.time()
    sp = (t2-t1)+1e-20  # protect against div by zero
    print(' speed in c(no asserts):',sp)
    print(' speed up: %3.2f' % (py/sp))

    # get it in cache
    c_int_search_scxx(a,i)
    t1 = time.time()
    for i in range(n):
        c_int_search_scxx(a,i,chk=1)
    t2 = time.time()
    sp = (t2-t1)+1e-20  # protect against div by zero
    print(' speed for scxx:',sp)
    print(' speed up: %3.2f' % (py/sp))

    # get it in cache
    c_int_search_scxx(a,i)
    t1 = time.time()
    for i in range(n):
        c_int_search_scxx(a,i,chk=0)
    t2 = time.time()
    sp = (t2-t1)+1e-20  # protect against div by zero
    print(' speed for scxx(no asserts):',sp)
    print(' speed up: %3.2f' % (py/sp))

    # get it in cache
    a = array(a)
    try:
        # numpy-array variant only exists when numpy imported successfully.
        a = array(a)
        c_array_int_search(a,i)
        t1 = time.time()
        for i in range(n):
            c_array_int_search(a,i)
        t2 = time.time()
        sp = (t2-t1)+1e-20  # protect against div by zero
        print(' speed in c(numpy arrays):',sp)
        print(' speed up: %3.2f' % (py/sp))
    except:
        pass
if __name__ == "__main__":
    # note bisect returns index+1 compared to other algorithms
    # NOTE(review): Python 2 -- range() returns a list here; `m` is also
    # read as a global by search_compare().
    m = 100000
    a = range(m)
    n = 50000
    search_compare(a,n)
    print('search(a,3450)', c_int_search(a,3450), py_int_search(a,3450), bisect(a,3450))
    print('search(a,-1)', c_int_search(a,-1), py_int_search(a,-1), bisect(a,-1))
    print('search(a,10001)', c_int_search(a,10001), py_int_search(a,10001),bisect(a,10001))
|
Just-D/panda3d | refs/heads/master | direct/src/directdevices/DirectRadamec.py | 12 | """ Class used to create and control radamec device """
from math import *
from direct.showbase.DirectObject import DirectObject
from DirectDeviceManager import *
from direct.directnotify import DirectNotifyGlobal
"""
TODO:
Handle interaction between widget, followSelectedTask and updateTask
"""
# ANALOGS
# Channel indices into the Radamec analog input list (self.aList).
RAD_PAN = 0
RAD_TILT = 1
RAD_ZOOM = 2
RAD_FOCUS = 3
class DirectRadamec(DirectObject):
    """Polls a Radamec analog device and exposes its channel values."""

    # Number of instances created so far; used to build unique task names.
    radamecCount = 0
    notify = DirectNotifyGlobal.directNotify.newCategory('DirectRadamec')

    def __init__(self, device = 'Analog0', nodePath = base.direct.camera):
        """Create the analog reader for *device* and start polling."""
        # See if device manager has been initialized
        if base.direct.deviceManager == None:
            base.direct.deviceManager = DirectDeviceManager()
        # Set name
        self.name = 'Radamec-' + repr(DirectRadamec.radamecCount)
        DirectRadamec.radamecCount += 1
        # Get analogs
        self.device = device
        self.analogs = base.direct.deviceManager.createAnalogs(self.device)
        self.numAnalogs = len(self.analogs)
        self.aList = [0, 0, 0, 0, 0, 0, 0, 0]
        # Radamec device max/mins
        # Note: These values change quite often, i.e. everytime
        #       you unplug the radamec cords, or jostle them too
        #       much.  For best results, re-record these values often.
        self.minRange = [-180.0, -90, 522517.0, 494762.0]
        self.maxRange = [180.0, 90, 547074.0, 533984.0]
        # Spawn update task
        self.enable()

    def enable(self):
        """(Re)start the polling task; any existing one is killed first."""
        # Kill existing task
        self.disable()
        # Update task
        taskMgr.add(self.updateTask, self.name + '-updateTask')

    def disable(self):
        taskMgr.remove(self.name + '-updateTask')

    def destroy(self):
        self.disable()

    def updateTask(self, state):
        """Per-frame task: copy current analog values into self.aList."""
        # Update analogs
        for i in range(len(self.analogs)):
            self.aList[i] = self.analogs.getControlState(i)
        return Task.cont

    def radamecDebug(self):
        """Log the raw channel values and the normalized pan/tilt."""
        panVal = self.normalizeChannel(RAD_PAN, -180, 180)
        tiltVal = self.normalizeChannel(RAD_TILT, -90, 90)
        self.notify.debug("PAN = %s" % self.aList[RAD_PAN])
        self.notify.debug("TILT = %s" % self.aList[RAD_TILT])
        self.notify.debug("ZOOM = %s" % self.aList[RAD_ZOOM])
        self.notify.debug("FOCUS = %s" % self.aList[RAD_FOCUS])
        self.notify.debug("Normalized: panVal: %s tiltVal: %s" % (panVal, tiltVal))

    # Normalize to the range [-minVal, maxVal] based on some hard-coded
    # max/min numbers of the Radamec device
    def normalizeChannel(self, chan, minVal = -1, maxVal = 1):
        """Map the raw value of *chan* linearly onto [minVal, maxVal],
        clamped to the recorded hardware range."""
        try:
            maxRange = self.maxRange[chan]
            minRange = self.minRange[chan]
        except IndexError:
            # NOTE: Python 2 raise syntax.
            raise RuntimeError, "can't normalize this channel (chanel %d)" % chan
        range = maxRange - minRange
        clampedVal = CLAMP(self.aList[chan], minRange, maxRange)
        return ((maxVal - minVal) * (clampedVal - minRange) / range) + minVal
|
BartoszCichecki/onlinepython | refs/heads/master | onlinepython/pypy-2.4.0-win32/lib-python/2.7/encodings/palmos.py | 647 | """ Python Character Mapping Codec for PalmOS 3.5.
Written by Sjoerd Mullender (sjoerd@acm.org); based on iso8859_15.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless codec: encode/decode via the module-level charmap tables."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)

class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is context-free, so no state carries between calls.
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]

class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Charmap decoding is context-free, so no state carries between calls.
        return codecs.charmap_decode(input,self.errors,decoding_map)[0]

class StreamWriter(Codec,codecs.StreamWriter):
    # Stream support comes entirely from Codec + codecs.StreamWriter.
    pass

class StreamReader(Codec,codecs.StreamReader):
    # Stream support comes entirely from Codec + codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the codec registry uses for 'palmos'."""
    return codecs.CodecInfo(
        name='palmos',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
# The PalmOS character set is mostly iso-8859-1 with some differences.
decoding_map.update({
0x0080: 0x20ac, # EURO SIGN
0x0082: 0x201a, # SINGLE LOW-9 QUOTATION MARK
0x0083: 0x0192, # LATIN SMALL LETTER F WITH HOOK
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x2020, # DAGGER
0x0087: 0x2021, # DOUBLE DAGGER
0x0088: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
0x0089: 0x2030, # PER MILLE SIGN
0x008a: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x008b: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
0x008c: 0x0152, # LATIN CAPITAL LIGATURE OE
0x008d: 0x2666, # BLACK DIAMOND SUIT
0x008e: 0x2663, # BLACK CLUB SUIT
0x008f: 0x2665, # BLACK HEART SUIT
0x0090: 0x2660, # BLACK SPADE SUIT
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: 0x02dc, # SMALL TILDE
0x0099: 0x2122, # TRADE MARK SIGN
0x009a: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x009c: 0x0153, # LATIN SMALL LIGATURE OE
0x009f: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
|
KenMutemi/saleor | refs/heads/master | saleor/graphql/api.py | 5 | import functools
import graphene
import operator
from django.db.models import Q
from graphene import relay
from graphene_django import DjangoObjectType, DjangoConnectionField
from graphene_django.debug import DjangoDebug
from django_prices.templatetags import prices_i18n
from ..product.models import (AttributeChoiceValue, Category, Product,
ProductAttribute, ProductImage, ProductVariant)
from ..product.utils import get_availability, products_for_api
from ..product.templatetags.product_images import product_first_image
from .scalars import AttributesFilterScalar
from .utils import (CategoryAncestorsCache, DjangoPkInterface)
# Attribute name under which the per-request cache dict is stored on the
# GraphQL context, and the key for the ancestors sub-cache inside it.
CONTEXT_CACHE_NAME = '__cache__'
CACHE_ANCESTORS = 'ancestors'


def get_ancestors_from_cache(category, context):
    """Return the ancestors of *category*, preferring the per-request cache.

    Falls back to the MPTT database query when the context carries no
    cache or the cache has no ancestors entry.
    """
    cache = getattr(context, CONTEXT_CACHE_NAME, None)
    if not cache or CACHE_ANCESTORS not in cache:
        return category.get_ancestors()
    return cache[CACHE_ANCESTORS].get(category)
class ProductAvailabilityType(graphene.ObjectType):
    # GraphQL type describing whether a product can be bought and at what
    # price; populated from the value returned by ``get_availability``
    # (see ProductType.resolve_availability).
    available = graphene.Boolean()
    discount = graphene.Field(lambda: PriceType)
    discount_local_currency = graphene.Field(lambda: PriceType)
    price_range = graphene.Field(lambda: PriceRangeType)
    price_range_undiscounted = graphene.Field(lambda: PriceRangeType)
    price_range_local_currency = graphene.Field(lambda: PriceRangeType)
class ProductType(DjangoObjectType):
    """GraphQL type for a storefront product."""
    url = graphene.String()
    thumbnail_url = graphene.String(
        size=graphene.Argument(
            graphene.String,
            description="The size of a thumbnail, for example 255x255"))
    images = graphene.List(lambda: ProductImageType)
    variants = graphene.List(lambda: ProductVariantType)
    availability = graphene.Field(lambda: ProductAvailabilityType)
    price = graphene.Field(lambda: PriceType)

    class Meta:
        model = Product
        interfaces = (relay.Node, DjangoPkInterface)

    def resolve_thumbnail_url(self, args, context, info):
        size = args.get('size')
        # Default thumbnail size when the client does not request one.
        if not size:
            size = '255x255'
        return product_first_image(self, size)

    def resolve_images(self, args, context, info):
        return self.images.all()

    def resolve_variants(self, args, context, info):
        return self.variants.all()

    def resolve_url(self, args, context, info):
        return self.get_absolute_url()

    def resolve_availability(self, args, context, info):
        # get_availability returns a namedtuple; unpack it into the
        # graphene object field-by-field.
        a = get_availability(self, context.discounts, context.currency)
        return ProductAvailabilityType(**a._asdict())
class CategoryType(DjangoObjectType):
    """GraphQL type for a product category, including a filterable,
    paginated product listing over the whole category subtree."""
    products = DjangoConnectionField(
        ProductType,
        attributes=graphene.Argument(
            graphene.List(AttributesFilterScalar),
            description="""A list of attribute:value pairs to filter
                the products by"""),
        order_by=graphene.Argument(
            graphene.String,
            description="""A name of field to sort the products by. The negative
                sign in front of name implies descending order."""),
        price_lte=graphene.Argument(
            graphene.Float, description="""Get the products with price lower
                than or equal to the given value"""),
        price_gte=graphene.Argument(
            graphene.Float, description="""Get the products with price greater
                than or equal to the given value"""))
    products_count = graphene.Int()
    url = graphene.String()
    ancestors = graphene.List(lambda: CategoryType)
    children = graphene.List(lambda: CategoryType)
    siblings = graphene.List(lambda: CategoryType)

    class Meta:
        model = Category
        interfaces = (relay.Node, DjangoPkInterface)

    def resolve_ancestors(self, args, context, info):
        # Served from the per-request cache when resolve_category primed it.
        return get_ancestors_from_cache(self, context)

    def resolve_children(self, args, context, info):
        return self.children.all()

    def resolve_siblings(self, args, context, info):
        return self.get_siblings()

    def resolve_products_count(self, args, context, info):
        return self.products.count()

    def resolve_url(self, args, context, info):
        ancestors = get_ancestors_from_cache(self, context)
        return self.get_absolute_url(ancestors)

    def resolve_products(self, args, context, info):
        # Prices live on availability objects, not DB columns, so price
        # bounds are applied in Python; note this materializes the
        # queryset into a plain list.
        def filter_by_price(queryset, value, operator):
            return [obj for obj in queryset if operator(get_availability(
                obj, context.discounts).price_range.min_price.gross, value)]
        # Include products of this category and every descendant category.
        tree = self.get_descendants(include_self=True)
        qs = products_for_api(context.user)
        qs = qs.filter(categories__in=tree)
        attributes_filter = args.get('attributes')
        order_by = args.get('order_by')
        price_lte = args.get('price_lte')
        price_gte = args.get('price_gte')
        if attributes_filter:
            # Translate attribute/value slugs from the query into their pks.
            attributes = ProductAttribute.objects.prefetch_related('values')
            attributes_map = {attribute.slug: attribute.pk
                              for attribute in attributes}
            values_map = {attr.slug: {value.slug: value.pk
                                      for value in attr.values.all()}
                          for attr in attributes}
            queries = {}
            # Convert attribute:value pairs into a dictionary where
            # attributes are keys and values are grouped in lists
            for attr_name, val_slug in attributes_filter:
                try:
                    attr_pk = attributes_map[attr_name]
                except KeyError:
                    attr_pk = None
                else:
                    try:
                        attr_val_pk = values_map[attr_name][val_slug]
                    except KeyError:
                        attr_val_pk = None
                    else:
                        if attr_val_pk is not None and attr_pk not in queries:
                            queries[attr_pk] = [attr_val_pk]
                        else:
                            queries[attr_pk].append(attr_val_pk)
            if queries:
                # Combine filters of the same attribute with OR operator
                # and then combine full query with AND operator.
                combine_and = [functools.reduce(operator.or_, [
                    Q(**{'variants__attributes__%s' % key: v}) |
                    Q(**{'attributes__%s' % key: v})
                    for v in values]) for key, values in queries.items()]
                query = functools.reduce(operator.and_, combine_and)
                qs = qs.filter(query).distinct()
        if order_by:
            qs = qs.order_by(order_by)
        if price_lte:
            qs = filter_by_price(qs, price_lte, operator.le)
        if price_gte:
            qs = filter_by_price(qs, price_gte, operator.ge)
        return qs
class ProductVariantType(DjangoObjectType):
    """GraphQL type for a concrete, purchasable variant of a product."""
    stock_quantity = graphene.Int()
    price_override = graphene.Field(lambda: PriceType)

    class Meta:
        model = ProductVariant
        interfaces = (relay.Node, DjangoPkInterface)

    def resolve_stock_quantity(self, args, context, info):
        return self.get_stock_quantity()


class ProductImageType(DjangoObjectType):
    """GraphQL type for a product image, optionally cropped to a size."""
    url = graphene.String(size=graphene.String())

    class Meta:
        model = ProductImage
        interfaces = (relay.Node, DjangoPkInterface)

    def resolve_url(self, args, context, info):
        size = args.get('size')
        # With no size argument, fall back to the original image URL.
        if size:
            return self.image.crop[size].url
        return self.image.url


class ProductAttributeValue(DjangoObjectType):
    """GraphQL type for one selectable value of a product attribute."""
    class Meta:
        model = AttributeChoiceValue
        interfaces = (relay.Node, DjangoPkInterface)


class ProductAttributeType(DjangoObjectType):
    """GraphQL type for a product attribute and its possible values."""
    values = graphene.List(lambda: ProductAttributeValue)

    class Meta:
        model = ProductAttribute
        interfaces = (relay.Node, DjangoPkInterface)

    def resolve_values(self, args, context, info):
        return self.values.all()


class PriceType(graphene.ObjectType):
    """Money amount with gross/net values plus locale-formatted strings."""
    currency = graphene.String()
    gross = graphene.Float()
    gross_localized = graphene.String()
    net = graphene.Float()
    net_localized = graphene.String()

    def resolve_gross_localized(self, args, context, info):
        return prices_i18n.gross(self)

    def resolve_net_localized(self, args, context, info):
        return prices_i18n.net(self)


class PriceRangeType(graphene.ObjectType):
    """Inclusive min/max price pair."""
    max_price = graphene.Field(lambda: PriceType)
    min_price = graphene.Field(lambda: PriceType)
class Query(graphene.ObjectType):
    """Root GraphQL query type."""
    attributes = graphene.List(
        ProductAttributeType,
        category_pk=graphene.Argument(graphene.Int, required=False))
    category = graphene.Field(
        CategoryType,
        pk=graphene.Argument(graphene.Int, required=True))
    node = relay.Node.Field()
    root = graphene.Field(lambda: Query)
    debug = graphene.Field(DjangoDebug, name='__debug')

    def resolve_category(self, args, context, info):
        categories = Category.tree.filter(pk=args.get('pk')).get_cached_trees()
        if categories:
            category = categories[0]
            # Prime the per-request ancestors cache consumed by
            # get_ancestors_from_cache in the CategoryType resolvers.
            cache = {CACHE_ANCESTORS: CategoryAncestorsCache(category)}
            setattr(context, CONTEXT_CACHE_NAME, cache)
            return category
        return None

    def resolve_attributes(self, args, context, info):
        category_pk = args.get('category_pk')
        queryset = ProductAttribute.objects.prefetch_related('values')
        if category_pk:
            # Get attributes that are used with product classes
            # within the given category.
            tree = Category.objects.get(
                pk=category_pk).get_descendants(include_self=True)
            product_classes = set(
                [obj[0] for obj in Product.objects.filter(
                    categories__in=tree).values_list('product_class_id')])
            queryset = queryset.filter(
                Q(products_class__in=product_classes) |
                Q(product_variants_class__in=product_classes))
        return queryset.distinct()

    def resolve_root(self, args, context, info):
        # Re-expose the root query object. Workaround for the issue in Relay:
        # https://github.com/facebook/relay/issues/112
        return Query()
# Module-level schema object wired to the root Query type.
schema = graphene.Schema(Query)
|
Dunkas12/BeepBoopBot | refs/heads/master | lib/chardet/langhungarianmodel.py | 2762 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin2_HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 71, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,
175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 75,198,199,200,201,202,203,204,205,
79,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 81,222, 78,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 69, 63,239,240,241,
82, 14, 74,242, 70, 80,243, 72,244, 15, 83, 77, 84, 30, 76, 85,
245,246,247, 25, 73, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
win1250HungarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 28, 40, 54, 45, 32, 50, 49, 38, 39, 53, 36, 41, 34, 35, 47,
46, 72, 43, 33, 37, 57, 48, 64, 68, 55, 52,253,253,253,253,253,
253, 2, 18, 26, 17, 1, 27, 12, 20, 9, 22, 7, 6, 13, 4, 8,
23, 67, 10, 5, 3, 21, 19, 65, 62, 16, 11,253,253,253,253,253,
161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,
177,178,179,180, 78,181, 69,182,183,184,185,186,187,188,189,190,
191,192,193,194,195,196,197, 76,198,199,200,201,202,203,204,205,
81,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,
221, 51, 83,222, 80,223,224,225,226, 44,227,228,229, 61,230,231,
232,233,234, 58,235, 66, 59,236,237,238, 60, 70, 63,239,240,241,
84, 14, 75,242, 71, 82,243, 73,244, 15, 85, 79, 86, 30, 77, 87,
245,246,247, 25, 74, 42, 24,248,249,250, 31, 56, 29,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 94.7368%
# first 1024 sequences:5.2623%
# rest sequences: 0.8894%
# negative sequences: 0.0009%
HungarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,2,3,3,1,1,2,2,2,2,2,1,2,
3,2,2,3,3,3,3,3,2,3,3,3,3,3,3,1,2,3,3,3,3,2,3,3,1,1,3,3,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,
3,2,1,3,3,3,3,3,2,3,3,3,3,3,1,1,2,3,3,3,3,3,3,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,2,3,3,3,1,3,3,3,3,3,1,3,3,2,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,3,3,2,3,3,2,2,3,2,3,2,0,3,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,3,3,2,3,3,3,1,2,3,2,2,3,1,2,3,3,2,2,0,3,3,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,3,2,3,3,3,3,2,3,3,3,3,0,2,3,2,
0,0,0,1,1,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,3,3,2,1,3,2,2,3,2,1,3,2,2,1,0,3,3,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,2,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,3,2,2,3,1,1,3,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,1,3,3,3,3,3,2,2,1,3,3,3,0,1,1,2,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,3,3,3,2,0,3,2,3,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,1,3,2,2,2,3,1,1,3,3,1,1,0,3,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,2,3,3,3,2,3,2,3,3,3,2,3,3,3,3,3,1,2,3,2,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,1,3,3,2,2,1,3,3,3,1,1,3,1,2,3,2,3,2,2,2,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,2,1,3,3,3,2,2,3,2,1,0,3,2,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,3,3,3,3,3,1,2,3,3,3,3,1,1,0,3,3,3,3,0,2,3,0,0,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,2,2,3,3,2,2,2,2,3,3,0,1,2,3,2,3,2,2,3,2,1,2,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,
3,3,3,3,3,3,1,2,3,3,3,2,1,2,3,3,2,2,2,3,2,3,3,1,3,3,1,1,0,2,3,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,2,2,2,2,3,3,3,1,1,1,3,3,1,1,3,1,1,3,2,1,2,3,1,1,0,2,2,2,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,1,2,1,1,3,3,1,1,1,1,3,3,1,1,2,2,1,2,1,1,2,2,1,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,1,1,2,1,1,3,3,1,0,1,1,3,3,2,0,1,1,2,3,1,0,2,2,1,0,0,1,3,2,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,2,1,3,3,3,3,3,1,2,3,2,3,3,2,1,1,3,2,3,2,1,2,2,0,1,2,1,0,0,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,2,2,2,3,1,2,2,1,1,3,3,0,3,2,1,2,3,2,1,3,3,1,1,0,2,1,3,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,3,3,2,2,2,3,2,3,3,3,2,1,1,3,3,1,1,1,2,2,3,2,3,2,2,2,1,0,2,2,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,0,3,3,3,3,3,0,0,3,3,2,3,0,0,0,2,3,3,1,0,1,2,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,2,3,3,3,3,3,1,2,3,3,2,2,1,1,0,3,3,2,2,1,2,2,1,0,2,2,0,1,1,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,2,1,3,1,2,3,3,2,2,1,1,2,2,1,1,1,1,3,2,1,1,1,1,2,1,0,1,2,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
2,3,3,1,1,1,1,1,3,3,3,0,1,1,3,3,1,1,1,1,1,2,2,0,3,1,1,2,0,2,1,1,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,
3,1,0,1,2,1,2,2,0,1,2,3,1,2,0,0,0,2,1,1,1,1,1,2,0,0,1,1,0,0,0,0,
1,2,1,2,2,2,1,2,1,2,0,2,0,2,2,1,1,2,1,1,2,1,1,1,0,1,0,0,0,1,1,0,
1,1,1,2,3,2,3,3,0,1,2,2,3,1,0,1,0,2,1,2,2,0,1,1,0,0,1,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,3,3,2,2,1,0,0,3,2,3,2,0,0,0,1,1,3,0,0,1,1,0,0,2,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,1,0,1,3,2,3,1,1,1,0,1,1,1,1,1,3,1,0,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,2,2,2,1,0,1,2,3,3,2,0,0,0,2,1,1,1,2,1,1,1,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,2,1,1,1,1,1,1,0,1,1,1,0,0,1,1,
3,2,2,1,0,0,1,1,2,2,0,3,0,1,2,1,1,0,0,1,1,1,0,1,1,1,1,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,1,1,1,1,1,2,1,1,1,2,3,1,1,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,3,3,1,0,0,1,2,2,1,0,0,0,0,2,0,0,1,1,1,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,1,0,1,1,0,1,1,1,0,1,2,1,1,0,1,1,1,1,1,1,1,0,1,
2,3,3,0,1,0,0,0,2,2,0,0,0,0,1,2,2,0,0,0,0,1,0,0,1,1,0,0,2,0,1,0,
2,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
3,2,2,0,1,0,1,0,2,3,2,0,0,1,2,2,1,0,0,1,1,1,0,0,2,1,0,1,2,2,1,1,
2,1,1,1,1,1,1,2,1,1,1,1,1,1,0,2,1,0,1,1,0,1,1,1,0,1,1,2,1,1,0,1,
2,2,2,0,0,1,0,0,2,2,1,1,0,0,2,1,1,0,0,0,1,2,0,0,2,1,0,0,2,1,1,1,
2,1,1,1,1,2,1,2,1,1,1,2,2,1,1,2,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,
1,2,3,0,0,0,1,0,3,2,1,0,0,1,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,2,1,
1,1,0,0,0,1,0,1,1,1,1,1,2,0,0,1,0,0,0,2,0,0,1,1,1,1,1,1,1,1,0,1,
3,0,0,2,1,2,2,1,0,0,2,1,2,2,0,0,0,2,1,1,1,0,1,1,0,0,1,1,2,0,0,0,
1,2,1,2,2,1,1,2,1,2,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,0,0,1,
1,3,2,0,0,0,1,0,2,2,2,0,0,0,2,2,1,0,0,0,0,3,1,1,1,1,0,0,2,1,1,1,
2,1,0,1,1,1,0,1,1,1,1,1,1,1,0,2,1,0,0,1,0,1,1,0,1,1,1,1,1,1,0,1,
2,3,2,0,0,0,1,0,2,2,0,0,0,0,2,1,1,0,0,0,0,2,1,0,1,1,0,0,2,1,1,0,
2,1,1,1,1,2,1,2,1,2,0,1,1,1,0,2,1,1,1,2,1,1,1,1,0,1,1,1,1,1,0,1,
3,1,1,2,2,2,3,2,1,1,2,2,1,1,0,1,0,2,2,1,1,1,1,1,0,0,1,1,0,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,0,0,0,0,0,2,2,0,0,0,0,2,2,1,0,0,0,1,1,0,0,1,2,0,0,2,1,1,1,
2,2,1,1,1,2,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,1,1,0,1,2,1,1,1,0,1,
1,0,0,1,2,3,2,1,0,0,2,0,1,1,0,0,0,1,1,1,1,0,1,1,0,0,1,0,0,0,0,0,
1,2,1,2,1,2,1,1,1,2,0,2,1,1,1,0,1,2,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,3,2,0,0,0,0,0,1,1,2,1,0,0,1,1,1,0,0,0,0,2,0,0,1,1,0,0,2,1,1,1,
2,1,1,1,1,1,1,2,1,0,1,1,1,1,0,2,1,1,1,1,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,1,1,1,0,2,2,2,0,0,0,3,2,1,0,0,0,1,1,0,0,1,1,0,1,1,1,0,0,
1,1,0,1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,2,1,1,1,0,0,1,1,1,0,1,0,1,
2,1,0,2,1,1,2,2,1,1,2,1,1,1,0,0,0,1,1,0,1,1,1,1,0,0,1,1,1,0,0,0,
1,2,2,2,2,2,1,1,1,2,0,2,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,1,0,
1,2,3,0,0,0,1,0,2,2,0,0,0,0,2,2,0,0,0,0,0,1,0,0,1,0,0,0,2,0,1,0,
2,1,1,1,1,1,0,2,0,0,0,1,2,1,1,1,1,0,1,2,0,1,0,1,0,1,1,1,0,1,0,1,
2,2,2,0,0,0,1,0,2,1,2,0,0,0,1,1,2,0,0,0,0,1,0,0,1,1,0,0,2,1,0,1,
2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,1,0,2,2,2,0,0,0,1,1,0,0,0,0,0,1,1,0,2,0,0,1,1,1,0,1,
1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,0,0,1,1,0,1,0,1,1,1,1,1,0,0,0,1,
1,0,0,1,0,1,2,1,0,0,1,1,1,2,0,0,0,1,1,0,1,0,1,1,0,0,1,0,0,0,0,0,
0,2,1,2,1,1,1,1,1,2,0,2,0,1,1,0,1,2,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,0,1,2,0,0,1,1,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,0,0,0,2,1,0,1,
2,2,1,1,1,1,1,2,1,1,0,1,1,1,1,2,1,1,1,2,1,1,0,1,0,1,1,1,1,1,0,1,
1,2,2,0,0,0,0,0,1,1,0,0,0,0,2,1,0,0,0,0,0,2,0,0,2,2,0,0,2,0,0,1,
2,1,1,1,1,1,1,1,0,1,1,0,1,1,0,1,0,0,0,1,1,1,1,0,0,1,1,1,1,0,0,1,
1,1,2,0,0,3,1,0,2,1,1,1,0,0,1,1,1,0,0,0,1,1,0,0,0,1,0,0,1,0,1,0,
1,2,1,0,1,1,1,2,1,1,0,1,1,1,1,1,0,0,0,1,1,1,1,1,0,1,0,0,0,1,0,0,
2,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,2,0,0,0,
2,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,2,1,1,0,0,1,1,1,1,1,0,1,
2,1,1,1,2,1,1,1,0,1,1,2,1,0,0,0,0,1,1,1,1,0,1,0,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,1,1,1,1,0,0,1,1,2,1,0,0,0,1,1,0,0,0,1,1,0,0,1,0,1,0,0,0,
1,2,1,1,1,1,1,1,1,1,0,1,0,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,
2,0,0,0,1,1,1,1,0,0,1,1,0,0,0,0,0,1,1,1,2,0,0,1,0,0,1,0,1,0,0,0,
0,1,1,1,1,1,1,1,1,2,0,1,1,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,0,0,2,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,1,0,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,0,0,1,1,0,1,0,1,0,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,1,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,0,1,0,0,1,1,0,1,0,1,1,0,1,1,1,0,1,1,1,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,0,0,0,1,1,1,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,1,1,1,1,1,0,1,1,0,1,0,1,0,0,1,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
)
# Model descriptors consumed by chardet's single-byte charset prober: the
# char-to-order map folds raw bytes into frequency ranks, the precedence
# matrix scores two-character sequences, and mTypicalPositiveRatio is the
# "first 512 sequences" figure computed above.
Latin2HungarianModel = {
  'charToOrderMap': Latin2_HungarianCharToOrderMap,
  'precedenceMatrix': HungarianLangModel,
  'mTypicalPositiveRatio': 0.947368,
  'keepEnglishLetter': True,
  'charsetName': "ISO-8859-2"
}

# Same language model exposed under the windows-1250 byte mapping.
Win1250HungarianModel = {
  'charToOrderMap': win1250HungarianCharToOrderMap,
  'precedenceMatrix': HungarianLangModel,
  'mTypicalPositiveRatio': 0.947368,
  'keepEnglishLetter': True,
  'charsetName': "windows-1250"
}
# flake8: noqa
|
Brahimbaz/venom-xbmc-addons-beta | refs/heads/master | plugin.video.vstream/resources/lib/handler/__init__.py | 12133432 | |
AltSchool/django | refs/heads/master | django/conf/locale/ja/__init__.py | 12133432 | |
nirmeshk/oh-mainline | refs/heads/master | vendor/packages/django-registration/registration/__init__.py | 12133432 | |
huntxu/neutron | refs/heads/master | neutron/tests/unit/services/trunk/rpc/__init__.py | 12133432 | |
Jetsly/pjsip-csharp | refs/heads/master | tests/pjsua/scripts-sipp/uas-cancel-no-final.py | 41 | # $Id: uas-cancel-no-final.py 4188 2012-06-29 09:01:17Z nanang $
#
import inc_const as const
# Command line for the single pjsua instance under test; $SIPP_URI is
# substituted with the SIPp target by the test runner.
PJSUA = ["--null-audio --max-calls=1 $SIPP_URI"]
# Expected sequence for instance 0: wait for the EARLY call state, then
# send "h" (presumably the hangup command key -- confirm against the
# runner's key handling).
PJSUA_EXPECTS = [[0, const.STATE_EARLY, "h"]]
|
rsvip/Django | refs/heads/master | django/template/loaders/cached.py | 313 | """
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
"""
import hashlib
import warnings
from django.template import Origin, Template, TemplateDoesNotExist
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_bytes
from django.utils.inspect import func_supports_parameter
from .base import Loader as BaseLoader
class Loader(BaseLoader):
    # Caching wrapper over a list of template loaders; tries each in order
    # and memoizes both successful lookups and TemplateDoesNotExist.

    def __init__(self, engine, loaders):
        self.template_cache = {}
        self.find_template_cache = {}  # RemovedInDjango20Warning
        self.get_template_cache = {}
        self.loaders = engine.get_template_loaders(loaders)
        super(Loader, self).__init__(engine)

    def get_contents(self, origin):
        # Delegate to whichever loader produced this origin.
        return origin.loader.get_contents(origin)

    def get_template(self, template_name, template_dirs=None, skip=None):
        key = self.cache_key(template_name, template_dirs, skip)
        cached = self.get_template_cache.get(key)
        if cached:
            # Failures are cached as the exception instance itself.
            if isinstance(cached, TemplateDoesNotExist):
                raise cached
            return cached
        try:
            template = super(Loader, self).get_template(
                template_name, template_dirs, skip,
            )
        except TemplateDoesNotExist as e:
            # Cache the miss so repeat lookups skip the loaders entirely.
            self.get_template_cache[key] = e
            raise
        else:
            self.get_template_cache[key] = template
        return template

    def get_template_sources(self, template_name, template_dirs=None):
        for loader in self.loaders:
            args = [template_name]
            # RemovedInDjango20Warning: Add template_dirs for compatibility
            # with old loaders
            if func_supports_parameter(loader.get_template_sources, 'template_dirs'):
                args.append(template_dirs)
            for origin in loader.get_template_sources(*args):
                yield origin

    def cache_key(self, template_name, template_dirs, skip=None):
        """
        Generate a cache key for the template name, dirs, and skip.

        If skip is provided, only origins that match template_name are included
        in the cache key. This ensures each template is only parsed and cached
        once if contained in different extend chains like:

            x -> a -> a
            y -> a -> a
            z -> a -> a
        """
        dirs_prefix = ''
        skip_prefix = ''
        if skip:
            matching = [origin.name for origin in skip if origin.template_name == template_name]
            if matching:
                skip_prefix = self.generate_hash(matching)
        if template_dirs:
            dirs_prefix = self.generate_hash(template_dirs)
        return ("%s-%s-%s" % (template_name, skip_prefix, dirs_prefix)).strip('-')

    def generate_hash(self, values):
        # SHA-1 is used only as a compact cache-key digest, not for security.
        return hashlib.sha1(force_bytes('|'.join(values))).hexdigest()

    @property
    def supports_recursion(self):
        """
        RemovedInDjango20Warning: This is an internal property used by the
        ExtendsNode during the deprecation of non-recursive loaders.
        """
        return all(hasattr(loader, 'get_contents') for loader in self.loaders)

    def find_template(self, name, dirs=None):
        """
        RemovedInDjango20Warning: An internal method to lookup the template
        name in all the configured loaders.
        """
        key = self.cache_key(name, dirs)
        try:
            result = self.find_template_cache[key]
        except KeyError:
            result = None
            for loader in self.loaders:
                try:
                    template, display_name = loader(name, dirs)
                except TemplateDoesNotExist:
                    pass
                else:
                    origin = Origin(
                        name=display_name,
                        template_name=name,
                        loader=loader,
                    )
                    result = template, origin
                    break
            self.find_template_cache[key] = result
        if result:
            return result
        else:
            # Cache the class itself as the failure sentinel (checked with
            # 'is' in load_template).
            self.template_cache[key] = TemplateDoesNotExist
            raise TemplateDoesNotExist(name)

    def load_template(self, template_name, template_dirs=None):
        warnings.warn(
            'The load_template() method is deprecated. Use get_template() '
            'instead.', RemovedInDjango20Warning,
        )
        key = self.cache_key(template_name, template_dirs)
        template_tuple = self.template_cache.get(key)
        # A cached previous failure:
        if template_tuple is TemplateDoesNotExist:
            raise TemplateDoesNotExist
        elif template_tuple is None:
            template, origin = self.find_template(template_name, template_dirs)
            if not hasattr(template, 'render'):
                try:
                    template = Template(template, origin, template_name, self.engine)
                except TemplateDoesNotExist:
                    # If compiling the template we found raises TemplateDoesNotExist,
                    # back off to returning the source and display name for the template
                    # we were asked to load. This allows for correct identification (later)
                    # of the actual template that does not exist.
                    self.template_cache[key] = (template, origin)
            self.template_cache[key] = (template, None)
        return self.template_cache[key]

    def reset(self):
        "Empty the template cache."
        self.template_cache.clear()
        self.find_template_cache.clear()  # RemovedInDjango20Warning
        self.get_template_cache.clear()
|
LazyCodingCat/gyp | refs/heads/master | test/ios/gyptest-per-config-settings.py | 193 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that device and simulator bundles are built correctly.
"""
import plistlib
import TestGyp
import os
import struct
import subprocess
import sys
import tempfile
def CheckFileType(file, expected):
  """Fail the test unless `lipo -info` reports `expected` (an architecture
  string such as 'armv7' or 'i386') for the given binary."""
  proc = subprocess.Popen(['lipo', '-info', file], stdout=subprocess.PIPE)
  o = proc.communicate()[0].strip()
  assert not proc.returncode
  if not expected in o:
    print 'File: Expected %s, got %s' % (expected, o)
    test.fail_test()

def HasCerts():
  """Return True if at least one codesigning identity is installed."""
  # Because the bots do not have certs, don't check them if there are no
  # certs available.
  proc = subprocess.Popen(['security','find-identity','-p', 'codesigning',
                           '-v'], stdout=subprocess.PIPE)
  return "0 valid identities found" not in proc.communicate()[0].strip()

def CheckSignature(file):
  """Fail the test if `codesign -v` reports the file as unsigned."""
  proc = subprocess.Popen(['codesign', '-v', file], stdout=subprocess.PIPE)
  o = proc.communicate()[0].strip()
  assert not proc.returncode
  if "code object is not signed at all" in o:
    print 'File %s not properly signed.' % (file)
    test.fail_test()

def CheckEntitlements(file, expected_entitlements):
  """Extract the embedded entitlements blob via codesign into a temp file
  and compare the parsed payload against `expected_entitlements`."""
  with tempfile.NamedTemporaryFile() as temp:
    proc = subprocess.Popen(['codesign', '--display', '--entitlements',
                             temp.name, file], stdout=subprocess.PIPE)
    o = proc.communicate()[0].strip()
    assert not proc.returncode
    data = temp.read()
  entitlements = ParseEntitlements(data)
  if not entitlements:
    print 'No valid entitlements found in %s.' % (file)
    test.fail_test()
  if entitlements != expected_entitlements:
    print 'Unexpected entitlements found in %s.' % (file)
    test.fail_test()
def ParseEntitlements(data):
  """Return the payload of an entitlements blob, or None if the 8-byte
  (magic, length) big-endian header is absent or inconsistent."""
  header_size = 8
  if len(data) < header_size:
    return None
  magic, declared_length = struct.unpack('>II', data[:header_size])
  if magic != 0xfade7171:
    return None
  if declared_length != len(data):
    return None
  return data[header_size:]
def GetProductVersion():
  """Return the iphoneos SDK's ProductVersion string, via xcodebuild."""
  command = ['xcodebuild', '-version', '-sdk', 'iphoneos', 'ProductVersion']
  proc = subprocess.Popen(command, stdout=subprocess.PIPE)
  return proc.communicate()[0].strip()
def CheckPlistvalue(plist, key, expected):
if key not in plist:
print '%s not set in plist' % key
test.fail_test()
return
actual = plist[key]
if actual != expected:
print 'File: Expected %s, got %s for %s' % (expected, actual, key)
test.fail_test()
def CheckPlistNotSet(plist, key):
if key in plist:
print '%s should not be set in plist' % key
test.fail_test()
return
def ConvertBinaryPlistToXML(path):
  """Convert the plist at |path| from binary to XML format, in place."""
  # The return code of call() was bound to an unused local; drop it.
  subprocess.call(['plutil', '-convert', 'xml1', path],
                  stdout=subprocess.PIPE)
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['ninja', 'xcode'])

  test.run_gyp('test-device.gyp', chdir='app-bundle')

  test_configs = ['Default-iphoneos', 'Default']
  # TODO(justincohen): Disabling 'Default-iphoneos' for xcode until bots are
  # configured with signing certs.
  if test.format == 'xcode':
    test_configs.remove('Default-iphoneos')

  # Build and verify the app bundle once per configuration.
  for configuration in test_configs:
    test.set_configuration(configuration)
    test.build('test-device.gyp', 'test_app', chdir='app-bundle')
    result_file = test.built_file_path('Test App Gyp.bundle/Test App Gyp',
                                       chdir='app-bundle')
    test.must_exist(result_file)

    info_plist = test.built_file_path('Test App Gyp.bundle/Info.plist',
                                      chdir='app-bundle')
    # plistlib doesn't support binary plists, but that's what Xcode creates.
    if test.format == 'xcode':
      ConvertBinaryPlistToXML(info_plist)
    plist = plistlib.readPlist(info_plist)

    CheckPlistvalue(plist, 'UIDeviceFamily', [1, 2])

    if configuration == 'Default-iphoneos':
      # Device build: armv7 binary targeting the iPhoneOS platform.
      CheckFileType(result_file, 'armv7')
      CheckPlistvalue(plist, 'DTPlatformVersion', GetProductVersion())
      CheckPlistvalue(plist, 'CFBundleSupportedPlatforms', ['iPhoneOS'])
      CheckPlistvalue(plist, 'DTPlatformName', 'iphoneos')
    else:
      # Simulator build: i386 binary targeting the iPhoneSimulator platform.
      CheckFileType(result_file, 'i386')
      CheckPlistNotSet(plist, 'DTPlatformVersion')
      CheckPlistvalue(plist, 'CFBundleSupportedPlatforms', ['iPhoneSimulator'])
      CheckPlistvalue(plist, 'DTPlatformName', 'iphonesimulator')

    # Signing checks run only for the device build, and only when
    # codesigning identities are actually installed (see HasCerts).
    if HasCerts() and configuration == 'Default-iphoneos':
      test.build('test-device.gyp', 'sig_test', chdir='app-bundle')
      result_file = test.built_file_path('sig_test.bundle/sig_test',
                                         chdir='app-bundle')
      CheckSignature(result_file)
      info_plist = test.built_file_path('sig_test.bundle/Info.plist',
                                        chdir='app-bundle')
      plist = plistlib.readPlist(info_plist)
      CheckPlistvalue(plist, 'UIDeviceFamily', [1])
      entitlements_file = test.built_file_path('sig_test.xcent',
                                               chdir='app-bundle')
      # Skip the entitlements comparison when no .xcent file was produced
      # at this path.
      if os.path.isfile(entitlements_file):
        expected_entitlements = open(entitlements_file).read()
        CheckEntitlements(result_file, expected_entitlements)

  test.pass_test()
|
mvexel/mr-tnav | refs/heads/master | maproulette/models.py | 1 | # """This file contains the SQLAlchemy ORM models"""
from sqlalchemy import create_engine, and_, or_
from sqlalchemy.orm import synonym
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
from sqlalchemy.ext.declarative import declarative_base
from flask.ext.sqlalchemy import SQLAlchemy
from geoalchemy2.types import Geometry
from geoalchemy2.shape import from_shape, to_shape
import random
from datetime import datetime
from maproulette import app
from flask import session
from shapely.geometry import Polygon
import pytz
# set up the ORM engine and database object
engine = create_engine(app.config['SQLALCHEMY_DATABASE_URI'],
                       convert_unicode=True)
Base = declarative_base()
db = SQLAlchemy(app)

# seed the PRNG that getrandom() draws from (used as a task ordering key)
random.seed()

# polygon covering the whole world; used when a challenge has no geometry
world_polygon = Polygon([
    (-180, -90),
    (-180, 90),
    (180, 90),
    (180, -90),
    (-180, -90)])
def getrandom():
    """Return a uniform random float in [0, 1).

    Used as the column default for Task.random so tasks can be handed
    out in random order.
    """
    return random.random()
class User(db.Model):
    """A MapRoulette User"""
    __tablename__ = 'users'

    # NOTE(review): elsewhere session['osm_id'] is stored in Action.user_id,
    # so this id appears to be the OSM user id rather than autogenerated --
    # confirm.
    id = db.Column(
        db.Integer,
        unique=True,
        primary_key=True,
        nullable=False)
    # OAuth credentials for acting on the user's behalf
    oauth_token = db.Column(
        db.String)
    oauth_secret = db.Column(
        db.String)
    display_name = db.Column(
        db.String,
        nullable=False)
    home_location = db.Column(
        Geometry('POINT', management=True))
    # presumably a delimited list of language codes -- confirm against the
    # code that populates it
    languages = db.Column(
        db.String)
    changeset_count = db.Column(
        db.Integer)
    last_changeset_id = db.Column(
        db.Integer)
    last_changeset_date = db.Column(
        db.DateTime)
    last_changeset_bbox = db.Column(
        Geometry('POLYGON',
                 management=True))
    osm_account_created = db.Column(
        db.DateTime)
    # preferred challenge difficulty (cf. Challenge.difficulty)
    difficulty = db.Column(
        db.SmallInteger)

    def __unicode__(self):
        return self.display_name
class Challenge(db.Model):
    """A MapRoulette Challenge"""
    __tablename__ = 'challenges'

    id = db.Column(
        db.Integer,
        unique=True,
        primary_key=True,
        nullable=False)
    # human-readable unique key; also the foreign-key target of Task
    slug = db.Column(
        db.String(72),
        unique=True,
        primary_key=True,
        nullable=False)
    title = db.Column(
        db.String(128),
        nullable=False)
    description = db.Column(
        db.String,
        default="")
    blurb = db.Column(
        db.String,
        default="")
    # bounding polygon; None is treated as "worldwide" (see polygon below)
    geom = db.Column(
        Geometry('POLYGON'))
    help = db.Column(
        db.String,
        default="")
    instruction = db.Column(
        db.String,
        default="")
    active = db.Column(
        db.Boolean,
        nullable=False)
    # 1 = easiest; matched against User.difficulty
    difficulty = db.Column(
        db.SmallInteger,
        nullable=False,
        default=1)
    tasks = db.relationship(
        "Task",
        backref="challenges")
    type = db.Column(
        db.String,
        default='default',
        nullable=False)

    ## @validates('slug')
    ## def validate_slug(self, key, slug):
    ##    app.logger.debug("Slug passed in: " + slug)
    ##    app.logger.debug("Type: " + type(slug))
    ##    assert match('^[a-z0-9]+$', str(slug))
    ##    return slug

    # note that spatial indexes seem to be created automagically
    def __init__(self,
                 slug,
                 title,
                 geometry=None,
                 description=None,
                 blurb=None,
                 help=None,
                 instruction=None,
                 active=None,
                 difficulty=None):
        """Create a challenge; defaults to inactive and worldwide."""
        if geometry is None:
            geometry = world_polygon
        if active is None:
            active = False
        self.slug = slug
        self.title = title
        # NOTE(review): 'geometry' is not a mapped attribute on this class
        # (the synonym below is named 'polygon'), so this assignment likely
        # never reaches the 'geom' column -- confirm; 'self.polygon =
        # geometry' looks like the intent.
        self.geometry = from_shape(geometry)
        self.description = description
        self.blurb = blurb
        self.help = help
        self.instruction = instruction
        self.active = active
        self.difficulty = difficulty

    def __unicode__(self):
        return self.slug

    def __repr__(self):
        return '<Challenge %s>' % self.slug

    @hybrid_property
    def polygon(self):
        """Retrieve the polygon for this challenge,
        or return the World if there is none"""
        if self.geom is not None:
            return to_shape(self.geom)
        else:
            # whole-world fallback (same shape as module-level world_polygon)
            return Polygon([(-180, -90),
                            (-180, 90),
                            (180, 90),
                            (180, -90),
                            (-180, -90)])

    @polygon.setter
    def polygon(self, shape):
        """Set the polygon for the challenge from a Shapely geometry"""
        self.geom = from_shape(shape)

    polygon = synonym('geom', descriptor=polygon)

    @property
    def approx_tasks_available(self):
        """Return the approximate number of tasks
        available for this challenge.

        Approximate because it only counts stored statuses, without the
        expiration logic that Task.is_available applies to 'assigned' and
        'editing' tasks.
        """
        return len(
            [t for t in self.tasks if t.status in [
                'created',
                'skipped',
                'available']])

    @hybrid_property
    def islocal(self):
        """Returns the localness of a challenge (is it small)"""
        # If the challenge has no geometry, it is global
        if self.geom is None:
            return False
        # otherwise get the area and compare against local threshold
        area = db.session.query(self.geom.ST_Area()).one()[0]
        return (area <= app.config['MAX_SQ_DEGREES_FOR_LOCAL'])
class Task(db.Model):
    """A MapRoulette task"""
    __tablename__ = 'tasks'

    id = db.Column(
        db.Integer,
        unique=True,
        primary_key=True,
        nullable=False)
    # identifier supplied by the challenge creator
    identifier = db.Column(
        db.String(72),
        nullable=False)
    challenge_slug = db.Column(
        db.String,
        db.ForeignKey('challenges.slug', onupdate="cascade"))
    # random ordering key, populated by getrandom() on insert
    random = db.Column(
        db.Float,
        default=getrandom,
        nullable=False)
    manifest = db.Column(
        db.String)  # not used for now
    geometries = db.relationship(
        "TaskGeometry",
        cascade='all,delete-orphan',
        backref=db.backref("task"))
    actions = db.relationship(
        "Action",
        cascade='all,delete-orphan',
        backref=db.backref("task"))
    # mirrors the status of the latest Action (see append_action) so common
    # queries do not have to join against the actions table
    status = db.Column(
        db.String)
    instruction = db.Column(
        db.String)

    # note that spatial indexes seem to be created automagically
    __table_args__ = (
        db.Index('idx_id', id),
        db.Index('idx_identifer', identifier),
        db.Index('idx_challenge', challenge_slug),
        db.Index('idx_random', random))

    def __init__(self, challenge_slug, identifier, instruction=None):
        """Create a task and record an initial 'created' action."""
        self.challenge_slug = challenge_slug
        self.identifier = identifier
        self.instruction = instruction
        self.append_action(Action('created'))

    def __repr__(self):
        return '<Task %s>' % (self.identifier)

    def __str__(self):
        return self.identifier

    @hybrid_method
    def has_status(self, statuses):
        """True if the task's status is (one of) `statuses`.

        `statuses` may be a single status string or a list of them.
        """
        if not type(statuses) == list:
            statuses = [statuses]
        return self.status in statuses

    @has_status.expression
    def has_status(cls, statuses):
        # SQL-expression twin of the instance-level check above
        if not type(statuses) == list:
            statuses = [statuses]
        return cls.status.in_(statuses)

    @hybrid_property
    def is_available(self):
        """True if the task can be handed out to a user.

        Available means either never worked on ('available', 'created',
        'skipped'), or 'assigned'/'editing' with the latest action older
        than TASK_EXPIRATION_THRESHOLD.
        """
        return self.has_status([
            'available',
            'created',
            'skipped']) or (self.has_status([
                'assigned',
                'editing']) and datetime.utcnow() -
            app.config['TASK_EXPIRATION_THRESHOLD'] >
            self.actions[-1].timestamp)

    # with statuses as (select distinct on (task_id) timestamp,
    # status, task_id from actions order by task_id, id desc) select id,
    # challenge_slug from tasks join statuses c on (id = task_id)
    # where c.status in ('available','skipped','created') or (c.status in
    # ('editing','assigned') and now() - c.timestamp > '1 hour');
    @is_available.expression
    def is_available(cls):
        """SQL form of is_available; equivalent to the query sketched above."""
        # the common table expression
        current_actions = db.session.query(Action).distinct(
            Action.task_id).order_by(Action.task_id).order_by(
            Action.id.desc()).cte(
            name="current_actions")
        # before this time, a challenge is available even if it's
        # 'assigned' or 'editing'
        available_time = datetime.utcnow() -\
            app.config['TASK_EXPIRATION_THRESHOLD']
        res = cls.id.in_(
            db.session.query(Task.id).join(current_actions).filter(
                or_(
                    current_actions.c.status.in_([
                        'available',
                        'skipped',
                        'created']),
                    and_(
                        current_actions.c.status.in_([
                            'editing',
                            'assigned']),
                        available_time >
                        current_actions.c.timestamp))
            ))
        return res

    @property
    def location(self):
        """Returns the location for this task as a Shapely geometry.

        This is meant to give the client a quick hint about where the
        task is located without having to transfer and decode the entire
        task geometry. In reality what we do is transmit the first
        geometry we find for the task. This is then parsed into a single
        representative lon/lat in the API by getting the first coordinate
        of the geometry retrieved here. See also the PointField class in
        the API code.
        """
        if not hasattr(self, 'geometries') or len(self.geometries) == 0:
            # NOTE: returns a WKT string here but a Shapely object below
            return 'POINT(0 0)'
        else:
            g = self.geometries[0].geom
            return to_shape(g)

    @location.setter
    def location(self, shape):
        """Set the location for this task from a Shapely object"""
        # NOTE(review): Task has no mapped 'geom' attribute (geometries live
        # on TaskGeometry), so this only sets a plain instance attribute and
        # is probably never persisted -- confirm intent.
        self.geom = from_shape(shape)

    def append_action(self, action):
        """Attach `action` to this task, mirror its status, and commit."""
        self.actions.append(action)
        # duplicate the action status string in the tasks table to save lookups
        self.status = action.status
        db.session.commit()
        # if action.status == 'fixed':
        #    self.validate_fixed()

    def update(self, new_values, geometries, commit=True):
        """This updates a task based on a dict with new values"""
        for k, v in new_values.iteritems():
            # if a status is set, append an action
            if k == 'status':
                self.append_action(Action(v))
            elif not hasattr(self, k):
                # NOTE(review): bails out mid-loop, leaving values set in
                # earlier iterations applied but unmerged -- confirm.
                app.logger.debug('task does not have %s' % (k,))
                return False
            setattr(self, k, v)
        self.geometries = []
        for geometry in geometries:
            # NOTE(review): reassigns the whole list on every iteration;
            # equivalent to a single 'self.geometries = geometries' (or an
            # append() was intended) -- confirm.
            self.geometries = geometries
        db.session.merge(self)
        if commit:
            db.session.commit()
        return True

    def validate_fixed(self):
        """Heuristically validate that the current user fixed this task.

        Appends a 'validated' action when the user's latest OSM changeset
        bounding box intersects one of the task geometries AND the
        changeset was closed after the task was assigned and no later than
        now plus MAX_CHANGESET_OFFSET.
        """
        from maproulette.oauth import get_latest_changeset
        from maproulette.helpers import get_envelope
        import iso8601
        intersecting = False
        timeframe = False
        # get the latest changeset
        latest_changeset = get_latest_changeset(session.get('osm_id'))
        # check if the changeset bounding box covers the task geometries
        sw = (float(latest_changeset.get('min_lon')),
              float(latest_changeset.get('min_lat')))
        ne = (float(latest_changeset.get('max_lon')),
              float(latest_changeset.get('max_lat')))
        envelope = get_envelope([ne, sw])
        app.logger.debug(envelope)
        for geom in [to_shape(taskgeom.geom)
                     for taskgeom in self.geometries]:
            if geom.intersects(envelope):
                intersecting = True
                break
        app.logger.debug('intersecting: %s ' % (intersecting,))
        # check if the timestamp is between assigned and fixed
        assigned_action = Action.query.filter_by(
            task_id=self.id).filter_by(
            user_id=session.get('osm_id')).filter_by(
            status='assigned').first()
        # get assigned time in UTC
        assigned_timestamp = assigned_action.timestamp
        assigned_timestamp = assigned_timestamp.replace(tzinfo=pytz.utc)
        # get the timestamp when the changeset was closed in UTC
        changeset_closed_timestamp = iso8601.parse_date(
            latest_changeset.get('closed_at')).replace(tzinfo=pytz.utc)
        app.logger.debug(assigned_timestamp)
        app.logger.debug(changeset_closed_timestamp)
        app.logger.debug(datetime.now(pytz.utc))
        timeframe = assigned_timestamp <\
            changeset_closed_timestamp <\
            datetime.now(pytz.utc) + app.config['MAX_CHANGESET_OFFSET']
        if intersecting and timeframe:
            app.logger.debug('validated')
            self.append_action(Action('validated', session.get('osm_id')))
        else:
            app.logger.debug('could not validate')
class TaskGeometry(db.Model):
    """The collection of geometries (1+) belonging to a task"""
    __tablename__ = 'task_geometries'

    id = db.Column(
        db.Integer,
        nullable=False,
        unique=True,
        primary_key=True)
    # id of the corresponding OSM object, if any
    osmid = db.Column(
        db.BigInteger)
    task_id = db.Column(
        db.Integer,
        db.ForeignKey('tasks.id', onupdate="cascade"),
        nullable=False)
    # untyped geometry column: accepts any geometry type
    geom = db.Column(
        Geometry,
        nullable=False)

    def __init__(self, osmid, shape):
        """Store `shape` (a Shapely geometry) for OSM object `osmid`."""
        self.osmid = osmid
        self.geom = from_shape(shape)

    @hybrid_property
    def geometry(self):
        """Return the task geometry collection as a Shapely object"""
        return to_shape(self.geom)

    @geometry.setter
    def geometry(self, shape):
        """Set the task geometry collection from a Shapely object"""
        self.geom = from_shape(shape)

    geometry = synonym('geom', descriptor=geometry)
class Action(db.Model):
    """An action on a task"""
    __tablename__ = 'actions'

    id = db.Column(
        db.Integer,
        unique=True,
        primary_key=True,
        nullable=False)
    # when the action happened, stored as naive UTC time
    timestamp = db.Column(
        db.DateTime,
        # Must be a callable so it is evaluated per inserted row; the
        # previous bare datetime.now(...) value was computed once at import
        # time and would have been reused for every row created without
        # going through __init__.
        default=lambda: datetime.now(pytz.utc).replace(tzinfo=None),
        nullable=False)
    # the user who performed the action, if known
    user_id = db.Column(
        db.Integer,
        db.ForeignKey('users.id', onupdate="cascade"))
    task_id = db.Column(
        db.Integer,
        db.ForeignKey('tasks.id', onupdate="cascade"))
    # status string this action sets on the task ('created', 'assigned', ...)
    status = db.Column(
        db.String(),
        nullable=False)
    # editor used for the action, when reported -- presumably an editor
    # name; confirm against callers
    editor = db.Column(
        db.String())

    def __repr__(self):
        return "<Action %s set on %s>" % (self.status, self.timestamp)

    def __init__(self, status, user_id=None, editor=None):
        """Record a new action; timestamp is always 'now' in naive UTC."""
        self.status = status
        # store the timestamp as naive UTC time
        self.timestamp = datetime.now(pytz.utc).replace(tzinfo=None)
        if user_id:
            self.user_id = user_id
        if editor:
            self.editor = editor
|
antonioUnina/neutron | refs/heads/master | neutron/db/migration/alembic_migrations/versions/3b85b693a95f_remove_service_tables.py | 15 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Drop unused servicedefinitions and servicetypes tables.
These tables are created independently on plugins but only dropped if
LoadBalancer plugin is used. Meaning that if LoadBalancer plugin is not set
then these tables were created and never used.
Revision ID: 3b85b693a95f
Revises: 327ee5fde2c7
Create Date: 2014-07-22 03:30:05.837152
"""
# revision identifiers, used by Alembic.
revision = '3b85b693a95f'       # this migration
down_revision = '327ee5fde2c7'  # parent revision this one applies on top of
from alembic import op
from neutron.db import migration
def upgrade():
    """Drop the legacy service-type tables when they exist.

    These tables were created regardless of plugin configuration but were
    only ever used (and dropped) by the LoadBalancer plugin, so other
    deployments still carry them around unused.
    """
    obsolete = ('servicedefinitions', 'servicetypes')
    present = [name for name in obsolete if migration.schema_has_table(name)]
    for name in present:
        op.drop_table(name)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.