repo_name stringlengths 5 100 | ref stringlengths 12 67 | path stringlengths 4 244 | copies stringlengths 1 8 | content stringlengths 0 1.05M ⌀ |
|---|---|---|---|---|
tiagochiavericosta/edx-platform | refs/heads/master | common/djangoapps/django_locale/tests.py | 81 | # pylint: disable=invalid-name, line-too-long, super-method-not-called
"""
Tests taken from Django upstream:
https://github.com/django/django/blob/e6b34193c5c7d117ededdab04bb16caf8864f07c/tests/regressiontests/i18n/tests.py
"""
from django.conf import settings
from django.test import TestCase, RequestFactory
from django_locale.trans_real import (
parse_accept_lang_header, get_language_from_request, LANGUAGE_SESSION_KEY
)
# Added to test middleware around dark lang
from django.contrib.auth.models import User
from django.test.utils import override_settings
from dark_lang.models import DarkLangConfig
# Adding to support test differences between Django and our own settings
@override_settings(LANGUAGES=[
    ('pt', 'Portuguese'),
    ('pt-br', 'Portuguese-Brasil'),
    ('es', 'Spanish'),
    ('es-ar', 'Spanish (Argentina)'),
    ('de', 'Deutch'),
    ('zh-cn', 'Chinese (China)'),
    ('ar-sa', 'Arabic (Saudi Arabia)'),
])
class MiscTests(TestCase):
    """
    Tests taken from Django upstream:
    https://github.com/django/django/blob/e6b34193c5c7d117ededdab04bb16caf8864f07c/tests/regressiontests/i18n/tests.py

    The test code is intentionally kept close to upstream so diffs against
    Django remain meaningful; local additions are marked with comments.
    """
    def setUp(self):
        self.rf = RequestFactory()
        # Added to test middleware around dark lang
        user = User()
        user.save()
        # Release every language used by the tests below so the dark-lang
        # machinery does not mask them from get_language_from_request.
        DarkLangConfig(
            released_languages='pt, pt-br, es, de, es-ar, zh-cn, ar-sa',
            changed_by=user,
            enabled=True
        ).save()
    def test_parse_spec_http_header(self):
        """
        Testing HTTP header parsing. First, we test that we can parse the
        values according to the spec (and that we extract all the pieces in
        the right order).
        """
        p = parse_accept_lang_header
        # Good headers.
        self.assertEqual([('de', 1.0)], p('de'))
        self.assertEqual([('en-AU', 1.0)], p('en-AU'))
        self.assertEqual([('es-419', 1.0)], p('es-419'))
        self.assertEqual([('*', 1.0)], p('*;q=1.00'))
        self.assertEqual([('en-AU', 0.123)], p('en-AU;q=0.123'))
        self.assertEqual([('en-au', 0.5)], p('en-au;q=0.5'))
        self.assertEqual([('en-au', 1.0)], p('en-au;q=1.0'))
        # Entries come back sorted by descending q-value.
        self.assertEqual([('da', 1.0), ('en', 0.5), ('en-gb', 0.25)], p('da, en-gb;q=0.25, en;q=0.5'))
        self.assertEqual([('en-au-xx', 1.0)], p('en-au-xx'))
        self.assertEqual([('de', 1.0), ('en-au', 0.75), ('en-us', 0.5), ('en', 0.25), ('es', 0.125), ('fa', 0.125)], p('de,en-au;q=0.75,en-us;q=0.5,en;q=0.25,es;q=0.125,fa;q=0.125'))
        self.assertEqual([('*', 1.0)], p('*'))
        self.assertEqual([('de', 1.0)], p('de;q=0.'))
        self.assertEqual([('en', 1.0), ('*', 0.5)], p('en; q=1.0, * ; q=0.5'))
        self.assertEqual([], p(''))
        # Bad headers; should always return [].
        self.assertEqual([], p('en-gb;q=1.0000'))
        self.assertEqual([], p('en;q=0.1234'))
        self.assertEqual([], p('en;q=.2'))
        self.assertEqual([], p('abcdefghi-au'))
        self.assertEqual([], p('**'))
        self.assertEqual([], p('en,,gb'))
        self.assertEqual([], p('en-au;q=0.1.0'))
        self.assertEqual([], p('XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXZ,en'))
        self.assertEqual([], p('da, en-gb;q=0.8, en;q=0.7,#'))
        self.assertEqual([], p('de;q=2.0'))
        self.assertEqual([], p('de;q=0.a'))
        self.assertEqual([], p('12-345'))
        self.assertEqual([], p(''))
    def test_parse_literal_http_header(self):
        """
        Now test that we parse a literal HTTP header correctly.
        """
        g = get_language_from_request
        r = self.rf.get('/')
        r.COOKIES = {}
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt-br'}
        self.assertEqual('pt-br', g(r))
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'pt'}
        self.assertEqual('pt', g(r))
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'es,de'}
        self.assertEqual('es', g(r))
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-ar,de'}
        self.assertEqual('es-ar', g(r))
        # This test assumes there won't be a Django translation to a US
        # variation of the Spanish language, a safe assumption. When the
        # user sets it as the preferred language, the main 'es'
        # translation should be selected instead.
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'es-us'}
        self.assertEqual(g(r), 'es')
        # This tests the following scenario: there isn't a main language (zh)
        # translation of Django but there is a translation to variation (zh_CN)
        # the user sets zh-cn as the preferred language, it should be selected
        # by Django without falling back nor ignoring it.
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'zh-cn,de'}
        self.assertEqual(g(r), 'zh-cn')
    def test_logic_masked_by_darklang(self):
        """
        An unreleased Accept-Language variant ('ar-qa') resolves to the
        released variant of the same base language ('ar-sa'); an explicit
        session language still takes precedence over the header.
        """
        g = get_language_from_request
        r = self.rf.get('/')
        r.COOKIES = {}
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'ar-qa'}
        self.assertEqual('ar-sa', g(r))
        r.session = {LANGUAGE_SESSION_KEY: 'es'}
        self.assertEqual('es', g(r))
    def test_parse_language_cookie(self):
        """
        Now test that we parse language preferences stored in a cookie correctly.
        """
        g = get_language_from_request
        r = self.rf.get('/')
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt-br'}
        r.META = {}
        self.assertEqual('pt-br', g(r))
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'pt'}
        r.META = {}
        self.assertEqual('pt', g(r))
        # The cookie wins over the Accept-Language header.
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es'}
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
        self.assertEqual('es', g(r))
        # This test assumes there won't be a Django translation to a US
        # variation of the Spanish language, a safe assumption. When the
        # user sets it as the preferred language, the main 'es'
        # translation should be selected instead.
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'es-us'}
        r.META = {}
        self.assertEqual(g(r), 'es')
        # This tests the following scenario: there isn't a main language (zh)
        # translation of Django but there is a translation to variation (zh_CN)
        # the user sets zh-cn as the preferred language, it should be selected
        # by Django without falling back nor ignoring it.
        r.COOKIES = {settings.LANGUAGE_COOKIE_NAME: 'zh-cn'}
        r.META = {'HTTP_ACCEPT_LANGUAGE': 'de'}
        self.assertEqual(g(r), 'zh-cn')
|
sonuyos/couchpotato | refs/heads/master | libs/apscheduler/jobstores/base.py | 145 | """
Abstract base class that provides the interface needed by all job stores.
Job store methods are also documented here.
"""
class JobStore(object):
    """Abstract base class defining the interface every job store implements.

    Concrete stores (RAM, shelve, SQLAlchemy, MongoDB, ...) override these
    methods; the scheduler only talks to stores through this interface.
    """
    def add_job(self, job):
        """Adds the given job to this store."""
        raise NotImplementedError
    def update_job(self, job):
        """Persists the running state of the given job."""
        raise NotImplementedError
    def remove_job(self, job):
        """Removes the given job from this store."""
        raise NotImplementedError
    def load_jobs(self):
        """Loads jobs from this store into memory."""
        raise NotImplementedError
    def close(self):
        """Frees any resources still bound to this job store."""
|
friedrich420/S4-AEL-GPE-LOLLIPOP | refs/heads/master | tools/perf/python/twatch.py | 7370 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
maelnor/cinder | refs/heads/master | cinder/tests/backup/fake_rados.py | 2 | # Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class mock_rados(object):
    """Stand-in for the ``rados`` module: inert client, ioctx and error types."""
    class ioctx(object):
        """Does-nothing replacement for a rados I/O context."""
        def __init__(self, *_args, **_kwargs):
            pass
        def close(self, *_args, **_kwargs):
            pass
    class Rados(object):
        """Does-nothing replacement for a rados cluster handle."""
        def __init__(self, *_args, **_kwargs):
            pass
        def connect(self, *_args, **_kwargs):
            pass
        def open_ioctx(self, *_args, **_kwargs):
            # Hand back a fresh fake I/O context, mirroring the real API.
            return mock_rados.ioctx()
        def shutdown(self, *_args, **_kwargs):
            pass
    class Error():
        """Does-nothing replacement for rados.Error."""
        def __init__(self, *_args, **_kwargs):
            pass
class mock_rbd(object):
    """Stand-in for the ``rbd`` module: exceptions, Image and RBD fakes.

    Lifecycle calls (create/remove/snapshots/close) are silent no-ops; data
    paths (read/write/resize/list/size) raise NotImplementedError so a test
    must stub them explicitly before they are exercised.
    """
    class ImageBusy(Exception):
        """Fake of rbd.ImageBusy."""
        def __init__(self, *_args, **_kwargs):
            pass
    class ImageNotFound(Exception):
        """Fake of rbd.ImageNotFound."""
        def __init__(self, *_args, **_kwargs):
            pass
    class Image(object):
        """Fake of rbd.Image with inert snapshot handling."""
        def __init__(self, *_args, **_kwargs):
            pass
        def create_snap(self, *_args, **_kwargs):
            pass
        def remove_snap(self, *_args, **_kwargs):
            pass
        def read(self, *_args, **_kwargs):
            raise NotImplementedError()
        def write(self, *_args, **_kwargs):
            raise NotImplementedError()
        def resize(self, *_args, **_kwargs):
            raise NotImplementedError()
        def close(self):
            pass
        def list_snaps(self):
            raise NotImplementedError()
        def size(self):
            raise NotImplementedError()
    class RBD(object):
        """Fake of the rbd.RBD pool-level interface."""
        def __init__(self, *_args, **_kwargs):
            pass
        def create(self, *_args, **_kwargs):
            pass
        def remove(self, *_args, **_kwargs):
            pass
        def list(self, *_args, **_kwargs):
            raise NotImplementedError()
|
muff1nman/duplicity | refs/heads/master | duplicity/statistics.py | 2 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright 2002 Ben Escoto <ben@emerose.org>
# Copyright 2007 Kenneth Loafman <kenneth@loafman.com>
#
# This file is part of duplicity.
#
# Duplicity is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# Duplicity is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with duplicity; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Generate and process backup statistics"""
from future_builtins import map
import re
import time
import os
from duplicity import dup_time
class StatsException(Exception):
    """Raised when a statistics line or string cannot be parsed."""
    pass
class StatsObj:
    """Contains various statistics, provide string conversion functions"""
    # used when quoting files in get_stats_line
    space_regex = re.compile(" ")
    # Per-file counters; order matters: it is the column order of
    # get_stats_line / set_stats_from_line.
    stat_file_attrs = ('SourceFiles',
                       'SourceFileSize',
                       'NewFiles',
                       'NewFileSize',
                       'DeletedFiles',
                       'ChangedFiles',
                       'ChangedFileSize',
                       'ChangedDeltaSize',
                       'DeltaEntries',
                       'RawDeltaSize')
    stat_misc_attrs = ('Errors',
                       'TotalDestinationSizeChange')
    stat_time_attrs = ('StartTime',
                       'EndTime',
                       'ElapsedTime')
    # Every attribute this object tracks, in display order.
    stat_attrs = (('Filename',) + stat_time_attrs +
                  stat_misc_attrs + stat_file_attrs)
    # Below, the second value in each pair is true iff the value
    # indicates a number of bytes
    stat_file_pairs = (('SourceFiles', False),
                       ('SourceFileSize', True),
                       ('NewFiles', False),
                       ('NewFileSize', True),
                       ('DeletedFiles', False),
                       ('ChangedFiles', False),
                       ('ChangedFileSize', True),
                       ('ChangedDeltaSize', True),
                       ('DeltaEntries', False),
                       ('RawDeltaSize', True))
    # This is used in get_byte_summary_string below
    byte_abbrev_list = ((1024 * 1024 * 1024 * 1024, "TB"),
                        (1024 * 1024 * 1024, "GB"),
                        (1024 * 1024, "MB"),
                        (1024, "KB"))
    def __init__(self):
        """Set attributes to None"""
        for attr in self.stat_attrs:
            self.__dict__[attr] = None
    def get_stat(self, attribute):
        """Get a statistic"""
        return self.__dict__[attribute]
    def set_stat(self, attr, value):
        """Set attribute to given value"""
        self.__dict__[attr] = value
    def increment_stat(self, attr):
        """Add 1 to value of attribute"""
        self.__dict__[attr] += 1
    def get_total_dest_size_change(self):
        """Return total destination size change
        This represents the total increase in the size of the
        duplicity destination directory, or None if not available.
        """
        return 0  # this needs to be re-done for duplicity
    def get_stats_line(self, index, use_repr=1):
        """Return one line abbreviated version of full stats string

        index is the path tuple of the file the stats describe; an empty
        index means the base directory and is rendered as ".".
        """
        file_attrs = [str(self.get_stat(a)) for a in self.stat_file_attrs]
        if not index:
            filename = "."
        else:
            filename = os.path.join(*index)
        if use_repr:
            # use repr to quote newlines in relative filename, then
            # take off leading and trailing quote and quote spaces.
            filename = self.space_regex.sub("\\x20", repr(filename)[1:-1])
        return " ".join([filename, ] + file_attrs)
    def set_stats_from_line(self, line):
        """Set statistics from given line

        The file counters are read from the trailing fields of the line,
        so a quoted filename containing spaces does not break parsing.
        """
        def error():
            raise StatsException("Bad line '%s'" % line)
        if line[-1] == "\n":
            line = line[:-1]
        lineparts = line.split(" ")
        if len(lineparts) < len(self.stat_file_attrs):
            error()
        for attr, val_string in zip(self.stat_file_attrs,
                                    lineparts[-len(self.stat_file_attrs):]):
            try:
                val = int(val_string)
            except ValueError:
                try:
                    val = float(val_string)
                except ValueError:
                    error()
            self.set_stat(attr, val)
        return self
    def get_stats_string(self):
        """Return extended string printing out statistics"""
        return "%s%s%s" % (self.get_timestats_string(),
                           self.get_filestats_string(),
                           self.get_miscstats_string())
    def get_timestats_string(self):
        """Return portion of statistics string dealing with time"""
        timelist = []
        if self.StartTime is not None:
            timelist.append("StartTime %.2f (%s)\n" %
                            (self.StartTime, dup_time.timetopretty(self.StartTime)))
        if self.EndTime is not None:
            timelist.append("EndTime %.2f (%s)\n" %
                            (self.EndTime, dup_time.timetopretty(self.EndTime)))
        # ElapsedTime is derived from Start/End when not set explicitly.
        if self.ElapsedTime or (self.StartTime is not None and
                                self.EndTime is not None):
            if self.ElapsedTime is None:
                self.ElapsedTime = self.EndTime - self.StartTime
            timelist.append("ElapsedTime %.2f (%s)\n" %
                            (self.ElapsedTime, dup_time.inttopretty(self.ElapsedTime)))
        return "".join(timelist)
    def get_filestats_string(self):
        """Return portion of statistics string about files and bytes"""
        def fileline(stat_file_pair):
            """Return zero or one line of the string"""
            attr, in_bytes = stat_file_pair
            val = self.get_stat(attr)
            if val is None:
                return ""
            if in_bytes:
                return "%s %s (%s)\n" % (attr, val,
                                         self.get_byte_summary_string(val))
            else:
                return "%s %s\n" % (attr, val)
        return "".join(map(fileline, self.stat_file_pairs))
    def get_miscstats_string(self):
        """Return portion of extended stat string about misc attributes"""
        misc_string = ""
        tdsc = self.TotalDestinationSizeChange
        if tdsc is not None:
            misc_string += ("TotalDestinationSizeChange %s (%s)\n" %
                            (tdsc, self.get_byte_summary_string(tdsc)))
        if self.Errors is not None:
            misc_string += "Errors %d\n" % self.Errors
        return misc_string
    def get_byte_summary_string(self, byte_count):
        """Turn byte count into human readable string like "7.23GB" """
        if byte_count < 0:
            sign = "-"
            byte_count = -byte_count
        else:
            sign = ""
        for abbrev_bytes, abbrev_string in self.byte_abbrev_list:
            if byte_count >= abbrev_bytes:
                # Now get 3 significant figures
                abbrev_count = float(byte_count) / abbrev_bytes
                if abbrev_count >= 100:
                    precision = 0
                elif abbrev_count >= 10:
                    precision = 1
                else:
                    precision = 2
                # Two-stage format: first build "%.Nf", then apply it.
                return "%s%%.%df %s" % (sign, precision, abbrev_string) \
                    % (abbrev_count,)
        byte_count = round(byte_count)
        if byte_count == 1:
            return sign + "1 byte"
        else:
            return "%s%d bytes" % (sign, byte_count)
    def get_stats_logstring(self, title):
        """Like get_stats_string, but add header and footer"""
        header = "--------------[ %s ]--------------" % title
        footer = "-" * len(header)
        return "%s\n%s%s\n" % (header, self.get_stats_string(), footer)
    def set_stats_from_string(self, s):
        """Initialize attributes from string, return self for convenience"""
        def error(line):
            raise StatsException("Bad line '%s'" % line)
        for line in s.split("\n"):
            if not line:
                continue
            line_parts = line.split()
            if len(line_parts) < 2:
                error(line)
            attr, value_string = line_parts[:2]
            if attr not in self.stat_attrs:
                error(line)
            try:
                try:
                    val1 = int(value_string)
                except ValueError:
                    val1 = None
                val2 = float(value_string)
                if val1 == val2:
                    self.set_stat(attr, val1)  # use integer val
                else:
                    self.set_stat(attr, val2)  # use float
            except ValueError:
                error(line)
        return self
    def write_stats_to_path(self, path):
        """Write statistics string to given path"""
        fin = path.open("w")
        fin.write(self.get_stats_string())
        assert not fin.close()
    def read_stats_from_path(self, path):
        """Set statistics from path, return self for convenience"""
        fp = path.open("r")
        self.set_stats_from_string(fp.read())
        assert not fp.close()
        return self
    def stats_equal(self, s):
        """Return true if s has same statistics as self"""
        assert isinstance(s, StatsObj)
        for attr in self.stat_file_attrs:
            if self.get_stat(attr) != s.get_stat(attr):
                return None
        return 1
    def set_to_average(self, statobj_list):
        """Set self's attributes to average of those in statobj_list

        An attribute that is None in any element poisons the average to
        None.  Start/end times are deliberately excluded from averaging.
        """
        for attr in self.stat_attrs:
            self.set_stat(attr, 0)
        for statobj in statobj_list:
            for attr in self.stat_attrs:
                if statobj.get_stat(attr) is None:
                    self.set_stat(attr, None)
                elif self.get_stat(attr) is not None:
                    self.set_stat(attr, statobj.get_stat(attr) +
                                  self.get_stat(attr))
        # Don't compute average starting/stopping time
        self.StartTime = None
        self.EndTime = None
        for attr in self.stat_attrs:
            if self.get_stat(attr) is not None:
                self.set_stat(attr,
                              self.get_stat(attr) / float(len(statobj_list)))
        return self
    def get_statsobj_copy(self):
        """Return new StatsObj object with same stats as self"""
        s = StatsObj()
        for attr in self.stat_attrs:
            s.set_stat(attr, self.get_stat(attr))
        return s
class StatsDeltaProcess(StatsObj):
    """Accumulate per-file statistics while a DirDelta pass runs."""
    def __init__(self):
        """Zero every file counter and stamp the start time."""
        StatsObj.__init__(self)
        for name in StatsObj.stat_file_attrs:
            setattr(self, name, 0)
        self.Errors = 0
        self.StartTime = time.time()
        self.files_changed = []
    def add_new_file(self, path):
        """Count a file seen for the first time in this backup."""
        size = path.getsize()
        self.SourceFiles += 1
        # SourceFileSize is added-to incrementally as the file is read
        self.NewFiles += 1
        self.NewFileSize += size
        self.DeltaEntries += 1
        self.add_delta_entries_file(path, 'new')
    def add_changed_file(self, path):
        """Count a file that changed since the previous backup."""
        size = path.getsize()
        self.SourceFiles += 1
        # SourceFileSize is added-to incrementally as the file is read
        self.ChangedFiles += 1
        self.ChangedFileSize += size
        self.DeltaEntries += 1
        self.add_delta_entries_file(path, 'changed')
    def add_deleted_file(self, path):
        """Count a file that disappeared from the source directory."""
        self.DeletedFiles += 1  # size is unavailable for a deleted file
        self.DeltaEntries += 1
        self.add_delta_entries_file(path, 'deleted')
    def add_unchanged_file(self, path):
        """Count a file that is unchanged since the previous backup."""
        size = path.getsize()
        self.SourceFiles += 1
        self.SourceFileSize += size
    def close(self):
        """Stop collecting data and stamp the end time."""
        self.EndTime = time.time()
    def add_delta_entries_file(self, path, action_type):
        """Record (relative path, action) — regular files only."""
        if path.isreg():
            self.files_changed.append((path.get_relative_path(), action_type))
    def get_delta_entries_file(self):
        """Return the accumulated (relative path, action) pairs."""
        return self.files_changed
|
tiagochiavericosta/edx-platform | refs/heads/master | common/djangoapps/third_party_auth/__init__.py | 160 | """Third party authentication. """
from microsite_configuration import microsite
def is_enabled():
    """Check whether third party authentication has been enabled. """
    # We do this import internally to avoid initializing settings prematurely
    from django.conf import settings
    # The platform-wide feature flag is the fallback when the microsite
    # does not override the value.
    default = settings.FEATURES.get("ENABLE_THIRD_PARTY_AUTH")
    return microsite.get_value("ENABLE_THIRD_PARTY_AUTH", default)
|
OptimusGitEtna/RestSymf | refs/heads/master | Python-3.4.2/Lib/ctypes/_endian.py | 99 | import sys
from ctypes import *
_array_type = type(Array)
def _other_endian(typ):
    """Return the type with the 'other' byte order.

    Simple types like c_int already carry __ctype_be__ / __ctype_le__
    attributes naming their swapped counterparts; arrays are rebuilt
    element-wise, and structures are handled by their metaclass.
    """
    # Primitive types: the swapped variant hangs off an attribute
    # (named by the module-level _OTHER_ENDIAN).
    try:
        return getattr(typ, _OTHER_ENDIAN)
    except AttributeError:
        pass
    # Arrays: swap the element type, keep the length.
    if isinstance(typ, _array_type):
        return _other_endian(typ._type_) * typ._length_
    # Structures: _swapped_meta swaps their fields as they are assigned.
    if issubclass(typ, Structure):
        return typ
    raise TypeError("This type does not support other endian: %s" % typ)
class _swapped_meta(type(Structure)):
def __setattr__(self, attrname, value):
if attrname == "_fields_":
fields = []
for desc in value:
name = desc[0]
typ = desc[1]
rest = desc[2:]
fields.append((name, _other_endian(typ)) + rest)
value = fields
super().__setattr__(attrname, value)
################################################################
# Note: The Structure metaclass checks for the *presence* (not the
# value!) of a _swapped_bytes_ attribute to determine the bit order in
# structures containing bit fields.
# The native Structure already has the host's byte order, so only the
# opposite-endian variant needs the field-swapping metaclass.
if sys.byteorder == "little":
    _OTHER_ENDIAN = "__ctype_be__"
    LittleEndianStructure = Structure
    class BigEndianStructure(Structure, metaclass=_swapped_meta):
        """Structure with big endian byte order"""
        _swappedbytes_ = None
elif sys.byteorder == "big":
    _OTHER_ENDIAN = "__ctype_le__"
    BigEndianStructure = Structure
    class LittleEndianStructure(Structure, metaclass=_swapped_meta):
        """Structure with little endian byte order"""
        _swappedbytes_ = None
else:
    raise RuntimeError("Invalid byteorder")
|
EaseCloud/wechatpy | refs/heads/master | wechatpy/enterprise/__init__.py | 12 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from wechatpy.enterprise.parser import parse_message # NOQA
from wechatpy.enterprise.replies import create_reply # NOQA
from wechatpy.enterprise.crypto import WeChatCrypto # NOQA
from wechatpy.enterprise.client import WeChatClient # NOQA
|
jpippy/pyo | refs/heads/master | embedded/openframeworks/PyoTemplate/scripts/stereoDelay.py | 12 | # Get the input sound and apply a stereo delay + reverb on it.
# Read both hardware input channels (pyo runs this inside a Server).
st_input = Input([0,1])
# Per-channel delays (0.4s / 0.5s) with strong feedback for an echo tail.
st_delay = Delay(st_input, delay=[.4, .5], feedback=0.7)
# Waveguide reverb on the delayed signal, 25% wet, sent to the outputs.
st_rev = WGVerb(st_delay, feedback=0.8, cutoff=4000, bal=0.25).out()
|
Sing-Li/go-buildpack | refs/heads/master | builds/runtimes/python-2.7.6/lib/python2.7/test/seq_tests.py | 103 | """
Tests common to tuple, list and UserList.UserList
"""
import unittest
import sys
# Various iterables
# This is used for checking the constructor (here and in test_deque.py)
def iterfunc(seqn):
    'Regular generator'
    # Simply re-yield each element of the underlying iterable.
    for item in seqn:
        yield item
class Sequence:
    'Sequence using __getitem__'
    # Implements only indexing; iteration works via the old
    # __getitem__-based protocol.
    def __init__(self, seqn):
        self.seqn = seqn
    def __getitem__(self, index):
        return self.seqn[index]
class IterFunc:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class IterGen:
    'Sequence using iterator protocol defined with a generator'
    # __iter__ is a generator function, so each call yields a fresh
    # independent iterator; self.i exists only to mirror the siblings.
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0
    def __iter__(self):
        for item in self.seqn:
            yield item
class IterNextOnly:
    'Missing __getitem__ and __iter__'
    # Has a next() method but deliberately no __iter__/__getitem__,
    # so it must NOT be accepted as an iterable.
    def __init__(self, seqn):
        self.seqn = seqn
        self.i = 0
    def next(self):
        if self.i >= len(self.seqn):
            raise StopIteration
        value = self.seqn[self.i]
        self.i += 1
        return value
class IterNoNext:
'Iterator missing next()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class IterGenExc:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
3 // 0
class IterFuncStop:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def next(self):
raise StopIteration
from itertools import chain, imap
def itermulti(seqn):
    'Test multiple tiers of iterators'
    # Identity imap over a generator over a generator-iterable over a
    # __getitem__-only sequence: stacks every iteration protocol at once.
    return chain(imap(lambda x:x, iterfunc(IterGen(Sequence(seqn)))))
class CommonTest(unittest.TestCase):
# The type to be tested
type2test = None
def test_constructors(self):
l0 = []
l1 = [0]
l2 = [0, 1]
u = self.type2test()
u0 = self.type2test(l0)
u1 = self.type2test(l1)
u2 = self.type2test(l2)
uu = self.type2test(u)
uu0 = self.type2test(u0)
uu1 = self.type2test(u1)
uu2 = self.type2test(u2)
v = self.type2test(tuple(u))
class OtherSeq:
def __init__(self, initseq):
self.__data = initseq
def __len__(self):
return len(self.__data)
def __getitem__(self, i):
return self.__data[i]
s = OtherSeq(u0)
v0 = self.type2test(s)
self.assertEqual(len(v0), len(s))
s = "this is also a sequence"
vv = self.type2test(s)
self.assertEqual(len(vv), len(s))
# Create from various iteratables
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (Sequence, IterFunc, IterGen,
itermulti, iterfunc):
self.assertEqual(self.type2test(g(s)), self.type2test(s))
self.assertEqual(self.type2test(IterFuncStop(s)), self.type2test())
self.assertEqual(self.type2test(c for c in "123"), self.type2test("123"))
self.assertRaises(TypeError, self.type2test, IterNextOnly(s))
self.assertRaises(TypeError, self.type2test, IterNoNext(s))
self.assertRaises(ZeroDivisionError, self.type2test, IterGenExc(s))
def test_truth(self):
self.assertFalse(self.type2test())
self.assertTrue(self.type2test([42]))
def test_getitem(self):
u = self.type2test([0, 1, 2, 3, 4])
for i in xrange(len(u)):
self.assertEqual(u[i], i)
self.assertEqual(u[long(i)], i)
for i in xrange(-len(u), -1):
self.assertEqual(u[i], len(u)+i)
self.assertEqual(u[long(i)], len(u)+i)
self.assertRaises(IndexError, u.__getitem__, -len(u)-1)
self.assertRaises(IndexError, u.__getitem__, len(u))
self.assertRaises(ValueError, u.__getitem__, slice(0,10,0))
u = self.type2test()
self.assertRaises(IndexError, u.__getitem__, 0)
self.assertRaises(IndexError, u.__getitem__, -1)
self.assertRaises(TypeError, u.__getitem__)
a = self.type2test([10, 11])
self.assertEqual(a[0], 10)
self.assertEqual(a[1], 11)
self.assertEqual(a[-2], 10)
self.assertEqual(a[-1], 11)
self.assertRaises(IndexError, a.__getitem__, -3)
self.assertRaises(IndexError, a.__getitem__, 3)
def test_getslice(self):
l = [0, 1, 2, 3, 4]
u = self.type2test(l)
self.assertEqual(u[0:0], self.type2test())
self.assertEqual(u[1:2], self.type2test([1]))
self.assertEqual(u[-2:-1], self.type2test([3]))
self.assertEqual(u[-1000:1000], u)
self.assertEqual(u[1000:-1000], self.type2test([]))
self.assertEqual(u[:], u)
self.assertEqual(u[1:None], self.type2test([1, 2, 3, 4]))
self.assertEqual(u[None:3], self.type2test([0, 1, 2]))
# Extended slices
self.assertEqual(u[::], u)
self.assertEqual(u[::2], self.type2test([0, 2, 4]))
self.assertEqual(u[1::2], self.type2test([1, 3]))
self.assertEqual(u[::-1], self.type2test([4, 3, 2, 1, 0]))
self.assertEqual(u[::-2], self.type2test([4, 2, 0]))
self.assertEqual(u[3::-2], self.type2test([3, 1]))
self.assertEqual(u[3:3:-2], self.type2test([]))
self.assertEqual(u[3:2:-2], self.type2test([3]))
self.assertEqual(u[3:1:-2], self.type2test([3]))
self.assertEqual(u[3:0:-2], self.type2test([3, 1]))
self.assertEqual(u[::-100], self.type2test([4]))
self.assertEqual(u[100:-100:], self.type2test([]))
self.assertEqual(u[-100:100:], u)
self.assertEqual(u[100:-100:-1], u[::-1])
self.assertEqual(u[-100:100:-1], self.type2test([]))
self.assertEqual(u[-100L:100L:2L], self.type2test([0, 2, 4]))
# Test extreme cases with long ints
a = self.type2test([0,1,2,3,4])
self.assertEqual(a[ -pow(2,128L): 3 ], self.type2test([0,1,2]))
self.assertEqual(a[ 3: pow(2,145L) ], self.type2test([3,4]))
self.assertRaises(TypeError, u.__getslice__)
def test_contains(self):
u = self.type2test([0, 1, 2])
for i in u:
self.assertIn(i, u)
for i in min(u)-1, max(u)+1:
self.assertNotIn(i, u)
self.assertRaises(TypeError, u.__contains__)
def test_contains_fake(self):
class AllEq:
# Sequences must use rich comparison against each item
# (unless "is" is true, or an earlier item answered)
# So instances of AllEq must be found in all non-empty sequences.
def __eq__(self, other):
return True
__hash__ = None # Can't meet hash invariant requirements
self.assertNotIn(AllEq(), self.type2test([]))
self.assertIn(AllEq(), self.type2test([1]))
def test_contains_order(self):
# Sequences must test in-order. If a rich comparison has side
# effects, these will be visible to tests against later members.
# In this test, the "side effect" is a short-circuiting raise.
class DoNotTestEq(Exception):
pass
class StopCompares:
def __eq__(self, other):
raise DoNotTestEq
checkfirst = self.type2test([1, StopCompares()])
self.assertIn(1, checkfirst)
checklast = self.type2test([StopCompares(), 1])
self.assertRaises(DoNotTestEq, checklast.__contains__, 1)
def test_len(self):
self.assertEqual(len(self.type2test()), 0)
self.assertEqual(len(self.type2test([])), 0)
self.assertEqual(len(self.type2test([0])), 1)
self.assertEqual(len(self.type2test([0, 1, 2])), 3)
def test_minmax(self):
u = self.type2test([0, 1, 2])
self.assertEqual(min(u), 0)
self.assertEqual(max(u), 2)
def test_addmul(self):
u1 = self.type2test([0])
u2 = self.type2test([0, 1])
self.assertEqual(u1, u1 + self.type2test())
self.assertEqual(u1, self.type2test() + u1)
self.assertEqual(u1 + self.type2test([1]), u2)
self.assertEqual(self.type2test([-1]) + u1, self.type2test([-1, 0]))
self.assertEqual(self.type2test(), u2*0)
self.assertEqual(self.type2test(), 0*u2)
self.assertEqual(self.type2test(), u2*0L)
self.assertEqual(self.type2test(), 0L*u2)
self.assertEqual(u2, u2*1)
self.assertEqual(u2, 1*u2)
self.assertEqual(u2, u2*1L)
self.assertEqual(u2, 1L*u2)
self.assertEqual(u2+u2, u2*2)
self.assertEqual(u2+u2, 2*u2)
self.assertEqual(u2+u2, u2*2L)
self.assertEqual(u2+u2, 2L*u2)
self.assertEqual(u2+u2+u2, u2*3)
self.assertEqual(u2+u2+u2, 3*u2)
class subclass(self.type2test):
pass
u3 = subclass([0, 1])
self.assertEqual(u3, u3*1)
self.assertIsNot(u3, u3*1)
def test_iadd(self):
u = self.type2test([0, 1])
u += self.type2test()
self.assertEqual(u, self.type2test([0, 1]))
u += self.type2test([2, 3])
self.assertEqual(u, self.type2test([0, 1, 2, 3]))
u += self.type2test([4, 5])
self.assertEqual(u, self.type2test([0, 1, 2, 3, 4, 5]))
u = self.type2test("spam")
u += self.type2test("eggs")
self.assertEqual(u, self.type2test("spameggs"))
def test_imul(self):
u = self.type2test([0, 1])
u *= 3
self.assertEqual(u, self.type2test([0, 1, 0, 1, 0, 1]))
def test_getitemoverwriteiter(self):
# Verify that __getitem__ overrides are not recognized by __iter__
class T(self.type2test):
def __getitem__(self, key):
return str(key) + '!!!'
self.assertEqual(iter(T((1,2))).next(), 1)
def test_repeat(self):
for m in xrange(4):
s = tuple(range(m))
for n in xrange(-3, 5):
self.assertEqual(self.type2test(s*n), self.type2test(s)*n)
self.assertEqual(self.type2test(s)*(-4), self.type2test([]))
self.assertEqual(id(s), id(s*1))
def test_bigrepeat(self):
import sys
if sys.maxint <= 2147483647:
x = self.type2test([0])
x *= 2**16
self.assertRaises(MemoryError, x.__mul__, 2**16)
if hasattr(x, '__imul__'):
self.assertRaises(MemoryError, x.__imul__, 2**16)
def test_subscript(self):
a = self.type2test([10, 11])
self.assertEqual(a.__getitem__(0L), 10)
self.assertEqual(a.__getitem__(1L), 11)
self.assertEqual(a.__getitem__(-2L), 10)
self.assertEqual(a.__getitem__(-1L), 11)
self.assertRaises(IndexError, a.__getitem__, -3)
self.assertRaises(IndexError, a.__getitem__, 3)
self.assertEqual(a.__getitem__(slice(0,1)), self.type2test([10]))
self.assertEqual(a.__getitem__(slice(1,2)), self.type2test([11]))
self.assertEqual(a.__getitem__(slice(0,2)), self.type2test([10, 11]))
self.assertEqual(a.__getitem__(slice(0,3)), self.type2test([10, 11]))
self.assertEqual(a.__getitem__(slice(3,5)), self.type2test([]))
self.assertRaises(ValueError, a.__getitem__, slice(0, 10, 0))
self.assertRaises(TypeError, a.__getitem__, 'x')
    def test_count(self):
        """count() tallies equal elements and propagates comparison errors."""
        a = self.type2test([0, 1, 2])*3
        self.assertEqual(a.count(0), 3)
        self.assertEqual(a.count(1), 3)
        self.assertEqual(a.count(3), 0)
        # count() requires exactly one argument.
        self.assertRaises(TypeError, a.count)
        class BadExc(Exception):
            pass
        class BadCmp:
            def __eq__(self, other):
                if other == 2:
                    raise BadExc()
                return False
        # An exception raised inside element comparison must escape count().
        self.assertRaises(BadExc, a.count, BadCmp())
    def test_index(self):
        """index() returns the first match, honors start/stop bounds
        (including negative and out-of-range ones) and propagates
        comparison errors."""
        u = self.type2test([0, 1])
        self.assertEqual(u.index(0), 0)
        self.assertEqual(u.index(1), 1)
        self.assertRaises(ValueError, u.index, 2)
        u = self.type2test([-2, -1, 0, 0, 1, 2])
        self.assertEqual(u.count(0), 2)
        self.assertEqual(u.index(0), 2)
        self.assertEqual(u.index(0, 2), 2)
        # A start far before the beginning is clamped to index 0.
        self.assertEqual(u.index(-2, -10), 0)
        self.assertEqual(u.index(0, 3), 3)
        self.assertEqual(u.index(0, 3, 4), 3)
        self.assertRaises(ValueError, u.index, 2, 0, -10)
        self.assertRaises(TypeError, u.index)
        class BadExc(Exception):
            pass
        class BadCmp:
            def __eq__(self, other):
                if other == 2:
                    raise BadExc()
                return False
        # An exception raised during comparison must escape index().
        a = self.type2test([0, 1, 2, 3])
        self.assertRaises(BadExc, a.index, BadCmp())
        a = self.type2test([-2, -1, 0, 0, 1, 2])
        self.assertEqual(a.index(0), 2)
        self.assertEqual(a.index(0, 2), 2)
        self.assertEqual(a.index(0, -4), 2)
        self.assertEqual(a.index(-2, -10), 0)
        self.assertEqual(a.index(0, 3), 3)
        self.assertEqual(a.index(0, -3), 3)
        self.assertEqual(a.index(0, 3, 4), 3)
        self.assertEqual(a.index(0, -3, -2), 3)
        # sys comes from the enclosing test module's imports (not visible in
        # this chunk); sys.maxint is Python 2 only.
        self.assertEqual(a.index(0, -4*sys.maxint, 4*sys.maxint), 2)
        self.assertRaises(ValueError, a.index, 0, 4*sys.maxint,-4*sys.maxint)
        self.assertRaises(ValueError, a.index, 2, 0, -10)
|
s40523116/2016fallcp_hw | refs/heads/gh-pages | plugin/liquid_tags/test_generation.py | 306 | # -*- coding: utf-8 -*-
from __future__ import print_function
import filecmp
import os
import unittest
from shutil import rmtree
from tempfile import mkdtemp
import pytest
from pelican import Pelican
from pelican.settings import read_settings
from .notebook import IPYTHON_VERSION
# Directory containing this plugin module, and its bundled test fixtures.
PLUGIN_DIR = os.path.dirname(__file__)
TEST_DATA_DIR = os.path.join(PLUGIN_DIR, 'test_data')
class TestFullRun(unittest.TestCase):
    '''Test running Pelican with the Plugin'''
    def setUp(self):
        '''Create temporary output and cache folders'''
        self.temp_path = mkdtemp(prefix='pelicantests.')
        self.temp_cache = mkdtemp(prefix='pelican_cache.')
        # Pelican resolves relative paths against the working directory.
        os.chdir(TEST_DATA_DIR)
    def tearDown(self):
        '''Remove output and cache folders'''
        rmtree(self.temp_path)
        rmtree(self.temp_cache)
        os.chdir(PLUGIN_DIR)
    # NOTE(review): this test is *skipped* when IPython >= 3 although its name
    # says ipython3, and the skip reason mentions version 2; the names of this
    # test and the next one look swapped -- confirm against upstream history
    # before renaming (renaming changes test discovery).
    @pytest.mark.skipif(IPYTHON_VERSION >= 3,
                        reason="output must be created with ipython version 2")
    def test_generate_with_ipython3(self):
        '''Test generation of site with the plugin.'''
        base_path = os.path.dirname(os.path.abspath(__file__))
        base_path = os.path.join(base_path, 'test_data')
        content_path = os.path.join(base_path, 'content')
        output_path = os.path.join(base_path, 'output')
        settings_path = os.path.join(base_path, 'pelicanconf.py')
        settings = read_settings(path=settings_path,
                                 override={'PATH': content_path,
                                           'OUTPUT_PATH': self.temp_path,
                                           'CACHE_PATH': self.temp_cache,
                                           }
                                 )
        pelican = Pelican(settings)
        pelican.run()
        # test existence
        assert os.path.exists(os.path.join(self.temp_path,
                                           'test-ipython-notebook-nb-format-3.html'))
        assert os.path.exists(os.path.join(self.temp_path,
                                           'test-ipython-notebook-nb-format-4.html'))
        # test differences
        #assert filecmp.cmp(os.path.join(output_path,
        #                                'test-ipython-notebook-v2.html'),
        #                   os.path.join(self.temp_path,
        #                                'test-ipython-notebook.html'))
    @pytest.mark.skipif(IPYTHON_VERSION < 3,
                        reason="output must be created with ipython version 3")
    def test_generate_with_ipython2(self):
        '''Test generation of site with the plugin.'''
        base_path = os.path.dirname(os.path.abspath(__file__))
        base_path = os.path.join(base_path, 'test_data')
        content_path = os.path.join(base_path, 'content')
        output_path = os.path.join(base_path, 'output')
        settings_path = os.path.join(base_path, 'pelicanconf.py')
        settings = read_settings(path=settings_path,
                                 override={'PATH': content_path,
                                           'OUTPUT_PATH': self.temp_path,
                                           'CACHE_PATH': self.temp_cache,
                                           }
                                 )
        pelican = Pelican(settings)
        pelican.run()
        # test existence
        assert os.path.exists(os.path.join(self.temp_path,
                                           'test-ipython-notebook-nb-format-3.html'))
        assert os.path.exists(os.path.join(self.temp_path,
                                           'test-ipython-notebook-nb-format-4.html'))
        # test differences
        #assert filecmp.cmp(os.path.join(output_path,
        #                                'test-ipython-notebook-v3.html'),
        #                   os.path.join(self.temp_path,
        #                                'test-ipython-notebook.html'))
|
sinkuri256/python-for-android | refs/heads/master | python-modules/twisted/twisted/internet/utils.py | 61 | # -*- test-case-name: twisted.test.test_iutils -*-
# Copyright (c) 2001-2008 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Utility methods.
"""
import sys, warnings
from twisted.internet import protocol, defer
from twisted.python import failure, util as tputil
try:
import cStringIO as StringIO
except ImportError:
import StringIO
def _callProtocolWithDeferred(protocol, executable, args, env, path, reactor=None):
    """
    Spawn *executable* driven by a fresh instance of the given process
    protocol factory and return the Deferred handed to that instance.

    Note: the ``protocol`` parameter shadows the ``twisted.internet.protocol``
    module imported at the top of this file; the factory is called with a
    single Deferred argument.
    """
    if reactor is None:
        from twisted.internet import reactor
    d = defer.Deferred()
    p = protocol(d)
    # argv[0] is conventionally the executable itself.
    reactor.spawnProcess(p, executable, (executable,)+tuple(args), env, path)
    return d
class _UnexpectedErrorOutput(IOError):
"""
Standard error data was received where it was not expected. This is a
subclass of L{IOError} to preserve backward compatibility with the previous
error behavior of L{getProcessOutput}.
@ivar processEnded: A L{Deferred} which will fire when the process which
produced the data on stderr has ended (exited and all file descriptors
closed).
"""
def __init__(self, text, processEnded):
IOError.__init__(self, "got stderr: %r" % (text,))
self.processEnded = processEnded
class _BackRelay(protocol.ProcessProtocol):
    """
    Trivial protocol for communicating with a process and turning its output
    into the result of a L{Deferred}.
    @ivar deferred: A L{Deferred} which will be called back with all of stdout
        and, if C{errortoo} is true, all of stderr as well (mixed together in
        one string). If C{errortoo} is false and any bytes are received over
        stderr, this will fire with an L{_UnexpectedErrorOutput} instance and
        the attribute will be set to C{None}.
    @ivar onProcessEnded: If C{errortoo} is false and bytes are received over
        stderr, this attribute will refer to a L{Deferred} which will be called
        back when the process ends. This C{Deferred} is also associated with
        the L{_UnexpectedErrorOutput} which C{deferred} fires with earlier in
        this case so that users can determine when the process has actually
        ended, in addition to knowing when bytes have been received via stderr.
    """
    def __init__(self, deferred, errortoo=0):
        self.deferred = deferred
        self.s = StringIO.StringIO()
        # Choose the stderr policy once, up front, rather than branching on
        # every errReceived call.
        if errortoo:
            self.errReceived = self.errReceivedIsGood
        else:
            self.errReceived = self.errReceivedIsBad
    def errReceivedIsBad(self, text):
        # First stderr data: errback the Deferred and drop the connection.
        # Later calls are no-ops because self.deferred is cleared.
        if self.deferred is not None:
            self.onProcessEnded = defer.Deferred()
            err = _UnexpectedErrorOutput(text, self.onProcessEnded)
            self.deferred.errback(failure.Failure(err))
            self.deferred = None
            self.transport.loseConnection()
    def errReceivedIsGood(self, text):
        # errortoo mode: stderr is simply mixed into the shared buffer.
        self.s.write(text)
    def outReceived(self, text):
        self.s.write(text)
    def processEnded(self, reason):
        if self.deferred is not None:
            self.deferred.callback(self.s.getvalue())
        elif self.onProcessEnded is not None:
            # The Deferred already failed with stderr output; now report that
            # the process itself has finished.
            self.onProcessEnded.errback(reason)
def getProcessOutput(executable, args=(), env={}, path=None, reactor=None,
                     errortoo=0):
    """
    Spawn a process and return its output as a deferred returning a string.
    @param executable: The file name to run and get the output of - the
        full path should be used.
    @param args: the command line arguments to pass to the process; a
        sequence of strings. The first string should *NOT* be the
        executable's name.
    @param env: the environment variables to pass to the processs; a
        dictionary of strings.
    @param path: the path to run the subprocess in - defaults to the
        current directory.
    @param reactor: the reactor to use - defaults to the default reactor
    @param errortoo: If true, include stderr in the result. If false, if
        stderr is received the returned L{Deferred} will errback with an
        L{IOError} instance with a C{processEnded} attribute. The
        C{processEnded} attribute refers to a L{Deferred} which fires when the
        executed process ends.
    """
    # NOTE(review): env={} is a shared mutable default, but it is only passed
    # through; replacing it with None would change spawnProcess semantics
    # (None means "inherit the parent environment"), so it is left as-is.
    return _callProtocolWithDeferred(lambda d:
                                        _BackRelay(d, errortoo=errortoo),
                                     executable, args, env, path,
                                     reactor)
class _ValueGetter(protocol.ProcessProtocol):
    """
    Process protocol which ignores all output and fires its Deferred with the
    process's exit code when the process ends.
    """
    def __init__(self, deferred):
        self.deferred = deferred
    def processEnded(self, reason):
        # reason.value carries the exit status (presumably a Twisted
        # ProcessDone/ProcessTerminated -- confirm against the reactor docs).
        self.deferred.callback(reason.value.exitCode)
def getProcessValue(executable, args=(), env={}, path=None, reactor=None):
    """Spawn a process and return its exit code as a Deferred."""
    # env={} default is only passed through; see note in getProcessOutput.
    return _callProtocolWithDeferred(_ValueGetter, executable, args, env, path,
                                     reactor)
class _EverythingGetter(protocol.ProcessProtocol):
    """
    Process protocol that captures stdout and stderr separately and fires its
    Deferred with (out, err, exitCode) on a normal exit, or errbacks with
    (out, err, signal) when the process was killed by a signal.
    """
    def __init__(self, deferred):
        self.deferred = deferred
        self.outBuf = StringIO.StringIO()
        self.errBuf = StringIO.StringIO()
        # Bind the buffer writers directly as the protocol data callbacks.
        self.outReceived = self.outBuf.write
        self.errReceived = self.errBuf.write
    def processEnded(self, reason):
        out = self.outBuf.getvalue()
        err = self.errBuf.getvalue()
        e = reason.value
        code = e.exitCode
        if e.signal:
            self.deferred.errback((out, err, e.signal))
        else:
            self.deferred.callback((out, err, code))
def getProcessOutputAndValue(executable, args=(), env={}, path=None,
                             reactor=None):
    """Spawn a process and returns a Deferred that will be called back with
    its output (from stdout and stderr) and it's exit code as (out, err, code)
    If a signal is raised, the Deferred will errback with the stdout and
    stderr up to that point, along with the signal, as (out, err, signalNum)
    """
    # env={} default is only passed through; see note in getProcessOutput.
    return _callProtocolWithDeferred(_EverythingGetter, executable, args, env, path,
                                     reactor)
def _resetWarningFilters(passthrough, addedFilters):
for f in addedFilters:
try:
warnings.filters.remove(f)
except ValueError:
pass
return passthrough
def runWithWarningsSuppressed(suppressedWarnings, f, *a, **kw):
    """Run the function C{f}, but with some warnings suppressed.
    @param suppressedWarnings: A list of arguments to pass to filterwarnings.
                               Must be a sequence of 2-tuples (args, kwargs).
    @param f: A callable, followed by its arguments and keyword arguments
    """
    for args, kwargs in suppressedWarnings:
        warnings.filterwarnings(*args, **kwargs)
    # filterwarnings() prepends, so the filters just added are the first
    # len(suppressedWarnings) entries of the global list.
    addedFilters = warnings.filters[:len(suppressedWarnings)]
    try:
        result = f(*a, **kw)
    except:
        # Bare re-raise preserves the original exception and traceback on
        # both Python 2 and Python 3; the previous
        # ``raise exc_info[0], exc_info[1], exc_info[2]`` form is a syntax
        # error on Python 3 and needlessly juggled sys.exc_info().
        _resetWarningFilters(None, addedFilters)
        raise
    else:
        if isinstance(result, defer.Deferred):
            # Keep warnings suppressed until the Deferred fires.
            result.addBoth(_resetWarningFilters, addedFilters)
        else:
            _resetWarningFilters(None, addedFilters)
        return result
def suppressWarnings(f, *suppressedWarnings):
    """
    Wrap C{f} in a callable which suppresses the indicated warnings before
    invoking C{f} and unsuppresses them afterwards. If f returns a Deferred,
    warnings will remain suppressed until the Deferred fires.
    """
    def warningSuppressingWrapper(*a, **kw):
        return runWithWarningsSuppressed(suppressedWarnings, f, *a, **kw)
    # Copy name/docstring/etc. from f onto the wrapper (like functools.wraps).
    return tputil.mergeFunctionMetadata(f, warningSuppressingWrapper)
# Public API of this module.
__all__ = [
    "runWithWarningsSuppressed", "suppressWarnings",
    "getProcessOutput", "getProcessValue", "getProcessOutputAndValue",
    ]
|
rob356/SickRage | refs/heads/master | lib/send2trash/plat_other.py | 68 | # Copyright 2013 Hardcoded Software (http://www.hardcoded.net)
# This software is licensed under the "BSD" License as described in the "LICENSE" file,
# which should be included with this package. The terms are also available at
# http://www.hardcoded.net/licenses/bsd_license
# This is a reimplementation of plat_other.py with reference to the
# freedesktop.org trash specification:
# [1] http://www.freedesktop.org/wiki/Specifications/trash-spec
# [2] http://www.ramendik.ru/docs/trashspec.html
# See also:
# [3] http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
#
# For external volumes this implementation will raise an exception if it can't
# find or create the user's trash directory.
from __future__ import unicode_literals
import sys
import os
import os.path as op
from datetime import datetime
import stat
try:
from urllib.parse import quote
except ImportError:
# Python 2
from urllib import quote
# Subdirectories of a trash directory, and the suffix for metadata files [1].
FILES_DIR = 'files'
INFO_DIR = 'info'
INFO_SUFFIX = '.trashinfo'
# Default of ~/.local/share [3]
XDG_DATA_HOME = op.expanduser(os.environ.get('XDG_DATA_HOME', '~/.local/share'))
HOMETRASH = op.join(XDG_DATA_HOME, 'Trash')
uid = os.getuid()
# Names of the two kinds of per-volume trash directories [1][2].
TOPDIR_TRASH = '.Trash'
TOPDIR_FALLBACK = '.Trash-' + str(uid)
def is_parent(parent, path):
    """Return True if *path* is *parent* itself or lives underneath it.

    Both arguments are resolved with realpath() first so symlinked
    locations compare correctly.
    """
    path = op.realpath(path)  # In case it's a symlink
    parent = op.realpath(parent)
    if path == parent:
        return True
    # A plain startswith() would wrongly treat '/home/user2' as being inside
    # '/home/user'; require a path-separator boundary instead.
    return path.startswith(parent.rstrip(op.sep) + op.sep)
def format_date(date):
    """Serialize *date* in the YYYY-MM-DDTHH:MM:SS form used by .trashinfo files."""
    return '{0:%Y-%m-%dT%H:%M:%S}'.format(date)
def info_for(src, topdir):
    """Build the contents of the .trashinfo file for *src*.

    Per the spec the Path key must not contain '..': paths outside *topdir*
    (or with no topdir at all) are recorded as absolute, others relative to
    *topdir* [2].
    """
    if topdir is not None and is_parent(topdir, src):
        src = op.relpath(src, topdir)
    else:
        src = op.abspath(src)
    lines = [
        "[Trash Info]",
        "Path=" + quote(src),
        "DeletionDate=" + format_date(datetime.now()),
        "",
    ]
    return "\n".join(lines)
def check_create(dir_path):
    """Create *dir_path* (and any missing parents) with mode 0700 if absent.

    The trash spec requires 0700 permissions for these directories [3].
    (Parameter renamed from ``dir`` so it no longer shadows the builtin.)
    """
    if not op.exists(dir_path):
        os.makedirs(dir_path, 0o700)
def trash_move(src, dst, topdir=None):
    """Move *src* into trash directory *dst* per the freedesktop spec.

    Picks a destination name that collides with neither an existing trashed
    file nor an existing .trashinfo record, moves the file into files/, and
    writes the matching metadata file into info/.
    """
    filename = op.basename(src)
    filespath = op.join(dst, FILES_DIR)
    infopath = op.join(dst, INFO_DIR)
    base_name, ext = op.splitext(filename)
    # Probe "name", "name 1", "name 2", ... until both slots are free.
    counter = 0
    destname = filename
    while op.exists(op.join(filespath, destname)) or op.exists(op.join(infopath, destname + INFO_SUFFIX)):
        counter += 1
        destname = '%s %s%s' % (base_name, counter, ext)
    check_create(filespath)
    check_create(infopath)
    os.rename(src, op.join(filespath, destname))
    # Context manager ensures the info file is closed even if write() fails
    # (the previous code leaked the handle on error).
    with open(op.join(infopath, destname + INFO_SUFFIX), 'w') as f:
        f.write(info_for(src, topdir))
def find_mount_point(path):
    """Return the mount point of the filesystem containing *path*.

    The path is resolved with realpath() first to avoid symlink loops; since
    "/" is always a mount point, the walk upward terminates even if
    something is wrong.
    """
    current = op.realpath(path)
    while not op.ismount(current):
        current = op.dirname(current)
    return current
def find_ext_volume_global_trash(volume_root):
    """Return the per-user subdir of the volume's shared .Trash dir, or None.

    Implements method (1) of the spec's external-volume lookup [2]: the
    shared top directory must already exist, be a real (non-symlink)
    directory, and have the sticky bit set; otherwise the caller falls back
    to the per-user .Trash-$uid directory.
    """
    # from [2] Trash directories (1) check for a .Trash dir with the right
    # permissions set.
    trash_dir = op.join(volume_root, TOPDIR_TRASH)
    if not op.exists(trash_dir):
        return None
    mode = os.lstat(trash_dir).st_mode
    # vol/.Trash must be a directory, cannot be a symlink, and must have the
    # sticky bit set.
    if not op.isdir(trash_dir) or op.islink(trash_dir) or not (mode & stat.S_ISVTX):
        return None
    trash_dir = op.join(trash_dir, str(uid))
    try:
        check_create(trash_dir)
    except OSError:
        # Can't create our per-user subdirectory -> treat as unusable.
        return None
    return trash_dir
def find_ext_volume_fallback_trash(volume_root):
    """Create and return the per-user fallback trash dir (.Trash-$uid) [2].

    Any OSError raised while creating the directory is deliberately allowed
    to propagate out of send2trash.
    """
    fallback = op.join(volume_root, TOPDIR_FALLBACK)
    check_create(fallback)
    return fallback
def find_ext_volume_trash(volume_root):
    """Return a usable trash dir on the volume, preferring the shared one."""
    # find_ext_volume_global_trash returns either None or a non-empty path,
    # so `or` falls through to the fallback exactly when it returned None.
    return (find_ext_volume_global_trash(volume_root)
            or find_ext_volume_fallback_trash(volume_root))
# Pull this out so it's easy to stub (to avoid stubbing lstat itself)
def get_dev(path):
    """Return the device identifier of *path* (lstat: symlinks not followed)."""
    stat_result = os.lstat(path)
    return stat_result.st_dev
def send2trash(path):
    """Move *path* to the appropriate freedesktop trash directory.

    Uses the home trash when *path* is on the same device as it; otherwise
    locates (or creates) a trash directory on *path*'s own volume.
    Raises OSError if the path is missing, not writable, or no suitable
    trash directory can be determined.
    """
    if not isinstance(path, str):
        # path = str(path, sys.getfilesystemencoding()) # removed invalid arg passed to str function, shouldn't be used anyway
        path = str(path)
    if not op.exists(path):
        raise OSError("File not found: %s" % path)
    # ...should check whether the user has the necessary permissions to delete
    # it, before starting the trashing operation itself. [2]
    if not os.access(path, os.W_OK):
        raise OSError("Permission denied: %s" % path)
    # if the file to be trashed is on the same device as HOMETRASH we
    # want to move it there.
    path_dev = get_dev(path)
    # If XDG_DATA_HOME or HOMETRASH do not yet exist we need to stat the
    # home directory, and these paths will be created further on if needed.
    trash_dev = get_dev(op.expanduser('~'))
    if path_dev == trash_dev:
        topdir = XDG_DATA_HOME
        dest_trash = HOMETRASH
    else:
        topdir = find_mount_point(path)
        trash_dev = get_dev(topdir)
        # Sanity check: the mount point must live on the same device as the
        # file; otherwise we cannot trust the volume detection.
        if trash_dev != path_dev:
            raise OSError("Couldn't find mount point for %s" % path)
        dest_trash = find_ext_volume_trash(topdir)
    trash_move(path, dest_trash, topdir)
|
oskar456/youtube-dl | refs/heads/master | youtube_dl/extractor/tudou.py | 50 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
class TudouPlaylistIE(InfoExtractor):
    """Extractor for tudou.com /listplay/ playlist pages."""
    IE_NAME = 'tudou:playlist'
    _VALID_URL = r'https?://(?:www\.)?tudou\.com/listplay/(?P<id>[\w-]{11})\.html'
    _TESTS = [{
        'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo.html',
        'info_dict': {
            'id': 'zzdE77v6Mmo',
        },
        'playlist_mincount': 209,
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        data = self._download_json(
            'http://www.tudou.com/tvp/plist.action?lcode=%s' % playlist_id, playlist_id)
        # Turn every playlist item into a deferred url_result entry.
        entries = []
        for item in data['items']:
            video_url = 'http://www.tudou.com/programs/view/%s' % item['icode']
            entries.append(
                self.url_result(video_url, 'Tudou', item['icode'], item['kw']))
        return self.playlist_result(entries, playlist_id)
class TudouAlbumIE(InfoExtractor):
    """Extractor for tudou.com /albumcover/ and /albumplay/ album pages."""
    IE_NAME = 'tudou:album'
    _VALID_URL = r'https?://(?:www\.)?tudou\.com/album(?:cover|play)/(?P<id>[\w-]{11})'
    _TESTS = [{
        'url': 'http://www.tudou.com/albumplay/v5qckFJvNJg.html',
        'info_dict': {
            'id': 'v5qckFJvNJg',
        },
        'playlist_mincount': 45,
    }]

    def _real_extract(self, url):
        album_id = self._match_id(url)
        data = self._download_json(
            'http://www.tudou.com/tvp/alist.action?acode=%s' % album_id, album_id)
        # Turn every album item into a deferred url_result entry.
        entries = []
        for item in data['items']:
            video_url = 'http://www.tudou.com/programs/view/%s' % item['icode']
            entries.append(
                self.url_result(video_url, 'Tudou', item['icode'], item['kw']))
        return self.playlist_result(entries, album_id)
|
ryosuzuki/crowdsource-platform | refs/heads/develop2 | crowdsourcing/migrations/oauth2_provider/0001_initial.py | 23 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oauth2_provider.validators
import oauth2_provider.generators
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the vendored django-oauth-toolkit models:
    AccessToken, Application, Grant and RefreshToken."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='AccessToken',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('token', models.CharField(max_length=255, db_index=True)),
                ('expires', models.DateTimeField()),
                ('scope', models.TextField(blank=True)),
            ],
        ),
        migrations.CreateModel(
            name='Application',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('client_id', models.CharField(default=oauth2_provider.generators.generate_client_id, unique=True, max_length=100, db_index=True)),
                ('redirect_uris', models.TextField(help_text='Allowed URIs list, space separated', blank=True, validators=[oauth2_provider.validators.validate_uris])),
                ('client_type', models.CharField(max_length=32, choices=[('confidential', 'Confidential'), ('public', 'Public')])),
                ('authorization_grant_type', models.CharField(max_length=32, choices=[('authorization-code', 'Authorization code'), ('implicit', 'Implicit'), ('password', 'Resource owner password-based'), ('client-credentials', 'Client credentials')])),
                ('client_secret', models.CharField(default=oauth2_provider.generators.generate_client_secret, max_length=255, db_index=True, blank=True)),
                ('name', models.CharField(max_length=255, blank=True)),
                ('skip_authorization', models.BooleanField(default=False)),
                ('user', models.ForeignKey(related_name='oauth2_provider_application', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Grant',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('code', models.CharField(max_length=255, db_index=True)),
                ('expires', models.DateTimeField()),
                ('redirect_uri', models.CharField(max_length=255)),
                ('scope', models.TextField(blank=True)),
                ('application', models.ForeignKey(to=settings.OAUTH2_PROVIDER_APPLICATION_MODEL)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='RefreshToken',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('token', models.CharField(max_length=255, db_index=True)),
                ('access_token', models.OneToOneField(related_name='refresh_token', to='oauth2_provider.AccessToken')),
                ('application', models.ForeignKey(to=settings.OAUTH2_PROVIDER_APPLICATION_MODEL)),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # AccessToken's FKs are added after the other models exist because
        # they reference the (swappable) Application model and the user model.
        migrations.AddField(
            model_name='accesstoken',
            name='application',
            field=models.ForeignKey(to=settings.OAUTH2_PROVIDER_APPLICATION_MODEL),
        ),
        migrations.AddField(
            model_name='accesstoken',
            name='user',
            field=models.ForeignKey(blank=True, to=settings.AUTH_USER_MODEL, null=True),
        ),
    ]
|
chiviak/CouchPotatoServer | refs/heads/master | couchpotato/core/media/movie/providers/automation/moviemeter.py | 38 | from couchpotato.core.helpers.rss import RSS
from couchpotato.core.logger import CPLog
from couchpotato.core.media.movie.providers.automation.base import Automation
# Module-level logger for this automation provider.
log = CPLog(__name__)
# Class name CouchPotato's plugin loader instantiates from this module.
autoload = 'Moviemeter'
class Moviemeter(Automation, RSS):
    """Automation provider importing the moviemeter.nl cinema RSS top list."""

    # Poll the feed at most every 30 minutes.
    interval = 1800
    rss_url = 'http://www.moviemeter.nl/rss/cinema'

    def getIMDBids(self):
        """Return IMDB ids for feed entries passing the minimal-movie filter."""
        imdb_ids = []
        for entry in self.getRSSData(self.rss_url):
            info = self.search(self.getTextElement(entry, 'title'))
            if info and self.isMinimalMovie(info):
                imdb_ids.append(info['imdb'])
        return imdb_ids
# Settings-UI description consumed by CouchPotato's configuration system.
config = [{
    'name': 'moviemeter',
    'groups': [
        {
            'tab': 'automation',
            'list': 'automation_providers',
            'name': 'moviemeter_automation',
            'label': 'Moviemeter',
            'description': 'Imports movies from the current top 10 of moviemeter.nl.',
            'options': [
                {
                    'name': 'automation_enabled',
                    'default': False,
                    'type': 'enabler',
                },
            ],
        },
    ],
}]
|
asm-products/movie-database-service | refs/heads/master | ani/lib/python2.7/site-packages/django/db/models/options.py | 104 | from __future__ import unicode_literals
import re
from bisect import bisect
import warnings
from django.conf import settings
from django.db.models.fields.related import ManyToManyRel
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.proxy import OrderWrt
from django.db.models.loading import get_models, app_cache_ready
from django.utils import six
from django.utils.functional import cached_property
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_text, smart_text, python_2_unicode_compatible
from django.utils.translation import activate, deactivate_all, get_language, string_concat
# Calculate the verbose_name by converting from InitialCaps to "lowercase with spaces".
def get_verbose_name(class_name):
    """Convert an InitialCaps class name to a lowercase, space-separated name.

    Written as a ``def`` (instead of the original lambda assignment, which
    PEP 8 discourages) so the callable carries a real name in tracebacks;
    the regex and result are unchanged.
    """
    return re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', ' \\1', class_name).lower().strip()
# Option names that may be overridden via a model's inner "class Meta".
DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
                 'unique_together', 'permissions', 'get_latest_by',
                 'order_with_respect_to', 'app_label', 'db_tablespace',
                 'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
                 'index_together', 'select_on_save')
@python_2_unicode_compatible
class Options(object):
    def __init__(self, meta, app_label=None):
        """Initialize all option attributes to their defaults.

        *meta* is the model's inner ``class Meta`` (or None); its values are
        applied later by contribute_to_class().
        """
        self.local_fields, self.local_many_to_many = [], []
        self.virtual_fields = []
        self.model_name, self.verbose_name = None, None
        self.verbose_name_plural = None
        self.db_table = ''
        self.ordering = []
        self.unique_together = []
        self.index_together = []
        self.select_on_save = False
        self.permissions = []
        self.object_name, self.app_label = None, app_label
        self.get_latest_by = None
        self.order_with_respect_to = None
        self.db_tablespace = settings.DEFAULT_TABLESPACE
        self.meta = meta
        self.pk = None
        self.has_auto_field, self.auto_field = False, None
        self.abstract = False
        self.managed = True
        self.proxy = False
        # For any class that is a proxy (including automatically created
        # classes for deferred object loading), proxy_for_model tells us
        # which class this model is proxying. Note that proxy_for_model
        # can create a chain of proxy models. For non-proxy models, the
        # variable is always None.
        self.proxy_for_model = None
        # For any non-abstract class, the concrete class is the model
        # in the end of the proxy_for_model chain. In particular, for
        # concrete models, the concrete_model is always the class itself.
        self.concrete_model = None
        self.swappable = None
        self.parents = SortedDict()
        self.auto_created = False
        # To handle various inheritance situations, we need to track where
        # managers came from (concrete or abstract base classes).
        self.abstract_managers = []
        self.concrete_managers = []
        # List of all lookups defined in ForeignKey 'limit_choices_to' options
        # from *other* models. Needed for some admin checks. Internal use only.
        self.related_fkey_lookups = []
    def contribute_to_class(self, cls, name):
        """Attach this Options instance to the model class *cls* as _meta,
        derive the default names, and apply any ``class Meta`` overrides.

        Note: the loop variable below shadows the *name* parameter, which is
        otherwise unused here.
        """
        from django.db import connection
        from django.db.backends.util import truncate_name
        cls._meta = self
        self.model = cls
        self.installed = re.sub('\.models$', '', cls.__module__) in settings.INSTALLED_APPS
        # First, construct the default values for these options.
        self.object_name = cls.__name__
        self.model_name = self.object_name.lower()
        self.verbose_name = get_verbose_name(self.object_name)
        # Next, apply any overridden values from 'class Meta'.
        if self.meta:
            meta_attrs = self.meta.__dict__.copy()
            for name in self.meta.__dict__:
                # Ignore any private attributes that Django doesn't care about.
                # NOTE: We can't modify a dictionary's contents while looping
                # over it, so we loop over the *original* dictionary instead.
                if name.startswith('_'):
                    del meta_attrs[name]
            for attr_name in DEFAULT_NAMES:
                if attr_name in meta_attrs:
                    setattr(self, attr_name, meta_attrs.pop(attr_name))
                elif hasattr(self.meta, attr_name):
                    setattr(self, attr_name, getattr(self.meta, attr_name))
            # unique_together can be either a tuple of tuples, or a single
            # tuple of two strings. Normalize it to a tuple of tuples, so that
            # calling code can uniformly expect that.
            ut = meta_attrs.pop('unique_together', self.unique_together)
            if ut and not isinstance(ut[0], (tuple, list)):
                ut = (ut,)
            self.unique_together = ut
            # verbose_name_plural is a special case because it uses a 's'
            # by default.
            if self.verbose_name_plural is None:
                self.verbose_name_plural = string_concat(self.verbose_name, 's')
            # Any leftover attributes must be invalid.
            if meta_attrs != {}:
                raise TypeError("'class Meta' got invalid attribute(s): %s" % ','.join(meta_attrs.keys()))
        else:
            self.verbose_name_plural = string_concat(self.verbose_name, 's')
        del self.meta
        # If the db_table wasn't provided, use the app_label + model_name.
        if not self.db_table:
            self.db_table = "%s_%s" % (self.app_label, self.model_name)
            self.db_table = truncate_name(self.db_table, connection.ops.max_name_length())
    @property
    def module_name(self):
        """
        This property has been deprecated in favor of `model_name`. refs #19689
        """
        # Emits a PendingDeprecationWarning on every access, attributed to
        # the caller via stacklevel=2.
        warnings.warn(
            "Options.module_name has been deprecated in favor of model_name",
            PendingDeprecationWarning, stacklevel=2)
        return self.model_name
    def _prepare(self, model):
        """Finalize the options once the model class is fully assembled:
        resolve order_with_respect_to and guarantee a primary key field."""
        if self.order_with_respect_to:
            # Resolve the field name given in Meta to the actual field object
            # and add the hidden '_order' ordering column.
            self.order_with_respect_to = self.get_field(self.order_with_respect_to)
            self.ordering = ('_order',)
            model.add_to_class('_order', OrderWrt())
        else:
            self.order_with_respect_to = None
        if self.pk is None:
            if self.parents:
                # Promote the first parent link in lieu of adding yet another
                # field.
                field = next(six.itervalues(self.parents))
                # Look for a local field with the same name as the
                # first parent link. If a local field has already been
                # created, use it instead of promoting the parent
                already_created = [fld for fld in self.local_fields if fld.name == field.name]
                if already_created:
                    field = already_created[0]
                field.primary_key = True
                self.setup_pk(field)
            else:
                # No parents and no explicit pk: add the implicit 'id' field.
                auto = AutoField(verbose_name='ID', primary_key=True,
                        auto_created=True)
                model.add_to_class('id', auto)
    def add_field(self, field):
        """Register *field* on this model and invalidate the field caches."""
        # Insert the given field in the order in which it was created, using
        # the "creation_counter" attribute of the field.
        # Move many-to-many related fields from self.fields into
        # self.many_to_many.
        if field.rel and isinstance(field.rel, ManyToManyRel):
            self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
            if hasattr(self, '_m2m_cache'):
                del self._m2m_cache
        else:
            self.local_fields.insert(bisect(self.local_fields, field), field)
            self.setup_pk(field)
            if hasattr(self, '_field_cache'):
                del self._field_cache
                del self._field_name_cache
        # The fields, concrete_fields and local_concrete_fields are
        # implemented as cached properties for performance reasons.
        # The attrs will not exists if the cached property isn't
        # accessed yet, hence the try-excepts.
        try:
            del self.fields
        except AttributeError:
            pass
        try:
            del self.concrete_fields
        except AttributeError:
            pass
        try:
            del self.local_concrete_fields
        except AttributeError:
            pass
        if hasattr(self, '_name_map'):
            del self._name_map
    def add_virtual_field(self, field):
        """Register a virtual (non-database-backed) field on this model."""
        self.virtual_fields.append(field)
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def pk_index(self):
"""
Returns the index of the primary key field in the self.concrete_fields
list.
"""
return self.concrete_fields.index(self.pk)
def setup_proxy(self, target):
"""
Does the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
self.proxy_for_model = target
self.db_table = target._meta.db_table
def __repr__(self):
return '<Options for %s>' % self.object_name
def __str__(self):
return "%s.%s" % (smart_text(self.app_label), smart_text(self.model_name))
    def verbose_name_raw(self):
        """
        There are a few places where the untranslated verbose name is needed
        (so that we get the same value regardless of currently active
        locale).
        """
        # Temporarily disable translation, force the lazy string, then
        # restore the previously active language.
        lang = get_language()
        deactivate_all()
        raw = force_text(self.verbose_name)
        activate(lang)
        return raw
    verbose_name_raw = property(verbose_name_raw)
    def _swapped(self):
        """
        Has this model been swapped out for another? If so, return the model
        name of the replacement; otherwise, return None.
        For historical reasons, model name lookups using get_model() are
        case insensitive, so we make sure we are case insensitive here.
        """
        if self.swappable:
            model_label = '%s.%s' % (self.app_label, self.model_name)
            swapped_for = getattr(settings, self.swappable, None)
            if swapped_for:
                try:
                    swapped_label, swapped_object = swapped_for.split('.')
                except ValueError:
                    # setting not in the format app_label.model_name
                    # raising ImproperlyConfigured here causes problems with
                    # test cleanup code - instead it is raised in get_user_model
                    # or as part of validation.
                    return swapped_for
                # Only report a swap when the setting points at a *different*
                # model (case-insensitive comparison on the model part).
                if '%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label):
                    return swapped_for
        return None
    swapped = property(_swapped)
    @cached_property
    def fields(self):
        """
        The getter for self.fields. This returns the list of field objects
        available to this model (including through parent models).
        Callers are not permitted to modify this list, since it's a reference
        to this instance (not a copy).
        """
        # EAFP: populate the cache on first access only.
        try:
            self._field_name_cache
        except AttributeError:
            self._fill_fields_cache()
        return self._field_name_cache
    @cached_property
    def concrete_fields(self):
        """All fields (including inherited) that have a database column."""
        return [f for f in self.fields if f.column is not None]
    @cached_property
    def local_concrete_fields(self):
        """Locally-declared fields that have a database column."""
        return [f for f in self.local_fields if f.column is not None]
    def get_fields_with_model(self):
        """
        Returns a sequence of (field, model) pairs for all fields. The "model"
        element is None for fields on the current model. Mostly of use when
        constructing queries so that we know which model a field belongs to.
        """
        # EAFP: populate the cache on first access only.
        try:
            self._field_cache
        except AttributeError:
            self._fill_fields_cache()
        return self._field_cache
    def get_concrete_fields_with_model(self):
        """Like get_fields_with_model(), restricted to fields with a column."""
        return [(field, model) for field, model in self.get_fields_with_model() if
                field.column is not None]
    def _fill_fields_cache(self):
        """Build both field caches: parent fields first (tagged with the
        parent model), then local fields (tagged with None)."""
        cache = []
        for parent in self.parents:
            for field, model in parent._meta.get_fields_with_model():
                if model:
                    cache.append((field, model))
                else:
                    # The field is local to the parent, so attribute it there.
                    cache.append((field, parent))
        cache.extend([(f, None) for f in self.local_fields])
        self._field_cache = tuple(cache)
        self._field_name_cache = [x for x, _ in cache]
def _many_to_many(self):
try:
self._m2m_cache
except AttributeError:
self._fill_m2m_cache()
return list(self._m2m_cache)
many_to_many = property(_many_to_many)
def get_m2m_with_model(self):
"""
The many-to-many version of get_fields_with_model().
"""
try:
self._m2m_cache
except AttributeError:
self._fill_m2m_cache()
return list(six.iteritems(self._m2m_cache))
def _fill_m2m_cache(self):
cache = SortedDict()
for parent in self.parents:
for field, model in parent._meta.get_m2m_with_model():
if model:
cache[field] = model
else:
cache[field] = parent
for field in self.local_many_to_many:
cache[field] = None
self._m2m_cache = cache
def get_field(self, name, many_to_many=True):
"""
Returns the requested field by name. Raises FieldDoesNotExist on error.
"""
to_search = (self.fields + self.many_to_many) if many_to_many else self.fields
for f in to_search:
if f.name == name:
return f
raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name))
def get_field_by_name(self, name):
"""
Returns the (field_object, model, direct, m2m), where field_object is
the Field instance for the given name, model is the model containing
this field (None for local fields), direct is True if the field exists
on this model, and m2m is True for many-to-many relations. When
'direct' is False, 'field_object' is the corresponding RelatedObject
for this field (since the field doesn't have an instance associated
with it).
Uses a cache internally, so after the first access, this is very fast.
"""
try:
try:
return self._name_map[name]
except AttributeError:
cache = self.init_name_map()
return cache[name]
except KeyError:
raise FieldDoesNotExist('%s has no field named %r'
% (self.object_name, name))
def get_all_field_names(self):
"""
Returns a list of all field names that are possible for this model
(including reverse relation names). This is used for pretty printing
debugging output (a list of choices), so any internal-only field names
are not included.
"""
try:
cache = self._name_map
except AttributeError:
cache = self.init_name_map()
names = sorted(cache.keys())
# Internal-only names end with "+" (symmetrical m2m related names being
# the main example). Trim them.
return [val for val in names if not val.endswith('+')]
    def init_name_map(self):
        """
        Initialises the field name -> field object mapping.

        Each value is a (field, model, direct, m2m) tuple as documented on
        get_field_by_name(). Later assignments overwrite earlier ones, so
        the insertion order below is significant.
        """
        cache = {}
        # We intentionally handle related m2m objects first so that symmetrical
        # m2m accessor names can be overridden, if necessary.
        for f, model in self.get_all_related_m2m_objects_with_model():
            cache[f.field.related_query_name()] = (f, model, False, True)
        for f, model in self.get_all_related_objects_with_model():
            cache[f.field.related_query_name()] = (f, model, False, False)
        for f, model in self.get_m2m_with_model():
            cache[f.name] = (f, model, True, True)
        for f, model in self.get_fields_with_model():
            cache[f.name] = (f, model, True, False)
        # Only virtual fields exposing a 'related' attribute participate in
        # name lookups.
        for f in self.virtual_fields:
            if hasattr(f, 'related'):
                cache[f.name] = (f.related, None if f.model == self.model else f.model, True, False)
        # Cache the map only once the app cache is fully populated; an
        # earlier map could be incomplete and must be rebuilt next call.
        if app_cache_ready():
            self._name_map = cache
        return cache
def get_add_permission(self):
"""
This method has been deprecated in favor of
`django.contrib.auth.get_permission_codename`. refs #20642
"""
warnings.warn(
"`Options.get_add_permission` has been deprecated in favor "
"of `django.contrib.auth.get_permission_codename`.",
PendingDeprecationWarning, stacklevel=2)
return 'add_%s' % self.model_name
def get_change_permission(self):
"""
This method has been deprecated in favor of
`django.contrib.auth.get_permission_codename`. refs #20642
"""
warnings.warn(
"`Options.get_change_permission` has been deprecated in favor "
"of `django.contrib.auth.get_permission_codename`.",
PendingDeprecationWarning, stacklevel=2)
return 'change_%s' % self.model_name
def get_delete_permission(self):
"""
This method has been deprecated in favor of
`django.contrib.auth.get_permission_codename`. refs #20642
"""
warnings.warn(
"`Options.get_delete_permission` has been deprecated in favor "
"of `django.contrib.auth.get_permission_codename`.",
PendingDeprecationWarning, stacklevel=2)
return 'delete_%s' % self.model_name
def get_all_related_objects(self, local_only=False, include_hidden=False,
include_proxy_eq=False):
return [k for k, v in self.get_all_related_objects_with_model(
local_only=local_only, include_hidden=include_hidden,
include_proxy_eq=include_proxy_eq)]
    def get_all_related_objects_with_model(self, local_only=False,
                                           include_hidden=False,
                                           include_proxy_eq=False):
        """
        Returns a list of (related-object, model) pairs. Similar to
        get_fields_with_model().

        local_only keeps only relations whose model component is None (i.e.
        pointing at this model directly); include_hidden also returns
        relations whose reverse accessor is hidden; include_proxy_eq adds
        relations targeting proxy-equivalent models.
        """
        try:
            self._related_objects_cache
        except AttributeError:
            self._fill_related_objects_cache()
        # Each predicate takes a (related-object, model) pair and must hold
        # for the pair to be kept.
        predicates = []
        if local_only:
            predicates.append(lambda k, v: not v)
        if not include_hidden:
            predicates.append(lambda k, v: not k.field.rel.is_hidden())
        cache = (self._related_objects_proxy_cache if include_proxy_eq
                 else self._related_objects_cache)
        return [t for t in cache.items() if all(p(*t) for p in predicates)]
    def _fill_related_objects_cache(self):
        """
        Populate _related_objects_cache (reverse FK relations keyed by
        related-object, valued by owning model or parent) and
        _related_objects_proxy_cache (the same, additionally including
        relations to proxy-equivalent models).
        """
        cache = SortedDict()
        parent_list = self.get_parent_list()
        for parent in self.parents:
            for obj, model in parent._meta.get_all_related_objects_with_model(include_hidden=True):
                # Skip auto-created / parent-link relations whose model is
                # outside this model's ancestry.
                if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list:
                    continue
                if not model:
                    cache[obj] = parent
                else:
                    cache[obj] = model
        # Collect also objects which are in relation to some proxy child/parent of self.
        proxy_cache = cache.copy()
        for klass in get_models(include_auto_created=True, only_installed=False):
            if not klass._meta.swapped:
                for f in klass._meta.local_fields:
                    if f.rel and not isinstance(f.rel.to, six.string_types) and f.generate_reverse_relation:
                        if self == f.rel.to._meta:
                            cache[f.related] = None
                            proxy_cache[f.related] = None
                        elif self.concrete_model == f.rel.to._meta.concrete_model:
                            # Relation targets a proxy sibling: visible only
                            # through the proxy-aware cache.
                            proxy_cache[f.related] = None
        self._related_objects_cache = cache
        self._related_objects_proxy_cache = proxy_cache
def get_all_related_many_to_many_objects(self, local_only=False):
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
if local_only:
return [k for k, v in cache.items() if not v]
return list(cache)
def get_all_related_m2m_objects_with_model(self):
"""
Returns a list of (related-m2m-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
return list(six.iteritems(cache))
    def _fill_related_many_to_many_cache(self):
        """
        Build and return the reverse-m2m cache (related-object -> owning
        model, or None for direct relations). The cache attribute is only
        stored once the app cache is ready; callers must therefore use the
        return value.
        """
        cache = SortedDict()
        parent_list = self.get_parent_list()
        for parent in self.parents:
            for obj, model in parent._meta.get_all_related_m2m_objects_with_model():
                # Skip auto-created relations pointing outside our ancestry.
                if obj.field.creation_counter < 0 and obj.model not in parent_list:
                    continue
                if not model:
                    cache[obj] = parent
                else:
                    cache[obj] = model
        for klass in get_models(only_installed=False):
            if not klass._meta.swapped:
                for f in klass._meta.local_many_to_many:
                    if (f.rel
                            and not isinstance(f.rel.to, six.string_types)
                            and self == f.rel.to._meta):
                        cache[f.related] = None
        # Only memoize once the app cache is fully populated; otherwise the
        # map could be incomplete and must be rebuilt on the next call.
        if app_cache_ready():
            self._related_many_to_many_cache = cache
        return cache
    def get_base_chain(self, model):
        """
        Returns a list of parent classes leading to 'model' (ordered from
        closest to most distant ancestor). This has to handle the case
        where 'model' is a grandparent or even more distant relation.

        Returns None when 'model' is not an ancestor of this model.
        """
        if not self.parents:
            return None
        if model in self.parents:
            return [model]
        for parent in self.parents:
            # Depth-first search through each parent's own ancestry.
            res = parent._meta.get_base_chain(model)
            if res:
                res.insert(0, parent)
                return res
        return None
def get_parent_list(self):
"""
Returns a list of all the ancestor of this model as a list. Useful for
determining if something is an ancestor, regardless of lineage.
"""
result = set()
for parent in self.parents:
result.add(parent)
result.update(parent._meta.get_parent_list())
return result
    def get_ancestor_link(self, ancestor):
        """
        Returns the field on the current model which points to the given
        "ancestor". This is possibly an indirect link (a pointer to a parent
        model, which points, eventually, to the ancestor). Used when
        constructing table joins for model inheritance.

        Returns None if the model isn't an ancestor of this one.
        """
        if ancestor in self.parents:
            return self.parents[ancestor]
        for parent in self.parents:
            # Tries to get a link field from the immediate parent
            parent_link = parent._meta.get_ancestor_link(ancestor)
            if parent_link:
                # In case of a proxied model, the first link of the chain
                # to the ancestor is that parent's link.
                return self.parents[parent] or parent_link
|
kaixinjxq/crosswalk-test-suite | refs/heads/master | apptools/apptools-android-tests/apptools/CI/manifest_basic.py | 12 | #!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Yun, Liu<yunx.liu@intel.com>
import unittest
import os
from xml.etree import ElementTree
import json
import sys
sys.path.append("../")
import comm
class TestCrosswalkApptoolsFunctions(unittest.TestCase):
    """Functional tests for crosswalk-app manifest handling (drive the real packaging tool via the project's `comm` helper)."""
    def test_apkName_contains_appVersion(self):
        # Build the default app; manifest.json must record version "0.1" and
        # the built APK name (returned by comm.build) must carry it too.
        comm.setUp()
        comm.create(self)
        os.chdir('org.xwalk.test')
        with open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json") as json_file:
            data = json.load(json_file)
        buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
        appVersion = comm.build(self, buildcmd)
        comm.run(self)
        comm.clear("org.xwalk.test")
        self.assertEquals(data['xwalk_app_version'].strip(os.linesep), "0.1")
        self.assertEquals(data['xwalk_app_version'].strip(os.linesep), appVersion)
    def test_name_normal(self):
        # After a build, the android:label of both <application> and
        # <activity> in AndroidManifest.xml should equal the package name.
        comm.setUp()
        comm.create(self)
        os.chdir('org.xwalk.test')
        buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
        comm.build(self, buildcmd)
        root = ElementTree.parse(comm.ConstPath + "/../tools/org.xwalk.test/prj/android/AndroidManifest.xml").getroot()
        application_attributes = root.find('application').attrib
        # Attribute keys are namespaced, so match on the "label" suffix.
        for x in application_attributes.keys():
            if x.find("label") != -1:
                application_xml = application_attributes[x]
                break
        activity_attributes = root.find('application').find('activity').attrib
        for y in activity_attributes.keys():
            if y.find("label") != -1:
                activity_xml = activity_attributes[y]
                break
        comm.clear("org.xwalk.test")
        self.assertEquals(application_xml, "org.xwalk.test")
        self.assertEquals(activity_xml, "org.xwalk.test")
    def test_packageID_normal(self):
        # manifest.json should record the package id used at creation time.
        comm.setUp()
        comm.create(self)
        os.chdir('org.xwalk.test')
        with open(comm.ConstPath + "/../tools/org.xwalk.test/app/manifest.json") as json_file:
            data = json.load(json_file)
        comm.clear("org.xwalk.test")
        self.assertEquals(data['xwalk_package_id'].strip(os.linesep), "org.xwalk.test")
    def test_versionCode_normal(self):
        # The android:versionCode printed in the build log must equal the
        # one written into AndroidManifest.xml.
        comm.setUp()
        comm.create(self)
        os.chdir('org.xwalk.test')
        buildcmd = comm.HOST_PREFIX + comm.PackTools + "crosswalk-app build"
        buildstatus = os.popen(buildcmd).readlines()
        index = 0
        # Scan the log backwards for the last "Using android:versionCode" line.
        for x in range(len(buildstatus),0,-1):
            index = x -1
            if buildstatus[index].find("Using android:versionCode") != -1:
                break
        # NOTE(review): str.strip() takes a *set* of characters, not a prefix;
        # this happens to work for the expected log format but is fragile.
        versionCode = buildstatus[index].strip(" *\nUsing android:versionCode").split(' ')[-1][1:-1]
        root = ElementTree.parse(comm.ConstPath + "/../tools/org.xwalk.test/prj/android/AndroidManifest.xml").getroot()
        attributes = root.attrib
        for x in attributes.keys():
            if x.find("versionCode") != -1:
                versionCode_xml = attributes[x]
                break
        comm.run(self)
        comm.clear("org.xwalk.test")
        self.assertEquals(versionCode, versionCode_xml)
if __name__ == '__main__':
unittest.main()
|
doismellburning/edx-platform | refs/heads/master | lms/djangoapps/certificates/urls.py | 56 | """
URLs for the certificates app.
"""
from django.conf.urls import patterns, url
from django.conf import settings
from certificates import views
# URL routes for the certificates app.
urlpatterns = patterns(
    '',
    # Certificates HTML view
    url(
        r'^user/(?P<user_id>[^/]*)/course/{course_id}'.format(course_id=settings.COURSE_ID_PATTERN),
        views.render_html_view,
        name='html_view'
    ),
    # End-points used by student support
    # The views in the lms/djangoapps/support use these end-points
    # to retrieve certificate information and regenerate certificates.
    url(r'search', views.search_by_user, name="search"),
    url(r'regenerate', views.regenerate_certificate_for_user, name="regenerate_certificate_for_user"),
)
# Badge share tracking redirect, exposed only when Open Badges support is
# enabled in the feature flags.
if settings.FEATURES.get("ENABLE_OPENBADGES", False):
    urlpatterns += (
        url(
            r'^badge_share_tracker/{}/(?P<network>[^/]+)/(?P<student_username>[^/]+)/$'.format(
                settings.COURSE_ID_PATTERN
            ),
            views.track_share_redirect,
            name='badge_share_tracker'
        ),
    )
|
NicolasHerin/game_book_share | refs/heads/master | node_modules/node-gyp/gyp/pylib/gyp/common_test.py | 2542 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the common.py file."""
import gyp.common
import unittest
import sys
class TestTopologicallySorted(unittest.TestCase):
    """Tests for gyp.common.TopologicallySorted."""
    def test_Valid(self):
        """Test that sorting works on a valid graph with one possible order."""
        graph = {
            'a': ['b', 'c'],
            'b': [],
            'c': ['d'],
            'd': ['b'],
        }
        edges_of = lambda node: tuple(graph[node])
        self.assertEqual(
            gyp.common.TopologicallySorted(graph.keys(), edges_of),
            ['a', 'c', 'd', 'b'])
    def test_Cycle(self):
        """Test that an exception is thrown on a cyclic graph."""
        graph = {
            'a': ['b'],
            'b': ['c'],
            'c': ['d'],
            'd': ['a'],
        }
        edges_of = lambda node: tuple(graph[node])
        self.assertRaises(
            gyp.common.CycleError, gyp.common.TopologicallySorted,
            graph.keys(), edges_of)
class TestGetFlavor(unittest.TestCase):
    """Test that gyp.common.GetFlavor works as intended"""
    original_platform = ''
    def setUp(self):
        # Remember the real platform string so each test can fake it.
        self.original_platform = sys.platform
    def tearDown(self):
        sys.platform = self.original_platform
    def assertFlavor(self, expected, argument, param):
        sys.platform = argument
        self.assertEqual(expected, gyp.common.GetFlavor(param))
    def test_platform_default(self):
        # With no explicit flavor, GetFlavor maps sys.platform to a flavor.
        cases = [
            ('freebsd', 'freebsd9'),
            ('freebsd', 'freebsd10'),
            ('openbsd', 'openbsd5'),
            ('solaris', 'sunos5'),
            ('solaris', 'sunos'),
            ('linux', 'linux2'),
            ('linux', 'linux3'),
        ]
        for expected, platform in cases:
            self.assertFlavor(expected, platform, {})
    def test_param(self):
        # An explicit 'flavor' parameter overrides the platform mapping.
        self.assertFlavor('foobar', 'linux2', {'flavor': 'foobar'})
if __name__ == '__main__':
unittest.main()
|
GLolol/variety | refs/heads/master | tests/TestImageFetcher.py | 1 | #!/usr/bin/python2
# -*- Mode: Python; coding: utf-8; indent-tabs-mode: nil; tab-width: 4 -*-
### BEGIN LICENSE
# Copyright (c) 2012, Peter Levi <peterlevi@peterlevi.com>
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3, as published
# by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
### END LICENSE
import shutil
import sys
import os.path
import unittest
from variety import Util
sys.path.insert(0, os.path.realpath(os.path.join(os.path.dirname(__file__), "..")))
from variety.ImageFetcher import ImageFetcher
class TestImageFetcher(unittest.TestCase):
    """Tests for ImageFetcher. test_fetch performs real network downloads."""
    def test_fetch(self):
        # Download several real wallpaper URLs and verify each result is a
        # valid image file saved into a clean target folder.
        target_folder = '/tmp/variety/ImageFetcher'
        shutil.rmtree(target_folder, ignore_errors=True)
        os.makedirs(target_folder)
        for url in ["http://wallpapers.wallhaven.cc/wallpapers/full/wallhaven-207261.jpg",
                    "http://unsplash.com/photos/7EqQ1s3wIAI/download",
                    "http://az608707.vo.msecnd.net/files/GreaterFlamingos_EN-US13682107304_1366x768.jpg",
                    "http://a.desktopprassets.com/wallpapers/07865fb0cb575e82fe43d3e1b634f6e2309e2114/foto_alese45.jpg",
                    ]:
            f = ImageFetcher.fetch(url, target_folder, verbose=False)
            self.assertIsNotNone(f)
            self.assertTrue(os.path.isfile(f))
            self.assertTrue(Util.is_image(f, check_contents=False))
            self.assertTrue(Util.is_image(f, check_contents=True))
            # A generic server-side name must not be used as the local name.
            self.assertNotEqual('download', f)
    def test_extract_from_cd(self):
        # Filename extraction from Content-Disposition headers, with and
        # without quoting; None when no filename is present.
        self.assertEqual("img.jpg", ImageFetcher.extract_filename_from_content_disposition("attachment; filename=img.jpg"))
        self.assertEqual("img.jpg", ImageFetcher.extract_filename_from_content_disposition("attachment; filename='img.jpg'"))
        self.assertEqual("img.jpg", ImageFetcher.extract_filename_from_content_disposition('attachment; filename="img.jpg"'))
        self.assertEqual(None, ImageFetcher.extract_filename_from_content_disposition('attachment; a=b'))
    def test_url_ok(self):
        # url_ok(url, use_whitelist, hosts): a URL passes either by extension
        # (when not whitelisting) or by matching a whitelisted host.
        self.assertFalse(ImageFetcher.url_ok("some garbage", False, ["flickr.com", "wallbase.cc"]))
        self.assertFalse(ImageFetcher.url_ok("some garbage", True, ["flickr.com", "wallbase.cc"]))
        self.assertFalse(ImageFetcher.url_ok("http://www.host.com/x/y/z", False, ["flickr.com"]))
        self.assertFalse(ImageFetcher.url_ok("http://cnn.com/x/y", True, ["flickr.com", "wallbase.cc"]))
        self.assertFalse(ImageFetcher.url_ok("http://somehost.com/x/y", True, ["","flickr.com", "wallbase.cc"]))
        self.assertTrue(ImageFetcher.url_ok("http://www.host.com/x/y/z.jpg", False, ["flickr.com"]))
        self.assertTrue(ImageFetcher.url_ok("http://www.wallbase.cc/x/y/z", True, ["flickr.com", "wallbase.cc"]))
        self.assertTrue(ImageFetcher.url_ok("https://www.flickr.com/a", True, ["flickr.com", "wallbase.cc"]))
if __name__ == '__main__':
unittest.main()
|
alxgu/ansible | refs/heads/devel | lib/ansible/modules/system/aix_lvg.py | 47 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
author:
- Kairo Araujo (@kairoaraujo)
module: aix_lvg
short_description: Manage LVM volume groups on AIX
description:
- This module creates, removes or resize volume groups on AIX LVM.
version_added: '2.8'
options:
force:
description:
- Force volume group creation.
type: bool
default: no
pp_size:
description:
- The size of the physical partition in megabytes.
type: int
pvs:
description:
- List of comma-separated devices to use as physical devices in this volume group.
- Required when creating or extending (C(present) state) the volume group.
- If not informed reducing (C(absent) state) the volume group will be removed.
type: list
state:
description:
- Control if the volume group exists and volume group AIX state varyonvg C(varyon) or varyoffvg C(varyoff).
type: str
choices: [ absent, present, varyoff, varyon ]
default: present
vg:
description:
- The name of the volume group.
type: str
required: true
vg_type:
description:
- The type of the volume group.
type: str
choices: [ big, normal, scalable ]
default: normal
notes:
- AIX will permit remove VG only if all LV/Filesystems are not busy.
- Module does not modify PP size for already present volume group.
'''
EXAMPLES = r'''
- name: Create a volume group datavg
aix_lvg:
vg: datavg
pp_size: 128
vg_type: scalable
state: present
- name: Removing a volume group datavg
aix_lvg:
vg: datavg
state: absent
- name: Extending rootvg
aix_lvg:
vg: rootvg
pvs: hdisk1
state: present
- name: Reducing rootvg
aix_lvg:
vg: rootvg
pvs: hdisk1
state: absent
'''
RETURN = r''' # '''
from ansible.module_utils.basic import AnsibleModule
def _validate_pv(module, vg, pvs):
"""
Function to validate if the physical volume (PV) is not already in use by
another volume group or Oracle ASM.
:param module: Ansible module argument spec.
:param vg: Volume group name.
:param pvs: Physical volume list.
:return: [bool, message] or module.fail_json for errors.
"""
lspv_cmd = module.get_bin_path('lspv', True)
rc, current_lspv, stderr = module.run_command("%s" % lspv_cmd)
if rc != 0:
module.fail_json(msg="Failed executing 'lspv' command.", rc=rc, stdout=current_lspv, stderr=stderr)
for pv in pvs:
# Get pv list.
lspv_list = {}
for line in current_lspv.splitlines():
pv_data = line.split()
lspv_list[pv_data[0]] = pv_data[2]
# Check if pv exists and is free.
if pv not in lspv_list.keys():
module.fail_json(msg="Physical volume '%s' doesn't exist." % pv)
if lspv_list[pv] == 'None':
# Disk None, looks free.
# Check if PV is not already in use by Oracle ASM.
lquerypv_cmd = module.get_bin_path('lquerypv', True)
rc, current_lquerypv, stderr = module.run_command("%s -h /dev/%s 20 10" % (lquerypv_cmd, pv))
if rc != 0:
module.fail_json(msg="Failed executing lquerypv command.", rc=rc, stdout=current_lquerypv, stderr=stderr)
if 'ORCLDISK' in current_lquerypv:
module.fail_json("Physical volume '%s' is already used by Oracle ASM." % pv)
msg = "Physical volume '%s' is ok to be used." % pv
return True, msg
# Check if PV is already in use for the same vg.
elif vg != lspv_list[pv]:
module.fail_json(msg="Physical volume '%s' is in use by another volume group '%s'." % (pv, lspv_list[pv]))
msg = "Physical volume '%s' is already used by volume group '%s'." % (pv, lspv_list[pv])
return False, msg
def _validate_vg(module, vg):
"""
Check the current state of volume group.
:param module: Ansible module argument spec.
:param vg: Volume Group name.
:return: True (VG in varyon state) or False (VG in varyoff state) or
None (VG does not exist), message.
"""
lsvg_cmd = module.get_bin_path('lsvg', True)
rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd)
if rc != 0:
module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd)
if rc != 0:
module.fail_json(msg="Failed executing '%s' command." % lsvg_cmd)
if vg in current_all_vgs and vg not in current_active_vgs:
msg = "Volume group '%s' is in varyoff state." % vg
return False, msg
if vg in current_active_vgs:
msg = "Volume group '%s' is in varyon state." % vg
return True, msg
msg = "Volume group '%s' does not exist." % vg
return None, msg
def create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation):
    """
    Create the volume group *vg* (if absent) or extend it with *pvs*
    (if varied on). Returns a (changed, message) tuple.

    :param pp_size: pre-formatted mkvg '-s' option string (possibly '').
    :param vg_type: one of 'normal', 'big', 'scalable'.
    :param force: pass mkvg's '-f' flag when True.
    :param vg_validation: (state, message) tuple from _validate_vg().
    """
    # Command option parameters.
    force_opt = {
        True: '-f',
        False: ''
    }
    vg_opt = {
        'normal': '',
        'big': '-B',
        'scalable': '-S',
    }
    # Validate if PV are not already in use.
    pv_state, msg = _validate_pv(module, vg, pvs)
    if not pv_state:
        changed = False
        return changed, msg
    vg_state, msg = vg_validation
    if vg_state is False:
        # VG exists but is varied off: nothing can be done here.
        changed = False
        return changed, msg
    elif vg_state is True:
        # Volume group extension.
        changed = True
        msg = ""
        if not module.check_mode:
            extendvg_cmd = module.get_bin_path('extendvg', True)
            rc, output, err = module.run_command("%s %s %s" % (extendvg_cmd, vg, ' '.join(pvs)))
            if rc != 0:
                changed = False
                msg = "Extending volume group '%s' has failed." % vg
                return changed, msg
            msg = "Volume group '%s' extended." % vg
        return changed, msg
    elif vg_state is None:
        # Volume group creation.
        changed = True
        msg = ''
        if not module.check_mode:
            mkvg_cmd = module.get_bin_path('mkvg', True)
            rc, output, err = module.run_command("%s %s %s %s -y %s %s" % (mkvg_cmd, vg_opt[vg_type], pp_size, force_opt[force], vg, ' '.join(pvs)))
            if rc != 0:
                changed = False
                msg = "Creating volume group '%s' failed." % vg
                return changed, msg
            msg = "Volume group '%s' created." % vg
        return changed, msg
def reduce_vg(module, vg, pvs, vg_validation):
    """
    Remove physical volumes from *vg*, or remove the whole VG when *pvs*
    is None. Returns a (changed, message) tuple.

    :param vg_validation: (state, message) tuple from _validate_vg().
    """
    vg_state, msg = vg_validation
    if vg_state is False:
        # Varied-off VG cannot be reduced.
        changed = False
        return changed, msg
    elif vg_state is None:
        # VG does not exist: nothing to remove.
        changed = False
        return changed, msg
    # Define pvs_to_remove (list of physical volumes to be removed).
    if pvs is None:
        # Remove the whole VG if pvs are not informed.
        # Remark: AIX will permit removal only if the VG has no LVs.
        lsvg_cmd = module.get_bin_path('lsvg', True)
        rc, current_pvs, err = module.run_command("%s -p %s" % (lsvg_cmd, vg))
        if rc != 0:
            module.fail_json(msg="Failing to execute '%s' command." % lsvg_cmd)
        pvs_to_remove = []
        # Skip the two header lines of 'lsvg -p' output.
        for line in current_pvs.splitlines()[2:]:
            pvs_to_remove.append(line.split()[0])
        reduce_msg = "Volume group '%s' removed." % vg
    else:
        pvs_to_remove = pvs
        reduce_msg = ("Physical volume(s) '%s' removed from Volume group '%s'." % (' '.join(pvs_to_remove), vg))
    # Reduce volume group.
    if len(pvs_to_remove) <= 0:
        changed = False
        msg = "No physical volumes to remove."
        return changed, msg
    changed = True
    msg = ''
    if not module.check_mode:
        reducevg_cmd = module.get_bin_path('reducevg', True)
        rc, stdout, stderr = module.run_command("%s -df %s %s" % (reducevg_cmd, vg, ' '.join(pvs_to_remove)))
        if rc != 0:
            module.fail_json(msg="Unable to remove '%s'." % vg, rc=rc, stdout=stdout, stderr=stderr)
        msg = reduce_msg
    return changed, msg
def state_vg(module, vg, state, vg_validation):
    """
    Vary the volume group *vg* on or off according to *state*
    ('varyon' / 'varyoff'). Returns a (changed, message) tuple; fails the
    module when the VG does not exist.
    """
    vg_state, msg = vg_validation
    if vg_state is None:
        # Cannot vary a non-existent VG on or off.
        module.fail_json(msg=msg)

    if state == 'varyon':
        if vg_state is True:
            # Already varied on: nothing to do.
            return False, msg
        msg = ''
        if not module.check_mode:
            varyonvg_cmd = module.get_bin_path('varyonvg', True)
            rc, varyonvg_out, err = module.run_command("%s %s" % (varyonvg_cmd, vg))
            if rc != 0:
                module.fail_json(msg="Command 'varyonvg' failed.", rc=rc, err=err)
            msg = "Varyon volume group %s completed." % vg
        return True, msg
    elif state == 'varyoff':
        if vg_state is False:
            # Already varied off: nothing to do.
            return False, msg
        msg = ''
        if not module.check_mode:
            varyoffvg_cmd = module.get_bin_path('varyoffvg', True)
            rc, varyoffvg_out, stderr = module.run_command("%s %s" % (varyoffvg_cmd, vg))
            if rc != 0:
                module.fail_json(msg="Command 'varyoffvg' failed.", rc=rc, stdout=varyoffvg_out, stderr=stderr)
            msg = "Varyoff volume group %s completed." % vg
        return True, msg
def main():
    """Module entry point: parse parameters and dispatch to the VG operations."""
    module = AnsibleModule(
        argument_spec=dict(
            force=dict(type='bool', default=False),
            pp_size=dict(type='int'),
            pvs=dict(type='list'),
            state=dict(type='str', default='present', choices=['absent', 'present', 'varyoff', 'varyon']),
            vg=dict(type='str', required=True),
            vg_type=dict(type='str', default='normal', choices=['big', 'normal', 'scalable'])
        ),
        supports_check_mode=True,
    )
    force = module.params['force']
    pp_size = module.params['pp_size']
    pvs = module.params['pvs']
    state = module.params['state']
    vg = module.params['vg']
    vg_type = module.params['vg_type']
    # Pre-format the physical partition size as mkvg's '-s' option ('' when unset).
    if pp_size is None:
        pp_size = ''
    else:
        pp_size = "-s %s" % pp_size
    # Determine the VG's current state once and hand it to the operations.
    vg_validation = _validate_vg(module, vg)
    if state == 'present':
        if not pvs:
            changed = False
            msg = "pvs is required to state 'present'."
            module.fail_json(msg=msg)
        else:
            changed, msg = create_extend_vg(module, vg, pvs, pp_size, vg_type, force, vg_validation)
    elif state == 'absent':
        changed, msg = reduce_vg(module, vg, pvs, vg_validation)
    elif state == 'varyon' or state == 'varyoff':
        changed, msg = state_vg(module, vg, state, vg_validation)
    else:
        changed = False
        msg = "Unexpected state"
    module.exit_json(changed=changed, msg=msg, state=state)
if __name__ == '__main__':
main()
|
ammaradil/fibonacci | refs/heads/master | Lib/site-packages/psycopg2/tests/test_types_basic.py | 7 | #!/usr/bin/env python
#
# types_basic.py - tests for basic types conversions
#
# Copyright (C) 2004-2010 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import decimal
import sys
from functools import wraps
import testutils
from testutils import unittest, ConnectingTestCase, decorate_all_tests
import psycopg2
from psycopg2.extensions import b
class TypesBasicTests(ConnectingTestCase):
"""Test that all type conversions are working."""
def execute(self, *args):
curs = self.conn.cursor()
curs.execute(*args)
return curs.fetchone()[0]
    def testQuoting(self):
        # Backslashes and doubled single quotes must round-trip unchanged.
        s = "Quote'this\\! ''ok?''"
        self.failUnless(self.execute("SELECT %s AS foo", (s,)) == s,
                        "wrong quoting: " + s)
    def testUnicode(self):
        # Same round-trip as testQuoting, but with a unicode literal.
        s = u"Quote'this\\! ''ok?''"
        self.failUnless(self.execute("SELECT %s AS foo", (s,)) == s,
                        "wrong unicode quoting: " + s)
    def testNumber(self):
        # Both int and long (Python 2 `L` literal) must survive a round-trip.
        s = self.execute("SELECT %s AS foo", (1971,))
        self.failUnless(s == 1971, "wrong integer quoting: " + str(s))
        s = self.execute("SELECT %s AS foo", (1971L,))
        self.failUnless(s == 1971L, "wrong integer quoting: " + str(s))
    def testBoolean(self):
        # Booleans must come back as the actual True/False singletons.
        x = self.execute("SELECT %s as foo", (False,))
        self.assert_(x is False)
        x = self.execute("SELECT %s as foo", (True,))
        self.assert_(x is True)
    def testDecimal(self):
        # Decimals round-trip; NaN and both infinities map to Decimal('NaN')
        # (PostgreSQL numeric has NaN but no infinity).
        s = self.execute("SELECT %s AS foo", (decimal.Decimal("19.10"),))
        self.failUnless(s - decimal.Decimal("19.10") == 0,
                        "wrong decimal quoting: " + str(s))
        s = self.execute("SELECT %s AS foo", (decimal.Decimal("NaN"),))
        self.failUnless(str(s) == "NaN", "wrong decimal quoting: " + str(s))
        self.failUnless(type(s) == decimal.Decimal, "wrong decimal conversion: " + repr(s))
        s = self.execute("SELECT %s AS foo", (decimal.Decimal("infinity"),))
        self.failUnless(str(s) == "NaN", "wrong decimal quoting: " + str(s))
        self.failUnless(type(s) == decimal.Decimal, "wrong decimal conversion: " + repr(s))
        s = self.execute("SELECT %s AS foo", (decimal.Decimal("-infinity"),))
        self.failUnless(str(s) == "NaN", "wrong decimal quoting: " + str(s))
        self.failUnless(type(s) == decimal.Decimal, "wrong decimal conversion: " + repr(s))
    def testFloatNan(self):
        # Skip on platforms where float("nan") is unsupported.
        try:
            float("nan")
        except ValueError:
            return self.skipTest("nan not available on this platform")
        s = self.execute("SELECT %s AS foo", (float("nan"),))
        self.failUnless(str(s) == "nan", "wrong float quoting: " + str(s))
        self.failUnless(type(s) == float, "wrong float conversion: " + repr(s))
    def testFloatInf(self):
        # Skip when either the server or the platform lacks float infinity.
        try:
            self.execute("select 'inf'::float")
        except psycopg2.DataError:
            return self.skipTest("inf::float not available on the server")
        except ValueError:
            return self.skipTest("inf not available on this platform")
        s = self.execute("SELECT %s AS foo", (float("inf"),))
        self.failUnless(str(s) == "inf", "wrong float quoting: " + str(s))
        self.failUnless(type(s) == float, "wrong float conversion: " + repr(s))
        s = self.execute("SELECT %s AS foo", (float("-inf"),))
        self.failUnless(str(s) == "-inf", "wrong float quoting: " + str(s))
def testBinary(self):
if sys.version_info[0] < 3:
s = ''.join([chr(x) for x in range(256)])
b = psycopg2.Binary(s)
buf = self.execute("SELECT %s::bytea AS foo", (b,))
self.assertEqual(s, str(buf))
else:
s = bytes(range(256))
b = psycopg2.Binary(s)
buf = self.execute("SELECT %s::bytea AS foo", (b,))
self.assertEqual(s, buf.tobytes())
def testBinaryNone(self):
b = psycopg2.Binary(None)
buf = self.execute("SELECT %s::bytea AS foo", (b,))
self.assertEqual(buf, None)
def testBinaryEmptyString(self):
# test to make sure an empty Binary is converted to an empty string
if sys.version_info[0] < 3:
b = psycopg2.Binary('')
self.assertEqual(str(b), "''::bytea")
else:
b = psycopg2.Binary(bytes([]))
self.assertEqual(str(b), "''::bytea")
def testBinaryRoundTrip(self):
# test to make sure buffers returned by psycopg2 are
# understood by execute:
if sys.version_info[0] < 3:
s = ''.join([chr(x) for x in range(256)])
buf = self.execute("SELECT %s::bytea AS foo", (psycopg2.Binary(s),))
buf2 = self.execute("SELECT %s::bytea AS foo", (buf,))
self.assertEqual(s, str(buf2))
else:
s = bytes(range(256))
buf = self.execute("SELECT %s::bytea AS foo", (psycopg2.Binary(s),))
buf2 = self.execute("SELECT %s::bytea AS foo", (buf,))
self.assertEqual(s, buf2.tobytes())
def testArray(self):
s = self.execute("SELECT %s AS foo", ([[1,2],[3,4]],))
self.failUnlessEqual(s, [[1,2],[3,4]])
s = self.execute("SELECT %s AS foo", (['one', 'two', 'three'],))
self.failUnlessEqual(s, ['one', 'two', 'three'])
def testEmptyArrayRegression(self):
# ticket #42
import datetime
curs = self.conn.cursor()
curs.execute("create table array_test (id integer, col timestamp without time zone[])")
curs.execute("insert into array_test values (%s, %s)", (1, [datetime.date(2011,2,14)]))
curs.execute("select col from array_test where id = 1")
self.assertEqual(curs.fetchone()[0], [datetime.datetime(2011, 2, 14, 0, 0)])
curs.execute("insert into array_test values (%s, %s)", (2, []))
curs.execute("select col from array_test where id = 2")
self.assertEqual(curs.fetchone()[0], [])
def testEmptyArray(self):
s = self.execute("SELECT '{}' AS foo")
self.failUnlessEqual(s, [])
s = self.execute("SELECT '{}'::text[] AS foo")
self.failUnlessEqual(s, [])
s = self.execute("SELECT %s AS foo", ([],))
self.failUnlessEqual(s, [])
s = self.execute("SELECT 1 != ALL(%s)", ([],))
self.failUnlessEqual(s, True)
# but don't break the strings :)
s = self.execute("SELECT '{}'::text AS foo")
self.failUnlessEqual(s, "{}")
def testArrayEscape(self):
ss = ['', '\\', '"', '\\\\', '\\"']
for s in ss:
r = self.execute("SELECT %s AS foo", (s,))
self.failUnlessEqual(s, r)
r = self.execute("SELECT %s AS foo", ([s],))
self.failUnlessEqual([s], r)
r = self.execute("SELECT %s AS foo", (ss,))
self.failUnlessEqual(ss, r)
def testArrayMalformed(self):
curs = self.conn.cursor()
ss = ['', '{', '{}}', '{' * 20 + '}' * 20]
for s in ss:
self.assertRaises(psycopg2.DataError,
psycopg2.extensions.STRINGARRAY, b(s), curs)
@testutils.skip_before_postgres(8, 2)
def testArrayOfNulls(self):
curs = self.conn.cursor()
curs.execute("""
create table na (
texta text[],
inta int[],
boola boolean[],
textaa text[][],
intaa int[][],
boolaa boolean[][]
)""")
curs.execute("insert into na (texta) values (%s)", ([None],))
curs.execute("insert into na (texta) values (%s)", (['a', None],))
curs.execute("insert into na (texta) values (%s)", ([None, None],))
curs.execute("insert into na (inta) values (%s)", ([None],))
curs.execute("insert into na (inta) values (%s)", ([42, None],))
curs.execute("insert into na (inta) values (%s)", ([None, None],))
curs.execute("insert into na (boola) values (%s)", ([None],))
curs.execute("insert into na (boola) values (%s)", ([True, None],))
curs.execute("insert into na (boola) values (%s)", ([None, None],))
# TODO: array of array of nulls are not supported yet
# curs.execute("insert into na (textaa) values (%s)", ([[None]],))
curs.execute("insert into na (textaa) values (%s)", ([['a', None]],))
# curs.execute("insert into na (textaa) values (%s)", ([[None, None]],))
# curs.execute("insert into na (intaa) values (%s)", ([[None]],))
curs.execute("insert into na (intaa) values (%s)", ([[42, None]],))
# curs.execute("insert into na (intaa) values (%s)", ([[None, None]],))
# curs.execute("insert into na (boolaa) values (%s)", ([[None]],))
curs.execute("insert into na (boolaa) values (%s)", ([[True, None]],))
# curs.execute("insert into na (boolaa) values (%s)", ([[None, None]],))
@testutils.skip_from_python(3)
def testTypeRoundtripBuffer(self):
o1 = buffer("".join(map(chr, range(256))))
o2 = self.execute("select %s;", (o1,))
self.assertEqual(type(o1), type(o2))
# Test with an empty buffer
o1 = buffer("")
o2 = self.execute("select %s;", (o1,))
self.assertEqual(type(o1), type(o2))
self.assertEqual(str(o1), str(o2))
@testutils.skip_from_python(3)
def testTypeRoundtripBufferArray(self):
o1 = buffer("".join(map(chr, range(256))))
o1 = [o1]
o2 = self.execute("select %s;", (o1,))
self.assertEqual(type(o1[0]), type(o2[0]))
self.assertEqual(str(o1[0]), str(o2[0]))
@testutils.skip_before_python(3)
def testTypeRoundtripBytes(self):
o1 = bytes(range(256))
o2 = self.execute("select %s;", (o1,))
self.assertEqual(memoryview, type(o2))
# Test with an empty buffer
o1 = bytes([])
o2 = self.execute("select %s;", (o1,))
self.assertEqual(memoryview, type(o2))
@testutils.skip_before_python(3)
def testTypeRoundtripBytesArray(self):
o1 = bytes(range(256))
o1 = [o1]
o2 = self.execute("select %s;", (o1,))
self.assertEqual(memoryview, type(o2[0]))
@testutils.skip_before_python(2, 6)
def testAdaptBytearray(self):
o1 = bytearray(range(256))
o2 = self.execute("select %s;", (o1,))
if sys.version_info[0] < 3:
self.assertEqual(buffer, type(o2))
else:
self.assertEqual(memoryview, type(o2))
self.assertEqual(len(o1), len(o2))
for c1, c2 in zip(o1, o2):
self.assertEqual(c1, ord(c2))
# Test with an empty buffer
o1 = bytearray([])
o2 = self.execute("select %s;", (o1,))
self.assertEqual(len(o2), 0)
if sys.version_info[0] < 3:
self.assertEqual(buffer, type(o2))
else:
self.assertEqual(memoryview, type(o2))
@testutils.skip_before_python(2, 7)
def testAdaptMemoryview(self):
o1 = memoryview(bytearray(range(256)))
o2 = self.execute("select %s;", (o1,))
if sys.version_info[0] < 3:
self.assertEqual(buffer, type(o2))
else:
self.assertEqual(memoryview, type(o2))
# Test with an empty buffer
o1 = memoryview(bytearray([]))
o2 = self.execute("select %s;", (o1,))
if sys.version_info[0] < 3:
self.assertEqual(buffer, type(o2))
else:
self.assertEqual(memoryview, type(o2))
def testByteaHexCheckFalsePositive(self):
# the check \x -> x to detect bad bytea decode
# may be fooled if the first char is really an 'x'
o1 = psycopg2.Binary(b('x'))
o2 = self.execute("SELECT %s::bytea AS foo", (o1,))
self.assertEqual(b('x'), o2[0])
def testNegNumber(self):
d1 = self.execute("select -%s;", (decimal.Decimal('-1.0'),))
self.assertEqual(1, d1)
f1 = self.execute("select -%s;", (-1.0,))
self.assertEqual(1, f1)
i1 = self.execute("select -%s;", (-1,))
self.assertEqual(1, i1)
l1 = self.execute("select -%s;", (-1L,))
self.assertEqual(1, l1)
def testGenericArray(self):
a = self.execute("select '{1,2,3}'::int4[]")
self.assertEqual(a, [1,2,3])
a = self.execute("select array['a','b','''']::text[]")
self.assertEqual(a, ['a','b',"'"])
@testutils.skip_before_postgres(8, 2)
def testGenericArrayNull(self):
def caster(s, cur):
if s is None: return "nada"
return int(s) * 2
base = psycopg2.extensions.new_type((23,), "INT4", caster)
array = psycopg2.extensions.new_array_type((1007,), "INT4ARRAY", base)
psycopg2.extensions.register_type(array, self.conn)
a = self.execute("select '{1,2,3}'::int4[]")
self.assertEqual(a, [2,4,6])
a = self.execute("select '{1,2,NULL}'::int4[]")
self.assertEqual(a, [2,4,'nada'])
class AdaptSubclassTest(unittest.TestCase):
def test_adapt_subtype(self):
from psycopg2.extensions import adapt
class Sub(str): pass
s1 = "hel'lo"
s2 = Sub(s1)
self.assertEqual(adapt(s1).getquoted(), adapt(s2).getquoted())
def test_adapt_most_specific(self):
from psycopg2.extensions import adapt, register_adapter, AsIs
class A(object): pass
class B(A): pass
class C(B): pass
register_adapter(A, lambda a: AsIs("a"))
register_adapter(B, lambda b: AsIs("b"))
try:
self.assertEqual(b('b'), adapt(C()).getquoted())
finally:
del psycopg2.extensions.adapters[A, psycopg2.extensions.ISQLQuote]
del psycopg2.extensions.adapters[B, psycopg2.extensions.ISQLQuote]
@testutils.skip_from_python(3)
def test_no_mro_no_joy(self):
from psycopg2.extensions import adapt, register_adapter, AsIs
class A: pass
class B(A): pass
register_adapter(A, lambda a: AsIs("a"))
try:
self.assertRaises(psycopg2.ProgrammingError, adapt, B())
finally:
del psycopg2.extensions.adapters[A, psycopg2.extensions.ISQLQuote]
@testutils.skip_before_python(3)
def test_adapt_subtype_3(self):
from psycopg2.extensions import adapt, register_adapter, AsIs
class A: pass
class B(A): pass
register_adapter(A, lambda a: AsIs("a"))
try:
self.assertEqual(b("a"), adapt(B()).getquoted())
finally:
del psycopg2.extensions.adapters[A, psycopg2.extensions.ISQLQuote]
class ByteaParserTest(unittest.TestCase):
"""Unit test for our bytea format parser."""
def setUp(self):
try:
self._cast = self._import_cast()
except Exception, e:
self._cast = None
self._exc = e
def _import_cast(self):
"""Use ctypes to access the C function.
Raise any sort of error: we just support this where ctypes works as
expected.
"""
import ctypes
lib = ctypes.cdll.LoadLibrary(psycopg2._psycopg.__file__)
cast = lib.typecast_BINARY_cast
cast.argtypes = [ctypes.c_char_p, ctypes.c_size_t, ctypes.py_object]
cast.restype = ctypes.py_object
return cast
def cast(self, buffer):
"""Cast a buffer from the output format"""
l = buffer and len(buffer) or 0
rv = self._cast(buffer, l, None)
if rv is None:
return None
if sys.version_info[0] < 3:
return str(rv)
else:
return rv.tobytes()
def test_null(self):
rv = self.cast(None)
self.assertEqual(rv, None)
def test_blank(self):
rv = self.cast(b(''))
self.assertEqual(rv, b(''))
def test_blank_hex(self):
# Reported as problematic in ticket #48
rv = self.cast(b('\\x'))
self.assertEqual(rv, b(''))
def test_full_hex(self, upper=False):
buf = ''.join(("%02x" % i) for i in range(256))
if upper: buf = buf.upper()
buf = '\\x' + buf
rv = self.cast(b(buf))
if sys.version_info[0] < 3:
self.assertEqual(rv, ''.join(map(chr, range(256))))
else:
self.assertEqual(rv, bytes(range(256)))
def test_full_hex_upper(self):
return self.test_full_hex(upper=True)
def test_full_escaped_octal(self):
buf = ''.join(("\\%03o" % i) for i in range(256))
rv = self.cast(b(buf))
if sys.version_info[0] < 3:
self.assertEqual(rv, ''.join(map(chr, range(256))))
else:
self.assertEqual(rv, bytes(range(256)))
def test_escaped_mixed(self):
import string
buf = ''.join(("\\%03o" % i) for i in range(32))
buf += string.ascii_letters
buf += ''.join('\\' + c for c in string.ascii_letters)
buf += '\\\\'
rv = self.cast(b(buf))
if sys.version_info[0] < 3:
tgt = ''.join(map(chr, range(32))) \
+ string.ascii_letters * 2 + '\\'
else:
tgt = bytes(range(32)) + \
(string.ascii_letters * 2 + '\\').encode('ascii')
self.assertEqual(rv, tgt)
def skip_if_cant_cast(f):
@wraps(f)
def skip_if_cant_cast_(self, *args, **kwargs):
if self._cast is None:
return self.skipTest("can't test bytea parser: %s - %s"
% (self._exc.__class__.__name__, self._exc))
return f(self, *args, **kwargs)
return skip_if_cant_cast_
decorate_all_tests(ByteaParserTest, skip_if_cant_cast)
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
vijayendrabvs/ssl-neutron | refs/heads/master | neutron/tests/unit/vmware/nsxlib/test_l2gateway.py | 3 | # Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from neutron.plugins.vmware.api_client import exception
from neutron.plugins.vmware import nsxlib
from neutron.plugins.vmware.nsxlib import l2gateway as l2gwlib
from neutron.plugins.vmware.nsxlib import switch as switchlib
from neutron.tests.unit import test_api_v2
from neutron.tests.unit.vmware.nsxlib import base
_uuid = test_api_v2._uuid
class L2GatewayNegativeTestCase(base.NsxlibNegativeBaseTestCase):
def test_create_l2_gw_service_on_failure(self):
self.assertRaises(exception.NsxApiException,
l2gwlib.create_l2_gw_service,
self.fake_cluster,
'fake-tenant',
'fake-gateway',
[{'id': _uuid(),
'interface_name': 'xxx'}])
def test_delete_l2_gw_service_on_failure(self):
self.assertRaises(exception.NsxApiException,
l2gwlib.delete_l2_gw_service,
self.fake_cluster,
'fake-gateway')
def test_get_l2_gw_service_on_failure(self):
self.assertRaises(exception.NsxApiException,
l2gwlib.get_l2_gw_service,
self.fake_cluster,
'fake-gateway')
def test_update_l2_gw_service_on_failure(self):
self.assertRaises(exception.NsxApiException,
l2gwlib.update_l2_gw_service,
self.fake_cluster,
'fake-gateway',
'pluto')
class L2GatewayTestCase(base.NsxlibTestCase):
def _create_gw_service(self, node_uuid, display_name,
tenant_id='fake_tenant'):
return l2gwlib.create_l2_gw_service(self.fake_cluster,
tenant_id,
display_name,
[{'id': node_uuid,
'interface_name': 'xxx'}])
def test_create_l2_gw_service(self):
display_name = 'fake-gateway'
node_uuid = _uuid()
response = self._create_gw_service(node_uuid, display_name)
self.assertEqual(response.get('type'), 'L2GatewayServiceConfig')
self.assertEqual(response.get('display_name'), display_name)
gateways = response.get('gateways', [])
self.assertEqual(len(gateways), 1)
self.assertEqual(gateways[0]['type'], 'L2Gateway')
self.assertEqual(gateways[0]['device_id'], 'xxx')
self.assertEqual(gateways[0]['transport_node_uuid'], node_uuid)
def test_update_l2_gw_service(self):
display_name = 'fake-gateway'
new_display_name = 'still-fake-gateway'
node_uuid = _uuid()
res1 = self._create_gw_service(node_uuid, display_name)
gw_id = res1['uuid']
res2 = l2gwlib.update_l2_gw_service(
self.fake_cluster, gw_id, new_display_name)
self.assertEqual(res2['display_name'], new_display_name)
def test_get_l2_gw_service(self):
display_name = 'fake-gateway'
node_uuid = _uuid()
gw_id = self._create_gw_service(node_uuid, display_name)['uuid']
response = l2gwlib.get_l2_gw_service(self.fake_cluster, gw_id)
self.assertEqual(response.get('type'), 'L2GatewayServiceConfig')
self.assertEqual(response.get('display_name'), display_name)
self.assertEqual(response.get('uuid'), gw_id)
def test_list_l2_gw_service(self):
gw_ids = []
for name in ('fake-1', 'fake-2'):
gw_ids.append(self._create_gw_service(_uuid(), name)['uuid'])
results = l2gwlib.get_l2_gw_services(self.fake_cluster)
self.assertEqual(len(results), 2)
self.assertEqual(sorted(gw_ids), sorted([r['uuid'] for r in results]))
def test_list_l2_gw_service_by_tenant(self):
gw_ids = [self._create_gw_service(
_uuid(), name, tenant_id=name)['uuid']
for name in ('fake-1', 'fake-2')]
results = l2gwlib.get_l2_gw_services(self.fake_cluster,
tenant_id='fake-1')
self.assertEqual(len(results), 1)
self.assertEqual(results[0]['uuid'], gw_ids[0])
def test_delete_l2_gw_service(self):
display_name = 'fake-gateway'
node_uuid = _uuid()
gw_id = self._create_gw_service(node_uuid, display_name)['uuid']
l2gwlib.delete_l2_gw_service(self.fake_cluster, gw_id)
results = l2gwlib.get_l2_gw_services(self.fake_cluster)
self.assertEqual(len(results), 0)
def test_plug_l2_gw_port_attachment(self):
tenant_id = 'pippo'
node_uuid = _uuid()
transport_zones_config = [{'zone_uuid': _uuid(),
'transport_type': 'stt'}]
lswitch = switchlib.create_lswitch(
self.fake_cluster, _uuid(), tenant_id,
'fake-switch', transport_zones_config)
gw_id = self._create_gw_service(node_uuid, 'fake-gw')['uuid']
lport = switchlib.create_lport(
self.fake_cluster, lswitch['uuid'], tenant_id, _uuid(),
'fake-gw-port', gw_id, True)
l2gwlib.plug_l2_gw_service(
self.fake_cluster, lswitch['uuid'],
lport['uuid'], gw_id)
uri = nsxlib._build_uri_path(switchlib.LSWITCHPORT_RESOURCE,
lport['uuid'],
lswitch['uuid'],
is_attachment=True)
resp_obj = nsxlib.do_request("GET", uri,
cluster=self.fake_cluster)
self.assertIn('LogicalPortAttachment', resp_obj)
self.assertEqual(resp_obj['LogicalPortAttachment']['type'],
'L2GatewayAttachment')
|
eldho5505/OOP | refs/heads/master | dice.py | 2 | from random import randint
import time
# rolls n dice, reports results
def roll(n):
tally = [0, 0, 0, 0, 0, 0]
for i in range(n):
r = randint(0, 5)
tally[r] += 1
return tally
def main():
n = 0
while n <= 0:
n = int(input("How many dice would you like to roll? "))
if n <= 0:
print("")
print("Please enter a positive number!")
start = time.clock()
result = roll(n)
print("")
for i in range(6):
print("You rolled a", i + 1, "", result[i], "times (", '%.2f' % (100 * result[i] / n), "%)")
print("")
stop = time.clock()
print("Time taken:", stop - start, "seconds")
main()
|
Centre-Alt-Rendiment-Esportiu/att | refs/heads/master | old_project/Python/win_libs/numpy/fft/fftpack.py | 35 | """
Discrete Fourier Transforms
Routines in this module:
fft(a, n=None, axis=-1)
ifft(a, n=None, axis=-1)
rfft(a, n=None, axis=-1)
irfft(a, n=None, axis=-1)
hfft(a, n=None, axis=-1)
ihfft(a, n=None, axis=-1)
fftn(a, s=None, axes=None)
ifftn(a, s=None, axes=None)
rfftn(a, s=None, axes=None)
irfftn(a, s=None, axes=None)
fft2(a, s=None, axes=(-2,-1))
ifft2(a, s=None, axes=(-2, -1))
rfft2(a, s=None, axes=(-2,-1))
irfft2(a, s=None, axes=(-2, -1))
i = inverse transform
r = transform of purely real data
h = Hermite transform
n = n-dimensional transform
2 = 2-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
The underlying code for these functions is an f2c-translated and modified
version of the FFTPACK routines.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
from numpy.core import asarray, zeros, swapaxes, shape, conjugate, \
take
from . import fftpack_lite as fftpack
_fft_cache = {}
_real_fft_cache = {}
def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
work_function=fftpack.cfftf, fft_cache = _fft_cache ):
a = asarray(a)
if n is None:
n = a.shape[axis]
if n < 1:
raise ValueError("Invalid number of FFT data points (%d) specified." % n)
try:
# Thread-safety note: We rely on list.pop() here to atomically
# retrieve-and-remove a wsave from the cache. This ensures that no
# other thread can get the same wsave while we're using it.
wsave = fft_cache.setdefault(n, []).pop()
except (IndexError):
wsave = init_function(n)
if a.shape[axis] != n:
s = list(a.shape)
if s[axis] > n:
index = [slice(None)]*len(s)
index[axis] = slice(0, n)
a = a[index]
else:
index = [slice(None)]*len(s)
index[axis] = slice(0, s[axis])
s[axis] = n
z = zeros(s, a.dtype.char)
z[index] = a
a = z
if axis != -1:
a = swapaxes(a, axis, -1)
r = work_function(a, wsave)
if axis != -1:
r = swapaxes(r, axis, -1)
# As soon as we put wsave back into the cache, another thread could pick it
# up and start using it, so we must not do this until after we're
# completely done using it ourselves.
fft_cache[n].append(wsave)
return r
def fft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) with the efficient Fast Fourier Transform (FFT)
algorithm [CT].
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
if `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : for definition of the DFT and conventions used.
ifft : The inverse of `fft`.
fft2 : The two-dimensional FFT.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
fftfreq : Frequency bins for given FFT parameters.
Notes
-----
FFT (Fast Fourier Transform) refers to a way the discrete Fourier
Transform (DFT) can be calculated efficiently, by using symmetries in the
calculated terms. The symmetry is highest when `n` is a power of 2, and
the transform is therefore most efficient for these sizes.
The DFT is defined, with the conventions used in this implementation, in
the documentation for the `numpy.fft` module.
References
----------
.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
machine calculation of complex Fourier series," *Math. Comput.*
19: 297-301.
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
array([ -3.44505240e-16 +1.14383329e-17j,
8.00000000e+00 -5.71092652e-15j,
2.33482938e-16 +1.22460635e-16j,
1.64863782e-15 +1.77635684e-15j,
9.95839695e-17 +2.33482938e-16j,
0.00000000e+00 +1.66837030e-15j,
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
>>> sp = np.fft.fft(np.sin(t))
>>> freq = np.fft.fftfreq(t.shape[-1])
>>> plt.plot(freq, sp.real, freq, sp.imag)
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
the `numpy.fft` documentation.
"""
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
def ifft(a, n=None, axis=-1):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier transform computed by `fft`. In other words,
``ifft(fft(a)) == a`` to within numerical accuracy.
For a general description of the algorithm and definitions,
see `numpy.fft`.
The input should be ordered in the same way as is returned by `fft`,
i.e., ``a[0]`` should contain the zero frequency term,
``a[1:n/2+1]`` should contain the positive-frequency terms, and
``a[n/2+1:]`` should contain the negative-frequency terms, in order of
decreasingly negative frequency. See `numpy.fft` for details.
Parameters
----------
a : array_like
Input array, can be complex.
n : int, optional
Length of the transformed axis of the output.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
See notes about padding issues.
axis : int, optional
Axis over which to compute the inverse DFT. If not given, the last
axis is used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
Raises
------
IndexError
If `axes` is larger than the last axis of `a`.
See Also
--------
numpy.fft : An introduction, with definitions and general explanations.
fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse
ifft2 : The two-dimensional inverse FFT.
ifftn : The n-dimensional inverse FFT.
Notes
-----
If the input parameter `n` is larger than the size of the input, the input
is padded by appending zeros at the end. Even though this is the common
approach, it might lead to surprising results. If a different padding is
desired, it must be performed before calling `ifft`.
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
Create and plot a band-limited signal with random phases:
>>> import matplotlib.pyplot as plt
>>> t = np.arange(400)
>>> n = np.zeros((400,), dtype=complex)
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
[<matplotlib.lines.Line2D object at 0x...>, <matplotlib.lines.Line2D object at 0x...>]
>>> plt.legend(('real', 'imaginary'))
<matplotlib.legend.Legend object at 0x...>
>>> plt.show()
"""
a = asarray(a).astype(complex)
if n is None:
n = shape(a)[axis]
return _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) / n
def rfft(a, n=None, axis=-1):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
This function computes the one-dimensional *n*-point discrete Fourier
Transform (DFT) of a real-valued array by means of an efficient algorithm
called the Fast Fourier Transform (FFT).
Parameters
----------
a : array_like
Input array
n : int, optional
Number of points along transformation axis in the input to use.
If `n` is smaller than the length of the input, the input is cropped.
If it is larger, the input is padded with zeros. If `n` is not given,
the length of the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the FFT. If not given, the last axis is
used.
Returns
-------
out : complex ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
If `n` is even, the length of the transformed axis is ``(n/2)+1``.
If `n` is odd, the length is ``(n+1)/2``.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
irfft : The inverse of `rfft`.
fft : The one-dimensional FFT of general (complex) input.
fftn : The *n*-dimensional FFT.
rfftn : The *n*-dimensional FFT of real input.
Notes
-----
When the DFT is computed for purely real input, the output is
Hermitian-symmetric, i.e. the negative frequency terms are just the complex
conjugates of the corresponding positive-frequency terms, and the
negative-frequency terms are therefore redundant. This function does not
compute the negative frequency terms, and the length of the transformed
axis of the output is therefore ``n//2 + 1``.
When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains
the zero-frequency term 0*fs, which is real due to Hermitian symmetry.
If `n` is even, ``A[-1]`` contains the term representing both positive
and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
the largest positive frequency (fs/2*(n-1)/n), and is complex in the
general case.
If the input `a` contains an imaginary part, it is silently discarded.
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
>>> np.fft.rfft([0, 1, 0, 0])
array([ 1.+0.j, 0.-1.j, -1.+0.j])
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
a = asarray(a).astype(float)
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, _real_fft_cache)
def irfft(a, n=None, axis=-1):
"""
Compute the inverse of the n-point DFT for real input.
This function computes the inverse of the one-dimensional *n*-point
discrete Fourier Transform of real input computed by `rfft`.
In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical
accuracy. (See Notes below for why ``len(a)`` is necessary here.)
The input is expected to be in the form returned by `rfft`, i.e. the
real zero-frequency term followed by the complex positive frequency terms
in order of increasing frequency. Since the discrete Fourier Transform of
real input is Hermitian-symmetric, the negative frequency terms are taken
to be the complex conjugates of the corresponding positive frequency terms.
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See Also
--------
numpy.fft : For definition of the DFT and conventions used.
rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.
fft : The one-dimensional FFT.
irfft2 : The inverse of the two-dimensional FFT of real input.
irfftn : The inverse of the *n*-dimensional FFT of real input.
Notes
-----
Returns the real valued `n`-point inverse discrete Fourier transform
of `a`, where `a` contains the non-negative frequency terms of a
Hermitian-symmetric sequence. `n` is the length of the result, not the
input.
If you specify an `n` such that `a` must be zero-padded or truncated, the
extra/removed values will be added/removed at high frequencies. One can
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
>>> np.fft.irfft([1, -1j, -1])
array([ 0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
part everywhere. When calling `irfft`, the negative frequencies are not
specified, and the output array is purely real.
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
_real_fft_cache) / n
def hfft(a, n=None, axis=-1):
"""
Compute the FFT of a signal which has Hermitian symmetry (real spectrum).
Parameters
----------
a : array_like
The input array.
n : int, optional
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
it is padded with zeros. If `n` is not given, it is determined from
the length of the input along the axis specified by `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
Returns
-------
out : ndarray
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is `n`, or, if `n` is not given,
``2*(m-1)`` where ``m`` is the length of the transformed axis of the
input. To get an odd number of output points, `n` must be specified.
Raises
------
IndexError
If `axis` is larger than the last axis of `a`.
See also
--------
rfft : Compute the one-dimensional FFT for real input.
ihfft : The inverse of `hfft`.
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time domain
and is real in the frequency domain. So here it's `hfft` for which
you must supply the length of the result if it is to be odd:
``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
>>> np.fft.hfft(signal[:4]) # Input first half of signal
array([ 15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
array([ 15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
array([[ 0.-0.j, 0.+0.j],
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
array([[ 1., 1.],
[ 2., -2.]])
"""
a = asarray(a).astype(complex)
if n is None:
n = (shape(a)[axis] - 1) * 2
return irfft(conjugate(a), n, axis) * n
def ihfft(a, n=None, axis=-1):
    """
    Compute the inverse FFT of a signal which has Hermitian symmetry.

    Parameters
    ----------
    a : array_like
        Input array.
    n : int, optional
        Length of the inverse FFT, i.e. the number of points along the
        transformation axis of the input to use. The input is cropped or
        zero-padded to this length; if `n` is not given, the length of
        the input along `axis` is used.
    axis : int, optional
        Axis over which to compute the inverse FFT. If not given, the
        last axis is used.

    Returns
    -------
    out : complex ndarray
        The transformed input. The length of the transformed axis is
        ``(n/2)+1`` for even `n`, or ``(n+1)/2`` for odd `n`.

    See Also
    --------
    hfft, irfft

    Notes
    -----
    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
    opposite case: here the signal has Hermitian symmetry in the time
    domain and is real in the frequency domain. So here it's `hfft` for
    which you must supply the length of the result if it is to be odd:
    ``ihfft(hfft(a), len(a)) == a``, within numerical accuracy.

    Examples
    --------
    >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
    >>> np.fft.ihfft(spectrum)
    array([ 1.-0.j,  2.-0.j,  3.-0.j,  4.-0.j])
    """
    # The inverse Hermitian transform is the conjugate of the forward
    # real transform, normalized by the transform length.
    input_arr = asarray(a).astype(float)
    if n is None:
        n = shape(input_arr)[axis]
    forward = rfft(input_arr, n, axis)
    return conjugate(forward) / n
def _cook_nd_args(a, s=None, axes=None, invreal=0):
if s is None:
shapeless = 1
if axes is None:
s = list(a.shape)
else:
s = take(a.shape, axes)
else:
shapeless = 0
s = list(s)
if axes is None:
axes = list(range(-len(s), 0))
if len(s) != len(axes):
raise ValueError("Shape and axes have different lengths.")
if invreal and shapeless:
s[-1] = (a.shape[axes[-1]] - 1) * 2
return s, axes
def _raw_fftnd(a, s=None, axes=None, function=fft):
    # Apply the given 1-D transform along each requested axis,
    # processing the last requested axis first.
    a = asarray(a)
    s, axes = _cook_nd_args(a, s, axes)
    for idx in reversed(range(len(axes))):
        a = function(a, n=s[idx], axis=axes[idx])
    return a
def fftn(a, s=None, axes=None):
    """
    Compute the N-dimensional discrete Fourier Transform.
    This function computes the *N*-dimensional discrete Fourier Transform over
    any number of axes in an *M*-dimensional array by means of the Fast Fourier
    Transform (FFT).
    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
        This corresponds to `n` for `fft(x, n)`.
        Along any axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
        by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` means that the transform over that axis is
        performed multiple times.
    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`,
        as explained in the parameters section above.
    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.
    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with definitions
        and conventions used.
    ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.
    fft : The one-dimensional FFT, with definitions and conventions used.
    rfftn : The *n*-dimensional FFT of real input.
    fft2 : The two-dimensional FFT.
    fftshift : Shifts zero-frequency terms to centre of array
    Notes
    -----
    The output, analogously to `fft`, contains the term for zero frequency in
    the low-order corner of all axes, the positive frequency terms in the
    first half of all axes, the term for the Nyquist frequency in the middle
    of all axes and the negative frequency terms in the second half of all
    axes, in order of decreasingly negative frequency.
    See `numpy.fft` for details, definitions and conventions used.
    Examples
    --------
    >>> a = np.mgrid[:3, :3, :3][0]
    >>> np.fft.fftn(a, axes=(1, 2))
    array([[[  0.+0.j,   0.+0.j,   0.+0.j],
            [  0.+0.j,   0.+0.j,   0.+0.j],
            [  0.+0.j,   0.+0.j,   0.+0.j]],
           [[  9.+0.j,   0.+0.j,   0.+0.j],
            [  0.+0.j,   0.+0.j,   0.+0.j],
            [  0.+0.j,   0.+0.j,   0.+0.j]],
           [[ 18.+0.j,   0.+0.j,   0.+0.j],
            [  0.+0.j,   0.+0.j,   0.+0.j],
            [  0.+0.j,   0.+0.j,   0.+0.j]]])
    >>> np.fft.fftn(a, (2, 2), axes=(0, 1))
    array([[[ 2.+0.j,  2.+0.j,  2.+0.j],
            [ 0.+0.j,  0.+0.j,  0.+0.j]],
           [[-2.+0.j, -2.+0.j, -2.+0.j],
            [ 0.+0.j,  0.+0.j,  0.+0.j]]])
    >>> import matplotlib.pyplot as plt
    >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
    ...                      2 * np.pi * np.arange(200) / 34)
    >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)
    >>> FS = np.fft.fftn(S)
    >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))
    <matplotlib.image.AxesImage object at 0x...>
    >>> plt.show()
    """
    return _raw_fftnd(a, s, axes, fft)
def ifftn(a, s=None, axes=None):
    """
    Compute the N-dimensional inverse discrete Fourier Transform.
    This function computes the inverse of the N-dimensional discrete
    Fourier Transform over any number of axes in an M-dimensional array by
    means of the Fast Fourier Transform (FFT). In other words,
    ``ifftn(fftn(a)) == a`` to within numerical accuracy.
    For a description of the definitions and conventions used, see `numpy.fft`.
    The input, analogously to `ifft`, should be ordered in the same way as is
    returned by `fftn`, i.e. it should have the term for zero frequency
    in all axes in the low-order corner, the positive frequency terms in the
    first half of all axes, the term for the Nyquist frequency in the middle
    of all axes and the negative frequency terms in the second half of all
    axes, in order of decreasingly negative frequency.
    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        This corresponds to ``n`` for ``ifft(x, n)``.
        Along any axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
        by `axes` is used. See notes for issue on `ifft` zero padding.
    axes : sequence of ints, optional
        Axes over which to compute the IFFT. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` means that the inverse transform over that
        axis is performed multiple times.
    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` or `a`,
        as explained in the parameters section above.
    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.
    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with definitions
        and conventions used.
    fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.
    ifft : The one-dimensional inverse FFT.
    ifft2 : The two-dimensional inverse FFT.
    ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
        of array.
    Notes
    -----
    See `numpy.fft` for definitions and conventions used.
    Zero-padding, analogously with `ifft`, is performed by appending zeros to
    the input along the specified dimension. Although this is the common
    approach, it might lead to surprising results. If another form of zero
    padding is desired, it must be performed before `ifftn` is called.
    Examples
    --------
    >>> a = np.eye(4)
    >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
    array([[ 1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j],
           [ 0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j],
           [ 0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
           [ 0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j]])
    Create and plot an image with band-limited frequency content:
    >>> import matplotlib.pyplot as plt
    >>> n = np.zeros((200,200), dtype=complex)
    >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))
    >>> im = np.fft.ifftn(n).real
    >>> plt.imshow(im)
    <matplotlib.image.AxesImage object at 0x...>
    >>> plt.show()
    """
    return _raw_fftnd(a, s, axes, ifft)
def fft2(a, s=None, axes=(-2, -1)):
    """
    Compute the 2-dimensional discrete Fourier Transform.
    This function computes the *n*-dimensional discrete Fourier Transform
    over any axes in an *M*-dimensional array by means of the
    Fast Fourier Transform (FFT). By default, the transform is computed over
    the last two axes of the input array, i.e., a 2-dimensional FFT.
    Parameters
    ----------
    a : array_like
        Input array, can be complex
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (`s[0]` refers to axis 0, `s[1]` to axis 1, etc.).
        This corresponds to `n` for `fft(x, n)`.
        Along each axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
        by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last two
        axes are used. A repeated index in `axes` means the transform over
        that axis is performed multiple times. A one-element sequence means
        that a one-dimensional FFT is performed.
    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or the last two axes if `axes` is not given.
    Raises
    ------
    ValueError
        If `s` and `axes` have different length, or `axes` not given and
        ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.
    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with definitions
        and conventions used.
    ifft2 : The inverse two-dimensional FFT.
    fft : The one-dimensional FFT.
    fftn : The *n*-dimensional FFT.
    fftshift : Shifts zero-frequency terms to the center of the array.
        For two-dimensional input, swaps first and third quadrants, and second
        and fourth quadrants.
    Notes
    -----
    `fft2` is just `fftn` with a different default for `axes`.
    The output, analogously to `fft`, contains the term for zero frequency in
    the low-order corner of the transformed axes, the positive frequency terms
    in the first half of these axes, the term for the Nyquist frequency in the
    middle of the axes and the negative frequency terms in the second half of
    the axes, in order of decreasingly negative frequency.
    See `fftn` for details and a plotting example, and `numpy.fft` for
    definitions and conventions used.
    Examples
    --------
    >>> a = np.mgrid[:5, :5][0]
    >>> np.fft.fft2(a)
    array([[ 50.0 +0.j        ,   0.0 +0.j        ,   0.0 +0.j        ,
              0.0 +0.j        ,   0.0 +0.j        ],
           [-12.5+17.20477401j,   0.0 +0.j        ,   0.0 +0.j        ,
              0.0 +0.j        ,   0.0 +0.j        ],
           [-12.5 +4.0614962j ,   0.0 +0.j        ,   0.0 +0.j        ,
              0.0 +0.j        ,   0.0 +0.j        ],
           [-12.5 -4.0614962j ,   0.0 +0.j        ,   0.0 +0.j        ,
              0.0 +0.j        ,   0.0 +0.j        ],
           [-12.5-17.20477401j,   0.0 +0.j        ,   0.0 +0.j        ,
              0.0 +0.j        ,   0.0 +0.j        ]])
    """
    return _raw_fftnd(a, s, axes, fft)
def ifft2(a, s=None, axes=(-2, -1)):
    """
    Compute the 2-dimensional inverse discrete Fourier Transform.
    This function computes the inverse of the 2-dimensional discrete Fourier
    Transform over any number of axes in an M-dimensional array by means of
    the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``
    to within numerical accuracy. By default, the inverse transform is
    computed over the last two axes of the input array.
    The input, analogously to `ifft`, should be ordered in the same way as is
    returned by `fft2`, i.e. it should have the term for zero frequency
    in the low-order corner of the two axes, the positive frequency terms in
    the first half of these axes, the term for the Nyquist frequency in the
    middle of the axes and the negative frequency terms in the second half of
    both axes, in order of decreasingly negative frequency.
    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
        ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
        Along each axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
        by `axes` is used. See notes for issue on `ifft` zero padding.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last two
        axes are used. A repeated index in `axes` means the transform over
        that axis is performed multiple times. A one-element sequence means
        that a one-dimensional FFT is performed.
    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or the last two axes if `axes` is not given.
    Raises
    ------
    ValueError
        If `s` and `axes` have different length, or `axes` not given and
        ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.
    See Also
    --------
    numpy.fft : Overall view of discrete Fourier transforms, with definitions
        and conventions used.
    fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.
    ifftn : The inverse of the *n*-dimensional FFT.
    fft : The one-dimensional FFT.
    ifft : The one-dimensional inverse FFT.
    Notes
    -----
    `ifft2` is just `ifftn` with a different default for `axes`.
    See `ifftn` for details and a plotting example, and `numpy.fft` for
    definition and conventions used.
    Zero-padding, analogously with `ifft`, is performed by appending zeros to
    the input along the specified dimension. Although this is the common
    approach, it might lead to surprising results. If another form of zero
    padding is desired, it must be performed before `ifft2` is called.
    Examples
    --------
    >>> a = 4 * np.eye(4)
    >>> np.fft.ifft2(a)
    array([[ 1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j],
           [ 0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j],
           [ 0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
           [ 0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j]])
    """
    return _raw_fftnd(a, s, axes, ifft)
def rfftn(a, s=None, axes=None):
    """
    Compute the N-dimensional discrete Fourier Transform for real input.
    This function computes the N-dimensional discrete Fourier Transform over
    any number of axes in an M-dimensional real array by means of the Fast
    Fourier Transform (FFT). By default, all axes are transformed, with the
    real transform performed over the last axis, while the remaining
    transforms are complex.
    Parameters
    ----------
    a : array_like
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape (length along each transformed axis) to use from the input.
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
        for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
        Along any axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        If `s` is not given, the shape of the input along the axes specified
        by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `a`,
        as explained in the parameters section above.
        The length of the last axis transformed will be ``s[-1]//2+1``,
        while the remaining transformed axes will have lengths according to
        `s`, or unchanged from the input.
    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.
    See Also
    --------
    irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT
        of real input.
    fft : The one-dimensional FFT, with definitions and conventions used.
    rfft : The one-dimensional FFT of real input.
    fftn : The n-dimensional FFT.
    rfft2 : The two-dimensional FFT of real input.
    Notes
    -----
    The transform for real input is performed over the last transformation
    axis, as by `rfft`, then the transform over the remaining axes is
    performed as by `fftn`. The order of the output is as for `rfft` for the
    final transformation axis, and as for `fftn` for the remaining
    transformation axes.
    See `fft` for details, definitions and conventions used.
    Examples
    --------
    >>> a = np.ones((2, 2, 2))
    >>> np.fft.rfftn(a)
    array([[[ 8.+0.j,  0.+0.j],
            [ 0.+0.j,  0.+0.j]],
           [[ 0.+0.j,  0.+0.j],
            [ 0.+0.j,  0.+0.j]]])
    >>> np.fft.rfftn(a, axes=(2, 0))
    array([[[ 4.+0.j,  0.+0.j],
            [ 4.+0.j,  0.+0.j]],
           [[ 0.+0.j,  0.+0.j],
            [ 0.+0.j,  0.+0.j]]])
    """
    # Real-input transform over the last requested axis; ordinary complex
    # transforms over the remaining ones.
    a = asarray(a).astype(float)
    s, axes = _cook_nd_args(a, s, axes)
    a = rfft(a, s[-1], axes[-1])
    for ii in range(len(axes)-1):
        a = fft(a, s[ii], axes[ii])
    return a
def rfft2(a, s=None, axes=(-2, -1)):
    """
    Compute the 2-dimensional FFT of a real array.

    Parameters
    ----------
    a : array
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape of the FFT.
    axes : sequence of ints, optional
        Axes over which to compute the FFT.

    Returns
    -------
    out : ndarray
        The result of the real 2-D FFT.

    See Also
    --------
    rfftn : Compute the N-dimensional discrete Fourier Transform for real
            input.

    Notes
    -----
    This is really just `rfftn` with different default behavior.
    For more details see `rfftn`.
    """
    # Delegate to the n-dimensional real transform with 2-D defaults.
    return rfftn(a, s=s, axes=axes)
def irfftn(a, s=None, axes=None):
    """
    Compute the inverse of the N-dimensional FFT of real input.
    This function computes the inverse of the N-dimensional discrete
    Fourier Transform for real input over any number of axes in an
    M-dimensional array by means of the Fast Fourier Transform (FFT). In
    other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
    accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
    and for the same reason.)
    The input should be ordered in the same way as is returned by `rfftn`,
    i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
    along all the other axes.
    Parameters
    ----------
    a : array_like
        Input array.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). `s` is also the
        number of input points used along this axis, except for the last axis,
        where ``s[-1]//2+1`` points of the input are used.
        Along any axis, if the shape indicated by `s` is smaller than that of
        the input, the input is cropped. If it is larger, the input is padded
        with zeros. If `s` is not given, the shape of the input along the
        axes specified by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT. If not given, the last
        `len(s)` axes are used, or all axes if `s` is also not specified.
        Repeated indices in `axes` means that the inverse transform over that
        axis is performed multiple times.
    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` or `a`,
        as explained in the parameters section above.
        The length of each transformed axis is as given by the corresponding
        element of `s`, or the length of the input in every axis except for the
        last one if `s` is not given. In the final transformed axis the length
        of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the
        length of the final transformed axis of the input. To get an odd
        number of output points in the final axis, `s` must be specified.
    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `a`.
    See Also
    --------
    rfftn : The forward n-dimensional FFT of real input,
        of which `irfftn` is the inverse.
    fft : The one-dimensional FFT, with definitions and conventions used.
    irfft : The inverse of the one-dimensional FFT of real input.
    irfft2 : The inverse of the two-dimensional FFT of real input.
    Notes
    -----
    See `fft` for definitions and conventions used.
    See `rfft` for definitions and conventions used for real input.
    Examples
    --------
    >>> a = np.zeros((3, 2, 2))
    >>> a[0, 0, 0] = 3 * 2 * 2
    >>> np.fft.irfftn(a)
    array([[[ 1.,  1.],
            [ 1.,  1.]],
           [[ 1.,  1.],
            [ 1.,  1.]],
           [[ 1.,  1.],
            [ 1.,  1.]]])
    """
    # Inverse complex transforms over the leading axes first, then the
    # real inverse transform over the final axis.
    a = asarray(a).astype(complex)
    s, axes = _cook_nd_args(a, s, axes, invreal=1)
    for ii in range(len(axes)-1):
        a = ifft(a, s[ii], axes[ii])
    a = irfft(a, s[-1], axes[-1])
    return a
def irfft2(a, s=None, axes=(-2, -1)):
    """
    Compute the 2-dimensional inverse FFT of a real array.

    Parameters
    ----------
    a : array_like
        The input array
    s : sequence of ints, optional
        Shape of the inverse FFT.
    axes : sequence of ints, optional
        The axes over which to compute the inverse fft.
        Default is the last two axes.

    Returns
    -------
    out : ndarray
        The result of the inverse real 2-D FFT.

    See Also
    --------
    irfftn : Compute the inverse of the N-dimensional FFT of real input.

    Notes
    -----
    This is really `irfftn` with different defaults.
    For more details see `irfftn`.
    """
    # Delegate to the n-dimensional inverse real transform with 2-D defaults.
    return irfftn(a, s=s, axes=axes)
|
sloot14/flexifod | refs/heads/master | simplejson/tests/test_float.py | 136 | import math
from unittest import TestCase
import simplejson as json
class TestFloat(TestCase):
    # NOTE: this suite targets Python 2 (``1L`` long literals, ``unicode``,
    # ``assertEquals``); it will not parse under Python 3.
    def test_floats(self):
        """Floats must round-trip exactly through dumps/loads."""
        for num in [1617161771.7650001, math.pi, math.pi**100,
                    math.pi**-100, 3.1]:
            self.assertEquals(float(json.dumps(num)), num)
            self.assertEquals(json.loads(json.dumps(num)), num)
            # Decoding from a unicode string must behave like str input.
            self.assertEquals(json.loads(unicode(json.dumps(num))), num)
    def test_ints(self):
        """Ints, including longs beyond 2**32, must serialize verbatim."""
        for num in [1, 1L, 1<<32, 1<<64]:
            self.assertEquals(json.dumps(num), str(num))
            self.assertEquals(int(json.dumps(num)), num)
            self.assertEquals(json.loads(json.dumps(num)), num)
            self.assertEquals(json.loads(unicode(json.dumps(num))), num)
|
gauthierm/bedrock | refs/heads/master | tests/functional/contribute/test_stories.py | 2 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from pages.contribute.stories import ContributeStoriesPage
@pytest.mark.sanity
@pytest.mark.nondestructive
def test_show_hide_story(base_url, selenium):
    """A story can be revealed and then collapsed again."""
    stories_page = ContributeStoriesPage(base_url, selenium).open()
    stories_page.show_story()
    assert stories_page.is_story_displayed
    stories_page.hide_story()
    assert not stories_page.is_story_displayed
@pytest.mark.nondestructive
def test_next_event_is_displayed(base_url, selenium):
    """The upcoming-event block is visible on the stories page."""
    stories_page = ContributeStoriesPage(base_url, selenium).open()
    assert stories_page.next_event_is_displayed
|
ArthySundaram/chromeos-3.8 | refs/heads/chromeos-3.8 | scripts/tracing/draw_functrace.py | 14679 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """Tree representation of the kernel function call stack.

    A function with no parent in the kernel (interrupt, syscall,
    kernel thread...) is attached to a virtual parent called ROOT.
    """
    ROOT = None

    def __init__(self, func, time=None, parent=None):
        self._func = func
        self._time = time
        self._parent = CallTree.ROOT if parent is None else parent
        self._children = []

    def calls(self, func, calltime):
        """Record that this function calls *func* at *calltime*.

        @return: A reference to the newly created child node.
        """
        node = CallTree(func, calltime, self)
        self._children.append(node)
        return node

    def getParent(self, func):
        """Return the closest ancestor whose name is *func*.

        If no such ancestor exists, a fresh child of ROOT is created
        and returned instead.
        @return: A reference to the parent.
        """
        node = self
        while node != CallTree.ROOT and node._func != func:
            node = node._parent
        if node == CallTree.ROOT:
            return CallTree.ROOT.calls(func, None)
        return node

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        # Draw this node, then recurse into the children with an
        # extended branch prefix; the last child closes the branch.
        if self._time is None:
            text = "%s----%s\n" % (branch, self._func)
        else:
            text = "%s----%s (%s)\n" % (branch, self._func, self._time)
        if lastChild:
            branch = branch[:-1] + " "
        final = len(self._children) - 1
        for pos, child in enumerate(self._children):
            text += child.__toString(branch + " |", pos == final)
        return text
class BrokenLineException(Exception):
    """Signals a trace line left incomplete by the pipe breakage;
    processing stops and the line is ignored."""
class CommentLineException(Exception):
    """Signals a comment line (as found at the beginning of the trace
    file); the caller simply skips it."""
def parseLine(line):
    """Split one raw ftrace line into (calltime, callee, caller).

    Raises CommentLineException for comment lines and
    BrokenLineException when the line does not match the expected
    "...] time: callee <-caller" layout.
    """
    stripped = line.strip()
    if stripped.startswith("#"):
        raise CommentLineException
    match = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", stripped)
    if match is None:
        raise BrokenLineException
    return (match.group(1), match.group(2), match.group(3))
def main():
    # Build the call tree from the raw ftrace text arriving on stdin,
    # then print its textual drawing. (Python 2 script: note the
    # `print` statement below.)
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT
    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # Truncated line (broken pipe): stop reading.
            break
        except CommentLineException:
            # Header/comment lines are simply skipped.
            continue
        # Climb back to the caller's node, then record the callee under it.
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)
    print CallTree.ROOT
if __name__ == "__main__":
    main()
|
tarunkapadia93/gk_a6k | refs/heads/gk-r2 | scripts/tracing/draw_functrace.py | 14679 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """Tree representation of the kernel function call stack.

    A function with no parent in the kernel (interrupt, syscall,
    kernel thread...) is attached to a virtual parent called ROOT.
    """
    ROOT = None

    def __init__(self, func, time=None, parent=None):
        self._func = func
        self._time = time
        self._parent = CallTree.ROOT if parent is None else parent
        self._children = []

    def calls(self, func, calltime):
        """Record that this function calls *func* at *calltime*.

        @return: A reference to the newly created child node.
        """
        node = CallTree(func, calltime, self)
        self._children.append(node)
        return node

    def getParent(self, func):
        """Return the closest ancestor whose name is *func*.

        If no such ancestor exists, a fresh child of ROOT is created
        and returned instead.
        @return: A reference to the parent.
        """
        node = self
        while node != CallTree.ROOT and node._func != func:
            node = node._parent
        if node == CallTree.ROOT:
            return CallTree.ROOT.calls(func, None)
        return node

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        # Draw this node, then recurse into the children with an
        # extended branch prefix; the last child closes the branch.
        if self._time is None:
            text = "%s----%s\n" % (branch, self._func)
        else:
            text = "%s----%s (%s)\n" % (branch, self._func, self._time)
        if lastChild:
            branch = branch[:-1] + " "
        final = len(self._children) - 1
        for pos, child in enumerate(self._children):
            text += child.__toString(branch + " |", pos == final)
        return text
class BrokenLineException(Exception):
    """Signals a trace line left incomplete by the pipe breakage;
    processing stops and the line is ignored."""
class CommentLineException(Exception):
    """Signals a comment line (as found at the beginning of the trace
    file); the caller simply skips it."""
def parseLine(line):
    """Split one raw ftrace line into (calltime, callee, caller).

    Raises CommentLineException for comment lines and
    BrokenLineException when the line does not match the expected
    "...] time: callee <-caller" layout.
    """
    stripped = line.strip()
    if stripped.startswith("#"):
        raise CommentLineException
    match = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", stripped)
    if match is None:
        raise BrokenLineException
    return (match.group(1), match.group(2), match.group(3))
def main():
    # Build the call tree from the raw ftrace text arriving on stdin,
    # then print its textual drawing. (Python 2 script: note the
    # `print` statement below.)
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT
    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # Truncated line (broken pipe): stop reading.
            break
        except CommentLineException:
            # Header/comment lines are simply skipped.
            continue
        # Climb back to the caller's node, then record the callee under it.
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)
    print CallTree.ROOT
if __name__ == "__main__":
    main()
|
prman-pixar/RenderManForBlender | refs/heads/master | rman_bl_nodes/rman_bl_nodes_props.py | 2 | import bpy
from bpy.props import PointerProperty, StringProperty, BoolProperty, \
EnumProperty, IntProperty, FloatProperty, FloatVectorProperty, \
CollectionProperty, BoolVectorProperty
from .. import rman_bl_nodes
from .. import rfb_icons
from ..rfb_utils.shadergraph_utils import is_renderman_nodetree
from ..rfb_utils import shadergraph_utils
from ..rman_constants import RMAN_AREA_LIGHT_TYPES
class RendermanPluginSettings(bpy.types.PropertyGroup):
    """Empty placeholder property group for RenderMan plugin settings."""
class RendermanLightFilter(bpy.types.PropertyGroup):
    """Pointer wrapper for a light-filter object linked to a light."""

    def get_name(self):
        # Mirror the linked filter object's name; empty when unlinked.
        return self.linked_filter_ob.name if self.linked_filter_ob else ''

    name: StringProperty(default='', get=get_name)

    def update_linked_filter_ob(self, context):
        pass

    def validate_obj(self, ob):
        # Only lights whose RenderMan role is light filter may be linked.
        return (ob.type == 'LIGHT' and
                ob.data.renderman.renderman_light_role == 'RMAN_LIGHTFILTER')

    linked_filter_ob: PointerProperty(name='Light Filter',
                                      description='Light Filter',
                                      type=bpy.types.Object,
                                      update=update_linked_filter_ob,
                                      poll=validate_obj
                                      )
class RendermanPortalLightPointer(bpy.types.PropertyGroup):
    """Pointer wrapper for a PxrPortalLight object linked to a light."""

    def get_name(self):
        # Mirror the linked portal object's name; empty when unlinked.
        return self.linked_portal_ob.name if self.linked_portal_ob else ''

    name: StringProperty(default='', get=get_name)

    def update_linked_portal_ob(self, context):
        # Poke the linked light's data so dependent state refreshes.
        if self.linked_portal_ob:
            self.linked_portal_ob.update_tag(refresh={'DATA'})

    def validate_obj(self, ob):
        # Only RMAN_LIGHT objects whose shader is PxrPortalLight qualify.
        if ob.type != 'LIGHT':
            return False
        rm = ob.data.renderman
        return (rm.renderman_light_role == 'RMAN_LIGHT' and
                rm.get_light_node_name() == 'PxrPortalLight')

    linked_portal_ob: PointerProperty(name='Portal Light',
                                      description='Portal Light',
                                      type=bpy.types.Object,
                                      update=update_linked_portal_ob,
                                      poll=validate_obj
                                      )
class RendermanLightSettings(bpy.types.PropertyGroup):
    """Per-light RenderMan settings, attached to Light datablocks (and to
    Materials for mesh lights) by register() below."""
    def get_light_node(self):
        '''
        Get the light shader node
        '''
        light = self.id_data
        output = None
        nt = light.node_tree
        if not nt:
            return None
        output = is_renderman_nodetree(light)
        if not output:
            return None
        if self.renderman_light_role == 'RMAN_LIGHT':
            # NOTE(review): assumes output node input 1 is the light socket
            # and input 3 the light-filter socket -- confirm against the
            # RenderMan output node definition.
            socket = output.inputs[1]
            if socket.is_linked:
                return socket.links[0].from_node
        else:
            socket = output.inputs[3]
            if socket.is_linked:
                return socket.links[0].from_node
        return None
    def get_light_node_name(self):
        '''
        Get light shader name
        '''
        node = self.get_light_node()
        if node:
            return node.bl_label
        return ''
    # Name of the current light shader node (kept as a plain string).
    light_node: StringProperty(
        name="Light Node",
        default='')
    def update_vis(self, context):
        # NOTE(review): assigns light but performs no further work --
        # possibly an incomplete callback; confirm intended behavior.
        light = self.id_data
    use_renderman_node: BoolProperty(
        name="Use RenderMans Light Node",
        description="Will enable RenderMan light Nodes, opening more options",
        default=False
    )
    def renderman_light_role_update(self, context):
        # Dispatch to the shader-update matching the new role.
        if self.renderman_light_role == 'RMAN_LIGHT':
            self.renderman_light_shader_update(context)
        else:
            self.renderman_light_filter_shader_update(context)
    renderman_light_role: EnumProperty(
        name="Light Type",
        items=[('RMAN_LIGHT', 'Light', 'RenderMan Light'),
               ('RMAN_LIGHTFILTER', 'Filter', 'RenderMan Light Filter')],
        update=renderman_light_role_update,
        default='RMAN_LIGHT'
    )
    def renderman_light_shader_update(self, context):
        light = self.id_data
        light_shader = self.get_light_node_name()
        if hasattr(light, 'size'):
            light.size = 0.0
        # Non-area shaders are represented by a Blender POINT light.
        if light_shader not in RMAN_AREA_LIGHT_TYPES:
            light.type = 'POINT'
    def get_rman_light_shaders(self, context):
        # Build the EnumProperty items list; PxrRectLight is pinned first.
        items = []
        i = 0
        rman_light_icon = rfb_icons.get_light_icon("PxrRectLight")
        items.append(('PxrRectLight', 'PxrRectLight', '', rman_light_icon.icon_id, i))
        for n in rman_bl_nodes.__RMAN_LIGHT_NODES__:
            if n.name != 'PxrRectLight':
                i += 1
                light_icon = rfb_icons.get_light_icon(n.name)
                items.append( (n.name, n.name, '', light_icon.icon_id, i))
        return items
    renderman_light_shader: EnumProperty(
        name="RenderMan Light",
        items=get_rman_light_shaders,
        update=renderman_light_shader_update
    )
    def renderman_light_filter_shader_update(self, context):
        light = self.id_data
        light_shader = self.get_light_node_name()
        if hasattr(light, 'size'):
            light.size = 0.0
        # Light filters are always represented by a Blender POINT light.
        light.type = 'POINT'
    def get_rman_light_filter_shaders(self, context):
        # Build the EnumProperty items list; PxrBlockerLightFilter pinned first.
        items = []
        i = 0
        rman_light_icon = rfb_icons.get_lightfilter_icon("_PxrBlockerLightFilter")
        items.append(('PxrBlockerLightFilter', 'PxrBlockerLightFilter', '', rman_light_icon.icon_id, i))
        for n in rman_bl_nodes.__RMAN_LIGHTFILTER_NODES__:
            if n.name != 'PxrBlockerLightFilter':
                i += 1
                light_icon = rfb_icons.get_lightfilter_icon(n.name)
                items.append( (n.name, n.name, '', light_icon.icon_id, i))
        return items
    renderman_light_filter_shader: EnumProperty(
        name="RenderMan Light Filter",
        items=get_rman_light_filter_shaders,
        update=renderman_light_filter_shader_update
    )
    # Light filters attached to this light, plus UI list selection index.
    light_filters: CollectionProperty(
        type=RendermanLightFilter
    )
    light_filters_index: IntProperty(min=-1, default=-1)
    # Portal lights parented to this (dome) light.
    portal_lights: CollectionProperty(type=RendermanPortalLightPointer)
    def update_portal_lights_index(self, context):
        # Prune a dangling (unlinked) pointer entry at the selected index.
        if self.portal_lights_index < 0:
            return
        if self.portal_lights_index > len(self.portal_lights) - 1:
            return
        portal_ptr = self.portal_lights[self.portal_lights_index]
        if not portal_ptr.linked_portal_ob:
            self.portal_lights.remove(self.portal_lights_index)
            self.portal_lights_index = -1
    portal_lights_index: IntProperty(min=-1, default=-1, update=update_portal_lights_index)
    def update_dome_light_portal(self, context):
        if self.dome_light_portal:
            candidate = None
            dome_light = self.dome_light_portal
            rm = dome_light.data.renderman
            # Reuse an empty pointer slot on the dome light if one exists.
            for portal_ptr in rm.portal_lights:
                if not portal_ptr.linked_portal_ob:
                    candidate = portal_ptr
                    break
            if not candidate:
                candidate = rm.portal_lights.add()
            ob = context.object
            candidate.linked_portal_ob = ob
            self.dome_light_portal.update_tag(refresh={'DATA'})
            ob.update_tag(refresh={'DATA'})
        else:
            # try and remove the portal light on the dome light
            for obj in bpy.data.objects:
                if not obj.type == 'LIGHT':
                    continue
                rm = obj.data.renderman
                if rm.get_light_node_name() != 'PxrDomeLight':
                    continue
                if len(rm.portal_lights) < 1:
                    continue
                for i, portal_ptr in enumerate(rm.portal_lights):
                    if not portal_ptr.linked_portal_ob:
                        continue
                    portal = portal_ptr.linked_portal_ob
                    # NOTE(review): `rm` is rebound here, shadowing the dome
                    # light's settings; the setattr below therefore targets
                    # the portal's settings -- confirm this is intentional.
                    rm = portal.data.renderman
                    if not rm.dome_light_portal:
                        portal_ptr.linked_portal_ob = None
                        setattr(rm, 'portal_lights_index', i)
    def validate_dome_light(self, ob):
        # Poll callback: only allow RenderMan PxrDomeLight objects.
        if ob.type == 'LIGHT':
            rm = ob.data.renderman
            if rm.renderman_light_role == 'RMAN_LIGHT' and rm.get_light_node_name() == 'PxrDomeLight':
                return True
        return False
    dome_light_portal: PointerProperty(name="Dome Light",
                        type=bpy.types.Object,
                        description="Dome light to parent this portal light to.",
                        poll=validate_dome_light,
                        update=update_dome_light_portal)
    light_primary_visibility: BoolProperty(
        name="Light Primary Visibility",
        description="Camera visibility for this light",
        update=update_vis,
        default=True)
    mute: BoolProperty(
        name="Mute",
        description="Turn off this light",
        default=False)
    def update_solo(self, context):
        light = self.id_data
        scene = context.scene
        # if the scene solo is on already find the old one and turn off
        scene.renderman.solo_light = self.solo
        if self.solo:
            # NOTE(review): the inner check is always true here because
            # solo_light was just assigned self.solo above.
            if scene.renderman.solo_light:
                for ob in scene.objects:
                    if shadergraph_utils.is_rman_light(ob, include_light_filters=False):
                        rm = shadergraph_utils.get_rman_light_properties_group(ob)
                        if rm != self and rm.solo:
                            rm.solo = False
                            break
    solo: BoolProperty(
        name="Solo",
        update=update_solo,
        description="Turn on only this light",
        default=False)
    renderman_lock_light_type: BoolProperty(
        name="Lock Type",
        default=False,
        description="Lock from changing light shader and light role."
    )
    # OLD PROPERTIES
    shadingrate: FloatProperty(
        name="Light Shading Rate",
        description="Shading Rate for lights. Keep this high unless banding or pixellation occurs on detailed light maps",
        default=100.0)
    # illuminate
    illuminates_by_default: BoolProperty(
        name="Illuminates by default",
        description="The light illuminates objects by default",
        default=True)
    renderman_type: EnumProperty(
        name="Light Type",
        items=[
            ('AREA', 'Light', 'Area Light'),
            ('ENV', 'Dome', 'Dome Light'),
            ('SKY', 'Env Daylight', 'Simulated Sky'),
            ('DIST', 'Distant', 'Distant Light'),
            ('SPOT', 'Spot', 'Spot Light'),
            ('POINT', 'Point', 'Point Light'),
            ('PORTAL', 'Portal', 'Portal Light'),
            ('FILTER', 'Filter', 'RenderMan Light Filter'),
            ('UPDATED', 'UPDATED', '')],
        default='UPDATED'
    )
    area_shape: EnumProperty(
        name="Area Shape",
        items=[('rect', 'Rectangle', 'Rectangle'),
               ('disk', 'Disk', 'Disk'),
               ('sphere', 'Sphere', 'Sphere'),
               ('cylinder', 'Cylinder', 'Cylinder')],
        default='rect'
    )
    filter_type: EnumProperty(
        name="Area Shape",
        items=[('barn', 'Barn', 'Barn'),
               ('blocker', 'Blocker', 'Blocker'),
               #('combiner', 'Combiner', 'Combiner'),
               ('cookie', 'Cookie', 'Cookie'),
               ('gobo', 'Gobo', 'Gobo'),
               ('intmult', 'Multiply', 'Multiply'),
               ('ramp', 'Ramp', 'Ramp'),
               ('rod', 'Rod', 'Rod')
               ],
        default='blocker'
    )
class RendermanDisplayFilterSettings(bpy.types.PropertyGroup):
    """Settings wrapper for a single RenderMan display filter."""
    def get_filter_name(self):
        """Return the filter node name (the enum value minus '_settings')."""
        return self.filter_type.replace('_settings', '')
    def get_filter_node(self):
        """Return the property group holding this filter's node settings."""
        settings_attr = self.filter_type + '_settings'
        return getattr(self, settings_attr)
    def displayfilter_items(self, context):
        """Build the EnumProperty items from the registered display filter nodes."""
        return [(node.name, node.name, '')
                for node in rman_bl_nodes.__RMAN_DISPLAYFILTER_NODES__]
    filter_type: EnumProperty(items=displayfilter_items, name='Filter')
class RendermanSampleFilterSettings(bpy.types.PropertyGroup):
    """Settings wrapper for a single RenderMan sample filter."""
    def get_filter_name(self):
        """Return the filter node name (the enum value minus '_settings')."""
        return self.filter_type.replace('_settings', '')
    def get_filter_node(self):
        """Return the property group holding this filter's node settings."""
        settings_attr = self.filter_type + '_settings'
        return getattr(self, settings_attr)
    def samplefilter_items(self, context):
        """Build the EnumProperty items from the registered sample filter nodes."""
        return [(node.name, node.name, '')
                for node in rman_bl_nodes.__RMAN_SAMPLEFILTER_NODES__]
    filter_type: EnumProperty(items=samplefilter_items, name='Filter')
# Property group classes to (un)register, in dependency order: the pointer
# groups come first because RendermanLightSettings references them.
classes = [RendermanLightFilter,
           RendermanPortalLightPointer,
           RendermanLightSettings,
           RendermanPluginSettings,
           RendermanDisplayFilterSettings,
           RendermanSampleFilterSettings
]
def register():
    """Register the property group classes and attach RenderMan light
    settings pointers to Light datablocks and Materials (for mesh lights)."""
    for cls in classes:
        bpy.utils.register_class(cls)
    bpy.types.Light.renderman = PointerProperty(
        type=RendermanLightSettings, name="Renderman Light Settings")
    # light settings for mesh lights, that are a part of a material
    bpy.types.Material.renderman_light = PointerProperty(
        type=RendermanLightSettings, name="Renderman Light Settings")
def unregister():
    """Unregister all property group classes, tolerating classes that were
    never registered (e.g. after a partial/failed register())."""
    for cls in classes:
        try:
            bpy.utils.unregister_class(cls)
        except RuntimeError:
            # Blender raises RuntimeError when the class isn't registered;
            # log and keep going so the remaining classes still unregister.
            # (Removed a redundant `pass` that followed this log call.)
            rfb_log().debug('Could not unregister class: %s' % str(cls))
|
icio/github3.py | refs/heads/develop | tests/integration/test_repos_deployment.py | 11 | """Deployment integration tests."""
import github3
from .helper import IntegrationHelper
def find(func, iterable):
    """Return the first item in *iterable* for which ``func(item)`` is truthy.

    Raises StopIteration when no item matches.
    """
    return next(item for item in iterable if func(item))
class TestDeployment(IntegrationHelper):
    """Integration tests for the Deployment class."""
    def test_create_status(self):
        """Show that a user can create a deployment status."""
        self.basic_login()
        cassette_name = self.cassette_name('create_status')
        with self.recorder.use_cassette(cassette_name):
            repository = self.gh.repository('sigmavirus24', 'github3.py')
            assert repository is not None
            # Deployment id 801 is baked into the recorded cassette.
            deployment = find(lambda d: d.id == 801,
                              repository.deployments())
            assert deployment is not None
            status = deployment.create_status('success')
        assert isinstance(status, github3.repos.deployment.DeploymentStatus)
    def test_statuses(self):
        """Show that a user can retrieve deployment statuses."""
        cassette_name = self.cassette_name('statuses')
        with self.recorder.use_cassette(cassette_name):
            repository = self.gh.repository('sigmavirus24', 'github3.py')
            assert repository is not None
            # Deployment id 801 is baked into the recorded cassette.
            deployment = find(lambda d: d.id == 801,
                              repository.deployments())
            assert deployment is not None
            statuses = list(deployment.statuses(5))
        for status in statuses:
            assert isinstance(status,
                              github3.repos.deployment.DeploymentStatus)
|
robscetury/gibson | refs/heads/master | lib/gibson/physics/__init__.py | 1 | try:
import pyximport
pyximport.install()
from gibson.physics.spring import *
print "Got the cython spring!"
except:
print "Cython not installed"
from gibson.physics.pyspring import *
|
rotemtam/pi-mac-monitor | refs/heads/master | install_on_pi/push_data.py | 1 | import subprocess, StringIO, csv
from simplejson import dumps
from firebase import Firebase
from time import sleep, time
firebase = Firebase('https://<your_app>.firebaseio.com/stations')
def fetch_data():
    """Parse the newest airodump capture CSV in /tmp into station dicts.

    Returns a list of dicts with keys 'station_mac', 'first_time_seen',
    'last_time_seen' and 'power', one per non-empty CSV row after the
    'Station' header.
    """
    # get the newest capture.csv file, then use awk to get only Station data
    # NOTE: shell=True is acceptable here because the command is a fixed
    # literal (no untrusted input is interpolated).
    cmd = r"cat /tmp/`ls -Art /tmp | grep capture | tail -n 1` | awk '/Station/{y=1;next}y'"
    data = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()
    f = StringIO.StringIO(data)
    # convert the data to a list of dict() objects
    conv = lambda row: {'station_mac':row[0], 'first_time_seen':row[1], 'last_time_seen':row[2], 'power':row[3]}
    data = [row for row in csv.reader(f, delimiter=',') if len(row) != 0]
    return [conv(row) for row in data]
# Poll forever: push the latest station list to Firebase once per second.
while True:
    print firebase.put(fetch_data())
    sleep(1)
|
lilleswing/deepchem | refs/heads/master | examples/sweetlead/sweet.py | 5 | """
Script that loads random forest models trained on the sider and tox21 datasets,
predicts on sweetlead, creates covariance matrix
@Author Aneesh Pappu
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import sys
import numpy as np
import pandas as pd
import deepchem as dc
from sklearn.ensemble import RandomForestClassifier
from deepchem.models.multitask import SingletaskToMultitask
from deepchem import metrics
from deepchem.metrics import Metric
from deepchem.models.sklearn_models import SklearnModel
# Load the Tox21 dataset splits and transformers.
tox_tasks, (tox_train, tox_valid,
            tox_test), tox_transformers = dc.molnet.load_tox21()
# NOTE(review): classification_metric is defined but never used below.
classification_metric = Metric(
    metrics.roc_auc_score, np.mean, mode="classification")
def model_builder(model_dir):
    """Build a balanced 500-tree random forest wrapped as a DeepChem model."""
    forest = RandomForestClassifier(
        n_estimators=500, class_weight="balanced", n_jobs=-1)
    return dc.models.SklearnModel(forest, model_dir)
print(tox_train.get_task_names())
print(tox_tasks)
# Train one random forest per Tox21 task.
tox_model = SingletaskToMultitask(tox_tasks, model_builder)
tox_model.fit(tox_train)
# Load sider models now
sider_tasks, (
    sider_train, sider_valid,
    sider_test), sider_transformers = dc.molnet.load_sider(split="random")
sider_model = SingletaskToMultitask(sider_tasks, model_builder)
sider_model.fit(sider_train)
# Load sweetlead dataset now. Pass in dataset object and appropriate
# transformers to predict functions
sweet_tasks, (sweet_dataset, _, _), sweet_transformers = dc.molnet.load_sweet()
sider_predictions = sider_model.predict(sweet_dataset, sweet_transformers)
tox_predictions = tox_model.predict(sweet_dataset, sweet_transformers)
sider_dimensions = sider_predictions.shape[1]
tox_dimensions = tox_predictions.shape[1]
# Co-occurrence matrix: rows are Tox21 tasks, columns are SIDER tasks;
# entry (j, k) counts compounds with nonzero predictions for both.
confusion_matrix = np.zeros(shape=(tox_dimensions, sider_dimensions))
for i in range(tox_predictions.shape[0]):
    nonzero_tox = np.nonzero(tox_predictions[i, :])
    nonzero_sider = np.nonzero(sider_predictions[i, :])
    for j in nonzero_tox[0]:
        for k in nonzero_sider[0]:
            confusion_matrix[j, k] += 1
df = pd.DataFrame(confusion_matrix)
df.to_csv("./tox_sider_matrix.csv")
|
Technorip/Myntra | refs/heads/master | Django Backend/myntra/env/lib/python2.7/site-packages/django/db/models/fields/related_descriptors.py | 20 | """
Accessors for related objects.
When a field defines a relation between two models, each model class provides
an attribute to access related instances of the other model class (unless the
reverse accessor has been disabled with related_name='+').
Accessors are implemented as descriptors in order to customize access and
assignment. This module defines the descriptor classes.
Forward accessors follow foreign keys. Reverse accessors trace them back. For
example, with the following models::
class Parent(Model):
pass
class Child(Model):
parent = ForeignKey(Parent, related_name='children')
``child.parent`` is a forward many-to-one relation. ``parent.children`` is a
reverse many-to-one relation.
There are three types of relations (many-to-one, one-to-one, and many-to-many)
and two directions (forward and reverse) for a total of six combinations.
1. Related instance on the forward side of a many-to-one or one-to-one
relation: ``ForwardManyToOneDescriptor``.
Uniqueness of foreign key values is irrelevant to accessing the related
instance, making the many-to-one and one-to-one cases identical as far as
the descriptor is concerned. The constraint is checked upstream (unicity
validation in forms) or downstream (unique indexes in the database).
If you're looking for ``ForwardOneToOneDescriptor``, use
``ForwardManyToOneDescriptor`` instead.
2. Related instance on the reverse side of a one-to-one relation:
``ReverseOneToOneDescriptor``.
One-to-one relations are asymmetrical, despite the apparent symmetry of the
name, because they're implemented in the database with a foreign key from
one table to another. As a consequence ``ReverseOneToOneDescriptor`` is
slightly different from ``ForwardManyToOneDescriptor``.
3. Related objects manager for related instances on the reverse side of a
many-to-one relation: ``ReverseManyToOneDescriptor``.
Unlike the previous two classes, this one provides access to a collection
of objects. It returns a manager rather than an instance.
4. Related objects manager for related instances on the forward or reverse
sides of a many-to-many relation: ``ManyToManyDescriptor``.
Many-to-many relations are symmetrical. The syntax of Django models
requires declaring them on one side but that's an implementation detail.
They could be declared on the other side without any change in behavior.
Therefore the forward and reverse descriptors can be the same.
If you're looking for ``ForwardManyToManyDescriptor`` or
``ReverseManyToManyDescriptor``, use ``ManyToManyDescriptor`` instead.
"""
from __future__ import unicode_literals
from operator import attrgetter
from django.db import connections, router, transaction
from django.db.models import Q, signals
from django.db.models.query import QuerySet
from django.utils.functional import cached_property
class ForwardManyToOneDescriptor(object):
    """
    Accessor to the related object on the forward side of a many-to-one or
    one-to-one relation.
    In the example::
        class Child(Model):
            parent = ForeignKey(Parent, related_name='children')
    ``child.parent`` is a ``ForwardManyToOneDescriptor`` instance.
    """
    def __init__(self, field_with_rel):
        self.field = field_with_rel
        self.cache_name = self.field.get_cache_name()
    @cached_property
    def RelatedObjectDoesNotExist(self):
        # The exception can't be created at initialization time since the
        # related model might not be resolved yet; `rel.model` might still be
        # a string model reference.
        return type(
            str('RelatedObjectDoesNotExist'),
            (self.field.remote_field.model.DoesNotExist, AttributeError),
            {}
        )
    def is_cached(self, instance):
        # True if the related object has already been fetched and cached.
        return hasattr(instance, self.cache_name)
    def get_queryset(self, **hints):
        manager = self.field.remote_field.model._default_manager
        # If the related manager indicates that it should be used for
        # related fields, respect that.
        if not getattr(manager, 'use_for_related_fields', False):
            manager = self.field.remote_field.model._base_manager
        return manager.db_manager(hints=hints).all()
    def get_prefetch_queryset(self, instances, queryset=None):
        if queryset is None:
            queryset = self.get_queryset()
        queryset._add_hints(instance=instances[0])
        rel_obj_attr = self.field.get_foreign_related_value
        instance_attr = self.field.get_local_related_value
        instances_dict = {instance_attr(inst): inst for inst in instances}
        related_field = self.field.foreign_related_fields[0]
        # FIXME: This will need to be revisited when we introduce support for
        # composite fields. In the meantime we take this practical approach to
        # solve a regression on 1.6 when the reverse manager in hidden
        # (related_name ends with a '+'). Refs #21410.
        # The check for len(...) == 1 is a special case that allows the query
        # to be join-less and smaller. Refs #21760.
        if self.field.remote_field.is_hidden() or len(self.field.foreign_related_fields) == 1:
            query = {'%s__in' % related_field.name: set(instance_attr(inst)[0] for inst in instances)}
        else:
            query = {'%s__in' % self.field.related_query_name(): instances}
        queryset = queryset.filter(**query)
        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        if not self.field.remote_field.multiple:
            rel_obj_cache_name = self.field.remote_field.get_cache_name()
            for rel_obj in queryset:
                instance = instances_dict[rel_obj_attr(rel_obj)]
                setattr(rel_obj, rel_obj_cache_name, instance)
        return queryset, rel_obj_attr, instance_attr, True, self.cache_name
    def __get__(self, instance, instance_type=None):
        """
        Get the related instance through the forward relation.
        With the example above, when getting ``child.parent``:
        - ``self`` is the descriptor managing the ``parent`` attribute
        - ``instance`` is the ``child`` instance
        - ``instance_type`` is the ``Child`` class (we don't need it)
        """
        if instance is None:
            return self
        # The related instance is loaded from the database and then cached in
        # the attribute defined in self.cache_name. It can also be pre-cached
        # by the reverse accessor (ReverseOneToOneDescriptor).
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            val = self.field.get_local_related_value(instance)
            if None in val:
                rel_obj = None
            else:
                qs = self.get_queryset(instance=instance)
                qs = qs.filter(self.field.get_reverse_related_filter(instance))
                # Assuming the database enforces foreign keys, this won't fail.
                rel_obj = qs.get()
                # If this is a one-to-one relation, set the reverse accessor
                # cache on the related object to the current instance to avoid
                # an extra SQL query if it's accessed later on.
                if not self.field.remote_field.multiple:
                    setattr(rel_obj, self.field.remote_field.get_cache_name(), instance)
            setattr(instance, self.cache_name, rel_obj)
        if rel_obj is None and not self.field.null:
            raise self.RelatedObjectDoesNotExist(
                "%s has no %s." % (self.field.model.__name__, self.field.name)
            )
        else:
            return rel_obj
    def __set__(self, instance, value):
        """
        Set the related instance through the forward relation.
        With the example above, when setting ``child.parent = parent``:
        - ``self`` is the descriptor managing the ``parent`` attribute
        - ``instance`` is the ``child`` instance
        - ``value`` is the ``parent`` instance on the right of the equal sign
        """
        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None and self.field.null is False:
            raise ValueError(
                'Cannot assign None: "%s.%s" does not allow null values.' %
                (instance._meta.object_name, self.field.name)
            )
        elif value is not None and not isinstance(value, self.field.remote_field.model):
            raise ValueError(
                'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
                    value,
                    instance._meta.object_name,
                    self.field.name,
                    self.field.remote_field.model._meta.object_name,
                )
            )
        elif value is not None:
            # Route both objects to a consistent database before relating them.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
        # If we're setting the value of a OneToOneField to None, we need to clear
        # out the cache on any old related object. Otherwise, deleting the
        # previously-related object will also cause this object to be deleted,
        # which is wrong.
        if value is None:
            # Look up the previously-related object, which may still be available
            # since we've not yet cleared out the related field.
            # Use the cache directly, instead of the accessor; if we haven't
            # populated the cache, then we don't care - we're only accessing
            # the object to invalidate the accessor cache, so there's no
            # need to populate the cache just to expire it again.
            related = getattr(instance, self.cache_name, None)
            # If we've got an old related object, we need to clear out its
            # cache. This cache also might not exist if the related object
            # hasn't been accessed yet.
            if related is not None:
                setattr(related, self.field.remote_field.get_cache_name(), None)
            for lh_field, rh_field in self.field.related_fields:
                setattr(instance, lh_field.attname, None)
        # Set the values of the related field.
        else:
            for lh_field, rh_field in self.field.related_fields:
                setattr(instance, lh_field.attname, getattr(value, rh_field.attname))
        # Set the related instance cache used by __get__ to avoid a SQL query
        # when accessing the attribute we just set.
        setattr(instance, self.cache_name, value)
        # If this is a one-to-one relation, set the reverse accessor cache on
        # the related object to the current instance to avoid an extra SQL
        # query if it's accessed later on.
        if value is not None and not self.field.remote_field.multiple:
            setattr(value, self.field.remote_field.get_cache_name(), instance)
class ReverseOneToOneDescriptor(object):
    """
    Accessor to the related object on the reverse side of a one-to-one
    relation.
    In the example::
        class Restaurant(Model):
            place = OneToOneField(Place, related_name='restaurant')
    ``place.restaurant`` is a ``ReverseOneToOneDescriptor`` instance.
    """
    def __init__(self, related):
        self.related = related
        self.cache_name = related.get_cache_name()
    @cached_property
    def RelatedObjectDoesNotExist(self):
        # The exception isn't created at initialization time for the sake of
        # consistency with `ForwardManyToOneDescriptor`.
        return type(
            str('RelatedObjectDoesNotExist'),
            (self.related.related_model.DoesNotExist, AttributeError),
            {}
        )
    def is_cached(self, instance):
        # True if the related object has already been fetched and cached.
        return hasattr(instance, self.cache_name)
    def get_queryset(self, **hints):
        manager = self.related.related_model._default_manager
        # If the related manager indicates that it should be used for
        # related fields, respect that.
        if not getattr(manager, 'use_for_related_fields', False):
            manager = self.related.related_model._base_manager
        return manager.db_manager(hints=hints).all()
    def get_prefetch_queryset(self, instances, queryset=None):
        if queryset is None:
            queryset = self.get_queryset()
        queryset._add_hints(instance=instances[0])
        rel_obj_attr = attrgetter(self.related.field.attname)
        instance_attr = lambda obj: obj._get_pk_val()
        instances_dict = {instance_attr(inst): inst for inst in instances}
        query = {'%s__in' % self.related.field.name: instances}
        queryset = queryset.filter(**query)
        # Since we're going to assign directly in the cache,
        # we must manage the reverse relation cache manually.
        rel_obj_cache_name = self.related.field.get_cache_name()
        for rel_obj in queryset:
            instance = instances_dict[rel_obj_attr(rel_obj)]
            setattr(rel_obj, rel_obj_cache_name, instance)
        return queryset, rel_obj_attr, instance_attr, True, self.cache_name
    def __get__(self, instance, instance_type=None):
        """
        Get the related instance through the reverse relation.
        With the example above, when getting ``place.restaurant``:
        - ``self`` is the descriptor managing the ``restaurant`` attribute
        - ``instance`` is the ``place`` instance
        - ``instance_type`` is the ``Place`` class (we don't need it)
        Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
        """
        if instance is None:
            return self
        # The related instance is loaded from the database and then cached in
        # the attribute defined in self.cache_name. It can also be pre-cached
        # by the forward accessor (ForwardManyToOneDescriptor).
        try:
            rel_obj = getattr(instance, self.cache_name)
        except AttributeError:
            related_pk = instance._get_pk_val()
            if related_pk is None:
                rel_obj = None
            else:
                filter_args = self.related.field.get_forward_related_filter(instance)
                try:
                    rel_obj = self.get_queryset(instance=instance).get(**filter_args)
                except self.related.related_model.DoesNotExist:
                    rel_obj = None
                else:
                    # Set the forward accessor cache on the related object to
                    # the current instance to avoid an extra SQL query if it's
                    # accessed later on.
                    setattr(rel_obj, self.related.field.get_cache_name(), instance)
            setattr(instance, self.cache_name, rel_obj)
        if rel_obj is None:
            raise self.RelatedObjectDoesNotExist(
                "%s has no %s." % (
                    instance.__class__.__name__,
                    self.related.get_accessor_name()
                )
            )
        else:
            return rel_obj
    def __set__(self, instance, value):
        """
        Set the related instance through the reverse relation.
        With the example above, when setting ``place.restaurant = restaurant``:
        - ``self`` is the descriptor managing the ``restaurant`` attribute
        - ``instance`` is the ``place`` instance
        - ``value`` is the ``restaurant`` instance on the right of the equal sign
        Keep in mind that ``Restaurant`` holds the foreign key to ``Place``.
        """
        # The similarity of the code below to the code in
        # ForwardManyToOneDescriptor is annoying, but there's a bunch
        # of small differences that would make a common base class convoluted.
        # If null=True, we can assign null here, but otherwise the value needs
        # to be an instance of the related class.
        if value is None:
            if self.related.field.null:
                # Update the cached related instance (if any) & clear the cache.
                try:
                    rel_obj = getattr(instance, self.cache_name)
                except AttributeError:
                    pass
                else:
                    delattr(instance, self.cache_name)
                    setattr(rel_obj, self.related.field.name, None)
            else:
                raise ValueError(
                    'Cannot assign None: "%s.%s" does not allow null values.' % (
                        instance._meta.object_name,
                        self.related.get_accessor_name(),
                    )
                )
        elif not isinstance(value, self.related.related_model):
            raise ValueError(
                'Cannot assign "%r": "%s.%s" must be a "%s" instance.' % (
                    value,
                    instance._meta.object_name,
                    self.related.get_accessor_name(),
                    self.related.related_model._meta.object_name,
                )
            )
        else:
            # Route both objects to a consistent database before relating them.
            if instance._state.db is None:
                instance._state.db = router.db_for_write(instance.__class__, instance=value)
            elif value._state.db is None:
                value._state.db = router.db_for_write(value.__class__, instance=instance)
            elif value._state.db is not None and instance._state.db is not None:
                if not router.allow_relation(value, instance):
                    raise ValueError('Cannot assign "%r": the current database router prevents this relation.' % value)
            related_pk = tuple(getattr(instance, field.attname) for field in self.related.field.foreign_related_fields)
            # Set the value of the related field to the value of the related object's related field
            for index, field in enumerate(self.related.field.local_related_fields):
                setattr(value, field.attname, related_pk[index])
            # Set the related instance cache used by __get__ to avoid a SQL query
            # when accessing the attribute we just set.
            setattr(instance, self.cache_name, value)
            # Set the forward accessor cache on the related object to the current
            # instance to avoid an extra SQL query if it's accessed later on.
            setattr(value, self.related.field.get_cache_name(), instance)
class ReverseManyToOneDescriptor(object):
    """
    Accessor to the related objects manager on the reverse side of a
    many-to-one relation.
    In the example::
        class Child(Model):
            parent = ForeignKey(Parent, related_name='children')
    ``parent.children`` is a ``ReverseManyToOneDescriptor`` instance.
    Most of the implementation is delegated to a dynamically defined manager
    class built by ``create_reverse_many_to_one_manager()`` defined below.
    """
    def __init__(self, rel):
        self.rel = rel
        self.field = rel.field
    @cached_property
    def related_manager_cls(self):
        # Build (once per descriptor) a manager subclass specialized for this
        # relation, based on the related model's default manager class.
        return create_reverse_many_to_one_manager(
            self.rel.related_model._default_manager.__class__,
            self.rel,
        )
    def __get__(self, instance, instance_type=None):
        """
        Get the related objects through the reverse relation.
        With the example above, when getting ``parent.children``:
        - ``self`` is the descriptor managing the ``children`` attribute
        - ``instance`` is the ``parent`` instance
        - ``instance_type`` is the ``Parent`` class (we don't need it)
        """
        if instance is None:
            return self
        return self.related_manager_cls(instance)
    def __set__(self, instance, value):
        """
        Set the related objects through the reverse relation.
        With the example above, when setting ``parent.children = children``:
        - ``self`` is the descriptor managing the ``children`` attribute
        - ``instance`` is the ``parent`` instance
        - ``value`` is the ``children`` sequence on the right of the equal sign
        """
        manager = self.__get__(instance)
        manager.set(value)
def create_reverse_many_to_one_manager(superclass, rel):
"""
Create a manager for the reverse side of a many-to-one relation.
This manager subclasses another manager, generally the default manager of
the related model, and adds behaviors specific to many-to-one relations.
"""
class RelatedManager(superclass):
def __init__(self, instance):
super(RelatedManager, self).__init__()
self.instance = instance
self.model = rel.related_model
self.field = rel.field
self.core_filters = {self.field.name: instance}
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_reverse_many_to_one_manager(manager.__class__, rel)
return manager_class(self.instance)
do_not_call_in_templates = True
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.field.related_query_name()]
except (AttributeError, KeyError):
db = self._db or router.db_for_read(self.model, instance=self.instance)
empty_strings_as_null = connections[db].features.interprets_empty_strings_as_nulls
qs = super(RelatedManager, self).get_queryset()
qs._add_hints(instance=self.instance)
if self._db:
qs = qs.using(self._db)
qs = qs.filter(**self.core_filters)
for field in self.field.foreign_related_fields:
val = getattr(self.instance, field.attname)
if val is None or (val == '' and empty_strings_as_null):
return qs.none()
qs._known_related_objects = {self.field: {self.instance.pk: self.instance}}
return qs
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(RelatedManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
rel_obj_attr = self.field.get_local_related_value
instance_attr = self.field.get_foreign_related_value
instances_dict = {instance_attr(inst): inst for inst in instances}
query = {'%s__in' % self.field.name: instances}
queryset = queryset.filter(**query)
# Since we just bypassed this class' get_queryset(), we must manage
# the reverse relation manually.
for rel_obj in queryset:
instance = instances_dict[rel_obj_attr(rel_obj)]
setattr(rel_obj, self.field.name, instance)
cache_name = self.field.related_query_name()
return queryset, rel_obj_attr, instance_attr, False, cache_name
        def add(self, *objs, **kwargs):
            """Point each of ``objs``' foreign keys at this instance.

            With bulk=True (default) a single UPDATE is issued, which requires
            every object to be saved already; with bulk=False each object is
            mutated and save()d individually inside a transaction.
            """
            bulk = kwargs.pop('bulk', True)
            objs = list(objs)
            db = router.db_for_write(self.model, instance=self.instance)
            def check_and_update_obj(obj):
                # Type-check, then set the FK attribute in memory.
                if not isinstance(obj, self.model):
                    raise TypeError("'%s' instance expected, got %r" % (
                        self.model._meta.object_name, obj,
                    ))
                setattr(obj, self.field.name, self.instance)
            if bulk:
                pks = []
                for obj in objs:
                    check_and_update_obj(obj)
                    # Bulk UPDATE can only target rows that already exist on
                    # the same database.
                    if obj._state.adding or obj._state.db != db:
                        raise ValueError(
                            "%r instance isn't saved. Use bulk=False or save "
                            "the object first." % obj
                        )
                    pks.append(obj.pk)
                self.model._base_manager.using(db).filter(pk__in=pks).update(**{
                    self.field.name: self.instance,
                })
            else:
                with transaction.atomic(using=db, savepoint=False):
                    for obj in objs:
                        check_and_update_obj(obj)
                        obj.save()
        add.alters_data = True
def create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.field.name] = self.instance
db = router.db_for_write(self.model, instance=self.instance)
return super(RelatedManager, self.db_manager(db)).update_or_create(**kwargs)
update_or_create.alters_data = True
        # remove() and clear() are only provided if the ForeignKey can have a value of null.
        if rel.field.null:
            def remove(self, *objs, **kwargs):
                """Dissociate ``objs`` by nulling their FK to this instance.

                Raises DoesNotExist if any object is not currently related.
                """
                if not objs:
                    return
                bulk = kwargs.pop('bulk', True)
                val = self.field.get_foreign_related_value(self.instance)
                old_ids = set()
                for obj in objs:
                    # Is obj actually part of this descriptor set?
                    if self.field.get_local_related_value(obj) == val:
                        old_ids.add(obj.pk)
                    else:
                        raise self.field.remote_field.model.DoesNotExist(
                            "%r is not related to %r." % (obj, self.instance)
                        )
                self._clear(self.filter(pk__in=old_ids), bulk)
            remove.alters_data = True
            def clear(self, **kwargs):
                """Dissociate every related object (null out all FKs)."""
                bulk = kwargs.pop('bulk', True)
                self._clear(self, bulk)
            clear.alters_data = True
            def _clear(self, queryset, bulk):
                """Null the FK of every row in ``queryset``.

                bulk=True does one UPDATE; bulk=False save()s each object so
                signals and overridden save() methods still fire.
                """
                db = router.db_for_write(self.model, instance=self.instance)
                queryset = queryset.using(db)
                if bulk:
                    # `QuerySet.update()` is intrinsically atomic.
                    queryset.update(**{self.field.name: None})
                else:
                    with transaction.atomic(using=db, savepoint=False):
                        for obj in queryset:
                            setattr(obj, self.field.name, None)
                            obj.save(update_fields=[self.field.name])
            _clear.alters_data = True
        def set(self, objs, **kwargs):
            """Replace the current related set with ``objs``.

            For nullable FKs this computes the minimal add/remove delta (or
            clears first when clear=True) inside one transaction; for
            non-nullable FKs it can only add.
            """
            # Force evaluation of `objs` in case it's a queryset whose value
            # could be affected by `manager.clear()`. Refs #19816.
            objs = tuple(objs)
            bulk = kwargs.pop('bulk', True)
            clear = kwargs.pop('clear', False)
            if self.field.null:
                db = router.db_for_write(self.model, instance=self.instance)
                with transaction.atomic(using=db, savepoint=False):
                    if clear:
                        self.clear()
                        self.add(*objs, bulk=bulk)
                    else:
                        # Diff against the existing set so unchanged objects
                        # are left untouched.
                        old_objs = set(self.using(db).all())
                        new_objs = []
                        for obj in objs:
                            if obj in old_objs:
                                old_objs.remove(obj)
                            else:
                                new_objs.append(obj)
                        self.remove(*old_objs, bulk=bulk)
                        self.add(*new_objs, bulk=bulk)
            else:
                self.add(*objs, bulk=bulk)
        set.alters_data = True
return RelatedManager
class ManyToManyDescriptor(ReverseManyToOneDescriptor):
    """
    Accessor for the related-objects manager on either side of a
    many-to-many relation.

    Given::

        class Pizza(Model):
            toppings = ManyToManyField(Topping, related_name='pizzas')

    both ``pizza.toppings`` and ``topping.pizzas`` are ``ManyToManyDescriptor``
    instances.  The heavy lifting lives in the manager class produced by
    ``create_forward_many_to_many_manager()`` defined below.
    """
    def __init__(self, rel, reverse=False):
        super(ManyToManyDescriptor, self).__init__(rel)
        self.reverse = reverse

    @property
    def through(self):
        # Expose the intermediate model (e.g. Book.authors.through) for
        # inlines and similar tooling; kept a property so the fully
        # resolved model is always the one returned.
        return self.rel.through

    @cached_property
    def related_manager_cls(self):
        # The manager operates on the model at the *other* end of the
        # relation from the instance owning this descriptor.
        if self.reverse:
            model = self.rel.related_model
        else:
            model = self.rel.model
        return create_forward_many_to_many_manager(
            model._default_manager.__class__, self.rel, reverse=self.reverse)
def create_forward_many_to_many_manager(superclass, rel, reverse):
    """
    Create a manager for the either side of a many-to-many relation.
    This manager subclasses another manager, generally the default manager of
    the related model, and adds behaviors specific to many-to-many relations.
    """
    class ManyRelatedManager(superclass):
        def __init__(self, instance=None):
            super(ManyRelatedManager, self).__init__()
            self.instance = instance
            # Source/target field names swap depending on which side of the
            # relation this manager was created for.
            if not reverse:
                self.model = rel.model
                self.query_field_name = rel.field.related_query_name()
                self.prefetch_cache_name = rel.field.name
                self.source_field_name = rel.field.m2m_field_name()
                self.target_field_name = rel.field.m2m_reverse_field_name()
                self.symmetrical = rel.symmetrical
            else:
                self.model = rel.related_model
                self.query_field_name = rel.field.name
                self.prefetch_cache_name = rel.field.related_query_name()
                self.source_field_name = rel.field.m2m_reverse_field_name()
                self.target_field_name = rel.field.m2m_field_name()
                self.symmetrical = False
            self.through = rel.through
            self.reverse = reverse
            self.source_field = self.through._meta.get_field(self.source_field_name)
            self.target_field = self.through._meta.get_field(self.target_field_name)
            # Filters restricting querysets to rows related to `instance`.
            self.core_filters = {}
            for lh_field, rh_field in self.source_field.related_fields:
                core_filter_key = '%s__%s' % (self.query_field_name, rh_field.name)
                self.core_filters[core_filter_key] = getattr(instance, rh_field.attname)
            self.related_val = self.source_field.get_foreign_related_value(instance)
            if None in self.related_val:
                raise ValueError('"%r" needs to have a value for field "%s" before '
                                 'this many-to-many relationship can be used.' %
                                 (instance, self.source_field_name))
            # Even if this relation is not to pk, we require still pk value.
            # The wish is that the instance has been already saved to DB,
            # although having a pk value isn't a guarantee of that.
            if instance.pk is None:
                raise ValueError("%r instance needs to have a primary key value before "
                                 "a many-to-many relationship can be used." %
                                 instance.__class__.__name__)
        def __call__(self, **kwargs):
            # We use **kwargs rather than a kwarg argument to enforce the
            # `manager='manager_name'` syntax.
            manager = getattr(self.model, kwargs.pop('manager'))
            manager_class = create_forward_many_to_many_manager(manager.__class__, rel, reverse)
            return manager_class(instance=self.instance)
        do_not_call_in_templates = True
        # Build the Q filter selecting through-table rows to delete for
        # remove()/clear(); mirrors the filter for symmetrical self-relations.
        def _build_remove_filters(self, removed_vals):
            filters = Q(**{self.source_field_name: self.related_val})
            # No need to add a subquery condition if removed_vals is a QuerySet without
            # filters.
            removed_vals_filters = (not isinstance(removed_vals, QuerySet) or
                                    removed_vals._has_filters())
            if removed_vals_filters:
                filters &= Q(**{'%s__in' % self.target_field_name: removed_vals})
            if self.symmetrical:
                symmetrical_filters = Q(**{self.target_field_name: self.related_val})
                if removed_vals_filters:
                    symmetrical_filters &= Q(
                        **{'%s__in' % self.source_field_name: removed_vals})
                filters |= symmetrical_filters
            return filters
        def get_queryset(self):
            # Serve from the prefetch cache when available, otherwise filter
            # the superclass queryset down to rows related to this instance.
            try:
                return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
            except (AttributeError, KeyError):
                qs = super(ManyRelatedManager, self).get_queryset()
                qs._add_hints(instance=self.instance)
                if self._db:
                    qs = qs.using(self._db)
                return qs._next_is_sticky().filter(**self.core_filters)
        # Returns the 5-tuple that prefetch_related() expects.
        def get_prefetch_queryset(self, instances, queryset=None):
            if queryset is None:
                queryset = super(ManyRelatedManager, self).get_queryset()
            queryset._add_hints(instance=instances[0])
            queryset = queryset.using(queryset._db or self._db)
            query = {'%s__in' % self.query_field_name: instances}
            queryset = queryset._next_is_sticky().filter(**query)
            # M2M: need to annotate the query in order to get the primary model
            # that the secondary model was actually related to. We know that
            # there will already be a join on the join table, so we can just add
            # the select.
            # For non-autocreated 'through' models, can't assume we are
            # dealing with PK values.
            fk = self.through._meta.get_field(self.source_field_name)
            join_table = self.through._meta.db_table
            connection = connections[queryset.db]
            qn = connection.ops.quote_name
            queryset = queryset.extra(select={
                '_prefetch_related_val_%s' % f.attname:
                '%s.%s' % (qn(join_table), qn(f.column)) for f in fk.local_related_fields})
            return (
                queryset,
                lambda result: tuple(
                    getattr(result, '_prefetch_related_val_%s' % f.attname)
                    for f in fk.local_related_fields
                ),
                lambda inst: tuple(
                    f.get_db_prep_value(getattr(inst, f.attname), connection)
                    for f in fk.foreign_related_fields
                ),
                False,
                self.prefetch_cache_name,
            )
        def add(self, *objs):
            # Only supported for auto-created through models; otherwise the
            # caller must manipulate the through model's manager directly.
            if not rel.through._meta.auto_created:
                opts = self.through._meta
                raise AttributeError(
                    "Cannot use add() on a ManyToManyField which specifies an "
                    "intermediary model. Use %s.%s's Manager instead." %
                    (opts.app_label, opts.object_name)
                )
            db = router.db_for_write(self.through, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                self._add_items(self.source_field_name, self.target_field_name, *objs)
                # If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
                if self.symmetrical:
                    self._add_items(self.target_field_name, self.source_field_name, *objs)
        add.alters_data = True
        def remove(self, *objs):
            if not rel.through._meta.auto_created:
                opts = self.through._meta
                raise AttributeError(
                    "Cannot use remove() on a ManyToManyField which specifies "
                    "an intermediary model. Use %s.%s's Manager instead." %
                    (opts.app_label, opts.object_name)
                )
            self._remove_items(self.source_field_name, self.target_field_name, *objs)
        remove.alters_data = True
        def clear(self):
            # Deletes every through-table row for this instance, bracketed by
            # pre_clear/post_clear m2m_changed signals.
            db = router.db_for_write(self.through, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                signals.m2m_changed.send(sender=self.through, action="pre_clear",
                    instance=self.instance, reverse=self.reverse,
                    model=self.model, pk_set=None, using=db)
                filters = self._build_remove_filters(super(ManyRelatedManager, self).get_queryset().using(db))
                self.through._default_manager.using(db).filter(filters).delete()
                signals.m2m_changed.send(sender=self.through, action="post_clear",
                    instance=self.instance, reverse=self.reverse,
                    model=self.model, pk_set=None, using=db)
        clear.alters_data = True
        def set(self, objs, **kwargs):
            # Replace the related set with `objs`, either by clearing first
            # (clear=True) or by computing the minimal add/remove delta.
            if not rel.through._meta.auto_created:
                opts = self.through._meta
                raise AttributeError(
                    "Cannot set values on a ManyToManyField which specifies an "
                    "intermediary model. Use %s.%s's Manager instead." %
                    (opts.app_label, opts.object_name)
                )
            # Force evaluation of `objs` in case it's a queryset whose value
            # could be affected by `manager.clear()`. Refs #19816.
            objs = tuple(objs)
            clear = kwargs.pop('clear', False)
            db = router.db_for_write(self.through, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                if clear:
                    self.clear()
                    self.add(*objs)
                else:
                    old_ids = set(self.using(db).values_list(self.target_field.target_field.attname, flat=True))
                    new_objs = []
                    for obj in objs:
                        # `objs` may contain model instances or raw FK values.
                        fk_val = (self.target_field.get_foreign_related_value(obj)[0]
                            if isinstance(obj, self.model) else obj)
                        if fk_val in old_ids:
                            old_ids.remove(fk_val)
                        else:
                            new_objs.append(obj)
                    self.remove(*old_ids)
                    self.add(*new_objs)
        set.alters_data = True
        def create(self, **kwargs):
            # This check needs to be done here, since we can't later remove this
            # from the method lookup table, as we do with add and remove.
            if not self.through._meta.auto_created:
                opts = self.through._meta
                raise AttributeError(
                    "Cannot use create() on a ManyToManyField which specifies "
                    "an intermediary model. Use %s.%s's Manager instead." %
                    (opts.app_label, opts.object_name)
                )
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
            self.add(new_obj)
            return new_obj
        create.alters_data = True
        def get_or_create(self, **kwargs):
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            obj, created = super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
            # We only need to add() if created because if we got an object back
            # from get() then the relationship already exists.
            if created:
                self.add(obj)
            return obj, created
        get_or_create.alters_data = True
        def update_or_create(self, **kwargs):
            db = router.db_for_write(self.instance.__class__, instance=self.instance)
            obj, created = super(ManyRelatedManager, self.db_manager(db)).update_or_create(**kwargs)
            # We only need to add() if created because if we got an object back
            # from get() then the relationship already exists.
            if created:
                self.add(obj)
            return obj, created
        update_or_create.alters_data = True
        def _add_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK fieldname in join table for the source object
            # target_field_name: the PK fieldname in join table for the target object
            # *objs - objects to add. Either object instances, or primary keys of object instances.
            # If there aren't any objects, there is nothing to do.
            from django.db.models import Model
            if objs:
                new_ids = set()
                for obj in objs:
                    if isinstance(obj, self.model):
                        if not router.allow_relation(obj, self.instance):
                            raise ValueError(
                                'Cannot add "%r": instance is on database "%s", value is on database "%s"' %
                                (obj, self.instance._state.db, obj._state.db)
                            )
                        fk_val = self.through._meta.get_field(
                            target_field_name).get_foreign_related_value(obj)[0]
                        if fk_val is None:
                            raise ValueError(
                                'Cannot add "%r": the value for field "%s" is None' %
                                (obj, target_field_name)
                            )
                        new_ids.add(fk_val)
                    elif isinstance(obj, Model):
                        raise TypeError(
                            "'%s' instance expected, got %r" %
                            (self.model._meta.object_name, obj)
                        )
                    else:
                        new_ids.add(obj)
                db = router.db_for_write(self.through, instance=self.instance)
                # Drop ids already present in the through table so only the
                # genuinely new rows are inserted below.
                vals = (self.through._default_manager.using(db)
                        .values_list(target_field_name, flat=True)
                        .filter(**{
                            source_field_name: self.related_val[0],
                            '%s__in' % target_field_name: new_ids,
                        }))
                new_ids = new_ids - set(vals)
                with transaction.atomic(using=db, savepoint=False):
                    if self.reverse or source_field_name == self.source_field_name:
                        # Don't send the signal when we are inserting the
                        # duplicate data row for symmetrical reverse entries.
                        signals.m2m_changed.send(sender=self.through, action='pre_add',
                            instance=self.instance, reverse=self.reverse,
                            model=self.model, pk_set=new_ids, using=db)
                    # Add the ones that aren't there already
                    self.through._default_manager.using(db).bulk_create([
                        self.through(**{
                            '%s_id' % source_field_name: self.related_val[0],
                            '%s_id' % target_field_name: obj_id,
                        })
                        for obj_id in new_ids
                    ])
                    if self.reverse or source_field_name == self.source_field_name:
                        # Don't send the signal when we are inserting the
                        # duplicate data row for symmetrical reverse entries.
                        signals.m2m_changed.send(sender=self.through, action='post_add',
                            instance=self.instance, reverse=self.reverse,
                            model=self.model, pk_set=new_ids, using=db)
        def _remove_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK colname in join table for the source object
            # target_field_name: the PK colname in join table for the target object
            # *objs - objects to remove
            if not objs:
                return
            # Check that all the objects are of the right type
            old_ids = set()
            for obj in objs:
                if isinstance(obj, self.model):
                    fk_val = self.target_field.get_foreign_related_value(obj)[0]
                    old_ids.add(fk_val)
                else:
                    old_ids.add(obj)
            db = router.db_for_write(self.through, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                # Send a signal to the other end if need be.
                signals.m2m_changed.send(sender=self.through, action="pre_remove",
                    instance=self.instance, reverse=self.reverse,
                    model=self.model, pk_set=old_ids, using=db)
                target_model_qs = super(ManyRelatedManager, self).get_queryset()
                if target_model_qs._has_filters():
                    old_vals = target_model_qs.using(db).filter(**{
                        '%s__in' % self.target_field.target_field.attname: old_ids})
                else:
                    old_vals = old_ids
                filters = self._build_remove_filters(old_vals)
                self.through._default_manager.using(db).filter(filters).delete()
                signals.m2m_changed.send(sender=self.through, action="post_remove",
                    instance=self.instance, reverse=self.reverse,
                    model=self.model, pk_set=old_ids, using=db)
    return ManyRelatedManager
|
yongshengwang/hue | refs/heads/master | desktop/core/ext-py/avro-1.7.6/build/lib/avro/txipc.py | 73 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from avro import ipc
from avro import io
from zope.interface import implements
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from twisted.internet.defer import maybeDeferred, Deferred
from twisted.web.iweb import IBodyProducer
from twisted.web import resource, server
from twisted.internet.protocol import Protocol
class TwistedRequestor(ipc.BaseRequestor):
  """Twisted-friendly Avro requestor.

  Rather than blocking until the call completes, issue_request() returns a
  Deferred that eventually fires with the call's result.
  """
  def _process_handshake(self, call_response, message_name, request_datum):
    # Decode the server's reply and complete the protocol handshake.
    decoder = io.BinaryDecoder(StringIO(call_response))
    if self.read_handshake_response(decoder):
      return self.read_call_response(message_name, decoder)
    # Handshake not established yet: re-issue the original request.
    return self.request(message_name, request_datum)
  def issue_request(self, call_request, message_name, request_datum):
    deferred = self.transceiver.transceive(call_request)
    deferred.addCallback(self._process_handshake, message_name, request_datum)
    return deferred
class RequestStreamingProducer(object):
  """A streaming producer for issuing requests with the Twisted.web Agent.

  Frames ``message`` as the Avro HTTP transport expects: a series of
  (length header, payload) buffers terminated by a zero-length buffer.
  startProducing() returns a Deferred that fires once the entire framed
  message has been written to the consumer.
  """
  implements(IBodyProducer)
  paused = False
  stopped = False
  started = False
  def __init__(self, message):
    self._message = message
    self._length = len(message)
    # We need a buffer length header for every buffer and an additional
    # zero-length buffer as the message terminator.  Floor division keeps
    # the arithmetic integral on both Python 2 and Python 3.
    self._length += (self._length // ipc.BUFFER_SIZE + 2) \
                    * ipc.BUFFER_HEADER_LENGTH
    self._total_bytes_sent = 0
    self._deferred = Deferred()
  # read-only properties
  message = property(lambda self: self._message)
  length = property(lambda self: self._length)
  consumer = property(lambda self: self._consumer)
  deferred = property(lambda self: self._deferred)
  def _get_total_bytes_sent(self):
    return self._total_bytes_sent
  def _set_total_bytes_sent(self, bytes_sent):
    self._total_bytes_sent = bytes_sent
  total_bytes_sent = property(_get_total_bytes_sent, _set_total_bytes_sent)
  def startProducing(self, consumer):
    if self.started:
      return
    self.started = True
    self._consumer = consumer
    # Keep writing data to the consumer until we're finished,
    # paused (pauseProducing()) or stopped (stopProducing())
    while self.length - self.total_bytes_sent > 0 and \
          not self.paused and not self.stopped:
      self.write()
    # self.write will fire this deferred once it has written
    # the entire message to the consumer
    return self.deferred
  def resumeProducing(self):
    self.paused = False
    # BUG FIX: previously called self.write(self), which passed a spurious
    # positional argument and raised TypeError whenever a paused producer
    # was resumed.
    self.write()
  def pauseProducing(self):
    self.paused = True
  def stopProducing(self):
    self.stopped = True
  def write(self):
    # Write at most one BUFFER_SIZE chunk of the framed message.
    if self.length - self.total_bytes_sent > ipc.BUFFER_SIZE:
      buffer_length = ipc.BUFFER_SIZE
    else:
      buffer_length = self.length - self.total_bytes_sent
    self.write_buffer(self.message[self.total_bytes_sent:
                                   (self.total_bytes_sent + buffer_length)])
    self.total_bytes_sent += buffer_length
    # Make sure we wrote the entire message
    if self.total_bytes_sent == self.length and not self.stopped:
      self.stopProducing()
      # A message is always terminated by a zero-length buffer.
      self.write_buffer_length(0)
      self.deferred.callback(None)
  def write_buffer(self, chunk):
    # Each chunk is preceded by its big-endian length header.
    buffer_length = len(chunk)
    self.write_buffer_length(buffer_length)
    self.consumer.write(chunk)
  def write_buffer_length(self, n):
    self.consumer.write(ipc.BIG_ENDIAN_INT_STRUCT.pack(n))
class AvroProtocol(Protocol):
  """Twisted Protocol that reassembles a framed Avro response.

  Accumulates (length header, payload) buffers; a zero-length buffer
  terminates the message and fires ``finished`` with the joined payload.
  """
  recvd = ''   # bytes received but not yet consumed
  done = False   # set once the terminating zero-length buffer is seen
  def __init__(self, finished):
    self.finished = finished
    self.message = []
  def dataReceived(self, data):
    self.recvd = self.recvd + data
    # Consume as many complete framed buffers as are available.
    while len(self.recvd) >= ipc.BUFFER_HEADER_LENGTH:
      buffer_length ,= ipc.BIG_ENDIAN_INT_STRUCT.unpack(
        self.recvd[:ipc.BUFFER_HEADER_LENGTH])
      if buffer_length == 0:
        # Zero-length buffer marks end-of-message.
        response = ''.join(self.message)
        self.done = True
        self.finished.callback(response)
        break
      if len(self.recvd) < buffer_length + ipc.BUFFER_HEADER_LENGTH:
        # Wait for the rest of this buffer to arrive.
        break
      buffer = self.recvd[ipc.BUFFER_HEADER_LENGTH:buffer_length + ipc.BUFFER_HEADER_LENGTH]
      self.recvd = self.recvd[buffer_length + ipc.BUFFER_HEADER_LENGTH:]
      self.message.append(buffer)
  def connectionLost(self, reason):
    # Connection dropped before the terminator: report the failure.
    if not self.done:
      self.finished.errback(ipc.ConnectionClosedException("Reader read 0 bytes."))
class TwistedHTTPTransceiver(object):
  """This transceiver uses the Agent class present in Twisted.web >= 9.0
  for issuing requests to the remote endpoint."""
  def __init__(self, host, port, remote_name=None, reactor=None):
    self.url = "http://%s:%d/" % (host, port)
    if remote_name is None:
      # There's no easy way to get this peer's remote address
      # in Twisted so I use a random UUID to identify ourselves
      import uuid
      remote_name = uuid.uuid4()
    # BUG FIX: self.remote_name was previously assigned only inside the
    # `remote_name is None` branch, so a caller-supplied name was dropped.
    self.remote_name = remote_name
    if reactor is None:
      from twisted.internet import reactor
    self.agent = Agent(reactor)
  def read_framed_message(self, response):
    """Return a Deferred that fires with the fully reassembled reply body."""
    finished = Deferred()
    response.deliverBody(AvroProtocol(finished))
    return finished
  def transceive(self, request):
    """POST the framed `request` and return a Deferred for the framed reply."""
    req_method = 'POST'
    req_headers = {
      'Content-Type': ['avro/binary'],
      'Accept-Encoding': ['identity'],
    }
    body_producer = RequestStreamingProducer(request)
    d = self.agent.request(
      req_method,
      self.url,
      headers=Headers(req_headers),
      bodyProducer=body_producer)
    return d.addCallback(self.read_framed_message)
class AvroResponderResource(resource.Resource):
  """This Twisted.web resource can be placed anywhere in a URL hierarchy
  to provide an Avro endpoint. Different Avro protocols can be served
  by the same web server as long as they are in different resources in
  a URL hierarchy."""
  isLeaf = True   # no child resources: this node handles the request itself
  def __init__(self, responder):
    resource.Resource.__init__(self)
    self.responder = responder
  def cb_render_POST(self, resp_body, request):
    # Called once the responder produces a reply; write it framed and finish.
    request.setResponseCode(200)
    request.setHeader('Content-Type', 'avro/binary')
    resp_writer = ipc.FramedWriter(request)
    resp_writer.write_framed_message(resp_body)
    request.finish()
  def render_POST(self, request):
    # Unfortunately, Twisted.web doesn't support incoming
    # streamed input yet, the whole payload must be kept in-memory
    request.content.seek(0, 0)
    call_request_reader = ipc.FramedReader(request.content)
    call_request = call_request_reader.read_framed_message()
    # The responder may return a value or a Deferred; maybeDeferred
    # normalizes both cases.
    d = maybeDeferred(self.responder.respond, call_request)
    d.addCallback(self.cb_render_POST, request)
    return server.NOT_DONE_YET
|
Evervolv/android_external_chromium_org | refs/heads/kitkat | base/allocator/prep_libc.py | 30 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# This script takes libcmt.lib for VS2005/08/10 and removes the allocation
# related functions from it.
#
# Usage: prep_libc.py <VCLibDir> <OutputDir> <arch>
#
# VCLibDir is the path where VC is installed, something like:
# C:\Program Files\Microsoft Visual Studio 8\VC\lib
# OutputDir is the directory where the modified libcmt file should be stored.
# arch is either 'ia32' or 'x64'
import os
import shutil
import subprocess
import sys
def run(command, filter=None):
  """Run |command|, echoing its output minus any lines equal to |filter|.

  The filter exists to suppress 'lib' echoing the input file name.
  Returns the command's exit status.
  """
  popen = subprocess.Popen(
      command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
      # Decode output to text so line comparisons also work on Python 3.
      universal_newlines=True)
  out, _ = popen.communicate()
  for line in out.splitlines():
    # BUG FIX: the old `if filter and ...` test silently discarded ALL
    # output whenever no filter was given, contradicting the docstring.
    if filter is None or line.strip() != filter:
      print(line)
  return popen.returncode
def main():
  """Copy libcmt.lib/.pdb into the output dir and strip allocator objects.

  Expects sys.argv = [prog, VCLibDir, OutputDir, arch]; runs lib.exe once
  per (object file, candidate build path) pair.
  """
  lib_dir = sys.argv[1]
  out_dir = sys.argv[2]
  if "x64" in sys.argv[3]:
    self_dir = 'SELF_64_amd64'
    obj_subdir = 'amd64'
    lib_dir = os.path.join(lib_dir, 'amd64')
  else:
    self_dir = 'SELF_X86'
    obj_subdir = 'INTEL'
  output_lib = os.path.join(out_dir, 'libcmt.lib')
  shutil.copyfile(os.path.join(lib_dir, 'libcmt.lib'), output_lib)
  shutil.copyfile(os.path.join(lib_dir, 'libcmt.pdb'),
                  os.path.join(out_dir, 'libcmt.pdb'))
  # Paths under which the CRT object files may be recorded inside the lib.
  crt_build = '\\crt\\src\\build\\'
  vspaths = [
      'build\\intel\\mt_obj\\',
      'f:\\dd\\vctools\\crt_bld\\' + self_dir + crt_build + obj_subdir + '\\mt_obj\\',
      'F:\\dd\\vctools\\crt_bld\\' + self_dir + crt_build + obj_subdir + '\\mt_obj\\nativec\\\\',
      'F:\\dd\\vctools\\crt_bld\\' + self_dir + crt_build + obj_subdir + '\\mt_obj\\nativecpp\\\\',
  ]
  objfiles = ['malloc', 'free', 'realloc', 'new', 'delete', 'new2', 'delete2',
              'align', 'msize', 'heapinit', 'expand', 'heapchk', 'heapwalk',
              'heapmin', 'sbheap', 'calloc', 'recalloc', 'calloc_impl',
              'new_mode', 'newopnt', 'newaopnt']
  for objfile in objfiles:
    for vspath in vspaths:
      command = ('lib /nologo /ignore:4006,4014,4221 /remove:%s%s.obj %s' %
                 (vspath, objfile, output_lib))
      run(command, objfile + '.obj')
# Script entry point: main() returns None, so the process exits with status 0.
if __name__ == "__main__":
  sys.exit(main())
|
karroje/RAIDER_eval | refs/heads/master | redhawk.py | 1 | ############################################################################
# phRAIDER is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# phRAIDER is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with phRAIDER. If not, see <http:#www.gnu.org/licenses/>.
#
# Primary author: John Karro
# Contributing authors: Carly Schaefer, Jens Muller
############################################################################
############################################################################
# redhawk.py
# Class for submitting / managing pbs jobs throgh a python script
import random
import re, string, sys
import subprocess
import getopt
import time
import os
import os.path
import pickle
from argparse import *
import pwd
import datetime
import pickle
import tempfile
#import logging
# Shell script body used as a PBS epilogue; echoes job accounting info to
# stderr so resource usage can be scraped from the job's error file.
epilogue_str = """#!/bin/sh
echo "Redhawk Epilogue Args:" >&2
echo "Job ID: $1" >&2
echo "User ID: $2" >&2
echo "Group ID: $3" >&2
echo "Job Name: $4" >&2
echo "Session ID: $5" >&2
echo "Resource List: $6" >&2
echo "Resources Used: $7" >&2
echo "Queue Name: $8" >&2
echo "Account String: $9" >&2
echo "" >&2
exit 0
"""
# Epilogue output signature of a job that recorded no resource usage.
bad_ep_str = """
Redhawk Epilogue Args:
Resources Used: cput=00:00:00,mem=0kb,vmem=0kb,walltime=00:00:00
"""
############################################################
# Library-global variables
uid = os.getuid()
current_user = pwd.getpwuid(uid)[0]   # When run on redhawk with a nohup, os.getlogin() does not work
# Probe for a working PBS installation by invoking qstat once.
try:
    subprocess.check_call(["qstat > /dev/null"], shell = True)
except:
    pbs_present = False
else:
    pbs_present = True
job_list = set()   # Global list of all jobs that have been submitted and not yet identified as having quit
# Far fewer concurrent jobs are allowed when falling back to local popen.
job_limit = 200 if pbs_present else 2
# qstat line patterns: 'C' = completed, 'R'/'Q' = running or queued.
redhawkStatsRe = re.compile("\s+C\s+[^\s]+\s*$")
redhawkInQueueRe = re.compile("\s+[R|Q]\s+[^\s]+\s*$")
# This contains the default parameter values for __init__. Doing it this way is no longer necessary,
# but its not worth taking out.
pbs_defaults = {'use_pid':True, 'job_name':None, 'nodes':1, 'ppn':1, 'mem':False, 'walltime':"04:00:00", 'address':None, 'join':False, 'env':None, 'queue':None, 'mail':None, 'output_location':None, 'chdir':None, 'RHmodules':None, 'file_limit':6, 'file_delay':5, 'epilogue_file':None}
############################################################
class PBSError(Exception):
    """Exception type raised by the pbsJobHandler class."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        # Render the stored value with repr semantics, matching the
        # original behavior of this exception type.
        return "%r" % (self.value,)
############################################################
class pbsJobHandler:
#logger = None
"""A pbsJobHandler corresponds to a job launched (or to be launched) on redhawk. Once the object is created (and provided with a command-line execution command),
the user can extract various inforamtion about the job (current status, output, etc...) and cleanup files."""
def __init__(self, batch_file, executable, use_pid = pbs_defaults['use_pid'], job_name = pbs_defaults['job_name'], nodes = pbs_defaults['nodes'], ppn = pbs_defaults['ppn'],
mem = pbs_defaults['mem'], walltime = pbs_defaults['walltime'], address = pbs_defaults['address'], join = pbs_defaults['join'], env = pbs_defaults['env'],
queue = pbs_defaults['queue'], mail = pbs_defaults['mail'], output_location = pbs_defaults['output_location'], chdir = pbs_defaults['chdir'],
RHmodules = pbs_defaults['RHmodules'], file_limit = pbs_defaults['file_limit'], file_delay = pbs_defaults['file_delay'], epilogue_file = pbs_defaults['epilogue_file'],
suppress_pbs = None, stdout_file = None, stderr_file = None, res_file = None, arch_type = None, always_outputs=True, depends=None):
"""Constructor. Requires a file name for the batch file, and the execution command. Optional parmeters include:
* use_pid: will embded a process id into the batch file name if true. Default = true.
* job_name: A name for the redhawk process. Default = the batch file name.
* nodes: number of nodes required for the job. Default = 1.
* ppn: number of processors needed for the job. Default = 1.
* mem: Using 128 Gb machine. Default = False -- don't use. Other values: 'redhawk' or 'oakley'.
* walltime: Maximum allowed runtime for the job (hours:minutes:seconds). Default = 40:00:00. Max. allowed: 400:00:00.
* mail = when to send email. Any combination of:
b send mail when job begins
e send mail when job ends
a send mail when job aborts
* address: additional email addresses to send notification (comma seperated)
* join: If true, the stdout and stderr files are combined into one file
* queue: redhawk queue to run on. Default: redhawk chooses.
* output_location: Directory to place output files. Default: current directory.
* RHmodules: A list of redhawk modules to be loaded before run (e.g. ['Blast+']). Default: none.
* depends: A list of pbs job objects on which this is dependent
* epilogue file: Script needed to track memory usage. Will overwrite any file of the same name. By default: <batch_file>.epilogue.sh"
* suppress_pbs: If true, this will run the job with a standard popen, instead of forking it out to the pbs job manager. (Included so we can run code
on other machines for testing.) Will still create all files that would have been created -- emulates redhawk execution as much as possible.
Not yes set up to extract resource usage. If false, will force an attempt to use the pbs job manager. By default: it will use pbs if
possible (specifically: if qstat can be run on the machine)
* stdout_file: File to receive stdout content. (Default: <job_name>.o<id>, in output_location directory if specified.)
* stdin_file: File to receive stderr content. (Default: <job_name>.e<id>, in output_location directory if specified.)
* res_file: File to hold resources. (Default: <job_name>.r<id>, in output_location directory if specified.)
* arch_type: Array if specific redhawk architecture to be used (n09, n11, bigmem)
* oakley_bigmem: Run on the big-mem server (oakley server only)
"""
if epilogue_file and "/" in epilogue_file:
raise PBSError("Bad epilogue file name: " + epilogue_file)
self.batch_file_name = batch_file
if use_pid:
self.batch_file_name = self.batch_file_name + "." + str(os.getpid())
self.cmd = executable
self.jobname = job_name if job_name else batch_file
self.file_limit = file_limit
self.file_delay = file_delay
self.resources = None
self.status = "unstarted"
self.suppress_pbs = not pbs_present if suppress_pbs is None else suppress_pbs
self.epilogue = os.getcwd() + "/" + "redhawk_epilogue.sh"
f = open(self.batch_file_name, 'w')
f.write("#!/bin/bash -l\n")
s="#PBS -N "+ self.jobname +"\n"
f.write(s)
self.has_split = False # Indicate whether the err file has ever been split (added for debugging)
#some defaults:
self.nodes = nodes
self.ppn = ppn
self.mem = mem
self.walltime = walltime
self.modules = RHmodules if not self.suppress_pbs else None
self.output_location = output_location if output_location else "."
self.arch_type = arch_type if arch_type else []
# if not pbsJobHandler.logger:
# pbsJobHandler.logger = logging.getLogger('reval.redhawk')
# pbsJobHandler.logger.setLevel(logging.DEBUG)
# lfp = self.output_location + '/redhawk.log'# + self.batch_file_name
# lfh = logging.FileHandler(lfp, mode='w')
# lfm = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s')
# lfh.setLevel(logging.DEBUG)
# lfh.setFormatter(lfm)
# pbsJobHandler.logger.addHandler(lfh)
#self.logger = pbsJobHandler.logger
self.preserve = False
self.always_outputs = always_outputs
self.is_timing = False
#self.logger.debug("Initializing new job with batch file name : " + self.batch_file_name)
self.depends = [p for p in depends if p] if depends else None
s="#PBS -N " + self.jobname + "\n"
s="#PBS -l nodes="+ str(self.nodes)+":ppn="+str(self.ppn)
if self.mem == 'redhawk':
s += ":m128"
if self.arch_type:
s += ":" + ":".join(arch_type)
s += "\n"
f.write(s)
if self.mem == 'oakley':
s="#PBS -l mem=192GB\n"
f.write(s)
s="#PBS -l walltime="+self.walltime+"\n"
f.write(s)
if self.depends and len(self.depends) > 0:
s = "#PBS -W depend=" + ",".join(["afterok:" + str(x.jobid) for x in self.depends]) + "\n"
f.write(s)
if stdout_file:
self.ofile = self.output_location + "/" + stdout_file
f.write("#PBS -o %s\n" % self.ofile)
else:
self.ofile = None
f.write("#PBS -o %s\n" % self.output_location)
if stderr_file:
self.efile = self.output_location + "/" + stderr_file
f.write("#PBS -e %s\n" % self.efile)
else:
self.efile = None
f.write("#PBS -e %s\n" % self.output_location)
if res_file:
self.rfile = res_file
if join:
f.write("#PBS -j oe\n")
if address:
s="#PBS -M "+address+"\n"
f.write(s)
if queue:
s="#PBS -q "+queue+"\n"
f.write(s)
if env:
f.write("#PBS -V\n")
if mail:
s="#PBS -m "+mail+"\n"
f.write(s)
if chdir:
s="cd "+chdir+"\n"
f.write(s)
else:
s="cd $PBS_O_WORKDIR\n";
f.write(s);
#if timing:
# self.timing = self.output_location + "/" + self.batch_file_name + ".timing"
# self.cmd = "/usr/bin/time -o " + self.timing + " -f \"%U\" " + self.cmd
#else:
# self.timing = None
if self.modules != None:
self.cmd = "; ".join(["module load " + x for x in self.modules]) + "; " + self.cmd
f.write(self.cmd)
f.close()
self.jobid=0
self.split = self.suppress_pbs # Set to true when the .e file gets split, unless we are not using pbs (in which case it is irrelevant)
if not os.path.isfile(self.epilogue):
open(self.epilogue, "w").write(epilogue_str)
subprocess.call("chmod 500 %s" % (self.epilogue), shell=True)
def resubmit_with_more_time(self, print_qsub=False, job_limit = job_limit, delay=10, user=current_user, new_walltime=None):
    """ Resubmit job to queue with a new walltime. Since job is resubmitted, also need to delete old job from queue (as it is no longer important)"""
    # Copy the old batch script line-by-line, rewriting only the PBS
    # walltime directive, then swap the new script in as this job's batch file.
    new_batch = self.batch_file_name + '_new'
    with open(self.batch_file_name) as f:
        with open(new_batch, 'w') as f1:
            for line in f:
                if new_walltime and "walltime" in line:
                    # Replace the walltime request and remember the new limit.
                    s="#PBS -l walltime="+new_walltime+"\n"
                    f1.write(s)
                    self.walltime = new_walltime
                else:
                    f1.write(line)
    self.batch_file_name = new_batch
    # The superseded job must be removed before the replacement is queued.
    self.delete_job_from_queue(self.jobid)
    return self.submit_timed_job(self.preserve, print_qsub, job_limit, delay, user, self.safety_margin)
def delete_job_from_queue(self, jobid):
    """ Delete job from queue by its jobid. Wait until job is deleted and associated ofile/efile are removed"""
    retry=60                    # max qdel attempts / max file polls (normal case)
    always_retry=600            # max file polls when always_outputs is set
    sleepTimeBetweenRetry=10    # seconds between file-existence polls
    trial=1;
    cmd = "qdel " + jobid #optionalFlag + " " + self.batch_file_name
    # Keep invoking qdel until it succeeds (returncode 0) or we give up.
    while (trial < retry):
        p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Sleep to ensure the prh file is created before termination
        (output, error) = [x.decode() for x in p.communicate()]
        if p.returncode == 0:
            break
        trial = trial + 1
    if trial == retry:
        return -1   # qdel never succeeded
    # PBS writes the .o/.e files on teardown; poll until both exist,
    # bounded by retry or always_retry depending on self.always_outputs.
    trial = 1
    while not(self.efile_exists() and self.ofile_exists()):
        if (not self.always_outputs and trial >= retry) or trial >= always_retry:
            break
        trial = trial + 1
        time.sleep(sleepTimeBetweenRetry)
        pass
    self.erase_files()
    return self
### submitjob
### Parameters:
### file is the job script file
### if preserve is True, don't delete the job script. Delete otherwise
### optionalFlag is the flag after qsub
### retry (default set to retry 600 times), the number of times, the job will be submitted in retry
### seconds between retry (default is 10 seconds)
### return job id if successful
### return -1 if not
def submit(self, preserve=False, print_qsub = False, job_limit = job_limit, delay=10, user=current_user):
    """Submit job to redhawk. Optional parameters:
    * preserve: if False, delete the batch file. Default = true.
    * job_limit: If the user currently has this many jobs on the batch, wait until one finishes.
    """
    if self.suppress_pbs: # We are not using the pbs job handler -- direct call to Popen
        # Emulate PBS: invent a job id and default output-file names, then
        # add shell redirections unless the command already has its own.
        self.jobid = random.randint(1000000, 9999999)
        if not self.ofile:
            self.ofile = self.output_location + "/" + self.jobname + ".o" + str(self.jobid)
        if not self.efile:
            self.efile = self.output_location + "/" + self.jobname + ".e" + str(self.jobid)
        if not self.rfile:
            self.rfile = self.output_location + "/" + self.jobname + ".r" + str(self.jobid)
        # "[^2]\>" matches a stdout redirect (a ">" not preceded by "2").
        if not re.search("[^2]\>", self.cmd):
            self.cmd += " > {ofile}".format(ofile = self.ofile)
        if not re.search("2\>", self.cmd):
            self.cmd += " 2> {efile}".format(efile = self.efile)
        self.p = subprocess.Popen(self.cmd, shell=True)
    else:
        # Real PBS submission: optionally wait for the user's job count to
        # drop below job_limit, then retry qsub until it succeeds.
        if job_limit > 0:
            limit_jobs(limit=job_limit, delay=delay, user=user, use_pbs = not self.suppress_pbs)
        optionalFlag= '-l epilogue=' + self.epilogue
        retry=600
        # NOTE(review): sleepTimeBetweenRetry is defined but no sleep occurs
        # between qsub retries below -- confirm intended.
        sleepTimeBetweenRetry=10
        trial=1;
        cmd = "qsub " + optionalFlag + " " + self.batch_file_name
        while (trial < retry):
            p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Sleep to ensure the prh file is created before termination
            (output, error) = [x.decode() for x in p.communicate()]
            if p.returncode == 0:
                break
            trial = trial + 1
        if trial == retry:
            return -1   # submission failed after all retries
        # qsub prints "<id>.<server>"; keep just the leading id.
        t=re.split('\.',output)
        self.jobid=t[0]
        if not self.preserve and not preserve:
            os.remove(self.batch_file_name)
        if not self.ofile:
            self.ofile = self.output_location + "/" + self.jobname + ".o" + str(self.jobid)
        if not self.efile:
            self.efile = self.output_location + "/" + self.jobname + ".e" + str(self.jobid)
    if print_qsub:
        if self.suppress_pbs:
            print("Popen: jobid: {jobid}, process id: {pid}".format(jobid = self.jobid, pid = self.p.pid))
        else:
            print("PBS: jobid: {jobid}".format(jobid = self.jobid))
    self.status = "running"
    job_list.add(self)
    return self
def submit_timed_job(self, preserve=False, print_qsub = False, job_limit = job_limit, delay=10, user=current_user, safety_margin=None):
    """Submit timed job to redhawk. Same as submit_job, but sets the timing related data fields. Additional optional parameters:
    * safety_margin: Can have user define the amount of extra time necessary when aborting a job"""
    # Record the submission time so running_out_of_time() can compare the
    # elapsed wall-clock time against the requested walltime.
    self.start_time = time.time()
    if safety_margin:
        self.safety_margin = safety_margin
    else:
        # Default margin: one quarter of the requested walltime.
        self.safety_margin = self.parse_redhawk_time() / 4
    self.is_timing = True
    self.preserve = preserve
    return self.submit(preserve, print_qsub, job_limit, delay, user)
def submitjob(self, preserve=False, print_qsub = False, job_limit = job_limit, delay=10, user=current_user ):
    """Deprecated alias: forwards directly to submit()."""
    return self.submit(preserve, print_qsub, job_limit, delay, user)
def checkJobState(self):
    """Return the PBS job-state code for this job (e.g. 'Q', 'R', 'C'),
    or None if qstat fails or reports no state."""
    cmd = "qstat -f {jobID}".format(jobID=self.jobid)
    output, error = [x.decode() for x in subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()]
    # Bug fix: `qstat -f` reports job_state as a letter code, so the old
    # pattern "(\d+)" could never match; match word characters instead,
    # and test the match explicitly rather than relying on a bare except.
    r = re.search(r"job_state\s+=\s+(\w+)", output)
    return r.group(1) if r else None
### isJobRunning
### This is primarily useful for waiting for a _submitted_ job to finish
### return False if the job is done, completed for sure
### return True if the job is in Q, R states [ or that PBS/Torque is not available ]
### Prereq is jobid must be a submitted job
def isJobRunning(self, numTrials = 3, delay = 5, increase_amount=1.5):
    """Query of the object represented by the job is still running.

    Returns False once the job is known finished, True while it is queued
    or running.  For timed jobs, may transparently resubmit the job with a
    walltime scaled by ``increase_amount`` when time is running out."""
    if self.status == "finished":
        return False
    if self.suppress_pbs:
        # Local-Popen emulation: poll() is None while the child runs.
        if not self.p.poll():
            return True
        else:
            self.status = "finished"
            job_list.remove(self)
            return False
    # If we are using pbs...
    while True:
        if self.is_timing and self.running_out_of_time():
            # if running out of time, resubmit job with new walltime = prev_walltime*increase_amount
            new_wt = self.make_redhawk_time(self.parse_redhawk_time()*increase_amount)
            self.resubmit_with_more_time(new_walltime=new_wt)
        if self.ofile_exists() or self.efile_exists():
            # One output file has appeared; wait for both before declaring
            # the job finished.
            # NOTE(review): this inner loop spins with no sleep -- confirm acceptable.
            while not(self.efile_exists() and self.ofile_exists()):
                pass
            self.status = "finished"
            if self in job_list:
                job_list.remove(self)
            return False
        cmd = "qstat " + str(self.jobid)
        magicString = 'Unknown Job ID'
        (output, error) = [x.decode() for x in subprocess.Popen(cmd.split(" "), stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()]
        if redhawkInQueueRe.search(output):
            return True
        time.sleep(delay)
    # Unreachable: the while True loop above always returns.
    raise PBSError("RedHawk error: out of queue, no output file. OFILE: %s" % (self.ofile))
def wait(self, delay=10):
    """Block until the job completes, polling every ``delay`` seconds.

    Returns True when the stdout file exists afterwards."""
    if self.suppress_pbs:
        # Direct subprocess: simply join the child process.
        self.p.wait()
        self.status = "finished"
        return self.ofile_exists()
    while self.isJobRunning():
        time.sleep(delay)
    return self.ofile_exists()
def wait_on_job(self, delay=10):
    """Deprecated alias for wait()."""
    return self.wait(delay)
def timed_wait(self, delay=10, increase_amount=1.5):
    """CS: Spin until job completes OR is running out of time and should resubmit."""
    if self.suppress_pbs: #CS: not sure what this is for, leaving it in because seems harmless
        # Direct subprocess: nothing to resubmit, just join the child.
        self.p.wait()
        self.status = "finished"
        return self.ofile_exists()
    # isJobRunning() performs the resubmit-with-more-time logic internally.
    while self.isJobRunning(increase_amount=increase_amount):
        time.sleep(delay)
    return self.ofile_exists()
def running_out_of_time(self):
    """Determine whether job is running out of time (walltime - time_elapsed - safety_time)"""
    if not self.is_timing:
        # Untimed jobs never trigger a resubmit.
        return False
    elapsed = time.time() - self.start_time
    remaining = self.parse_redhawk_time() - self.safety_margin - elapsed
    return remaining < 0
def parse_redhawk_time(self):
    """CS: Parse time limit string for redhawk (format HH:MM:SS) into seconds amount"""
    total = 0
    # Fold "HH:MM:SS" left-to-right: multiply the running total by 60
    # before adding each field (works for any number of fields).
    for field in self.walltime.split(":"):
        total = total * 60 + int(field)
    return total
def make_redhawk_time(self, secs):
    """Format a duration given in seconds as an HH:MM:SS walltime string."""
    # gmtime() splits the seconds into a time struct; strftime renders it.
    return time.strftime("%H:%M:%S", time.gmtime(secs))
def ofile_name(self):
    """Get the name of the file containing the job stdout output."""
    return self.ofile
def efile_name(self):
    """Get the name of the file containing the job stderr output."""
    return self.efile
def rfile_name(self):
    """Get the name of the file containing the resource-usage output."""
    return self.rfile
#def timing_name(self):
# """Get the name of the timing file."""
# return self.timing
def ofile_exists(self):
    """Does the file containing the job stdout output exist?"""
    return os.path.isfile(self.ofile)
def efile_exists(self):
    """Does the file containing the job stderr output exist?"""
    return os.path.isfile(self.efile)
def rfile_exists(self):
    """Does the file containing the job resource-usage output exist?"""
    return os.path.isfile(self.rfile)
def ofile_handle(self):
    """Return a handle to the file containing the job stdout output.

    Raises NameError if the job is unfinished or the file never appears."""
    if self.status != "finished":
        raise NameError("redhawk: unfinished ofile check")
    # The file system may lag the scheduler; poll a bounded number of times.
    for _ in range(self.file_limit):
        if self.ofile_exists():
            break
        time.sleep(self.file_delay)
    if os.path.isfile(self.ofile_name()):
        return open(self.ofile_name(), "r")
    raise NameError("redhawk: unfound ofile")
def efile_handle(self):
    """Return a handle to the file containing the job stderr output.

    Raises NameError if the job is unfinished or the file never appears."""
    if not self.status == "finished":
        raise NameError("redhawk: unfinished efile check")
    tries = 0
    # The file system may lag the scheduler; poll a bounded number of times.
    while not self.efile_exists() and tries < self.file_limit:
        time.sleep(self.file_delay)
        tries = tries + 1
    if os.path.isfile(self.efile_name()):
        return open(self.efile_name(), "r")
    # Bug fix: this previously re-raised "unfinished efile check" (copy-paste
    # from the guard above), masking the real failure -- the file never
    # appeared.  Mirrors ofile_handle()'s "unfound ofile" message.
    raise NameError("redhawk: unfound efile")
def rfile_handle(self):
    """Return a handle to the file containing the resource description."""
    if self.suppress_pbs:
        # Resource files are produced only by the PBS epilogue.
        raise PBSError("Cannot check resource file on a pbs-suppressed job")
    # Make sure the epilogue output has been separated out of the .e file.
    self.split_efile()
    return open(self.rfile)
def ofile_string(self):
    """Return the entire contents of the stdout file as a single string."""
    handle = self.ofile_handle()
    if not handle:
        return None
    # Strip trailing whitespace per line; always end with a newline.
    return "\n".join(line.rstrip() for line in handle) + "\n"
def efile_string(self):
    """Return the entire contents of the stderr file as a single string."""
    handle = self.efile_handle()
    if not handle:
        return None
    # Strip trailing whitespace per line; always end with a newline.
    return "\n".join(line.rstrip() for line in handle) + "\n"
# def get_timing(self, delete_timing = True):
# """Get the time-generated user runtime and delete the timing file.
# (Assumed to be the last line of the timing file.)
# By default, erases the timing file."""
# if not self.timing:
# return None
# else:
# try:
# with open(self.timing) as fp:
# lines = fp.readlines()
# if delete_timing:
# os.remove(self.timing)
# return float(lines[-1])
# except:
# sys.stderr.write("Redhawk.runtime: invalid runtime (%s)\n" % (self.timing))
# sys.exit(1)
# return None
def erase_files(self, empty_only = False):
    """Erase the stdout, stderr and resource files.

    If ``empty_only`` is True, only zero-byte files are removed.
    Missing files are silently ignored.  Always returns None."""
    # Bug fix: the rfile branch called bare ``path.getsize`` (a NameError,
    # silently swallowed by the old bare except), so empty rfiles were never
    # erased; all three files now go through the same os.path.getsize check.
    for name_fn in (self.ofile_name, self.efile_name, self.rfile_name):
        try:
            if not empty_only or os.path.getsize(name_fn()) == 0:
                os.remove(name_fn())
        except (OSError, TypeError):
            # OSError: file absent; TypeError: file name is None.
            pass
    return None
def get_results(self, resources = False, cleanup=True):
    """Wait for the job, then return its (stdout, stderr) strings.

    If ``resources`` is True, a (stdout, stderr, resources) triple is
    returned instead.  When ``cleanup`` is set, the output files are erased."""
    # Bug fix: this previously tested the non-existent attribute
    # ``suppress_job`` (AttributeError whenever resources=True); the real
    # flag is ``suppress_pbs``.
    if resources and self.suppress_pbs:
        raise PBSError("Cannot get resources on a pbs-suppressed job")
    self.wait()
    self.split_efile()
    stdout_str = self.ofile_string()
    stderr_str = self.efile_string()
    if resources:
        T = self.getResources()
    if cleanup:
        self.erase_files()
    return (stdout_str, stderr_str, T) if resources else (stdout_str, stderr_str)
def getResults(self, cleanup=True):
    """Legacy wrapper for get_results().

    Bug fix: ``cleanup`` was previously passed positionally, landing in
    get_results()'s first parameter (``resources``); pass it by keyword."""
    return self.get_results(cleanup=cleanup)
def loadResources(self):
    """Populate self.resources with (cpu_time, wall_time, memory, vmemory).

    Times are in seconds, memory in bytes.  All four are -1 placeholders
    for pbs-suppressed jobs or when the resource file cannot be read."""
    if not self.resources:
        self.wait()
        if self.suppress_pbs:
            # No epilogue ran, so no real numbers exist (tuple for
            # consistency with the other branches).
            self.resources = (-1, -1, -1, -1)
        else:
            try:
                fp = self.rfile_handle()
            except Exception:
                self.resources = (-1, -1, -1, -1)
                return None
            # Scan the epilogue output for the "Resources Used:" line and
            # decode cput/mem/vmem/walltime from it.
            for line in fp:
                if line.startswith("Resources Used:"):
                    r = re.search(r"cput=(\d+):(\d+):(\d+),mem=(\d+)kb,vmem=(\d+)kb,walltime=(\d+):(\d+):(\d+)", line)
                    if not r:
                        raise PBSError("Bad resource line: " + line)
                    # Bug fix: the HH:MM:SS fields were previously combined
                    # as 60*H + 3600*M + S -- hour/minute factors swapped.
                    cpu_time = 3600*int(r.group(1)) + 60*int(r.group(2)) + int(r.group(3))
                    wall_time = 3600*int(r.group(6)) + 60*int(r.group(7)) + int(r.group(8))
                    memory = 1024*int(r.group(4))
                    vmemory = 1024*int(r.group(5))
                    self.resources = (cpu_time, wall_time, memory, vmemory)
                    break
def getResources(self, cleanup=True):
    """Return cpu_time, wall_time, memory, and virtual memory used"""
    self.loadResources()
    return self.resources
def cpu_time(self):
    """Return the CPU seconds consumed by the job (resources[0])."""
    if self.suppress_pbs:
        raise PBSError("Cannot get resources on a pbs-suppressed job")
    self.loadResources()
    return self.resources[0]
def memory(self):
    """Return the physical memory in bytes consumed by the job (resources[2])."""
    if self.suppress_pbs:
        raise PBSError("Cannot get resources on a pbs-suppressed job")
    self.loadResources()
    return self.resources[2]
def vmemory(self):
    """Return the virtual memory in bytes consumed by the job (resources[3])."""
    if self.suppress_pbs:
        raise PBSError("Cannot get resources on a pbs-suppressed job")
    self.loadResources()
    return self.resources[3]
def wait_on_job_limit(self, limit=200, delay=10, user=current_user):
    """Deprecated: use the stand-alone limit_jobs() function instead."""
    sys.stderr.write("pbsJobHandler: wait_on_job_limit method is depricated; please switch to the stand-alone job_limit function.")
    # Bug fix: ``self`` was previously passed as the first positional
    # argument, so limit_jobs() received the job object as its ``limit``.
    limit_jobs(limit=limit, delay=delay, user=user, use_pbs=not self.suppress_pbs)
def split_efile(self):
    """Split the .e<id> file into a .e<id> and .r<id> file"""
    # The PBS epilogue appends "Redhawk Epilogue Args: ..." to stderr;
    # everything before the marker stays in the .e file, the remainder goes
    # to the .r (resource) file.  Idempotent via the self.split flag.
    self.has_split = True
    if not self.split:
        if not self.efile_exists():
            raise PBSError("Call to split_efile() when efile doesn't exist (%s, %s)" % (self.cmd, self.efile))
        self.split = True
        with open(self.efile) as fp: line = "".join([line for line in fp])
        count = 0
        r = None
        # NOTE(review): the count==1 break below exits after the first failed
        # search, so the sleep/retry is never reached -- confirm intended.
        while r is None:
            r = re.search("^(.*)Redhawk Epilogue Args:\s*(.+)$", line, re.DOTALL)
            if not r:
                count += 1
                if count == 1:
                    break
                time.sleep(5)
        if r:
            first, second = r.group(1,2)
            with open(self.efile, "w") as fp: fp.write(first)
            with open(self.rfile, "w") as fp: fp.write(second)
        else:
            # Marker never found: record a placeholder resource file.
            with open(self.rfile, "w") as fp: fp.write(bad_ep_str + "\n")
### Hold until the user has < limit jobs in the circulation
def limit_jobs(limit=job_limit, delay=10, use_pbs=pbs_present, user=current_user):
    """Spin until ``user`` has fewer than ``limit`` jobs in circulation,
    polling every ``delay`` seconds.  limit = 0 signals no limit."""
    if limit == 0:
        return None
    while True:
        # Bug fix: previously queried the module-level ``current_user``
        # regardless of the ``user`` argument, making the parameter a no-op.
        numJobsInQueue = get_number_of_jobs_in_queue(user, use_pbs)
        if numJobsInQueue < limit:
            return None
        time.sleep(delay)
### return the number of jobs in queue, whatever the state is
def get_number_of_jobs_in_queue(user=current_user, using_pbs = pbs_present):
    """Get the number of user jobs currently sitting in the queue or running. Not yet designed to work with pbs suppression."""
    global job_list
    if not using_pbs:
        # Without PBS, fall back to the in-process registry of jobs.
        # NOTE(review): this rebinds job_list to a *list*, while submit()
        # calls job_list.add() (a set method) -- confirm the container type.
        job_list = [j for j in job_list if j.isJobRunning()]
        return len(job_list)
    # Count qstat lines for this user, excluding header/summary lines.
    cmd = "qstat -u "+user + " 2>/dev/null | grep " + user
    output,error = [x.decode() for x in subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()]
    return len([True for line in output.split("\n") if line and not redhawkStatsRe.search(line)])
def storePBS(pbsList, fp):
    """Serialize a list of PBS jobs to the open (binary) file pointer ``fp``."""
    pickle.dump(pbsList, fp)
def loadPBS(fp):
    """Deserialize and return a list of pickled jobs from the open (binary)
    file pointer ``fp``."""
    return pickle.load(fp)
def relaunch(args = sys.argv, force = False, walltime = "40:00:00", python = "python"):
    """Detects whether program is being run on the head node. If so, relaunch identical program on a compute node and quit."""
    # Head nodes are assumed to be named mualhpcp*; compute nodes are not.
    o, e = [x.decode() for x in subprocess.Popen(["hostname"], shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()]
    if force or re.match("mualhpcp", o):
        # Re-run the identical command line as a PBS job and mirror its
        # stdout/stderr back to our own streams.
        o = pbsJobHandler(batch_file = "relaunch" + str(os.getpid()), executable = python + " " + " ".join(args), walltime=walltime)
        o, e = o.submit().getResults()
        sys.stdout.write("STDOUT: " + o)
        sys.stderr.write("STDERR: " + e)
        return True
    return False
############## Allow for a direct launch of a program
if __name__ == "__main__":
    # Command-line front end: wrap an arbitrary command in a redhawk job.
    parser = ArgumentParser(description='Launch a redhawk job.')
    parser.add_argument('command', action = "store", type = str, nargs = '+')
    term = parser.add_argument_group("Input/Output switches")
    term.add_argument('--wait', action = "store_true", dest = "wait", help="wait on job completion", default = False)
    term.add_argument('-o', '--output', action = "store", dest = "output", help = "output file (stdout by default)", default = None)
    term.add_argument('-e', '--error', action = "store", dest = "error", help = "error file (stderr by default)", default = None)
    term.add_argument('-r', '--resources', action = "store", dest = "resources", help = "resource file (same as output by default)", default = None)
    term.add_argument('-S', '--suppress_output', action = "store_true", dest = "suppress", help="Quit without output", default = False)
    settings = parser.add_argument_group("Main job-related settings")
    settings.add_argument('-c', '--create', action = "store_true", dest = "create", help = "Create the batch file and quit.", default = False)
    settings.add_argument('-n', '--nodes', action = "store", type = int, dest = "nodes", help = "Number of nodes", default = 1)
    settings.add_argument('-p', '--ppn', action = "store", type = int, dest = "ppn", help = "Number of processors per node", default = 1)
    settings.add_argument('--big_mem', '--mem', action = "store_true", dest = "mem", help = "Use 128Gb machine", default = False)
    settings.add_argument('-w', '--walltime', action = "store", type = str, dest = "walltime", help = "Reserved walltime", default = "10:00:00")
    settings.add_argument('-m', '--modules', action = "store", type = str, nargs = "+", dest = "RHmodules", help = "required redhawk modules", default = None)
    settings.add_argument('-O', '--output_location', action = "store", type = str, dest = "output_location", help = "Output location", default = None)
    settings.add_argument('-d', '--dir', action = "store", type = str, dest = "target_directory", help = "target directory", default = None)
    settings2 = parser.add_argument_group("Less important job-related settings")
    settings2.add_argument('-b', '--batch', action = "store", type = str, dest = "batch", help="Batch file name", default = "redhawk_run")
    settings2.add_argument('-P', '--pid_off', action = "store_false", dest = "pid", help = "Suppress use of pid in file names", default = True)
    settings2.add_argument('-R', '--resources_off', action = "store_false", dest = "print_resources", help = "Suppress resource usage reporting", default = True)
    settings2.add_argument('-K', '--keep_files', action = "store_true", dest = "keep", help = "Keep files generated", default = False)
    settings2.add_argument('-j', '--job_name', dest = "job_name", help = "Redhawk job name", default = None)
    args = parser.parse_args()
    # Build the job; with -c/--create we only write the batch file.
    p = pbsJobHandler(batch_file = args.batch, executable = " ".join(args.command), job_name = args.job_name, use_pid = args.pid, nodes = args.nodes, ppn = args.ppn, mem = args.mem,
                      walltime = args.walltime, output_location = args.output_location, chdir = args.target_directory, RHmodules = args.RHmodules)
    if args.create:
        sys.exit(0)
    o = p.submit(preserve = args.keep)
    if args.suppress:
        sys.exit(0)
    o.wait()
    # Resolve output destinations ('-' means the corresponding std stream).
    ofp = sys.stdout if not args.output or args.output == '-' else open(args.output, "w")
    efp = sys.stderr if not args.error or args.error == '-' else open(args.error, "w")
    rfp = ofp if not args.resources else (sys.stdout if args.resources == '-' else open(args.resources, "w"))
    out, err = o.get_results(cleanup = not args.keep)
    ofp.write(out)
    # Bug fix: stderr content was previously written to ofp, leaving the
    # -e/--error destination (efp) opened but unused.
    efp.write(err)
    if args.print_resources:
        A = o.getResources()
        rfp.write("CPU Time: %d\n" % (A[0]))
        rfp.write("Wall time: %d\n" % (A[1]))
        rfp.write("Memory: %d\n" % (A[2]))
        rfp.write("Vmemory: %d\n" % (A[3]))
################################
### Sample code: Running a single job
### exe = "ls *" # The command we want to run (to run multiple commands, seperate with semi-colons)
### o = pbsJobHandler(batch_file = "batch.txt", executable = exe, mail = "bea"); # Set up the job
### o.submit() # Submit the job to a redhawk queue
### if o.isJobRunning(): print "yes" # Check to see if job is still in the queue or running
### o.wait() # "spin" until job is finished
### output_string = o.ofile_string() # Get the ouput
### o.erase_files() # Erase the output files
|
bmanojlovic/ansible | refs/heads/devel | lib/ansible/modules/cloud/vmware/vmware_vmkernel.py | 48 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
# Module documentation (YAML). Bug fix: the option was documented as
# "vland_id" although the code and EXAMPLES use "vlan_id".
DOCUMENTATION = '''
---
module: vmware_vmkernel
short_description: Create a VMware VMkernel Interface
description:
    - Create a VMware VMkernel Interface
version_added: 2.0
author: "Joseph Callen (@jcpowermac), Russell Teague (@mtnbikenc)"
notes:
    - Tested on vSphere 5.5
requirements:
    - "python >= 2.6"
    - PyVmomi
options:
    vswitch_name:
        description:
            - The name of the vswitch where to add the VMK interface
        required: True
    portgroup_name:
        description:
            - The name of the portgroup for the VMK interface
        required: True
    ip_address:
        description:
            - The IP Address for the VMK interface
        required: True
    subnet_mask:
        description:
            - The Subnet Mask for the VMK interface
        required: True
    vlan_id:
        description:
            - The VLAN ID for the VMK interface
        required: True
    mtu:
        description:
            - The MTU for the VMK interface
        required: False
    enable_vsan:
        description:
            - Enable the VMK interface for VSAN traffic
        required: False
    enable_vmotion:
        description:
            - Enable the VMK interface for vMotion traffic
        required: False
    enable_mgmt:
        description:
            - Enable the VMK interface for Management traffic
        required: False
    enable_ft:
        description:
            - Enable the VMK interface for Fault Tolerance traffic
        required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example command from Ansible Playbook
- name: Add Management vmkernel port (vmk1)
local_action:
module: vmware_vmkernel
hostname: esxi_hostname
username: esxi_username
password: esxi_password
vswitch_name: vswitch_name
portgroup_name: portgroup_name
vlan_id: vlan_id
ip_address: ip_address
subnet_mask: subnet_mask
enable_mgmt: True
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def create_vmkernel_adapter(host_system, port_group_name,
                            vlan_id, vswitch_name,
                            ip_address, subnet_mask,
                            mtu, enable_vsan, enable_vmotion, enable_mgmt, enable_ft):
    """Add a port group and a VMkernel NIC to ``host_system``, then enable
    the requested traffic types (VSAN/vMotion/management/FT) on the new vNIC.

    Always returns True; pyVmomi faults propagate to the caller."""
    host_config_manager = host_system.configManager
    host_network_system = host_config_manager.networkSystem
    host_virtual_vic_manager = host_config_manager.virtualNicManager
    # Network config that adds one port group on the target vSwitch/VLAN...
    config = vim.host.NetworkConfig()
    config.portgroup = [vim.host.PortGroup.Config()]
    config.portgroup[0].changeOperation = "add"
    config.portgroup[0].spec = vim.host.PortGroup.Specification()
    config.portgroup[0].spec.name = port_group_name
    config.portgroup[0].spec.vlanId = vlan_id
    config.portgroup[0].spec.vswitchName = vswitch_name
    config.portgroup[0].spec.policy = vim.host.NetworkPolicy()
    # ...and one VMkernel NIC with a static IP on that port group.
    config.vnic = [vim.host.VirtualNic.Config()]
    config.vnic[0].changeOperation = "add"
    config.vnic[0].portgroup = port_group_name
    config.vnic[0].spec = vim.host.VirtualNic.Specification()
    config.vnic[0].spec.ip = vim.host.IpConfig()
    config.vnic[0].spec.ip.dhcp = False
    config.vnic[0].spec.ip.ipAddress = ip_address
    config.vnic[0].spec.ip.subnetMask = subnet_mask
    if mtu:
        config.vnic[0].spec.mtu = mtu
    host_network_config_result = host_network_system.UpdateNetworkConfig(config, "modify")
    # Tag each created vNIC for the requested service types.
    for vnic_device in host_network_config_result.vnicDevice:
        if enable_vsan:
            vsan_system = host_config_manager.vsanSystem
            vsan_config = vim.vsan.host.ConfigInfo()
            vsan_config.networkInfo = vim.vsan.host.ConfigInfo.NetworkInfo()
            vsan_config.networkInfo.port = [vim.vsan.host.ConfigInfo.NetworkInfo.PortConfig()]
            vsan_config.networkInfo.port[0].device = vnic_device
            # NOTE(review): UpdateVsan_Task returns a task object that is
            # never waited on -- confirm fire-and-forget is intended.
            host_vsan_config_result = vsan_system.UpdateVsan_Task(vsan_config)
        if enable_vmotion:
            host_virtual_vic_manager.SelectVnicForNicType("vmotion", vnic_device)
        if enable_mgmt:
            host_virtual_vic_manager.SelectVnicForNicType("management", vnic_device)
        if enable_ft:
            host_virtual_vic_manager.SelectVnicForNicType("faultToleranceLogging", vnic_device)
    return True
def main():
    """Ansible entry point: validate module parameters, connect to the host
    and create the VMkernel interface, reporting success or failure JSON."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(portgroup_name=dict(required=True, type='str'),
                              ip_address=dict(required=True, type='str'),
                              subnet_mask=dict(required=True, type='str'),
                              mtu=dict(required=False, type='int'),
                              enable_vsan=dict(required=False, type='bool'),
                              enable_vmotion=dict(required=False, type='bool'),
                              enable_mgmt=dict(required=False, type='bool'),
                              enable_ft=dict(required=False, type='bool'),
                              vswitch_name=dict(required=True, type='str'),
                              vlan_id=dict(required=True, type='int')))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')
    port_group_name = module.params['portgroup_name']
    ip_address = module.params['ip_address']
    subnet_mask = module.params['subnet_mask']
    mtu = module.params['mtu']
    enable_vsan = module.params['enable_vsan']
    enable_vmotion = module.params['enable_vmotion']
    enable_mgmt = module.params['enable_mgmt']
    enable_ft = module.params['enable_ft']
    vswitch_name = module.params['vswitch_name']
    vlan_id = module.params['vlan_id']
    try:
        content = connect_to_api(module)
        host = get_all_objs(content, [vim.HostSystem])
        if not host:
            module.fail_json(msg="Unable to locate Physical Host.")
        # Bug fix: dict.keys() is not subscriptable on Python 3; take the
        # first host via list() instead.
        host_system = list(host.keys())[0]
        changed = create_vmkernel_adapter(host_system, port_group_name,
                                          vlan_id, vswitch_name,
                                          ip_address, subnet_mask,
                                          mtu, enable_vsan, enable_vmotion, enable_mgmt, enable_ft)
        module.exit_json(changed=changed)
    except vmodl.RuntimeFault as runtime_fault:
        module.fail_json(msg=runtime_fault.msg)
    except vmodl.MethodFault as method_fault:
        module.fail_json(msg=method_fault.msg)
    except Exception as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
remik/django-page-cms | refs/heads/master | pages/tests/test_selenium.py | 1 | # -*- coding: utf-8 -*-
"""Django page CMS selemium test module"""
from pages.models import Page
from pages.tests.testcase import TestCase
from pages import settings
from django.core.urlresolvers import reverse
from django.test import LiveServerTestCase
from django.contrib import auth
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
screenshot_nb = 1
class SeleniumTestCase(TestCase, LiveServerTestCase):
    """Browser-level tests of the page admin, driven through PhantomJS.

    Combines the project ``TestCase`` (helpers such as ``new_page`` and
    ``get_admin_client``) with Django's ``LiveServerTestCase``, which serves
    the site over HTTP at ``self.live_server_url`` for the real browser.
    """
    def setUp(self):
        # Instantiate the headless browser used by every test method.
        self.browser = webdriver.PhantomJS()
        # NOTE(review): the returned client is never used; presumably
        # get_admin_client() has needed side effects -- confirm before removing.
        client = self.get_admin_client()
        # Superuser the login() helper signs in as.
        auth.models.User.objects.create_superuser(
            username='admin_s',
            password='admin',
            email='admin_s@example.com'
        )
        self.browser.get('%s%s' % (self.live_server_url, reverse("admin:index")))
        super(SeleniumTestCase, self).setUp()
    def screenshot(self):
        """Save a numbered screenshot if PAGE_TESTS_SAVE_SCREENSHOTS is set."""
        global screenshot_nb
        if settings.PAGE_TESTS_SAVE_SCREENSHOTS:
            self.browser.save_screenshot('screenshot_%d.png' % screenshot_nb)
            screenshot_nb += 1
    def select_option(self, select, option_id):
        """Click the <option> of *select* whose value equals *option_id*."""
        for option in select.find_elements_by_tag_name('option'):
            if option.get_attribute('value') == str(option_id):
                option.click()
    def visit(self, url):
        """Open the path *url* against the live test server."""
        # LiveServerTestCase exposes the server's base URL as an attribute.
        url = '%s%s' % (self.live_server_url, url)
        self.browser.get(url)
    def click(self, selector):
        """Click the first element matching the CSS *selector*."""
        return self.browser.find_element_by_css_selector(selector).click()
    def wait(self, id):
        """Block (up to 3 seconds) until an element with DOM id *id* exists."""
        return WebDriverWait(self.browser, 3).until(
            EC.presence_of_element_located((By.ID, id)))
    def login(self):
        """Sign into the Django admin as the superuser created in setUp()."""
        self.visit(reverse("admin:index"))
        # Fill the admin login form and submit it.
        username = self.browser.find_element_by_id("id_username")
        username.send_keys("admin_s")
        password = self.browser.find_element_by_id("id_password")
        password.send_keys("admin")
        self.click("input[type='submit']")
    def tearDown(self):
        # Always shut the browser down, even when the test body failed.
        self.browser.quit()
        super(SeleniumTestCase, self).tearDown()
    def url_change(self, id):
        """Return the admin change-form URL for the page with primary key *id*."""
        return reverse('admin:pages_page_change', args=[id])
    def test_admin_select(self):
        """Selecting DRAFT in the status dropdown updates the status icon."""
        self.login()
        page = self.new_page()
        self.visit(self.url_change(page.id))
        status = self.browser.find_element_by_id('id_status')
        self.assertEqual(status.get_attribute('value'), str(page.status))
        self.select_option(status, str(Page.DRAFT))
        self.assertEqual(status.get_attribute('value'), str(Page.DRAFT))
        src = self.browser.find_element_by_css_selector('.status'
            ).find_element_by_tag_name('img'
            ).get_attribute('src')
        self.assertTrue(src.endswith('draft.gif'))
    def test_admin_move_page(self):
        """Moving p2 before p1 via the change-list move links reorders rows."""
        self.login()
        page_1 = self.new_page({'slug':'p1'})
        page_2 = self.new_page({'slug':'p2'})
        self.visit(reverse('admin:pages_page_changelist'))
        h1 = self.browser.find_element_by_css_selector('#content h1')
        self.assertEqual(h1.text, 'Select page to change')
        rows = self.browser.find_elements_by_css_selector('#page-list tbody tr')
        row_1 = rows[0]
        row_2 = rows[1]
        self.assertEqual(row_1.get_attribute('id'), 'page-row-%d' % page_1.id)
        self.assertEqual(row_2.get_attribute('id'), 'page-row-%d' % page_2.id)
        page_3 = self.new_page({'slug':'p3'})
        self.click('#move-link-%d' % page_2.id)
        self.click('#move-target-%d .move-target.left' % page_1.id)
        # Wait for the AJAX refresh: the newest page's row must reappear.
        self.wait('page-row-%d' % page_3.id)
        rows = self.browser.find_elements_by_css_selector('#page-list tbody tr')
        row_1 = rows[0]
        row_2 = rows[1]
        row_3 = rows[2]
        self.screenshot()
        self.assertEqual(row_1.get_attribute('id'), 'page-row-%d' % page_2.id)
        self.assertEqual(row_2.get_attribute('id'), 'page-row-%d' % page_1.id)
        self.assertEqual(row_3.get_attribute('id'), 'page-row-%d' % page_3.id)
|
ToToL/libprelude | refs/heads/master | bindings/tests/load.py | 4 | #!/usr/bin/python
# Smoke test for the libprelude Python bindings (Python 2 syntax).
import sys
# Make the in-tree build outputs importable without installing the bindings.
sys.path.append('.')
sys.path.append('./.libs')
try:
    import PreludeEasy
except Exception,e:
    # Give an actionable hint for running against an uninstalled build tree.
    print "Import failed: ",e
    print "Try 'cd ./.libs && ln -s libprelude_python.so _PreludeEasy.so'"
    sys.exit(1)
# Round-trip check: load a serialized IDMEF message and dump it to stdout.
idmef = PreludeEasy.IDMEF()
idmef.ReadFromFile("foo.bin")
idmef.PrintToStdout()
|
mdanielwork/intellij-community | refs/heads/master | python/testData/postfix/while/topLevel.py | 39 | True.while<caret> |
2013Commons/HUE-SHARK | refs/heads/master | apps/hbase/src/hbase/conf.py | 2 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Configuration options for the hbase application.
"""
import re
from desktop.lib.conf import Config
# Thrift endpoints of the HBase clusters this application may query.
HBASE_CLUSTERS = Config(
    key="hbase_clusters",
    type=str,
    default="(Cluster|localhost:9090)",
    help="Comma-separated list of HBase Thrift servers for clusters in the format of '(name|host:port)'.")

# Safety cap on how much data is pulled for a single row before truncation.
TRUNCATE_LIMIT = Config(
    key="truncate_limit",
    type=int,
    default="500",
    help="Hard limit of rows or columns per row fetched before truncating.")
utkbansal/kuma | refs/heads/master | kuma/users/migrations/0003_auto_20150722_1242.py | 8 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
import kuma.core.managers
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds the user-profile fields
    (bio, name, locale, timezone, social-profile URLs, tags, ...) to the
    custom ``users.User`` model.  Generated by ``makemigrations``; the large
    ``choices`` literals below are snapshots of the locale and timezone
    tables at generation time and should not be edited by hand.
    """
    # Must run after taggit's initial migration (tags field) and the
    # preceding users migration.
    dependencies = [
        ('taggit', '0001_initial'),
        ('users', '0002_auto_20150722_1240'),
    ]
    operations = [
        migrations.AddField(
            model_name='user',
            name='bio',
            field=models.TextField(verbose_name='About Me', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='content_flagging_email',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='fullname',
            field=models.CharField(max_length=255, verbose_name='Name', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='homepage',
            field=models.URLField(max_length=255, verbose_name='Homepage', blank=True, error_messages={b'invalid': 'This URL has an invalid format. Valid URLs look like http://example.com/my_page.'}),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='irc_nickname',
            field=models.CharField(max_length=255, verbose_name='IRC nickname', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='locale',
            # Snapshot of the supported UI locales at generation time.
            field=models.CharField(default=b'en-US', choices=[(b'af', 'Afrikaans'), (b'ar', '\u0639\u0631\u0628\u064a'), (b'az', 'Az\u0259rbaycanca'), (b'bm', 'Bamanankan'), (b'bn-BD', '\u09ac\u09be\u0982\u09b2\u09be (\u09ac\u09be\u0982\u09b2\u09be\u09a6\u09c7\u09b6)'), (b'bn-IN', '\u09ac\u09be\u0982\u09b2\u09be (\u09ad\u09be\u09b0\u09a4)'), (b'ca', 'Catal\xe0'), (b'cs', '\u010ce\u0161tina'), (b'de', 'Deutsch'), (b'ee', 'E\u028be'), (b'el', '\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac'), (b'en-US', 'English (US)'), (b'es', 'Espa\xf1ol'), (b'fa', '\u0641\u0627\u0631\u0633\u06cc'), (b'ff', 'Pulaar-Fulfulde'), (b'fi', 'suomi'), (b'fr', 'Fran\xe7ais'), (b'fy-NL', 'Frysk'), (b'ga-IE', 'Gaeilge'), (b'ha', 'Hausa'), (b'he', '\u05e2\u05d1\u05e8\u05d9\u05ea'), (b'hi-IN', '\u0939\u093f\u0928\u094d\u0926\u0940 (\u092d\u093e\u0930\u0924)'), (b'hr', 'Hrvatski'), (b'hu', 'magyar'), (b'id', 'Bahasa Indonesia'), (b'ig', 'Igbo'), (b'it', 'Italiano'), (b'ja', '\u65e5\u672c\u8a9e'), (b'ka', '\u10e5\u10d0\u10e0\u10d7\u10e3\u10da\u10d8'), (b'ko', '\ud55c\uad6d\uc5b4'), (b'ln', 'Ling\xe1la'), (b'ml', '\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02'), (b'ms', 'Melayu'), (b'my', '\u1019\u103c\u1014\u103a\u1019\u102c\u1018\u102c\u101e\u102c'), (b'nl', 'Nederlands'), (b'pl', 'Polski'), (b'pt-BR', 'Portugu\xeas (do\xa0Brasil)'), (b'pt-PT', 'Portugu\xeas (Europeu)'), (b'ro', 'rom\xe2n\u0103'), (b'ru', '\u0420\u0443\u0441\u0441\u043a\u0438\u0439'), (b'son', 'So\u014bay'), (b'sq', 'Shqip'), (b'sw', 'Kiswahili'), (b'ta', '\u0ba4\u0bae\u0bbf\u0bb4\u0bcd'), (b'th', '\u0e44\u0e17\u0e22'), (b'tl', 'Tagalog'), (b'tr', 'T\xfcrk\xe7e'), (b'vi', 'Ti\u1ebfng Vi\u1ec7t'), (b'wo', 'Wolof'), (b'xh', 'isiXhosa'), (b'yo', 'Yor\xf9b\xe1'), (b'zh-CN', '\u4e2d\u6587 (\u7b80\u4f53)'), (b'zh-TW', '\u6b63\u9ad4\u4e2d\u6587 (\u7e41\u9ad4)'), (b'zu', 'isiZulu')], max_length=7, blank=True, verbose_name='Language', db_index=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='location',
            field=models.CharField(max_length=255, verbose_name='Location', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='organization',
            field=models.CharField(max_length=255, verbose_name='Organization', blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='tags',
            field=kuma.core.managers.NamespacedTaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='timezone',
            # Snapshot of the pytz zone table, grouped by region, with the
            # UTC offsets that were current when the migration was generated.
            field=models.CharField(default=b'US/Pacific', max_length=42, verbose_name='Timezone', blank=True, choices=[('Africa', [('Africa/Abidjan', 'Abidjan (GMT+0000)'), ('Africa/Accra', 'Accra (GMT+0000)'), ('Africa/Addis_Ababa', 'Addis Ababa (GMT+0300)'), ('Africa/Algiers', 'Algiers (GMT+0100)'), ('Africa/Asmara', 'Asmara (GMT+0300)'), ('Africa/Bamako', 'Bamako (GMT+0000)'), ('Africa/Bangui', 'Bangui (GMT+0100)'), ('Africa/Banjul', 'Banjul (GMT+0000)'), ('Africa/Bissau', 'Bissau (GMT+0000)'), ('Africa/Blantyre', 'Blantyre (GMT+0200)'), ('Africa/Brazzaville', 'Brazzaville (GMT+0100)'), ('Africa/Bujumbura', 'Bujumbura (GMT+0200)'), ('Africa/Cairo', 'Cairo (GMT+0200)'), ('Africa/Casablanca', 'Casablanca (GMT+0100)'), ('Africa/Ceuta', 'Ceuta (GMT+0200)'), ('Africa/Conakry', 'Conakry (GMT+0000)'), ('Africa/Dakar', 'Dakar (GMT+0000)'), ('Africa/Dar_es_Salaam', 'Dar es Salaam (GMT+0300)'), ('Africa/Djibouti', 'Djibouti (GMT+0300)'), ('Africa/Douala', 'Douala (GMT+0100)'), ('Africa/El_Aaiun', 'El Aaiun (GMT+0100)'), ('Africa/Freetown', 'Freetown (GMT+0000)'), ('Africa/Gaborone', 'Gaborone (GMT+0200)'), ('Africa/Harare', 'Harare (GMT+0200)'), ('Africa/Johannesburg', 'Johannesburg (GMT+0200)'), ('Africa/Juba', 'Juba (GMT+0300)'), ('Africa/Kampala', 'Kampala (GMT+0300)'), ('Africa/Khartoum', 'Khartoum (GMT+0300)'), ('Africa/Kigali', 'Kigali (GMT+0200)'), ('Africa/Kinshasa', 'Kinshasa (GMT+0100)'), ('Africa/Lagos', 'Lagos (GMT+0100)'), ('Africa/Libreville', 'Libreville (GMT+0100)'), ('Africa/Lome', 'Lome (GMT+0000)'), ('Africa/Luanda', 'Luanda (GMT+0100)'), ('Africa/Lubumbashi', 'Lubumbashi (GMT+0200)'), ('Africa/Lusaka', 'Lusaka (GMT+0200)'), ('Africa/Malabo', 'Malabo (GMT+0100)'), ('Africa/Maputo', 'Maputo (GMT+0200)'), ('Africa/Maseru', 'Maseru (GMT+0200)'), ('Africa/Mbabane', 'Mbabane (GMT+0200)'), ('Africa/Mogadishu', 'Mogadishu (GMT+0300)'), ('Africa/Monrovia', 'Monrovia (GMT+0000)'), ('Africa/Nairobi', 'Nairobi (GMT+0300)'), ('Africa/Ndjamena', 'Ndjamena (GMT+0100)'), 
('Africa/Niamey', 'Niamey (GMT+0100)'), ('Africa/Nouakchott', 'Nouakchott (GMT+0000)'), ('Africa/Ouagadougou', 'Ouagadougou (GMT+0000)'), ('Africa/Porto-Novo', 'Porto-Novo (GMT+0100)'), ('Africa/Sao_Tome', 'Sao Tome (GMT+0000)'), ('Africa/Tripoli', 'Tripoli (GMT+0200)'), ('Africa/Tunis', 'Tunis (GMT+0100)'), ('Africa/Windhoek', 'Windhoek (GMT+0100)')]), ('America', [('America/Adak', 'Adak (GMT-0900)'), ('America/Anchorage', 'Anchorage (GMT-0800)'), ('America/Anguilla', 'Anguilla (GMT-0400)'), ('America/Antigua', 'Antigua (GMT-0400)'), ('America/Araguaina', 'Araguaina (GMT-0300)'), ('America/Argentina/Buenos_Aires', 'Buenos Aires (GMT-0300)'), ('America/Argentina/Catamarca', 'Catamarca (GMT-0300)'), ('America/Argentina/Cordoba', 'Cordoba (GMT-0300)'), ('America/Argentina/Jujuy', 'Jujuy (GMT-0300)'), ('America/Argentina/La_Rioja', 'La Rioja (GMT-0300)'), ('America/Argentina/Mendoza', 'Mendoza (GMT-0300)'), ('America/Argentina/Rio_Gallegos', 'Rio Gallegos (GMT-0300)'), ('America/Argentina/Salta', 'Salta (GMT-0300)'), ('America/Argentina/San_Juan', 'San Juan (GMT-0300)'), ('America/Argentina/San_Luis', 'San Luis (GMT-0300)'), ('America/Argentina/Tucuman', 'Tucuman (GMT-0300)'), ('America/Argentina/Ushuaia', 'Ushuaia (GMT-0300)'), ('America/Aruba', 'Aruba (GMT-0400)'), ('America/Asuncion', 'Asuncion (GMT-0400)'), ('America/Atikokan', 'Atikokan (GMT-0500)'), ('America/Bahia', 'Bahia (GMT-0300)'), ('America/Bahia_Banderas', 'Bahia Banderas (GMT-0500)'), ('America/Barbados', 'Barbados (GMT-0400)'), ('America/Belem', 'Belem (GMT-0300)'), ('America/Belize', 'Belize (GMT-0600)'), ('America/Blanc-Sablon', 'Blanc-Sablon (GMT-0400)'), ('America/Boa_Vista', 'Boa Vista (GMT-0400)'), ('America/Bogota', 'Bogota (GMT-0500)'), ('America/Boise', 'Boise (GMT-0600)'), ('America/Cambridge_Bay', 'Cambridge Bay (GMT-0600)'), ('America/Campo_Grande', 'Campo Grande (GMT-0400)'), ('America/Cancun', 'Cancun (GMT-0500)'), ('America/Caracas', 'Caracas (GMT-0430)'), ('America/Cayenne', 'Cayenne 
(GMT-0300)'), ('America/Cayman', 'Cayman (GMT-0500)'), ('America/Chicago', 'Chicago (GMT-0500)'), ('America/Chihuahua', 'Chihuahua (GMT-0600)'), ('America/Costa_Rica', 'Costa Rica (GMT-0600)'), ('America/Creston', 'Creston (GMT-0700)'), ('America/Cuiaba', 'Cuiaba (GMT-0400)'), ('America/Curacao', 'Curacao (GMT-0400)'), ('America/Danmarkshavn', 'Danmarkshavn (GMT+0000)'), ('America/Dawson', 'Dawson (GMT-0700)'), ('America/Dawson_Creek', 'Dawson Creek (GMT-0700)'), ('America/Denver', 'Denver (GMT-0600)'), ('America/Detroit', 'Detroit (GMT-0400)'), ('America/Dominica', 'Dominica (GMT-0400)'), ('America/Edmonton', 'Edmonton (GMT-0600)'), ('America/Eirunepe', 'Eirunepe (GMT-0500)'), ('America/El_Salvador', 'El Salvador (GMT-0600)'), ('America/Fortaleza', 'Fortaleza (GMT-0300)'), ('America/Glace_Bay', 'Glace Bay (GMT-0300)'), ('America/Godthab', 'Godthab (GMT-0200)'), ('America/Goose_Bay', 'Goose Bay (GMT-0300)'), ('America/Grand_Turk', 'Grand Turk (GMT-0400)'), ('America/Grenada', 'Grenada (GMT-0400)'), ('America/Guadeloupe', 'Guadeloupe (GMT-0400)'), ('America/Guatemala', 'Guatemala (GMT-0600)'), ('America/Guayaquil', 'Guayaquil (GMT-0500)'), ('America/Guyana', 'Guyana (GMT-0400)'), ('America/Halifax', 'Halifax (GMT-0300)'), ('America/Havana', 'Havana (GMT-0400)'), ('America/Hermosillo', 'Hermosillo (GMT-0700)'), ('America/Indiana/Indianapolis', 'Indianapolis (GMT-0400)'), ('America/Indiana/Knox', 'Knox (GMT-0500)'), ('America/Indiana/Marengo', 'Marengo (GMT-0400)'), ('America/Indiana/Petersburg', 'Petersburg (GMT-0400)'), ('America/Indiana/Tell_City', 'Tell City (GMT-0500)'), ('America/Indiana/Vevay', 'Vevay (GMT-0400)'), ('America/Indiana/Vincennes', 'Vincennes (GMT-0400)'), ('America/Indiana/Winamac', 'Winamac (GMT-0400)'), ('America/Inuvik', 'Inuvik (GMT-0600)'), ('America/Iqaluit', 'Iqaluit (GMT-0400)'), ('America/Jamaica', 'Jamaica (GMT-0500)'), ('America/Juneau', 'Juneau (GMT-0800)'), ('America/Kentucky/Louisville', 'Louisville (GMT-0400)'), 
('America/Kentucky/Monticello', 'Monticello (GMT-0400)'), ('America/Kralendijk', 'Kralendijk (GMT-0400)'), ('America/La_Paz', 'La Paz (GMT-0400)'), ('America/Lima', 'Lima (GMT-0500)'), ('America/Los_Angeles', 'Los Angeles (GMT-0700)'), ('America/Lower_Princes', 'Lower Princes (GMT-0400)'), ('America/Maceio', 'Maceio (GMT-0300)'), ('America/Managua', 'Managua (GMT-0600)'), ('America/Manaus', 'Manaus (GMT-0400)'), ('America/Marigot', 'Marigot (GMT-0400)'), ('America/Martinique', 'Martinique (GMT-0400)'), ('America/Matamoros', 'Matamoros (GMT-0500)'), ('America/Mazatlan', 'Mazatlan (GMT-0600)'), ('America/Menominee', 'Menominee (GMT-0500)'), ('America/Merida', 'Merida (GMT-0500)'), ('America/Metlakatla', 'Metlakatla (GMT-0800)'), ('America/Mexico_City', 'Mexico City (GMT-0500)'), ('America/Miquelon', 'Miquelon (GMT-0200)'), ('America/Moncton', 'Moncton (GMT-0300)'), ('America/Monterrey', 'Monterrey (GMT-0500)'), ('America/Montevideo', 'Montevideo (GMT-0300)'), ('America/Montserrat', 'Montserrat (GMT-0400)'), ('America/Nassau', 'Nassau (GMT-0400)'), ('America/New_York', 'New York (GMT-0400)'), ('America/Nipigon', 'Nipigon (GMT-0400)'), ('America/Nome', 'Nome (GMT-0800)'), ('America/Noronha', 'Noronha (GMT-0200)'), ('America/North_Dakota/Beulah', 'Beulah (GMT-0500)'), ('America/North_Dakota/Center', 'Center (GMT-0500)'), ('America/North_Dakota/New_Salem', 'New Salem (GMT-0500)'), ('America/Ojinaga', 'Ojinaga (GMT-0600)'), ('America/Panama', 'Panama (GMT-0500)'), ('America/Pangnirtung', 'Pangnirtung (GMT-0400)'), ('America/Paramaribo', 'Paramaribo (GMT-0300)'), ('America/Phoenix', 'Phoenix (GMT-0700)'), ('America/Port-au-Prince', 'Port-au-Prince (GMT-0400)'), ('America/Port_of_Spain', 'Port of Spain (GMT-0400)'), ('America/Porto_Velho', 'Porto Velho (GMT-0400)'), ('America/Puerto_Rico', 'Puerto Rico (GMT-0400)'), ('America/Rainy_River', 'Rainy River (GMT-0500)'), ('America/Rankin_Inlet', 'Rankin Inlet (GMT-0500)'), ('America/Recife', 'Recife (GMT-0300)'), 
('America/Regina', 'Regina (GMT-0600)'), ('America/Resolute', 'Resolute (GMT-0500)'), ('America/Rio_Branco', 'Rio Branco (GMT-0500)'), ('America/Santa_Isabel', 'Santa Isabel (GMT-0700)'), ('America/Santarem', 'Santarem (GMT-0300)'), ('America/Santiago', 'Santiago (GMT-0300)'), ('America/Santo_Domingo', 'Santo Domingo (GMT-0400)'), ('America/Sao_Paulo', 'Sao Paulo (GMT-0300)'), ('America/Scoresbysund', 'Scoresbysund (GMT+0000)'), ('America/Sitka', 'Sitka (GMT-0800)'), ('America/St_Barthelemy', 'St Barthelemy (GMT-0400)'), ('America/St_Johns', 'St Johns (GMT-0230)'), ('America/St_Kitts', 'St Kitts (GMT-0400)'), ('America/St_Lucia', 'St Lucia (GMT-0400)'), ('America/St_Thomas', 'St Thomas (GMT-0400)'), ('America/St_Vincent', 'St Vincent (GMT-0400)'), ('America/Swift_Current', 'Swift Current (GMT-0600)'), ('America/Tegucigalpa', 'Tegucigalpa (GMT-0600)'), ('America/Thule', 'Thule (GMT-0300)'), ('America/Thunder_Bay', 'Thunder Bay (GMT-0400)'), ('America/Tijuana', 'Tijuana (GMT-0700)'), ('America/Toronto', 'Toronto (GMT-0400)'), ('America/Tortola', 'Tortola (GMT-0400)'), ('America/Vancouver', 'Vancouver (GMT-0700)'), ('America/Whitehorse', 'Whitehorse (GMT-0700)'), ('America/Winnipeg', 'Winnipeg (GMT-0500)'), ('America/Yakutat', 'Yakutat (GMT-0800)'), ('America/Yellowknife', 'Yellowknife (GMT-0600)')]), ('Antarctica', [('Antarctica/Casey', 'Casey (GMT+0800)'), ('Antarctica/Davis', 'Davis (GMT+0700)'), ('Antarctica/DumontDUrville', 'DumontDUrville (GMT+1000)'), ('Antarctica/Macquarie', 'Macquarie (GMT+1100)'), ('Antarctica/Mawson', 'Mawson (GMT+0500)'), ('Antarctica/McMurdo', 'McMurdo (GMT+1200)'), ('Antarctica/Palmer', 'Palmer (GMT-0300)'), ('Antarctica/Rothera', 'Rothera (GMT-0300)'), ('Antarctica/Syowa', 'Syowa (GMT+0300)'), ('Antarctica/Troll', 'Troll (GMT+0200)'), ('Antarctica/Vostok', 'Vostok (GMT+0600)')]), ('Arctic', [('Arctic/Longyearbyen', 'Longyearbyen (GMT+0200)')]), ('Asia', [('Asia/Aden', 'Aden (GMT+0300)'), ('Asia/Almaty', 'Almaty (GMT+0600)'), 
('Asia/Amman', 'Amman (GMT+0300)'), ('Asia/Anadyr', 'Anadyr (GMT+1200)'), ('Asia/Aqtau', 'Aqtau (GMT+0500)'), ('Asia/Aqtobe', 'Aqtobe (GMT+0500)'), ('Asia/Ashgabat', 'Ashgabat (GMT+0500)'), ('Asia/Baghdad', 'Baghdad (GMT+0300)'), ('Asia/Bahrain', 'Bahrain (GMT+0300)'), ('Asia/Baku', 'Baku (GMT+0500)'), ('Asia/Bangkok', 'Bangkok (GMT+0700)'), ('Asia/Beirut', 'Beirut (GMT+0300)'), ('Asia/Bishkek', 'Bishkek (GMT+0600)'), ('Asia/Brunei', 'Brunei (GMT+0800)'), ('Asia/Chita', 'Chita (GMT+0800)'), ('Asia/Choibalsan', 'Choibalsan (GMT+0900)'), ('Asia/Colombo', 'Colombo (GMT+0530)'), ('Asia/Damascus', 'Damascus (GMT+0300)'), ('Asia/Dhaka', 'Dhaka (GMT+0600)'), ('Asia/Dili', 'Dili (GMT+0900)'), ('Asia/Dubai', 'Dubai (GMT+0400)'), ('Asia/Dushanbe', 'Dushanbe (GMT+0500)'), ('Asia/Gaza', 'Gaza (GMT+0300)'), ('Asia/Hebron', 'Hebron (GMT+0300)'), ('Asia/Ho_Chi_Minh', 'Ho Chi Minh (GMT+0700)'), ('Asia/Hong_Kong', 'Hong Kong (GMT+0800)'), ('Asia/Hovd', 'Hovd (GMT+0800)'), ('Asia/Irkutsk', 'Irkutsk (GMT+0800)'), ('Asia/Jakarta', 'Jakarta (GMT+0700)'), ('Asia/Jayapura', 'Jayapura (GMT+0900)'), ('Asia/Jerusalem', 'Jerusalem (GMT+0300)'), ('Asia/Kabul', 'Kabul (GMT+0430)'), ('Asia/Kamchatka', 'Kamchatka (GMT+1200)'), ('Asia/Karachi', 'Karachi (GMT+0500)'), ('Asia/Kathmandu', 'Kathmandu (GMT+0545)'), ('Asia/Khandyga', 'Khandyga (GMT+0900)'), ('Asia/Kolkata', 'Kolkata (GMT+0530)'), ('Asia/Krasnoyarsk', 'Krasnoyarsk (GMT+0700)'), ('Asia/Kuala_Lumpur', 'Kuala Lumpur (GMT+0800)'), ('Asia/Kuching', 'Kuching (GMT+0800)'), ('Asia/Kuwait', 'Kuwait (GMT+0300)'), ('Asia/Macau', 'Macau (GMT+0800)'), ('Asia/Magadan', 'Magadan (GMT+1000)'), ('Asia/Makassar', 'Makassar (GMT+0800)'), ('Asia/Manila', 'Manila (GMT+0800)'), ('Asia/Muscat', 'Muscat (GMT+0400)'), ('Asia/Nicosia', 'Nicosia (GMT+0300)'), ('Asia/Novokuznetsk', 'Novokuznetsk (GMT+0700)'), ('Asia/Novosibirsk', 'Novosibirsk (GMT+0600)'), ('Asia/Omsk', 'Omsk (GMT+0600)'), ('Asia/Oral', 'Oral (GMT+0500)'), ('Asia/Phnom_Penh', 'Phnom Penh 
(GMT+0700)'), ('Asia/Pontianak', 'Pontianak (GMT+0700)'), ('Asia/Pyongyang', 'Pyongyang (GMT+0900)'), ('Asia/Qatar', 'Qatar (GMT+0300)'), ('Asia/Qyzylorda', 'Qyzylorda (GMT+0600)'), ('Asia/Rangoon', 'Rangoon (GMT+0630)'), ('Asia/Riyadh', 'Riyadh (GMT+0300)'), ('Asia/Sakhalin', 'Sakhalin (GMT+1000)'), ('Asia/Samarkand', 'Samarkand (GMT+0500)'), ('Asia/Seoul', 'Seoul (GMT+0900)'), ('Asia/Shanghai', 'Shanghai (GMT+0800)'), ('Asia/Singapore', 'Singapore (GMT+0800)'), ('Asia/Srednekolymsk', 'Srednekolymsk (GMT+1100)'), ('Asia/Taipei', 'Taipei (GMT+0800)'), ('Asia/Tashkent', 'Tashkent (GMT+0500)'), ('Asia/Tbilisi', 'Tbilisi (GMT+0400)'), ('Asia/Tehran', 'Tehran (GMT+0430)'), ('Asia/Thimphu', 'Thimphu (GMT+0600)'), ('Asia/Tokyo', 'Tokyo (GMT+0900)'), ('Asia/Ulaanbaatar', 'Ulaanbaatar (GMT+0900)'), ('Asia/Urumqi', 'Urumqi (GMT+0600)'), ('Asia/Ust-Nera', 'Ust-Nera (GMT+1000)'), ('Asia/Vientiane', 'Vientiane (GMT+0700)'), ('Asia/Vladivostok', 'Vladivostok (GMT+1000)'), ('Asia/Yakutsk', 'Yakutsk (GMT+0900)'), ('Asia/Yekaterinburg', 'Yekaterinburg (GMT+0500)'), ('Asia/Yerevan', 'Yerevan (GMT+0400)')]), ('Atlantic', [('Atlantic/Azores', 'Azores (GMT+0000)'), ('Atlantic/Bermuda', 'Bermuda (GMT-0300)'), ('Atlantic/Canary', 'Canary (GMT+0100)'), ('Atlantic/Cape_Verde', 'Cape Verde (GMT-0100)'), ('Atlantic/Faroe', 'Faroe (GMT+0100)'), ('Atlantic/Madeira', 'Madeira (GMT+0100)'), ('Atlantic/Reykjavik', 'Reykjavik (GMT+0000)'), ('Atlantic/South_Georgia', 'South Georgia (GMT-0200)'), ('Atlantic/St_Helena', 'St Helena (GMT+0000)'), ('Atlantic/Stanley', 'Stanley (GMT-0300)')]), ('Australia', [('Australia/Adelaide', 'Adelaide (GMT+0930)'), ('Australia/Brisbane', 'Brisbane (GMT+1000)'), ('Australia/Broken_Hill', 'Broken Hill (GMT+0930)'), ('Australia/Currie', 'Currie (GMT+1000)'), ('Australia/Darwin', 'Darwin (GMT+0930)'), ('Australia/Eucla', 'Eucla (GMT+0845)'), ('Australia/Hobart', 'Hobart (GMT+1000)'), ('Australia/Lindeman', 'Lindeman (GMT+1000)'), ('Australia/Lord_Howe', 'Lord Howe 
(GMT+1030)'), ('Australia/Melbourne', 'Melbourne (GMT+1000)'), ('Australia/Perth', 'Perth (GMT+0800)'), ('Australia/Sydney', 'Sydney (GMT+1000)')]), ('Canada', [('Canada/Atlantic', 'Atlantic (GMT-0300)'), ('Canada/Central', 'Central (GMT-0500)'), ('Canada/Eastern', 'Eastern (GMT-0400)'), ('Canada/Mountain', 'Mountain (GMT-0600)'), ('Canada/Newfoundland', 'Newfoundland (GMT-0230)'), ('Canada/Pacific', 'Pacific (GMT-0700)')]), ('Europe', [('Europe/Amsterdam', 'Amsterdam (GMT+0200)'), ('Europe/Andorra', 'Andorra (GMT+0200)'), ('Europe/Athens', 'Athens (GMT+0300)'), ('Europe/Belgrade', 'Belgrade (GMT+0200)'), ('Europe/Berlin', 'Berlin (GMT+0200)'), ('Europe/Bratislava', 'Bratislava (GMT+0200)'), ('Europe/Brussels', 'Brussels (GMT+0200)'), ('Europe/Bucharest', 'Bucharest (GMT+0300)'), ('Europe/Budapest', 'Budapest (GMT+0200)'), ('Europe/Busingen', 'Busingen (GMT+0200)'), ('Europe/Chisinau', 'Chisinau (GMT+0300)'), ('Europe/Copenhagen', 'Copenhagen (GMT+0200)'), ('Europe/Dublin', 'Dublin (GMT+0100)'), ('Europe/Gibraltar', 'Gibraltar (GMT+0200)'), ('Europe/Guernsey', 'Guernsey (GMT+0100)'), ('Europe/Helsinki', 'Helsinki (GMT+0300)'), ('Europe/Isle_of_Man', 'Isle of Man (GMT+0100)'), ('Europe/Istanbul', 'Istanbul (GMT+0300)'), ('Europe/Jersey', 'Jersey (GMT+0100)'), ('Europe/Kaliningrad', 'Kaliningrad (GMT+0200)'), ('Europe/Kiev', 'Kiev (GMT+0300)'), ('Europe/Lisbon', 'Lisbon (GMT+0100)'), ('Europe/Ljubljana', 'Ljubljana (GMT+0200)'), ('Europe/London', 'London (GMT+0100)'), ('Europe/Luxembourg', 'Luxembourg (GMT+0200)'), ('Europe/Madrid', 'Madrid (GMT+0200)'), ('Europe/Malta', 'Malta (GMT+0200)'), ('Europe/Mariehamn', 'Mariehamn (GMT+0300)'), ('Europe/Minsk', 'Minsk (GMT+0300)'), ('Europe/Monaco', 'Monaco (GMT+0200)'), ('Europe/Moscow', 'Moscow (GMT+0300)'), ('Europe/Oslo', 'Oslo (GMT+0200)'), ('Europe/Paris', 'Paris (GMT+0200)'), ('Europe/Podgorica', 'Podgorica (GMT+0200)'), ('Europe/Prague', 'Prague (GMT+0200)'), ('Europe/Riga', 'Riga (GMT+0300)'), ('Europe/Rome', 'Rome 
(GMT+0200)'), ('Europe/Samara', 'Samara (GMT+0400)'), ('Europe/San_Marino', 'San Marino (GMT+0200)'), ('Europe/Sarajevo', 'Sarajevo (GMT+0200)'), ('Europe/Simferopol', 'Simferopol (GMT+0300)'), ('Europe/Skopje', 'Skopje (GMT+0200)'), ('Europe/Sofia', 'Sofia (GMT+0300)'), ('Europe/Stockholm', 'Stockholm (GMT+0200)'), ('Europe/Tallinn', 'Tallinn (GMT+0300)'), ('Europe/Tirane', 'Tirane (GMT+0200)'), ('Europe/Uzhgorod', 'Uzhgorod (GMT+0300)'), ('Europe/Vaduz', 'Vaduz (GMT+0200)'), ('Europe/Vatican', 'Vatican (GMT+0200)'), ('Europe/Vienna', 'Vienna (GMT+0200)'), ('Europe/Vilnius', 'Vilnius (GMT+0300)'), ('Europe/Volgograd', 'Volgograd (GMT+0300)'), ('Europe/Warsaw', 'Warsaw (GMT+0200)'), ('Europe/Zagreb', 'Zagreb (GMT+0200)'), ('Europe/Zaporozhye', 'Zaporozhye (GMT+0300)'), ('Europe/Zurich', 'Zurich (GMT+0200)')]), ('GMT', [('GMT', 'GMT (GMT+0000)')]), ('Indian', [('Indian/Antananarivo', 'Antananarivo (GMT+0300)'), ('Indian/Chagos', 'Chagos (GMT+0600)'), ('Indian/Christmas', 'Christmas (GMT+0700)'), ('Indian/Cocos', 'Cocos (GMT+0630)'), ('Indian/Comoro', 'Comoro (GMT+0300)'), ('Indian/Kerguelen', 'Kerguelen (GMT+0500)'), ('Indian/Mahe', 'Mahe (GMT+0400)'), ('Indian/Maldives', 'Maldives (GMT+0500)'), ('Indian/Mauritius', 'Mauritius (GMT+0400)'), ('Indian/Mayotte', 'Mayotte (GMT+0300)'), ('Indian/Reunion', 'Reunion (GMT+0400)')]), ('Pacific', [('Pacific/Apia', 'Apia (GMT+1300)'), ('Pacific/Auckland', 'Auckland (GMT+1200)'), ('Pacific/Bougainville', 'Bougainville (GMT+1100)'), ('Pacific/Chatham', 'Chatham (GMT+1245)'), ('Pacific/Chuuk', 'Chuuk (GMT+1000)'), ('Pacific/Easter', 'Easter (GMT-0500)'), ('Pacific/Efate', 'Efate (GMT+1100)'), ('Pacific/Enderbury', 'Enderbury (GMT+1300)'), ('Pacific/Fakaofo', 'Fakaofo (GMT+1300)'), ('Pacific/Fiji', 'Fiji (GMT+1200)'), ('Pacific/Funafuti', 'Funafuti (GMT+1200)'), ('Pacific/Galapagos', 'Galapagos (GMT-0600)'), ('Pacific/Gambier', 'Gambier (GMT-0900)'), ('Pacific/Guadalcanal', 'Guadalcanal (GMT+1100)'), ('Pacific/Guam', 'Guam 
(GMT+1000)'), ('Pacific/Honolulu', 'Honolulu (GMT-1000)'), ('Pacific/Johnston', 'Johnston (GMT-1000)'), ('Pacific/Kiritimati', 'Kiritimati (GMT+1400)'), ('Pacific/Kosrae', 'Kosrae (GMT+1100)'), ('Pacific/Kwajalein', 'Kwajalein (GMT+1200)'), ('Pacific/Majuro', 'Majuro (GMT+1200)'), ('Pacific/Marquesas', 'Marquesas (GMT-0930)'), ('Pacific/Midway', 'Midway (GMT-1100)'), ('Pacific/Nauru', 'Nauru (GMT+1200)'), ('Pacific/Niue', 'Niue (GMT-1100)'), ('Pacific/Norfolk', 'Norfolk (GMT+1130)'), ('Pacific/Noumea', 'Noumea (GMT+1100)'), ('Pacific/Pago_Pago', 'Pago Pago (GMT-1100)'), ('Pacific/Palau', 'Palau (GMT+0900)'), ('Pacific/Pitcairn', 'Pitcairn (GMT-0800)'), ('Pacific/Pohnpei', 'Pohnpei (GMT+1100)'), ('Pacific/Port_Moresby', 'Port Moresby (GMT+1000)'), ('Pacific/Rarotonga', 'Rarotonga (GMT-1000)'), ('Pacific/Saipan', 'Saipan (GMT+1000)'), ('Pacific/Tahiti', 'Tahiti (GMT-1000)'), ('Pacific/Tarawa', 'Tarawa (GMT+1200)'), ('Pacific/Tongatapu', 'Tongatapu (GMT+1300)'), ('Pacific/Wake', 'Wake (GMT+1200)'), ('Pacific/Wallis', 'Wallis (GMT+1200)')]), ('US', [('US/Alaska', 'Alaska (GMT-0800)'), ('US/Arizona', 'Arizona (GMT-0700)'), ('US/Central', 'Central (GMT-0500)'), ('US/Eastern', 'Eastern (GMT-0400)'), ('US/Hawaii', 'Hawaii (GMT-1000)'), ('US/Mountain', 'Mountain (GMT-0600)'), ('US/Pacific', 'Pacific (GMT-0700)')]), ('UTC', [('UTC', 'UTC (GMT+0000)')])]),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='title',
            field=models.CharField(max_length=255, verbose_name='Title', blank=True),
            preserve_default=True,
        ),
        # Social-profile URL fields: each one is validated by a site-specific
        # regex so only URLs on the expected host are accepted.
        migrations.AddField(
            model_name='user',
            name='facebook_url',
            field=models.TextField(blank=True, verbose_name='Facebook', validators=[django.core.validators.RegexValidator(b'^https?://www\\.facebook\\.com/', 'Enter a valid Facebook URL.', b'invalid')]),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='github_url',
            field=models.TextField(blank=True, verbose_name='GitHub', validators=[django.core.validators.RegexValidator(b'^https?://github\\.com/', 'Enter a valid GitHub URL.', b'invalid')]),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='linkedin_url',
            field=models.TextField(blank=True, verbose_name='LinkedIn', validators=[django.core.validators.RegexValidator(b'^https?://((www|\\w\\w)\\.)?linkedin.com/((in/[^/]+/?)|(pub/[^/]+/((\\w|\\d)+/?){3}))$', 'Enter a valid LinkedIn URL.', b'invalid')]),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='mozillians_url',
            field=models.TextField(blank=True, verbose_name='Mozillians', validators=[django.core.validators.RegexValidator(b'^https?://mozillians\\.org/u/', 'Enter a valid Mozillians URL.', b'invalid')]),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='stackoverflow_url',
            field=models.TextField(blank=True, verbose_name='Stack Overflow', validators=[django.core.validators.RegexValidator(b'^https?://stackoverflow\\.com/users/', 'Enter a valid Stack Overflow URL.', b'invalid')]),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='twitter_url',
            field=models.TextField(blank=True, verbose_name='Twitter', validators=[django.core.validators.RegexValidator(b'^https?://twitter\\.com/', 'Enter a valid Twitter URL.', b'invalid')]),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='user',
            name='website_url',
            field=models.TextField(blank=True, verbose_name='Website', validators=[django.core.validators.RegexValidator(b'^https?://', 'Enter a valid website URL.', b'invalid')]),
            preserve_default=True,
        ),
    ]
|
henrytao-me/openerp.positionq | refs/heads/master | openerp/workflow/instance.py | 61 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import workitem
def create(cr, ident, wkf_id):
    """Instantiate workflow ``wkf_id`` for the record described by ``ident``.

    ``ident`` is a ``(uid, res_type, res_id)`` triple.  A row is inserted
    into ``wkf_instance``, a workitem is created on every start activity of
    the workflow, and the new instance is evaluated once before its id is
    returned.
    """
    uid, res_type, res_id = ident
    cr.execute('insert into wkf_instance (res_type,res_id,uid,wkf_id) values (%s,%s,%s,%s) RETURNING id', (res_type,res_id,uid,wkf_id))
    instance_id = cr.fetchone()[0]
    # Seed the instance with one workitem per flow-start activity.
    cr.execute('select * from wkf_activity where flow_start=True and wkf_id=%s', (wkf_id,))
    start_activities = cr.dictfetchall()
    workitem.create(cr, start_activities, instance_id, ident, stack=[])
    update(cr, instance_id, ident)
    return instance_id
def delete(cr, ident):
    """Remove every workflow instance attached to the record in ``ident``.

    Only the resource type and id are used; the uid element is ignored.
    """
    _uid, res_type, res_id = ident
    cr.execute('delete from wkf_instance where res_id=%s and res_type=%s', (res_id,res_type))
def validate(cr, inst_id, ident, signal, force_running=False):
    """Deliver ``signal`` to every workitem of instance ``inst_id``.

    After all workitems have been processed the instance's end condition is
    re-evaluated.  Returns the first action pushed onto the stack by the
    *last* processed workitem, or False when there is none.
    """
    cr.execute("select * from wkf_workitem where inst_id=%s", (inst_id,))
    stack = []
    for record in cr.dictfetchall():
        # Reset per workitem: only the stack of the last processed workitem
        # survives the loop (historical behaviour, kept as-is).
        stack = []
        workitem.process(cr, record, ident, signal, force_running, stack=stack)
    _update_end(cr, inst_id, ident)
    if stack and stack[0]:
        return stack[0]
    return False
def update(cr, inst_id, ident):
    """Re-evaluate every workitem of instance ``inst_id`` without a signal.

    Returns the result of the end-condition check (True when the instance
    is now complete).
    """
    cr.execute("select * from wkf_workitem where inst_id=%s", (inst_id,))
    for record in cr.dictfetchall():
        workitem.process(cr, record, ident, stack=[])
    return _update_end(cr, inst_id, ident)
def _update_end(cr, inst_id, ident):
    """Mark instance ``inst_id`` complete once all its workitems are done.

    The instance is complete when every workitem is in state 'complete' and
    sits on a flow-stop activity.  On completion, any parent instance that
    was waiting on this one as a subflow is signalled with
    'subflow.<activity name>' for each distinct activity of this instance.

    Returns True if the instance was completed, False otherwise.
    """
    cr.execute('select wkf_id from wkf_instance where id=%s', (inst_id,))
    wkf_id = cr.fetchone()[0]
    cr.execute('select state,flow_stop from wkf_workitem w left join wkf_activity a on (a.id=w.act_id) where w.inst_id=%s', (inst_id,))
    ok = True
    for state, flow_stop in cr.fetchall():
        # Bugfix: the original used the Python-2-only `<>` operator; `!=`
        # is equivalent and also valid on Python 3.
        if state != 'complete' or not flow_stop:
            ok = False
            break
    if ok:
        cr.execute('select distinct a.name from wkf_activity a left join wkf_workitem w on (a.id=w.act_id) where w.inst_id=%s', (inst_id,))
        act_names = cr.fetchall()
        cr.execute("update wkf_instance set state='complete' where id=%s", (inst_id,))
        cr.execute("update wkf_workitem set state='complete' where subflow_id=%s", (inst_id,))
        cr.execute("select i.id,w.osv,i.res_id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where i.id IN (select inst_id from wkf_workitem where subflow_id=%s)", (inst_id,))
        for i in cr.fetchall():
            for act_name in act_names:
                # Propagate completion to each waiting parent instance.
                validate(cr, i[0], (ident[0], i[1], i[2]), 'subflow.'+act_name[0])
    return ok
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Srisai85/scipy | refs/heads/master | scipy/fftpack/fftpack_version.py | 122 | from __future__ import division, print_function, absolute_import
# Version components of scipy.fftpack, assembled into a dotted string below.
major = 0
minor = 4
micro = 3

fftpack_version = '%d.%d.%d' % (major, minor, micro)
|
SimVascular/VTK | refs/heads/master | ThirdParty/Twisted/twisted/test/test_socks.py | 41 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.protocol.socks}, an implementation of the SOCKSv4 and
SOCKSv4a protocols.
"""
import struct, socket
from twisted.trial import unittest
from twisted.test import proto_helpers
from twisted.internet import defer, address, reactor
from twisted.internet.error import DNSLookupError
from twisted.protocols import socks
class StringTCPTransport(proto_helpers.StringTransport):
    """
    A string transport that remembers the peer assigned to it and records
    whether C{loseConnection} has been called, so tests can assert on it.
    """
    # Flipped to True once loseConnection() has been invoked.
    stringTCPTransport_closing = False
    # IAddress of the remote end; assigned by the test before use.
    peer = None

    def getPeer(self):
        return self.peer

    def getHost(self):
        # Fixed local address; the tests never need it to vary.
        return address.IPv4Address('TCP', '2.3.4.5', 42)

    def loseConnection(self):
        self.stringTCPTransport_closing = True
class FakeResolverReactor:
    """
    Bare-bones reactor with deterministic behavior for the resolve method.
    """
    def __init__(self, names):
        """
        @type names: C{dict} containing C{str} keys and C{str} values.
        @param names: A hostname to IP address mapping.  The IP addresses
            are stringified dotted quads.
        """
        self.names = names

    def resolve(self, hostname):
        """
        Resolve a hostname by looking it up in the C{names} dictionary.
        Unknown names produce a failed Deferred with L{DNSLookupError}.
        """
        if hostname in self.names:
            return defer.succeed(self.names[hostname])
        return defer.fail(
            DNSLookupError("FakeResolverReactor couldn't find " + hostname))
class SOCKSv4Driver(socks.SOCKSv4):
    """SOCKSv4 server whose connect/listen operations are faked for tests."""

    # Most recent SOCKSv4Outgoing instantiated by connectClass.
    driver_outgoing = None
    # Most recent SOCKSv4IncomingFactory instantiated by listenClass.
    driver_listen = None

    def connectClass(self, host, port, klass, *args):
        # Build the protocol directly instead of opening a real connection.
        protocol = klass(*args)
        transport = StringTCPTransport()
        transport.peer = address.IPv4Address('TCP', host, port)
        protocol.transport = transport
        protocol.connectionMade()
        self.driver_outgoing = protocol
        return defer.succeed(protocol)

    def listenClass(self, port, klass, *args):
        # Pretend to listen; a wildcard port (0) maps to the fixed port 1234.
        self.driver_listen = klass(*args)
        return defer.succeed(('6.7.8.9', port or 1234))
class Connect(unittest.TestCase):
    """
    Tests for SOCKS and SOCKSv4a connect requests using the L{SOCKSv4} protocol.
    """
    def setUp(self):
        # Fresh driver wired to a string transport and a deterministic
        # resolver for each test.
        self.sock = SOCKSv4Driver()
        self.sock.transport = StringTCPTransport()
        self.sock.connectionMade()
        self.sock.reactor = FakeResolverReactor({"localhost":"127.0.0.1"})

    def tearDown(self):
        # Every test that created an outgoing connection must also have
        # closed it.
        outgoing = self.sock.driver_outgoing
        if outgoing is not None:
            self.assert_(outgoing.transport.stringTCPTransport_closing,
                         "Outgoing SOCKS connections need to be closed.")

    def test_simple(self):
        """
        A plain SOCKSv4 CONNECT is acknowledged with a 90 (granted) reply
        and data is then relayed in both directions.
        """
        self.sock.dataReceived(
            struct.pack('!BBH', 4, 1, 34)
            + socket.inet_aton('1.2.3.4')
            + 'fooBAR'
            + '\0')
        sent = self.sock.transport.value()
        self.sock.transport.clear()
        self.assertEqual(sent,
                         struct.pack('!BBH', 0, 90, 34)
                         + socket.inet_aton('1.2.3.4'))
        self.assert_(not self.sock.transport.stringTCPTransport_closing)
        self.assert_(self.sock.driver_outgoing is not None)

        # pass some data through
        self.sock.dataReceived('hello, world')
        self.assertEqual(self.sock.driver_outgoing.transport.value(),
                         'hello, world')

        # the other way around
        self.sock.driver_outgoing.dataReceived('hi there')
        self.assertEqual(self.sock.transport.value(), 'hi there')

        self.sock.connectionLost('fake reason')

    def test_socks4aSuccessfulResolution(self):
        """
        If the destination IP address has zeros for the first three octets and
        non-zero for the fourth octet, the client is attempting a v4a
        connection.  A hostname is specified after the user ID string and the
        server connects to the address that hostname resolves to.

        @see: U{http://en.wikipedia.org/wiki/SOCKS#SOCKS_4a_protocol}
        """
        # send the domain name "localhost" to be resolved
        clientRequest = (
            struct.pack('!BBH', 4, 1, 34)
            + socket.inet_aton('0.0.0.1')
            + 'fooBAZ\0'
            + 'localhost\0')

        # Deliver the bytes one by one to exercise the protocol's buffering
        # logic. FakeResolverReactor's resolve method is invoked to "resolve"
        # the hostname.
        for byte in clientRequest:
            self.sock.dataReceived(byte)

        sent = self.sock.transport.value()
        self.sock.transport.clear()

        # Verify that the server responded with the address which will be
        # connected to.
        self.assertEqual(
            sent,
            struct.pack('!BBH', 0, 90, 34) + socket.inet_aton('127.0.0.1'))
        self.assertFalse(self.sock.transport.stringTCPTransport_closing)
        self.assertNotIdentical(self.sock.driver_outgoing, None)

        # Pass some data through and verify it is forwarded to the outgoing
        # connection.
        self.sock.dataReceived('hello, world')
        self.assertEqual(
            self.sock.driver_outgoing.transport.value(), 'hello, world')

        # Deliver some data from the output connection and verify it is
        # passed along to the incoming side.
        self.sock.driver_outgoing.dataReceived('hi there')
        self.assertEqual(self.sock.transport.value(), 'hi there')

        self.sock.connectionLost('fake reason')

    def test_socks4aFailedResolution(self):
        """
        Failed hostname resolution on a SOCKSv4a packet results in a 91 error
        response and the connection getting closed.
        """
        # send the domain name "failinghost" to be resolved
        clientRequest = (
            struct.pack('!BBH', 4, 1, 34)
            + socket.inet_aton('0.0.0.1')
            + 'fooBAZ\0'
            + 'failinghost\0')

        # Deliver the bytes one by one to exercise the protocol's buffering
        # logic. FakeResolverReactor's resolve method is invoked to "resolve"
        # the hostname.
        for byte in clientRequest:
            self.sock.dataReceived(byte)

        # Verify that the server responds with a 91 error.
        sent = self.sock.transport.value()
        self.assertEqual(
            sent,
            struct.pack('!BBH', 0, 91, 0) + socket.inet_aton('0.0.0.0'))

        # A failed resolution causes the transport to drop the connection.
        self.assertTrue(self.sock.transport.stringTCPTransport_closing)
        self.assertIdentical(self.sock.driver_outgoing, None)

    def test_accessDenied(self):
        """
        A request refused by C{authorize} gets a 91 (rejected) reply and the
        connection is closed without any outgoing connection being made.
        """
        self.sock.authorize = lambda code, server, port, user: 0
        self.sock.dataReceived(
            struct.pack('!BBH', 4, 1, 4242)
            + socket.inet_aton('10.2.3.4')
            + 'fooBAR'
            + '\0')
        self.assertEqual(self.sock.transport.value(),
                         struct.pack('!BBH', 0, 91, 0)
                         + socket.inet_aton('0.0.0.0'))
        self.assert_(self.sock.transport.stringTCPTransport_closing)
        self.assertIdentical(self.sock.driver_outgoing, None)

    def test_eofRemote(self):
        """
        Closing the outgoing (remote) side after data has flowed does not
        leave the driver in a broken state.
        """
        self.sock.dataReceived(
            struct.pack('!BBH', 4, 1, 34)
            + socket.inet_aton('1.2.3.4')
            + 'fooBAR'
            + '\0')
        sent = self.sock.transport.value()
        self.sock.transport.clear()

        # pass some data through
        self.sock.dataReceived('hello, world')
        self.assertEqual(self.sock.driver_outgoing.transport.value(),
                         'hello, world')

        # now close it from the server side
        self.sock.driver_outgoing.transport.loseConnection()
        self.sock.driver_outgoing.connectionLost('fake reason')

    def test_eofLocal(self):
        """
        Closing the client (local) side after data has flowed closes the
        outgoing connection too (checked by tearDown).
        """
        self.sock.dataReceived(
            struct.pack('!BBH', 4, 1, 34)
            + socket.inet_aton('1.2.3.4')
            + 'fooBAR'
            + '\0')
        sent = self.sock.transport.value()
        self.sock.transport.clear()

        # pass some data through
        self.sock.dataReceived('hello, world')
        self.assertEqual(self.sock.driver_outgoing.transport.value(),
                         'hello, world')

        # now close it from the client side
        self.sock.connectionLost('fake reason')
class Bind(unittest.TestCase):
    """
    Tests for SOCKS and SOCKSv4a bind requests using the L{SOCKSv4} protocol.
    """
    def setUp(self):
        # Fresh driver wired to a string transport and a deterministic
        # resolver for each test.
        self.sock = SOCKSv4Driver()
        self.sock.transport = StringTCPTransport()
        self.sock.connectionMade()
        self.sock.reactor = FakeResolverReactor({"localhost":"127.0.0.1"})

##    def tearDown(self):
##        # TODO ensure the listen port is closed
##        listen = self.sock.driver_listen
##        if listen is not None:
##            self.assert_(incoming.transport.stringTCPTransport_closing,
##                    "Incoming SOCKS connections need to be closed.")

    def test_simple(self):
        """
        A SOCKSv4 BIND is acknowledged with the faked listen address, a
        second 90 reply is sent once the remote peer connects, and data is
        then relayed in both directions.
        """
        self.sock.dataReceived(
            struct.pack('!BBH', 4, 2, 34)
            + socket.inet_aton('1.2.3.4')
            + 'fooBAR'
            + '\0')
        sent = self.sock.transport.value()
        self.sock.transport.clear()
        self.assertEqual(sent,
                         struct.pack('!BBH', 0, 90, 1234)
                         + socket.inet_aton('6.7.8.9'))
        self.assert_(not self.sock.transport.stringTCPTransport_closing)
        self.assert_(self.sock.driver_listen is not None)

        # connect
        incoming = self.sock.driver_listen.buildProtocol(('1.2.3.4', 5345))
        self.assertNotIdentical(incoming, None)
        incoming.transport = StringTCPTransport()
        incoming.connectionMade()

        # now we should have the second reply packet
        sent = self.sock.transport.value()
        self.sock.transport.clear()
        self.assertEqual(sent,
                         struct.pack('!BBH', 0, 90, 0)
                         + socket.inet_aton('0.0.0.0'))
        self.assert_(not self.sock.transport.stringTCPTransport_closing)

        # pass some data through
        self.sock.dataReceived('hello, world')
        self.assertEqual(incoming.transport.value(),
                         'hello, world')

        # the other way around
        incoming.dataReceived('hi there')
        self.assertEqual(self.sock.transport.value(), 'hi there')

        self.sock.connectionLost('fake reason')

    def test_socks4a(self):
        """
        If the destination IP address has zeros for the first three octets and
        non-zero for the fourth octet, the client is attempting a v4a
        connection.  A hostname is specified after the user ID string and the
        server connects to the address that hostname resolves to.

        @see: U{http://en.wikipedia.org/wiki/SOCKS#SOCKS_4a_protocol}
        """
        # send the domain name "localhost" to be resolved
        clientRequest = (
            struct.pack('!BBH', 4, 2, 34)
            + socket.inet_aton('0.0.0.1')
            + 'fooBAZ\0'
            + 'localhost\0')

        # Deliver the bytes one by one to exercise the protocol's buffering
        # logic. FakeResolverReactor's resolve method is invoked to "resolve"
        # the hostname.
        for byte in clientRequest:
            self.sock.dataReceived(byte)

        sent = self.sock.transport.value()
        self.sock.transport.clear()

        # Verify that the server responded with the address which will be
        # connected to.
        self.assertEqual(
            sent,
            struct.pack('!BBH', 0, 90, 1234) + socket.inet_aton('6.7.8.9'))
        self.assertFalse(self.sock.transport.stringTCPTransport_closing)
        self.assertNotIdentical(self.sock.driver_listen, None)

        # connect
        incoming = self.sock.driver_listen.buildProtocol(('127.0.0.1', 5345))
        self.assertNotIdentical(incoming, None)
        incoming.transport = StringTCPTransport()
        incoming.connectionMade()

        # now we should have the second reply packet
        sent = self.sock.transport.value()
        self.sock.transport.clear()
        self.assertEqual(sent,
                         struct.pack('!BBH', 0, 90, 0)
                         + socket.inet_aton('0.0.0.0'))
        self.assertNotIdentical(
            self.sock.transport.stringTCPTransport_closing, None)

        # Deliver some data from the output connection and verify it is
        # passed along to the incoming side.
        self.sock.dataReceived('hi there')
        self.assertEqual(incoming.transport.value(), 'hi there')

        # the other way around
        incoming.dataReceived('hi there')
        self.assertEqual(self.sock.transport.value(), 'hi there')

        self.sock.connectionLost('fake reason')

    def test_socks4aFailedResolution(self):
        """
        Failed hostname resolution on a SOCKSv4a packet results in a 91 error
        response and the connection getting closed.
        """
        # send the domain name "failinghost" to be resolved
        clientRequest = (
            struct.pack('!BBH', 4, 2, 34)
            + socket.inet_aton('0.0.0.1')
            + 'fooBAZ\0'
            + 'failinghost\0')

        # Deliver the bytes one by one to exercise the protocol's buffering
        # logic. FakeResolverReactor's resolve method is invoked to "resolve"
        # the hostname.
        for byte in clientRequest:
            self.sock.dataReceived(byte)

        # Verify that the server responds with a 91 error.
        sent = self.sock.transport.value()
        self.assertEqual(
            sent,
            struct.pack('!BBH', 0, 91, 0) + socket.inet_aton('0.0.0.0'))

        # A failed resolution causes the transport to drop the connection.
        self.assertTrue(self.sock.transport.stringTCPTransport_closing)
        self.assertIdentical(self.sock.driver_outgoing, None)

    def test_accessDenied(self):
        """
        A bind request refused by C{authorize} gets a 91 (rejected) reply
        and the connection is closed without a listener being created.
        """
        self.sock.authorize = lambda code, server, port, user: 0
        self.sock.dataReceived(
            struct.pack('!BBH', 4, 2, 4242)
            + socket.inet_aton('10.2.3.4')
            + 'fooBAR'
            + '\0')
        self.assertEqual(self.sock.transport.value(),
                         struct.pack('!BBH', 0, 91, 0)
                         + socket.inet_aton('0.0.0.0'))
        self.assert_(self.sock.transport.stringTCPTransport_closing)
        self.assertIdentical(self.sock.driver_listen, None)

    def test_eofRemote(self):
        """
        Closing the accepted (remote) side after data has flowed does not
        leave the driver in a broken state.
        """
        self.sock.dataReceived(
            struct.pack('!BBH', 4, 2, 34)
            + socket.inet_aton('1.2.3.4')
            + 'fooBAR'
            + '\0')
        sent = self.sock.transport.value()
        self.sock.transport.clear()

        # connect
        incoming = self.sock.driver_listen.buildProtocol(('1.2.3.4', 5345))
        self.assertNotIdentical(incoming, None)
        incoming.transport = StringTCPTransport()
        incoming.connectionMade()

        # now we should have the second reply packet
        sent = self.sock.transport.value()
        self.sock.transport.clear()
        self.assertEqual(sent,
                         struct.pack('!BBH', 0, 90, 0)
                         + socket.inet_aton('0.0.0.0'))
        self.assert_(not self.sock.transport.stringTCPTransport_closing)

        # pass some data through
        self.sock.dataReceived('hello, world')
        self.assertEqual(incoming.transport.value(),
                         'hello, world')

        # now close it from the server side
        incoming.transport.loseConnection()
        incoming.connectionLost('fake reason')

    def test_eofLocal(self):
        """
        Closing the client (local) side after data has flowed does not leave
        the driver in a broken state.
        """
        self.sock.dataReceived(
            struct.pack('!BBH', 4, 2, 34)
            + socket.inet_aton('1.2.3.4')
            + 'fooBAR'
            + '\0')
        sent = self.sock.transport.value()
        self.sock.transport.clear()

        # connect
        incoming = self.sock.driver_listen.buildProtocol(('1.2.3.4', 5345))
        self.assertNotIdentical(incoming, None)
        incoming.transport = StringTCPTransport()
        incoming.connectionMade()

        # now we should have the second reply packet
        sent = self.sock.transport.value()
        self.sock.transport.clear()
        self.assertEqual(sent,
                         struct.pack('!BBH', 0, 90, 0)
                         + socket.inet_aton('0.0.0.0'))
        self.assert_(not self.sock.transport.stringTCPTransport_closing)

        # pass some data through
        self.sock.dataReceived('hello, world')
        self.assertEqual(incoming.transport.value(),
                         'hello, world')

        # now close it from the client side
        self.sock.connectionLost('fake reason')

    def test_badSource(self):
        """
        A connection from an address other than the one named in the bind
        request is refused: buildProtocol returns None, a 91 reply is sent
        and the connection is closed.
        """
        self.sock.dataReceived(
            struct.pack('!BBH', 4, 2, 34)
            + socket.inet_aton('1.2.3.4')
            + 'fooBAR'
            + '\0')
        sent = self.sock.transport.value()
        self.sock.transport.clear()

        # connect from WRONG address
        incoming = self.sock.driver_listen.buildProtocol(('1.6.6.6', 666))
        self.assertIdentical(incoming, None)

        # Now we should have the second reply packet and it should
        # be a failure. The connection should be closing.
        sent = self.sock.transport.value()
        self.sock.transport.clear()
        self.assertEqual(sent,
                         struct.pack('!BBH', 0, 91, 0)
                         + socket.inet_aton('0.0.0.0'))
        self.assert_(self.sock.transport.stringTCPTransport_closing)
|
yashodhank/erpnext | refs/heads/develop | erpnext/manufacturing/doctype/production_plan_sales_order/production_plan_sales_order.py | 121 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class ProductionPlanSalesOrder(Document):
	# Child-table row linking a Sales Order to a Production Plan; all
	# behaviour comes from the framework's Document base class.
	pass
stefanopanella/xapi-storage-plugins | refs/heads/master | libs/poolhelper.py | 1 | from xapi.storage import log
import XenAPI
def get_online_host_refs(dbg, session):
    """Return the refs of all hosts whose metrics report them as live.

    ``dbg`` is an opaque debug tag (unused here, kept for interface
    symmetry with the other helpers); ``session`` is an authenticated
    XenAPI session.
    """
    # This function is borrowed from xapi-project/sm.git/util.py
    online_hosts = []
    hosts = session.xenapi.host.get_all_records()
    # Bugfix: .iteritems() is Python-2-only; .items() iterates the same
    # pairs and also works on Python 3.
    for host_ref, host_rec in hosts.items():
        metrics_ref = host_rec["metrics"]
        metrics_rec = session.xenapi.host_metrics.get_record(metrics_ref)
        if metrics_rec["live"]:
            online_hosts.append(host_ref)
    return online_hosts
def call_plugin_in_pool(dbg, plugin_name, plugin_function, args):
    """Run ``plugin_function`` of ``plugin_name`` on every online host.

    ``args`` is the string-to-string dict XenAPI passes to the plugin.
    Raises if any host returns something other than the string "True".
    """
    log.debug("%s: calling plugin '%s' function '%s' with args %s in pool" % (dbg, plugin_name, plugin_function, args))
    session = XenAPI.xapi_local()
    try:
        session.xenapi.login_with_password('root', '')
    except:
        # ToDo: We ought to raise something else
        raise
    try:
        for host_ref in get_online_host_refs(dbg, session):
            log.debug("%s: calling plugin '%s' function '%s' with args %s on host %s" % (dbg, plugin_name, plugin_function, args, host_ref))
            resulttext = session.xenapi.host.call_plugin(
                host_ref,
                plugin_name,
                plugin_function,
                args)
            log.debug("%s: resulttext = %s" % (dbg, resulttext))
            if resulttext != "True":
                # NOTE(review): `xapi` is never imported in this module (only
                # `from xapi.storage import log`), so this raise would itself
                # fail with NameError — confirm the intended import.
                # NOTE(review): the format string has 3 placeholders but is
                # given a 4-tuple, which would raise TypeError — looks like
                # it should be (host_ref, plugin_function, args); verify.
                raise xapi.storage.api.volume.Unimplemented(
                    "Failed to get hostref %s to run %s(%s)" %
                    (host_ref, plugin_name, plugin_function, args))
    except:
        # ToDo: We ought to raise something else
        raise
    finally:
        # Always drop the local session, even on failure.
        session.xenapi.session.logout()
def call_plugin_on_host(dbg, host_name, plugin_name, plugin_function, args):
    """Run ``plugin_function`` of ``plugin_name`` on the host whose
    name-label equals ``host_name``.

    Only online hosts are considered; hosts with other names are skipped.
    Raises if the matching host returns something other than "True".
    """
    log.debug("%s: calling plugin '%s' function '%s' with args %s on %s" % (dbg, plugin_name, plugin_function, args, host_name))
    session = XenAPI.xapi_local()
    try:
        session.xenapi.login_with_password('root', '')
    except:
        # ToDo: We ought to raise something else
        raise
    try:
        for host_ref in get_online_host_refs(dbg, session):
            log.debug("%s: host_ref %s - host_name %s)" % (dbg, session.xenapi.host.get_name_label(host_ref), host_name))
            if session.xenapi.host.get_name_label(host_ref) == host_name:
                log.debug("%s: calling plugin '%s' function '%s' with args %s on host %s - %s)" % (dbg, plugin_name, plugin_function, args, host_ref, host_name))
                resulttext = session.xenapi.host.call_plugin(
                    host_ref,
                    plugin_name,
                    plugin_function,
                    args)
                log.debug("%s: resulttext = %s" % (dbg, resulttext))
                if resulttext != "True":
                    # NOTE(review): `xapi` is never imported here (only
                    # `from xapi.storage import log`), so this raise would
                    # itself fail with NameError — confirm intended import.
                    # NOTE(review): 3 placeholders, 4-tuple — this `%` would
                    # raise TypeError; likely meant
                    # (host_ref, plugin_function, args); verify.
                    raise xapi.storage.api.volume.Unimplemented(
                        "Failed to get hostref %s to run %s(%s)" %
                        (host_ref, plugin_name, plugin_function, args))
    except:
        # ToDo: We ought to raise something else
        raise
    finally:
        # Always drop the local session, even on failure.
        session.xenapi.session.logout()
def suspend_datapath_in_pool(dbg, path):
    """Suspend the datapath at ``path`` on every online host in the pool."""
    call_plugin_in_pool(
        dbg, "suspend-resume-datapath", "suspend_datapath", {'path': path})


def resume_datapath_in_pool(dbg, path):
    """Resume the datapath at ``path`` on every online host in the pool."""
    call_plugin_in_pool(
        dbg, "suspend-resume-datapath", "resume_datapath", {'path': path})


def suspend_datapath_on_host(dbg, host, path):
    """Suspend the datapath at ``path`` on the single host named ``host``."""
    call_plugin_on_host(
        dbg, host, "suspend-resume-datapath", "suspend_datapath",
        {'path': path})


def resume_datapath_on_host(dbg, host, path):
    """Resume the datapath at ``path`` on the single host named ``host``."""
    call_plugin_on_host(
        dbg, host, "suspend-resume-datapath", "resume_datapath",
        {'path': path})


def refresh_datapath_on_host(dbg, host, path, new_path):
    """Point the datapath at ``path`` to ``new_path`` on host ``host``."""
    call_plugin_on_host(
        dbg, host, "suspend-resume-datapath", "refresh_datapath",
        {'path': path, 'new_path': new_path})
|
abaheti/rabbitvcs | refs/heads/master | rabbitvcs/ui/remotes.py | 3 | #
# This is an extension to the Nautilus file manager to allow better
# integration with the Subversion source control system.
#
# Copyright (C) 2006-2008 by Jason Field <jason@jasonfield.com>
# Copyright (C) 2007-2008 by Bruce van der Kooij <brucevdkooij@gmail.com>
# Copyright (C) 2008-2010 by Adam Plumb <adamplumb@gmail.com>
#
# RabbitVCS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RabbitVCS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RabbitVCS; If not, see <http://www.gnu.org/licenses/>.
#
import os
import pygtk
import gobject
import gtk
import pango
from datetime import datetime
import time
from rabbitvcs.ui import InterfaceView
from rabbitvcs.ui.action import GitAction
import rabbitvcs.ui.widget
from rabbitvcs.ui.dialog import DeleteConfirmation
import rabbitvcs.util.helper
import rabbitvcs.vcs
from rabbitvcs import gettext
_ = gettext.gettext
STATE_ADD = 0
STATE_EDIT = 1
class GitRemotes(InterfaceView):
    """
    UI window for listing, adding, renaming and deleting the git remotes
    of a working copy.
    """
    # Whether the treeview is currently adding a new row or editing one.
    state = STATE_ADD

    def __init__(self, path):
        """Build the window for the working copy at ``path``."""
        InterfaceView.__init__(self, "manager", "Manager")

        self.vcs = rabbitvcs.vcs.VCS()
        self.git = self.vcs.git(path)

        self.get_widget("Manager").set_title(_("Remote Repository Manager"))
        self.get_widget("items_label").set_markup(_("<b>Remote Repositories</b>"))

        self.selected_branch = None
        # Editable two-column table: remote name and remote URL/host.
        self.items_treeview = rabbitvcs.ui.widget.Table(
            self.get_widget("items_treeview"),
            [gobject.TYPE_STRING, gobject.TYPE_STRING],
            [_("Name"), _("Host")],
            callbacks={
                "mouse-event": self.on_treeview_mouse_event,
                "key-event": self.on_treeview_key_event,
                "cell-edited": self.on_treeview_cell_edited_event
            },
            flags={
                "sortable": True,
                "sort_on": 0,
                "editable": [0,1]
            }
        )

        self.load()

    def load(self):
        """Refill the table from the repository's current remote list."""
        self.items_treeview.clear()
        self.remote_list = self.git.remote_list()
        for remote in self.remote_list:
            self.items_treeview.append([remote["name"], remote["host"]])

    def save(self, row, column, data):
        """Persist an edited cell.

        ``row``/``column`` identify the edited cell and ``data`` is its new
        text.  A row index within ``remote_list`` is an edit of an existing
        remote; anything beyond it is a freshly added row.
        """
        row = int(row)
        # Bugfix: the original tested ``row in self.remote_list`` which is
        # list *membership* (an int against dicts) and is always False, so
        # edits to existing remotes were always treated as additions.
        if 0 <= row < len(self.remote_list):
            remote = self.remote_list[row]

            name = remote["name"]
            if column == 0:
                name = data

            host = remote["host"]
            if column == 1:
                host = data

            if name != remote["name"]:
                self.git.remote_rename(remote["name"], name)
            if host != remote["host"]:
                self.git.remote_set_url(remote["name"], host)

            self.load()
        else:
            (name, host) = self.items_treeview.get_row(row)
            # Only add once both columns have been filled in.
            if name and host:
                # print() form works on Python 2 and 3 alike.
                print("Adding")
                self.git.remote_add(name, host)
                self.load()

    def on_add_clicked(self, widget):
        """'Add' button: start a new empty row."""
        self.show_add()

    def on_delete_clicked(self, widget):
        """'Delete' button: confirm, then remove the selected remotes."""
        selected = self.items_treeview.get_selected_row_items(0)
        confirm = rabbitvcs.ui.dialog.Confirmation(_("Are you sure you want to delete %s?" % ", ".join(selected)))
        result = confirm.run()

        if result == gtk.RESPONSE_OK or result == True:
            for remote in selected:
                self.git.remote_delete(remote)

            self.load()

    def on_treeview_key_event(self, treeview, data=None):
        """Treat navigation/confirm keys like a selection change."""
        if gtk.gdk.keyval_name(data.keyval) in ("Up", "Down", "Return"):
            self.on_treeview_event(treeview, data)

    def on_treeview_mouse_event(self, treeview, data=None):
        """Mouse clicks update the selection state."""
        self.on_treeview_event(treeview, data)

    def on_treeview_cell_edited_event(self, cell, row, data, column):
        """In-place cell edit: update the model, then persist it."""
        self.items_treeview.set_row_item(row, column, data)
        self.save(row, column, data)

    def on_treeview_event(self, treeview, data):
        """Enable deletion and switch to edit mode when a row is selected."""
        selected = self.items_treeview.get_selected_row_items(0)
        if len(selected) > 0:
            if len(selected) == 1:
                self.show_edit(selected[0])
            self.get_widget("delete").set_sensitive(True)

    def show_add(self):
        """Append a blank editable row and focus it for a new remote."""
        self.state = STATE_ADD
        self.items_treeview.unselect_all()
        self.items_treeview.append(["", ""])
        self.items_treeview.focus(1, 0)

    def show_edit(self, remote_name):
        """Switch to edit mode for ``remote_name`` (state flag only)."""
        self.state = STATE_EDIT
if __name__ == "__main__":
    # Command-line entry point: parse arguments, open the remote manager
    # for the given working-copy path, and run the GTK main loop until quit.
    from rabbitvcs.ui import main
    (options, paths) = main(usage="Usage: rabbitvcs branch-manager path")

    window = GitRemotes(paths[0])
    window.register_gtk_quit()
    gtk.main()
|
houzhenggang/hiwifi-openwrt-HC5661-HC5761 | refs/heads/master | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/lib2to3/fixes/fix_apply.py | 315 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for apply().
This converts apply(func, v, k) into (func)(*v, **k)."""
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Call, Comma, parenthesize
class FixApply(fixer_base.BaseFix):
    """Rewrite ``apply(func, v[, k])`` calls as ``func(*v[, **k])``."""
    BM_compatible = True

    # Matches apply() called with 2 or 3 positional (non-keyword) arguments;
    # the bindings func/args/kwds capture the respective sub-trees.
    PATTERN = """
    power< 'apply'
        trailer<
            '('
            arglist<
                (not argument<NAME '=' any>) func=any ','
                (not argument<NAME '=' any>) args=any [','
                (not argument<NAME '=' any>) kwds=any] [',']
            >
            ')'
        >
    >
    """

    def transform(self, node, results):
        syms = self.syms
        assert results
        func = results["func"]
        args = results["args"]
        kwds = results.get("kwds")
        # Remember the original node's prefix so the replacement keeps the
        # surrounding whitespace/comments.
        prefix = node.prefix
        func = func.clone()
        if (func.type not in (token.NAME, syms.atom) and
            (func.type != syms.power or
             func.children[-2].type == token.DOUBLESTAR)):
            # Need to parenthesize
            func = parenthesize(func)
        func.prefix = ""
        args = args.clone()
        args.prefix = ""
        if kwds is not None:
            kwds = kwds.clone()
            kwds.prefix = ""
        # Build the new argument list: *args and, when present, **kwds.
        l_newargs = [pytree.Leaf(token.STAR, u"*"), args]
        if kwds is not None:
            l_newargs.extend([Comma(),
                              pytree.Leaf(token.DOUBLESTAR, u"**"),
                              kwds])
            l_newargs[-2].prefix = u" " # that's the ** token
        # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t)
        # can be translated into f(x, y, *t) instead of f(*(x, y) + t)
        #new = pytree.Node(syms.power, (func, ArgList(l_newargs)))
        return Call(func, l_newargs, prefix=prefix)
|
jm-begon/scikit-learn | refs/heads/master | sklearn/tree/tree.py | 113 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
# Public estimators exported by this module.
__all__ = ["DecisionTreeClassifier",
           "DecisionTreeRegressor",
           "ExtraTreeClassifier",
           "ExtraTreeRegressor"]


# =============================================================================
# Types and constants
# =============================================================================

# dtypes used by the Cython tree code: DTYPE for the input X (float32 per the
# fit() docstring), DOUBLE for targets/weights.
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE

# Lookup tables mapping the user-facing criterion string to its Cython class.
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}

# Splitter implementations for dense and sparse input, keyed by the
# user-facing `splitter` string.
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
                   "presort-best": _tree.PresortBestSplitter,
                   "random": _tree.RandomSplitter}

SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
                    "random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
                                          _LearntSelectorMixin)):
    """Base class for decision trees.
    Warning: This class should not be used directly.
    Use derived classes instead.
    """
    @abstractmethod
    def __init__(self,
                 criterion,
                 splitter,
                 max_depth,
                 min_samples_split,
                 min_samples_leaf,
                 min_weight_fraction_leaf,
                 max_features,
                 max_leaf_nodes,
                 random_state,
                 class_weight=None):
        self.criterion = criterion
        self.splitter = splitter
        self.max_depth = max_depth
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.min_weight_fraction_leaf = min_weight_fraction_leaf
        self.max_features = max_features
        self.random_state = random_state
        self.max_leaf_nodes = max_leaf_nodes
        self.class_weight = class_weight
        # Fitted attributes; None until fit() populates them, which is also
        # how _validate_X_predict detects an unfitted estimator (tree_).
        self.n_features_ = None
        self.n_outputs_ = None
        self.classes_ = None
        self.n_classes_ = None
        self.tree_ = None
        self.max_features_ = None
    def fit(self, X, y, sample_weight=None, check_input=True):
        """Build a decision tree from the training set (X, y).
        Parameters
        ----------
        X : array-like or sparse matrix, shape = [n_samples, n_features]
            The training input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csc_matrix``.
        y : array-like, shape = [n_samples] or [n_samples, n_outputs]
            The target values (class labels in classification, real numbers in
            regression). In the regression case, use ``dtype=np.float64`` and
            ``order='C'`` for maximum efficiency.
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted. Splits
            that would create child nodes with net zero or negative weight are
            ignored while searching for a split in each node. In the case of
            classification, splits are also ignored if they would result in any
            single class carrying a negative weight in either child node.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Returns
        -------
        self : object
            Returns self.
        """
        random_state = check_random_state(self.random_state)
        if check_input:
            X = check_array(X, dtype=DTYPE, accept_sparse="csc")
        if issparse(X):
            X.sort_indices()
            # The Cython splitters index with np.intc; 64-bit sparse indices
            # are rejected rather than silently truncated.
            if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
                raise ValueError("No support for np.int64 index based "
                                 "sparse matrices")
        # Determine output settings
        n_samples, self.n_features_ = X.shape
        is_classification = isinstance(self, ClassifierMixin)
        y = np.atleast_1d(y)
        expanded_class_weight = None
        if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity against vs
            # [:, np.newaxis] that does not.
            y = np.reshape(y, (-1, 1))
        self.n_outputs_ = y.shape[1]
        if is_classification:
            y = np.copy(y)
            self.classes_ = []
            self.n_classes_ = []
            if self.class_weight is not None:
                # Keep the original labels around: compute_sample_weight
                # needs them, not the integer-encoded copy built below.
                y_original = np.copy(y)
            # Re-encode class labels as indices into classes_[k], one
            # column per output.
            # NOTE(review): np.int is a deprecated alias for the builtin int.
            y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
            for k in range(self.n_outputs_):
                classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
                self.classes_.append(classes_k)
                self.n_classes_.append(classes_k.shape[0])
            y = y_store_unique_indices
            if self.class_weight is not None:
                expanded_class_weight = compute_sample_weight(
                    self.class_weight, y_original)
        else:
            # Regression: no classes; keep per-output placeholders so the
            # Tree constructor sees a consistent n_classes_ array.
            self.classes_ = [None] * self.n_outputs_
            self.n_classes_ = [1] * self.n_outputs_
        self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
        # The Cython tree requires C-contiguous float64 targets.
        if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
            y = np.ascontiguousarray(y, dtype=DOUBLE)
        # Check parameters
        # "Unlimited" depth is represented by the largest signed 32-bit int;
        # "unlimited" leaf nodes by -1 (selects the depth-first builder below).
        max_depth = ((2 ** 31) - 1 if self.max_depth is None
                     else self.max_depth)
        max_leaf_nodes = (-1 if self.max_leaf_nodes is None
                          else self.max_leaf_nodes)
        if isinstance(self.max_features, six.string_types):
            if self.max_features == "auto":
                # "auto" means sqrt(n_features) for classification but all
                # features for regression.
                if is_classification:
                    max_features = max(1, int(np.sqrt(self.n_features_)))
                else:
                    max_features = self.n_features_
            elif self.max_features == "sqrt":
                max_features = max(1, int(np.sqrt(self.n_features_)))
            elif self.max_features == "log2":
                max_features = max(1, int(np.log2(self.n_features_)))
            else:
                raise ValueError(
                    'Invalid value for max_features. Allowed string '
                    'values are "auto", "sqrt" or "log2".')
        elif self.max_features is None:
            max_features = self.n_features_
        elif isinstance(self.max_features, (numbers.Integral, np.integer)):
            max_features = self.max_features
        else:  # float
            # Interpreted as a fraction of n_features.
            if self.max_features > 0.0:
                max_features = max(1, int(self.max_features * self.n_features_))
            else:
                max_features = 0
        self.max_features_ = max_features
        if len(y) != n_samples:
            raise ValueError("Number of labels=%d does not match "
                             "number of samples=%d" % (len(y), n_samples))
        if self.min_samples_split <= 0:
            raise ValueError("min_samples_split must be greater than zero.")
        if self.min_samples_leaf <= 0:
            raise ValueError("min_samples_leaf must be greater than zero.")
        if not 0 <= self.min_weight_fraction_leaf <= 0.5:
            raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
        if max_depth <= 0:
            raise ValueError("max_depth must be greater than zero. ")
        if not (0 < max_features <= self.n_features_):
            raise ValueError("max_features must be in (0, n_features]")
        if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
            raise ValueError("max_leaf_nodes must be integral number but was "
                             "%r" % max_leaf_nodes)
        if -1 < max_leaf_nodes < 2:
            raise ValueError(("max_leaf_nodes {0} must be either smaller than "
                              "0 or larger than 1").format(max_leaf_nodes))
        if sample_weight is not None:
            # Coerce weights to C-contiguous float64 for the Cython builder.
            if (getattr(sample_weight, "dtype", None) != DOUBLE or
                    not sample_weight.flags.contiguous):
                sample_weight = np.ascontiguousarray(
                    sample_weight, dtype=DOUBLE)
            if len(sample_weight.shape) > 1:
                raise ValueError("Sample weights array has more "
                                 "than one dimension: %d" %
                                 len(sample_weight.shape))
            if len(sample_weight) != n_samples:
                raise ValueError("Number of weights=%d does not match "
                                 "number of samples=%d" %
                                 (len(sample_weight), n_samples))
        if expanded_class_weight is not None:
            # Fold class weights into the per-sample weights.
            if sample_weight is not None:
                sample_weight = sample_weight * expanded_class_weight
            else:
                sample_weight = expanded_class_weight
        # Set min_weight_leaf from min_weight_fraction_leaf
        # NOTE(review): when sample_weight is None the fraction is silently
        # ignored (min_weight_leaf stays 0) — confirm this is intended.
        if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
            min_weight_leaf = (self.min_weight_fraction_leaf *
                               np.sum(sample_weight))
        else:
            min_weight_leaf = 0.
        # Set min_samples_split sensibly
        min_samples_split = max(self.min_samples_split,
                                2 * self.min_samples_leaf)
        # Build tree
        criterion = self.criterion
        if not isinstance(criterion, Criterion):
            criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
                                                         self.n_classes_)
            else:
                criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
        SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
        splitter = self.splitter
        if not isinstance(self.splitter, Splitter):
            splitter = SPLITTERS[self.splitter](criterion,
                                                self.max_features_,
                                                self.min_samples_leaf,
                                                min_weight_leaf,
                                                random_state)
        self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
        # Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
        if max_leaf_nodes < 0:
            builder = DepthFirstTreeBuilder(splitter, min_samples_split,
                                            self.min_samples_leaf,
                                            min_weight_leaf,
                                            max_depth)
        else:
            builder = BestFirstTreeBuilder(splitter, min_samples_split,
                                           self.min_samples_leaf,
                                           min_weight_leaf,
                                           max_depth,
                                           max_leaf_nodes)
        builder.build(self.tree_, X, y, sample_weight)
        # For single-output problems, unwrap the per-output lists so users
        # see a scalar n_classes_ and a flat classes_ array.
        if self.n_outputs_ == 1:
            self.n_classes_ = self.n_classes_[0]
            self.classes_ = self.classes_[0]
        return self
    def _validate_X_predict(self, X, check_input):
        """Validate X whenever one tries to predict, apply, predict_proba"""
        if self.tree_ is None:
            raise NotFittedError("Estimator not fitted, "
                                 "call `fit` before exploiting the model.")
        if check_input:
            # Prediction uses CSR (row-wise) traversal, unlike fit's CSC.
            X = check_array(X, dtype=DTYPE, accept_sparse="csr")
            if issparse(X) and (X.indices.dtype != np.intc or
                                X.indptr.dtype != np.intc):
                raise ValueError("No support for np.int64 index based "
                                 "sparse matrices")
        n_features = X.shape[1]
        if self.n_features_ != n_features:
            raise ValueError("Number of features of the model must "
                             " match the input. Model n_features is %s and "
                             " input n_features is %s "
                             % (self.n_features_, n_features))
        return X
    def predict(self, X, check_input=True):
        """Predict class or regression value for X.
        For a classification model, the predicted class for each sample in X is
        returned. For a regression model, the predicted value based on X is
        returned.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Returns
        -------
        y : array of shape = [n_samples] or [n_samples, n_outputs]
            The predicted classes, or the predict values.
        """
        X = self._validate_X_predict(X, check_input)
        proba = self.tree_.predict(X)
        n_samples = X.shape[0]
        # Classification
        if isinstance(self, ClassifierMixin):
            if self.n_outputs_ == 1:
                # Map argmax over class counts back to the original labels.
                return self.classes_.take(np.argmax(proba, axis=1), axis=0)
            else:
                predictions = np.zeros((n_samples, self.n_outputs_))
                for k in range(self.n_outputs_):
                    predictions[:, k] = self.classes_[k].take(
                        np.argmax(proba[:, k], axis=1),
                        axis=0)
                return predictions
        # Regression
        else:
            if self.n_outputs_ == 1:
                return proba[:, 0]
            else:
                return proba[:, :, 0]
    def apply(self, X, check_input=True):
        """
        Returns the index of the leaf that each sample is predicted as.
        Parameters
        ----------
        X : array_like or sparse matrix, shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Returns
        -------
        X_leaves : array_like, shape = [n_samples,]
            For each datapoint x in X, return the index of the leaf x
            ends up in. Leaves are numbered within
            ``[0; self.tree_.node_count)``, possibly with gaps in the
            numbering.
        """
        X = self._validate_X_predict(X, check_input)
        return self.tree_.apply(X)
    @property
    def feature_importances_(self):
        """Return the feature importances.
        The importance of a feature is computed as the (normalized) total
        reduction of the criterion brought by that feature.
        It is also known as the Gini importance.
        Returns
        -------
        feature_importances_ : array, shape = [n_features]
        """
        if self.tree_ is None:
            raise NotFittedError("Estimator not fitted, call `fit` before"
                                 " `feature_importances_`.")
        return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
    """A decision tree classifier.
    Read more in the :ref:`User Guide <tree>`.
    Parameters
    ----------
    criterion : string, optional (default="gini")
        The function to measure the quality of a split. Supported criteria are
        "gini" for the Gini impurity and "entropy" for the information gain.
    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=sqrt(n_features)`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    class_weight : dict, list of dicts, "balanced" or None, optional
                   (default=None)
        Weights associated with classes in the form ``{class_label: weight}``.
        If not given, all classes are supposed to have weight one. For
        multi-output problems, a list of dicts can be provided in the same
        order as the columns of y.
        The "balanced" mode uses the values of y to automatically adjust
        weights inversely proportional to class frequencies in the input data
        as ``n_samples / (n_classes * np.bincount(y))``
        For multi-output, the weights of each column of y will be multiplied.
        Note that these weights will be multiplied with sample_weight (passed
        through the fit method) if sample_weight is specified.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    classes_ : array of shape = [n_classes] or a list of such arrays
        The classes labels (single output problem),
        or a list of arrays of class labels (multi-output problem).
    feature_importances_ : array of shape = [n_features]
        The feature importances. The higher, the more important the
        feature. The importance of a feature is computed as the (normalized)
        total reduction of the criterion brought by that feature.  It is also
        known as the Gini importance [4]_.
    max_features_ : int,
        The inferred value of max_features.
    n_classes_ : int or list
        The number of classes (for single output problems),
        or a list containing the number of classes for each
        output (for multi-output problems).
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    tree_ : Tree object
        The underlying Tree object.
    See also
    --------
    DecisionTreeRegressor
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    Examples
    --------
    >>> from sklearn.datasets import load_iris
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeClassifier
    >>> clf = DecisionTreeClassifier(random_state=0)
    >>> iris = load_iris()
    >>> cross_val_score(clf, iris.data, iris.target, cv=10)
    ...                             # doctest: +SKIP
    ...
    array([ 1.     ,  0.93...,  0.86...,  0.93...,  0.93...,
            0.93...,  0.93...,  1.     ,  0.93...,  1.      ])
    """
    def __init__(self,
                 criterion="gini",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        # Pure parameter pass-through; all validation happens in fit().
        super(DecisionTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state)
    def predict_proba(self, X, check_input=True):
        """Predict class probabilities of the input samples X.
        The predicted class probability is the fraction of samples of the same
        class in a leaf.
        check_input : boolean, (default=True)
            Allow to bypass several input checking.
            Don't use this parameter unless you know what you do.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        X = self._validate_X_predict(X, check_input)
        # tree_.predict returns per-leaf class counts; normalize each row
        # to probabilities below.
        proba = self.tree_.predict(X)
        if self.n_outputs_ == 1:
            proba = proba[:, :self.n_classes_]
            normalizer = proba.sum(axis=1)[:, np.newaxis]
            # Guard against division by zero for all-zero count rows.
            normalizer[normalizer == 0.0] = 1.0
            proba /= normalizer
            return proba
        else:
            # One probability array per output.
            all_proba = []
            for k in range(self.n_outputs_):
                proba_k = proba[:, k, :self.n_classes_[k]]
                normalizer = proba_k.sum(axis=1)[:, np.newaxis]
                normalizer[normalizer == 0.0] = 1.0
                proba_k /= normalizer
                all_proba.append(proba_k)
            return all_proba
    def predict_log_proba(self, X):
        """Predict class log-probabilities of the input samples X.
        Parameters
        ----------
        X : array-like or sparse matrix of shape = [n_samples, n_features]
            The input samples. Internally, it will be converted to
            ``dtype=np.float32`` and if a sparse matrix is provided
            to a sparse ``csr_matrix``.
        Returns
        -------
        p : array of shape = [n_samples, n_classes], or a list of n_outputs
            such arrays if n_outputs > 1.
            The class log-probabilities of the input samples. The order of the
            classes corresponds to that in the attribute `classes_`.
        """
        proba = self.predict_proba(X)
        # Note: zero probabilities map to -inf under np.log.
        if self.n_outputs_ == 1:
            return np.log(proba)
        else:
            for k in range(self.n_outputs_):
                proba[k] = np.log(proba[k])
            return proba
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
    """A decision tree regressor.
    Read more in the :ref:`User Guide <tree>`.
    Parameters
    ----------
    criterion : string, optional (default="mse")
        The function to measure the quality of a split. The only supported
        criterion is "mse" for the mean squared error, which is equal to
        variance reduction as feature selection criterion.
    splitter : string, optional (default="best")
        The strategy used to choose the split at each node. Supported
        strategies are "best" to choose the best split and "random" to choose
        the best random split.
    max_features : int, float, string or None, optional (default=None)
        The number of features to consider when looking for the best split:
        - If int, then consider `max_features` features at each split.
        - If float, then `max_features` is a percentage and
          `int(max_features * n_features)` features are considered at each
          split.
        - If "auto", then `max_features=n_features`.
        - If "sqrt", then `max_features=sqrt(n_features)`.
        - If "log2", then `max_features=log2(n_features)`.
        - If None, then `max_features=n_features`.
        Note: the search for a split does not stop until at least one
        valid partition of the node samples is found, even if it requires to
        effectively inspect more than ``max_features`` features.
    max_depth : int or None, optional (default=None)
        The maximum depth of the tree. If None, then nodes are expanded until
        all leaves are pure or until all leaves contain less than
        min_samples_split samples.
        Ignored if ``max_leaf_nodes`` is not None.
    min_samples_split : int, optional (default=2)
        The minimum number of samples required to split an internal node.
    min_samples_leaf : int, optional (default=1)
        The minimum number of samples required to be at a leaf node.
    min_weight_fraction_leaf : float, optional (default=0.)
        The minimum weighted fraction of the input samples required to be at a
        leaf node.
    max_leaf_nodes : int or None, optional (default=None)
        Grow a tree with ``max_leaf_nodes`` in best-first fashion.
        Best nodes are defined as relative reduction in impurity.
        If None then unlimited number of leaf nodes.
        If not None then ``max_depth`` will be ignored.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Attributes
    ----------
    feature_importances_ : array of shape = [n_features]
        The feature importances.
        The higher, the more important the feature.
        The importance of a feature is computed as the
        (normalized) total reduction of the criterion brought
        by that feature. It is also known as the Gini importance [4]_.
    max_features_ : int,
        The inferred value of max_features.
    n_features_ : int
        The number of features when ``fit`` is performed.
    n_outputs_ : int
        The number of outputs when ``fit`` is performed.
    tree_ : Tree object
        The underlying Tree object.
    See also
    --------
    DecisionTreeClassifier
    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
    .. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
           and Regression Trees", Wadsworth, Belmont, CA, 1984.
    .. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
           Learning", Springer, 2009.
    .. [4] L. Breiman, and A. Cutler, "Random Forests",
           http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
    Examples
    --------
    >>> from sklearn.datasets import load_boston
    >>> from sklearn.cross_validation import cross_val_score
    >>> from sklearn.tree import DecisionTreeRegressor
    >>> boston = load_boston()
    >>> regressor = DecisionTreeRegressor(random_state=0)
    >>> cross_val_score(regressor, boston.data, boston.target, cv=10)
    ...                    # doctest: +SKIP
    ...
    array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
            0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
    """
    def __init__(self,
                 criterion="mse",
                 splitter="best",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features=None,
                 random_state=None,
                 max_leaf_nodes=None):
        # Pure parameter pass-through; no class_weight for regression.
        super(DecisionTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
    """An extremely randomized tree classifier.
    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set 1, this amounts to building a totally random
    decision tree.
    Warning: Extra-trees should only be used within ensemble methods.
    Read more in the :ref:`User Guide <tree>`.
    See also
    --------
    ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="gini",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None,
                 class_weight=None):
        # Identical to DecisionTreeClassifier except for the defaults:
        # splitter="random" and max_features="auto" give the extra-tree
        # randomization described in the class docstring.
        super(ExtraTreeClassifier, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            class_weight=class_weight,
            random_state=random_state)
class ExtraTreeRegressor(DecisionTreeRegressor):
    """An extremely randomized tree regressor.
    Extra-trees differ from classic decision trees in the way they are built.
    When looking for the best split to separate the samples of a node into two
    groups, random splits are drawn for each of the `max_features` randomly
    selected features and the best split among those is chosen. When
    `max_features` is set 1, this amounts to building a totally random
    decision tree.
    Warning: Extra-trees should only be used within ensemble methods.
    Read more in the :ref:`User Guide <tree>`.
    See also
    --------
    ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
    References
    ----------
    .. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
           Machine Learning, 63(1), 3-42, 2006.
    """
    def __init__(self,
                 criterion="mse",
                 splitter="random",
                 max_depth=None,
                 min_samples_split=2,
                 min_samples_leaf=1,
                 min_weight_fraction_leaf=0.,
                 max_features="auto",
                 random_state=None,
                 max_leaf_nodes=None):
        # Identical to DecisionTreeRegressor except for the defaults:
        # splitter="random" and max_features="auto" give the extra-tree
        # randomization described in the class docstring.
        super(ExtraTreeRegressor, self).__init__(
            criterion=criterion,
            splitter=splitter,
            max_depth=max_depth,
            min_samples_split=min_samples_split,
            min_samples_leaf=min_samples_leaf,
            min_weight_fraction_leaf=min_weight_fraction_leaf,
            max_features=max_features,
            max_leaf_nodes=max_leaf_nodes,
            random_state=random_state)
|
PetePriority/home-assistant | refs/heads/dev | tests/components/upnp/__init__.py | 42 | """Tests for the IGD component."""
|
krvajalmiguelangel/krvajalmiguelangel.github.io | refs/heads/master | node_modules/node-gyp/gyp/gyptest.py | 1752 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import optparse
import os
import shlex
import subprocess
import sys
class CommandRunner(object):
  """
  Executor class for commands, including "commands" implemented by
  Python functions.

  A "command" may be:
    * a string, %-substituted and tokenized with shlex before running;
    * a list of arguments, run directly via subprocess;
    * a tuple (callable, arg, ...), invoked in-process.
  """
  verbose = True   # echo each command before running it
  active = True    # when False, commands are displayed but never executed
  def __init__(self, dictionary=None):
    # A fresh dict per instance avoids the shared mutable-default pitfall
    # of the previous ``dictionary={}`` signature.
    self.subst_dictionary(dictionary or {})
  def subst_dictionary(self, dictionary):
    """Set the dictionary used for %-substitution of command strings."""
    self._subst_dictionary = dictionary
  def subst(self, string, dictionary=None):
    """
    Substitutes (via the format operator) the values in the specified
    dictionary into the specified command.
    The command can be an (action, string) tuple.  In all cases, we
    perform substitution on strings and don't worry if something isn't
    a string.  (It's probably a Python function to be executed.)
    """
    if dictionary is None:
      dictionary = self._subst_dictionary
    if dictionary:
      try:
        string = string % dictionary
      except TypeError:
        # Not a %-formattable string (e.g. a tuple command); leave as-is.
        pass
    return string
  def display(self, command, stdout=None, stderr=None):
    """Echo *command* on stdout (no-op unless self.verbose)."""
    if not self.verbose:
      return
    if type(command) == type(()):
      func = command[0]
      args = command[1:]
      s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
    elif type(command) == type([]):
      # BUG FIX: this was a separate ``if``, so for tuple commands the
      # final ``else`` clobbered the string built above with the raw
      # tuple, and ``s.endswith`` then raised AttributeError.
      # TODO: quote arguments containing spaces
      # TODO: handle meta characters?
      s = ' '.join(command)
    else:
      s = self.subst(command)
    if not s.endswith('\n'):
      s += '\n'
    sys.stdout.write(s)
    sys.stdout.flush()
  def execute(self, command, stdout=None, stderr=None):
    """
    Executes a single command.
    """
    if not self.active:
      return 0
    if type(command) == type(''):
      command = self.subst(command)
      # BUG FIX: shlex was used here but never imported at module level.
      cmdargs = shlex.split(command)
      if cmdargs[0] == 'cd':
        # 'cd' must run in-process (os.chdir) to affect later commands.
        command = (os.chdir,) + tuple(cmdargs[1:])
    if type(command) == type(()):
      func = command[0]
      args = command[1:]
      return func(*args)
    else:
      if stdout is sys.stdout:
        # Same as passing sys.stdout, except python2.4 doesn't fail on it.
        subout = None
      else:
        # Open pipe for anything else so Popen works on python2.4.
        subout = subprocess.PIPE
      if stderr is sys.stderr:
        # Same as passing sys.stderr, except python2.4 doesn't fail on it.
        suberr = None
      elif stderr is None:
        # Merge with stdout if stderr isn't specified.
        suberr = subprocess.STDOUT
      else:
        # Open pipe for anything else so Popen works on python2.4.
        suberr = subprocess.PIPE
      p = subprocess.Popen(command,
                           shell=(sys.platform == 'win32'),
                           stdout=subout,
                           stderr=suberr)
      p.wait()
      if stdout is None:
        self.stdout = p.stdout.read()
      elif stdout is not sys.stdout:
        stdout.write(p.stdout.read())
      if stderr not in (None, sys.stderr):
        stderr.write(p.stderr.read())
      return p.returncode
  def run(self, command, display=None, stdout=None, stderr=None):
    """
    Runs a single command, displaying it first.
    """
    if display is None:
      display = command
    self.display(display)
    return self.execute(command, stdout, stderr)
class Unbuffered(object):
  """Proxy for a file-like object that flushes after every write."""
  def __init__(self, fp):
    self.fp = fp
  def write(self, arg):
    """Write *arg* to the wrapped stream and flush it immediately."""
    stream = self.fp
    stream.write(arg)
    stream.flush()
  def __getattr__(self, attr):
    # Anything we don't define ourselves is delegated to the wrapped stream.
    return getattr(self.fp, attr)
# Force unbuffered stdout/stderr so test progress is visible in real time.
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
  """Return True if *f* is named like a gyp test script (gyptest*.py)."""
  return f.endswith('.py') and f.startswith('gyptest')

def find_all_gyptest_files(directory):
  """Recursively collect all gyptest*.py files under *directory*, sorted."""
  found = []
  for root, dirs, files in os.walk(directory):
    if '.svn' in dirs:
      # Prune Subversion metadata directories from the walk.
      dirs.remove('.svn')
    matches = [os.path.join(root, name) for name in files
               if is_test_name(name)]
    found.extend(matches)
  found.sort()
  return found
def main(argv=None):
  """Parse options, run the selected gyp tests, and report results.

  Returns 1 if any test failed, 0 otherwise.
  """
  if argv is None:
    argv = sys.argv
  usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
  parser = optparse.OptionParser(usage=usage)
  parser.add_option("-a", "--all", action="store_true",
                    help="run all tests")
  parser.add_option("-C", "--chdir", action="store", default=None,
                    help="chdir to the specified directory")
  parser.add_option("-f", "--format", action="store", default='',
                    help="run tests with the specified formats")
  parser.add_option("-G", '--gyp_option', action="append", default=[],
                    help="Add -G options to the gyp command line")
  parser.add_option("-l", "--list", action="store_true",
                    help="list available tests and exit")
  parser.add_option("-n", "--no-exec", action="store_true",
                    help="no execute, just print the command line")
  parser.add_option("--passed", action="store_true",
                    help="report passed tests")
  parser.add_option("--path", action="append", default=[],
                    help="additional $PATH directory")
  parser.add_option("-q", "--quiet", action="store_true",
                    help="quiet, don't print test command lines")
  opts, args = parser.parse_args(argv[1:])
  if opts.chdir:
    os.chdir(opts.chdir)
  if opts.path:
    # Prepend --path entries to $PATH so the tools they hold win lookup.
    extra_path = [os.path.abspath(p) for p in opts.path]
    extra_path = os.pathsep.join(extra_path)
    os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
  if not args:
    # Running everything requires an explicit -a so a bare invocation
    # doesn't accidentally launch the whole suite.
    if not opts.all:
      sys.stderr.write('Specify -a to get all tests.\n')
      return 1
    args = ['test']
  # Expand directory arguments to the gyptest*.py files they contain.
  tests = []
  for arg in args:
    if os.path.isdir(arg):
      tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
    else:
      if not is_test_name(os.path.basename(arg)):
        print >>sys.stderr, arg, 'is not a valid gyp test name.'
        sys.exit(1)
      tests.append(arg)
  if opts.list:
    for test in tests:
      print test
    sys.exit(0)
  CommandRunner.verbose = not opts.quiet
  CommandRunner.active = not opts.no_exec
  cr = CommandRunner()
  # Test scripts import TestGyp from test/lib via PYTHONPATH.
  os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
  if not opts.quiet:
    sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
  passed = []
  failed = []
  no_result = []
  if opts.format:
    format_list = opts.format.split(',')
  else:
    # TODO: not duplicate this mapping from pylib/gyp/__init__.py
    format_list = {
      'aix5': ['make'],
      'freebsd7': ['make'],
      'freebsd8': ['make'],
      'openbsd5': ['make'],
      'cygwin': ['msvs'],
      'win32': ['msvs', 'ninja'],
      'linux2': ['make', 'ninja'],
      'linux3': ['make', 'ninja'],
      'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'],
    }[sys.platform]
  # Run the full test list once per generator format.
  for format in format_list:
    os.environ['TESTGYP_FORMAT'] = format
    if not opts.quiet:
      sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
    gyp_options = []
    for option in opts.gyp_option:
      gyp_options += ['-G', option]
    if gyp_options and not opts.quiet:
      sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
    for test in tests:
      # Each test runs as a subprocess; exit status 2 means "no result"
      # (e.g. skipped), any other non-zero status means failure.
      status = cr.run([sys.executable, test] + gyp_options,
                      stdout=sys.stdout,
                      stderr=sys.stderr)
      if status == 2:
        no_result.append(test)
      elif status:
        failed.append(test)
      else:
        passed.append(test)
  if not opts.quiet:
    def report(description, tests):
      # Print a one-line header followed by an indented list of tests.
      if tests:
        if len(tests) == 1:
          sys.stdout.write("\n%s the following test:\n" % description)
        else:
          fmt = "\n%s the following %d tests:\n"
          sys.stdout.write(fmt % (description, len(tests)))
        sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
    if opts.passed:
      report("Passed", passed)
    report("Failed", failed)
    report("No result from", no_result)
  if failed:
    return 1
  else:
    return 0
# Script entry point: process exit status is 1 if any test failed, else 0.
if __name__ == "__main__":
  sys.exit(main())
|
dgovil/AdvancedPythonForMaya | refs/heads/master | Scene/characterRoot.py | 1 | # Again, the node we're defining isn't available under the new api, so we must use the old one
import os
from maya import OpenMaya as om
from maya import OpenMayaMPx as ompx
# A custom transform must inherit from the MPxTransform node
class CharacterRoot(ompx.MPxTransform):
    """Custom Maya transform node used as the root of a character rig.

    Carries rig metadata (an integer version number and an author string)
    as storable attributes on the node.
    """
    kNodeName = 'characterRoot'
    kNodeID = om.MTypeId(0x01013)
    # A transform can also implement a custom transformation matrix
    # This isn't necessary for our example so we'll just use the base class for it
    kMatrix = ompx.MPxTransformationMatrix
    # The matrix must also have an ID
    kMatrixID = om.MTypeId(0x01014)
    # Now lets create some place holder attributes
    version = om.MObject()
    author = om.MObject()
    @classmethod
    def creator(cls):
        """Return a new node instance wrapped as an MPx pointer for Maya."""
        return ompx.asMPxPtr(cls())
    @staticmethod
    def initialize():
        """Create the version/author metadata attributes and attach them."""
        # First lets add the version number attribute so we can easily query the rig version number
        nAttr = om.MFnNumericAttribute()
        CharacterRoot.version = nAttr.create('version', 'ver', om.MFnNumericData.kInt, 0)
        nAttr.setStorable(True)
        # Then lets store the author of the rig as meta data as well.
        # Strings are a generic typed attribute
        tAttr = om.MFnTypedAttribute()
        # To create the default value we must create it from MFnStringData
        sData = om.MFnStringData()
        defaultValue = sData.create('Dhruv Govil')
        # Finally we make our attribute
        CharacterRoot.author = tAttr.create('author', 'a', om.MFnData.kString, defaultValue)
        # Then lets add them to our node
        CharacterRoot.addAttribute(CharacterRoot.version)
        CharacterRoot.addAttribute(CharacterRoot.author)
def initializePlugin(plugin):
    """Register the characterRoot transform and expose this script directory to Maya.

    Called by Maya when the plugin is loaded.
    """
    # Add the current directory to the script path so it can find the template we wrote.
    # Raw string: the backslashes in this Windows path must not be treated as escapes.
    dirName = r'E:\Projects\AdvancedPythonForMaya\Scene'
    # Maya will look for the environment variable MAYA_SCRIPT_PATH to look for scripts.
    scriptPath = os.getenv('MAYA_SCRIPT_PATH')
    if not scriptPath:
        # Env var unset or empty: the original `dirName not in None` check
        # raised TypeError here. Just seed the variable with our directory.
        os.environ['MAYA_SCRIPT_PATH'] = dirName
    elif dirName not in scriptPath:
        # os.pathsep gives us the character that separates paths on this OS.
        os.environ['MAYA_SCRIPT_PATH'] = scriptPath + os.pathsep + dirName

    pluginFn = ompx.MFnPlugin(plugin)
    try:
        pluginFn.registerTransform(
            CharacterRoot.kNodeName,   # Name of the node
            CharacterRoot.kNodeID,     # ID for the node
            CharacterRoot.creator,     # Creator function
            CharacterRoot.initialize,  # Initialize function
            CharacterRoot.kMatrix,     # Matrix object
            CharacterRoot.kMatrixID    # Matrix ID
        )
    except Exception:
        # Surface the failure in Maya's script editor, then re-raise so the
        # load still fails loudly.
        om.MGlobal.displayError("Failed to register node: %s" % CharacterRoot.kNodeName)
        raise
def uninitializePlugin(plugin):
    """Deregister the characterRoot node type when Maya unloads the plugin."""
    fnPlugin = ompx.MFnPlugin(plugin)
    try:
        fnPlugin.deregisterNode(CharacterRoot.kNodeID)
    except:
        # Report in the script editor, then let the error propagate to Maya.
        om.MGlobal.displayError('Failed to unregister node: %s' % CharacterRoot.kNodeName)
        raise
"""
To load
import maya.cmds as mc
from Scene import characterRoot
try:
mc.delete(mc.ls(type='characterRoot'))
# Force is important
mc.unloadPlugin('characterRoot', force=True)
finally:
mc.loadPlugin(characterRoot.__file__)
mc.createNode('characterRoot', name='dhruv')
"""
|
geBros-master/GBSdk | refs/heads/master | Assets/GB/Editor/post_process.py | 2 | #!/usr/bin/env python
# Open Source mod_pbxproj.py
# https://github.com/kronenthaler/mod-pbxproj
# mode_pbxproj.py mod_pbxproj3.py
# This module can read, modify, and write a .pbxproj file from an Xcode project.
# The file is usually called project.pbxproj and can be found inside the .xcodeproj bundle.
# Last written by nairs77 2014. 12. 04
# *********** SCRIPT CUSTOMIZATION ***********
import os, shutil
import plistlib
import errno
from sys import argv
from mod_pbxproj import XcodeProject
# Command-line arguments supplied by the Unity post-build step:
#   argv[1] - path to the generated Xcode project (contains Info.plist)
#   argv[2] - directory holding the SDK frameworks to add to the project
projectPath = argv[1]
frameworkPath = argv[2]
def log(x):
    """Append *x* as a single line to the build-process log file."""
    logfile = open('GBiOSBuildProcessLog.txt', 'a')
    try:
        logfile.write(x + "\n")
    finally:
        logfile.close()
log('------------------------------------------------------------\n')
log(' Start post_process.py \n')
log('------------------------------------------------------------\n')
log('Unity Project Path --> ' + projectPath)
log('FrameWork Path --> ' + frameworkPath)

# --- Step 1: patch Info.plist with the SNS (Facebook etc.) settings -------
log('------------------------------------------------------------\n')
log(' 1. Register SNS info \n')
log('------------------------------------------------------------\n')
plist_path = os.path.join(projectPath, 'Info.plist')
# NOTE(review): plistlib.readPlist/writePlist are the legacy (Python 2 era)
# API, deprecated in modern Python in favour of plistlib.load/dump.
plist = plistlib.readPlist(plist_path)
# usage
# 1. Facebook login
# - Add CFBundleURLSchemes / fb{FACEBOOK_APP_ID}
# - Add FacebookAppID
# - Add FacebookDisplayName
# 2. Google Plus
# - Add CFBundleURLSchemes / ${Bundle identifier}
# 3. Twitter
# - Add CFBundleURLSchemes / tw.{$Bundle identifier}
bundle_identifier = plist["CFBundleIdentifier"]
facebookAppID = "379212652436292"
# URL type that lets the Facebook app call back into this app (fb<appid>://).
sns_setting = [{
    "CFBundleTypeRole" : "Editor",
    "CFBundleURLName" : "%s" % (bundle_identifier),
    "CFBundleURLSchemes" : ["fb%s" % facebookAppID]
}]
plist["CFBundleURLTypes"] = sns_setting
'''
<key>LSApplicationQueriesSchemes</key>
<array>
<string>fbapi</string>
<string>fbapi20130214</string>
<string>fbapi20130410</string>
<string>fbapi20130702</string>
<string>fbapi20131010</string>
<string>fbapi20131219</string>
<string>fbapi20140410</string>
<string>fbapi20140116</string>
<string>fbapi20150313</string>
<string>fbapi20150629</string>
<string>fbapi20160328</string>
<string>fbauth</string>
<string>fbauth2</string>
<string>fb-messenger-api20140430</string>
</array>
'''
# Schemes this app is allowed to query (iOS 9+ canOpenURL whitelist).
fb_schmes = [
    "fbauth", "fbauth2", "fbapi",
]
plist["LSApplicationQueriesSchemes"] = fb_schmes
if len(facebookAppID) > 0:
    plist["FacebookAppID"] = facebookAppID
plist["AppLovinSdkKey"] = "wsGT89gFuGFIZrLsp6MrS_TQaRU_HuBCkSftbL6UcMnAB61_DOqgOI5zkaz0S9CAbt2CC8gqUS_gZ0fnPURonX"
plistlib.writePlist(plist, plist_path)

# --- Step 2: add the SDK frameworks to the Xcode project ------------------
log('------------------------------------------------------------\n')
log(' 2. Add library (Framework) in Project \n')
log('------------------------------------------------------------\n')
project = XcodeProject.Load(projectPath + '/Unity-iPhone.xcodeproj/project.pbxproj')
log('Loaded project.pbxproj.')
result = project.add_file(frameworkPath + 'GBSdk.framework', tree='SDKROOT')
log('Added GBSdk SDK Framework')
project.add_file(frameworkPath + 'GoogleMobileAds.framework', tree='SDKROOT')
project.add_file(frameworkPath + 'UnityAds.framework', tree='SDKROOT')
project.add_file(frameworkPath + 'AppLovinSDK.framework', tree='SDKROOT')
project.add_file(frameworkPath + 'Bolts.framework', tree='SDKROOT')
project.add_file(frameworkPath + 'FBSDKCoreKit.framework', tree='SDKROOT')
project.add_file(frameworkPath + 'FBSDKLoginKit.framework', tree='SDKROOT')
project.add_file(frameworkPath + 'VungleSDK.framework', tree='SDKROOT')
project.add_file('System/Library/Frameworks/AdSupport.framework', tree='SDKROOT')
project.add_framework_search_paths(frameworkPath)

# --- Step 2-1: iOS 9 renamed dylibs to .tbd stubs -------------------------
log('------------------------------------------------------------\n')
log(' 2-1. iOS9 Delete / Changed Library path \n')
log('------------------------------------------------------------\n')
project.add_file('usr/lib/libz.tbd', tree='SDKROOT')
project.add_file('usr/lib/libsqlite3.tbd', tree='SDKROOT')

# --- Step 3: build-setting flags required by the SDKs ---------------------
log('------------------------------------------------------------\n')
log(' 3. Set Flag in Project Build Setting \n')
log('------------------------------------------------------------\n')
project.add_other_ldflags('-ObjC')
project.add_single_valued_flag('ENABLE_BITCODE', 'NO')
project.add_single_valued_flag('CLANG_ENABLE_MODULES', 'YES')
project.save()
log('------------------------------\n'
    ' Saved Project. \n'
    '------------------------------')
|
slivingston/flymovieformat | refs/heads/master | motmot/FlyMovieFormat/playfmf.py | 1 | #!/usr/bin/env python
import sys, time, os, gc, datetime, warnings
from optparse import OptionParser
import pkg_resources # from setuptools
# Extract the bundled matplotlibrc and point matplotlib at its directory
# before matplotlib is imported.
matplotlibrc = pkg_resources.resource_filename(__name__,"matplotlibrc") # trigger extraction
matplotlibrc_dir = os.path.split(matplotlibrc)[0]
os.environ['MATPLOTLIBRC'] = matplotlibrc_dir
RESFILE = pkg_resources.resource_filename(__name__,"playfmf.xrc") # trigger extraction
# py2exe stuff done
import FlyMovieFormat
import PIL.Image as Image
import motmot.imops.imops as imops
import wx
import wx.xrc as xrc
import numpy
import numpy as np
# force use of numpy by matplotlib(FlyMovieFormat uses numpy)
import matplotlib
# Select the wx backend; must happen before any pyplot/backend import.
matplotlib.use('WXAgg')
import matplotlib as mpl
import matplotlib.cm as cm
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.figure import Figure
import matplotlib.colors as mcolors
import matplotlib.ticker

# Load the XRC GUI description shipped next to this module.
RES = xrc.EmptyXmlResource()
RES.LoadFromString(open(RESFILE).read())

# Shorthand: maps an FMF pixel-format string to its bits per pixel.
bpp = FlyMovieFormat.format2bpp_func

# Piecewise-linear colormap: 0 renders black, anything >= 1/255 renders white.
_thresh = [(0.0, 0.0, 0.0),
           (1.0/255.0, 1.0, 1.0),
           (1.0, 1.0, 1.0)]
_cm_threshold_binary_data = {
    'red': _thresh,
    'green': _thresh,
    'blue': _thresh,
}
LUTSIZE = mpl.rcParams['image.lut']
threshold_cmap = mcolors.LinearSegmentedColormap('threshold',
                                                 _cm_threshold_binary_data,
                                                 LUTSIZE)
class PlotPanel(wx.Panel):
    """wx panel embedding a matplotlib figure used to display movie frames."""

    def __init__(self, parent,statbar=None):
        wx.Panel.__init__(self, parent, -1)
        self.fig = Figure((5,4), 75)
        self.canvas = FigureCanvasWxAgg(self, -1, self.fig)
        if statbar is not None:
            self.toolbar = NavigationToolbar2Wx(self.canvas) #matplotlib toolbar
            self.toolbar.set_status_bar(statbar)
            self.toolbar.Realize()
        else:
            self.toolbar = None
        #self.canvas.mpl_connect('button_press_event',self._onButton)
        # Now put all into a sizer
        sizer = wx.BoxSizer(wx.VERTICAL)
        # This way of adding to sizer allows resizing
        sizer.Add(self.canvas, 1, wx.LEFT|wx.TOP|wx.GROW)
        if self.toolbar is not None:
            # On Windows platform, default window size is incorrect, so set
            # toolbar width to figure width.
            tw, th = self.toolbar.GetSizeTuple()
            fw, fh = self.canvas.GetSizeTuple()
            # By adding toolbar in sizer, we are able to put it at the bottom
            # of the frame - so appearance is closer to GTK version.
            # As noted above, doesn't work for Mac.
            self.toolbar.SetSize(wx.Size(fw, th))
            sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND)
        self.SetSizer(sizer)
        #self.Fit()
        #self.Update()

    ## def _onButton(self,mouse_event):
    ##     if mouse_event.inaxes:
    ##         print "%.2f, %.2f"%(mouse_event.xdata, mouse_event.ydata)

    def _convert_to_displayable(self,frame):
        # Convert the raw FMF frame into RGB8/MONO8 that imshow can render.
        if self.format in ['RGB8','ARGB8','YUV411','YUV422','RGB32f']:
            frame = imops.to_rgb8(self.format,frame)
        elif self.format in ['MONO8','MONO16']:
            frame = imops.to_mono8(self.format,frame)
        elif (self.format.startswith('MONO8:') or
              self.format.startswith('MONO32f:')):
            # bayer
            frame = imops.to_rgb8(self.format,frame)
        else:
            # Unknown format: display the frame unconverted and warn.
            warnings.warn('unknown format "%s" conversion to displayable'%
                          self.format)
        #frame = self.convert_to_matplotlib(frame)
        return frame

    def init_plot_data(self,frame,format):
        """Create the image axes from the first frame of a movie."""
        self.axes = self.fig.add_subplot(111)
        a = self.axes # shorthand
        self.format = format
        frame = self._convert_to_displayable(frame)
        extent = 0, frame.shape[1]-1, frame.shape[0]-1, 0
        self.im = a.imshow( frame,
                            origin='upper',
                            interpolation='nearest',
                            extent=extent,
                            cmap=cm.pink,
                            )
        self.cbar = self.fig.colorbar(self.im)
        self.im.set_clim(0,255)
        a.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
        a.yaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
        a.fmt_xdata = str
        a.fmt_ydata = str
        if 0:
            # flipLR (x) for display
            xlim = a.get_xlim()
            a.set_xlim((xlim[1],xlim[0]))
        if self.toolbar is not None:
            self.toolbar.update()

    def GetToolBar(self):
        # You will need to override GetToolBar if you are using an
        # unmanaged toolbar in your frame
        return self.toolbar

    def onEraseBackground(self, evt):
        # this is supposed to prevent redraw flicker on some X servers...
        pass

    def set_array(self,frame):
        """Replace the displayed image data and redraw the canvas."""
        frame = self._convert_to_displayable(frame)
        self.im.set_array(frame)
        self.canvas.draw()
class MyApp(wx.App):
    """wx application hosting the FMF movie player.

    Loads the XRC-defined main frame, wires up the menus and frame
    slider, and displays movie frames through the embedded PlotPanel.
    (Python 2 code: uses ``except E,x`` and ``print >>`` syntax.)
    """

    def OnInit(self):
        self.res = RES
        # main frame and panel ---------
        self.frame = self.res.LoadFrame(None,"MainFrame")
        statbar = matplotlib.backends.backend_wx.StatusBarWx(self.frame)
        self.frame.SetStatusBar(statbar)
        self.panel = xrc.XRCCTRL(self.frame,"MainPanel")
        # menubar ----------------------
        menubar = self.res.LoadMenuBarOnFrame(self.frame,"MENUBAR")
        self.frame_offset = 0
        wx.EVT_MENU(self.frame, xrc.XRCID("set_frame_offset"),
                    self.OnSetFrameOffset)
        wx.EVT_MENU(self.frame, xrc.XRCID("export_smaller_movie"),
                    self.OnExportSmallerMovie)
        wx.EVT_MENU(self.frame, xrc.XRCID("quit_menuitem"), self.OnQuit)
        colormap_menu = wx.Menu()
        self.cmap_ids={}
        for cmap in 'gray','jet','pink','binary threshold':
            id = wx.NewId()
            colormap_menu.Append(id, cmap)
            wx.EVT_MENU(self.frame, id, self.OnColormapMenu)
            self.cmap_ids[id]=cmap
        menubar.Append(colormap_menu,"&Colormap")
        # matplotlib panel -------------
        # container for matplotlib panel (I like to make a container
        # panel for our panel so I know where it'll go when in XRCed.)
        self.plot_container = xrc.XRCCTRL(self.frame,"plot_container_panel")
        sizer = wx.BoxSizer(wx.VERTICAL)
        # matplotlib panel itself
        self.plotpanel = PlotPanel(self.plot_container,statbar=statbar)
        label = xrc.XRCCTRL(self.frame,"time_abs_label")
        #label.SetLabel('%.3f (sec)'%(timestamp,))
        # wx boilerplate
        sizer.Add(self.plotpanel, 1, wx.EXPAND)
        self.plot_container.SetSizer(sizer)
        # slider ------------------
        slider = xrc.XRCCTRL(self.frame,"frame_slider")
        wx.EVT_COMMAND_SCROLL(slider, slider.GetId(), self.OnScroll)
        self.slider = slider
        # final setup ------------------
        sizer = self.panel.GetSizer()
        self.frame.SetSize((800,800))
        self.frame.Show(1)
        self.SetTopWindow(self.frame)
        self._load_plugins()
        return True

    def _load_plugins(self):
        # Discover exporter plugins registered through setuptools entry points.
        PluginClasses = []
        pkg_env = pkg_resources.Environment()
        for name in pkg_env:
            egg = pkg_env[name][0]
            modules = []
            for name in egg.get_entry_map('motmot.FlyMovieFormat.exporter_plugins'):
                egg.activate()
                entry_point = egg.get_entry_info('motmot.FlyMovieFormat.exporter_plugins', name)
                try:
                    PluginClass = entry_point.load()
                except Exception,x:
                    # Broken plugins are skipped with a warning unless
                    # PLAYFMF_RAISE_ERRORS asks for a hard failure.
                    if int(os.environ.get('PLAYFMF_RAISE_ERRORS','0')):
                        raise x
                    else:
                        import warnings
                        warnings.warn('could not load plugin %s: %s'%(str(entry_point),str(x)))
                        continue
                PluginClasses.append( PluginClass )
                modules.append(entry_point.module_name)
        # make instances of plugins
        self.plugins = [PluginClass() for PluginClass in PluginClasses]

    def OnSetFrameOffset(self, event):
        """Menu handler: ask the user for a new display frame offset."""
        dlg=wx.TextEntryDialog(self.frame, 'Frame offset',
                               'Set frame offset',str(self.frame_offset))
        try:
            if dlg.ShowModal() == wx.ID_OK:
                new_frame_offset = int(dlg.GetValue())
                self.update_frame_offset(new_frame_offset)
        finally:
            dlg.Destroy()

    def update_frame_offset(self, new_frame_offset):
        """Re-range the slider so displayed numbers include the offset."""
        frame_number = self.slider.GetValue() - self.frame_offset
        self.frame_offset = new_frame_offset

        slider = self.slider
        slider.SetRange( self.frame_offset+0, max(1,self.frame_offset+self.n_frames-1 ))
        slider.SetValue( self.frame_offset+frame_number )

    def OnColormapMenu(self, event):
        cmap_name = self.cmap_ids[event.GetId()] # e.g. 'pink', 'jet', etc.
        if cmap_name == 'binary threshold':
            cmap = threshold_cmap
        else:
            cmap = getattr(cm,cmap_name)
        self.plotpanel.im.set_cmap(cmap)
        # update display
        self.OnScroll(None)

    def OnNewMovie(self,flymovie,
                   corruption_fix=False,
                   force_format=None,
                   ):
        """Attach a FlyMovie to the GUI and show its first frame.

        With corruption_fix, partial frames are tolerated and the movie is
        shortened to its last readable frame.
        """
        if corruption_fix:
            self.allow_partial_frames=True
        else:
            self.allow_partial_frames=False
        self.axes = self.plotpanel.fig.add_subplot(111) # not really new, just gets axes
        a = self.axes
        a.set_title('%s (%s)'%(flymovie.filename,flymovie.get_format()))
        self.fly_movie = flymovie
        self.n_frames = self.fly_movie.get_n_frames()
        frame,timestamp = self.fly_movie.get_frame(
            0, allow_partial_frames=self.allow_partial_frames)
        if corruption_fix:
            # Walk back from the claimed end until a readable frame is found.
            test_frame = self.n_frames
            while 1:
                test_frame -= 1
                try:
                    self.fly_movie.get_frame(
                        test_frame,
                        allow_partial_frames=True)
                except FlyMovieFormat.NoMoreFramesException:
                    print >> sys.stderr,'no frame %d, shortening movie'%test_frame
                else:
                    # if we get here, it means we had a good frame
                    self.n_frames = test_frame+1
                    break
        self.frame_shape = frame.shape
        self.first_timestamp=timestamp

        frame_number = 0
        slider = self.slider
        slider.SetRange( self.frame_offset+0, max(self.frame_offset+self.n_frames-1,1) )
        slider.SetValue( self.frame_offset+frame_number )

        # window title
        self.frame.SetTitle('playfmf: %s'%(self.fly_movie.filename,))
        if force_format is None:
            self.format = self.fly_movie.get_format()
        else:
            self.format = force_format
        self.width_height = (self.fly_movie.get_width(),
                             self.fly_movie.get_height())
        self.plotpanel.init_plot_data(frame,self.format)
        self.plot_container.Layout()
        self.OnScroll(None)

    def OnScroll(self,event):
        """Display the frame selected by the slider; wrap to 0 past the end."""
        frame_number = self.slider.GetValue() - self.frame_offset
        try:
            frame,timestamp = self.fly_movie.get_frame(
                frame_number,
                allow_partial_frames=self.allow_partial_frames)
        except FlyMovieFormat.NoMoreFramesException:
            frame_number = 0 - self.frame_offset
            self.slider.SetValue(frame_number)
            frame,timestamp = self.fly_movie.get_frame(
                frame_number,
                allow_partial_frames=self.allow_partial_frames)
        self.plotpanel.set_array(frame)
        label = xrc.XRCCTRL(self.frame,"time_rel_label")
        label.SetLabel('%.1f (msec)'%((timestamp-self.first_timestamp)*1000.0,))
        label = xrc.XRCCTRL(self.frame,"time_abs_label")
        try:
            my_datetime = datetime.datetime.fromtimestamp(timestamp)
            label.SetLabel('%.3f (sec) %s'%(timestamp, my_datetime.isoformat()))
        except ValueError,err:
            # Timestamp outside datetime's representable range.
            label.SetLabel('%.3f (sec)'%(timestamp,))

    def OnQuit(self, event):
        self.frame.Close(True)

    def OnExportSmallerMovie(self, event):
        """Menu handler: export a cropped/subsampled copy via a plugin saver."""
        def OnCancelExportSmallerMovie(event):
            dlg.Close(True)
        def OnSaveExportSmallerMovie(event):
            # Read crop rectangle / frame range / options from the dialog.
            xmin = int(xrc.XRCCTRL(dlg,"xmin_textctrl").GetValue())
            xmax = int(xrc.XRCCTRL(dlg,"xmax_textctrl").GetValue())
            ymin = int(xrc.XRCCTRL(dlg,"ymin_textctrl").GetValue())
            ymax = int(xrc.XRCCTRL(dlg,"ymax_textctrl").GetValue())
            start = int(xrc.XRCCTRL(dlg,"start_frame").GetValue())
            stop = int(xrc.XRCCTRL(dlg,"stop_frame").GetValue())
            interval = int(xrc.XRCCTRL(dlg,"interval_frames").GetValue())
            flipLR = xrc.XRCCTRL(dlg,"flipLR").GetValue()
            description = xrc.XRCCTRL(dlg,"movie_format_choice").GetStringSelection()
            assert xmin<=xmax
            assert ymin<=ymax
            # Find the plugin whose description matches the chosen format.
            for plugin in self.plugins:
                if description == plugin.get_description():
                    break
            assert description == plugin.get_description()
            saver = plugin.get_saver(dlg,self.format,self.width_height)
            dlg.Close()
            for i in range(start,stop+1,interval):
                orig_frame,timestamp = self.fly_movie.get_frame(
                    i,
                    allow_partial_frames=self.allow_partial_frames)
                if orig_frame.dtype == np.uint8:
                    # usual case: frame encoded as uint8
                    crop_xmin = xmin*bpp(self.format)//8
                    crop_xmax = (xmax+1)*bpp(self.format)//8
                else:
                    # sometimes (e.g. YUV422) frame has alternate dtype
                    crop_xmin = xmin
                    crop_xmax = (xmax+1)
                save_frame = orig_frame[ymin:ymax+1,crop_xmin:crop_xmax]
                if flipLR:
                    save_frame = save_frame[:,::-1]
                saver.save( save_frame, timestamp )
            saver.close()
        dlg = self.res.LoadDialog(self.frame,"EXPORT_DIALOG")
        format_choice_ctrl = xrc.XRCCTRL(dlg,"movie_format_choice")
        for plugin in self.plugins:
            description = plugin.get_description()
            format_choice_ctrl.Append(description)
        xrc.XRCCTRL(dlg,"xmax_textctrl").SetValue(str(self.width_height[0]-1))
        xrc.XRCCTRL(dlg,"ymax_textctrl").SetValue(str(self.width_height[1]-1))
        xrc.XRCCTRL(dlg,"stop_frame").SetValue(str(self.n_frames-1))
        cancel_button=xrc.XRCCTRL(dlg,"cancel_button")
        wx.EVT_BUTTON(dlg, cancel_button.GetId(),OnCancelExportSmallerMovie)
        save_button=xrc.XRCCTRL(dlg,"save_button")
        wx.EVT_BUTTON(dlg, save_button.GetId(),OnSaveExportSmallerMovie)
        try:
            dlg.ShowModal()
        finally:
            dlg.Destroy()
def main():
    """Parse command-line options, open the movie, and run the GUI."""
    opt_parser = OptionParser('%prog FILE [options]')
    opt_parser.add_option("--disable-corruption-fix",
                          action='store_false', default=True,
                          dest='corruption_fix',
                          help="disable automatic fixing of corrupted .fmf files")
    opt_parser.add_option("--frame-offset", type="int",
                          default=0,
                          help="add an integer offset to frame numbers")
    opt_parser.add_option("--format", type="string", help="force the movie coding")
    options, args = opt_parser.parse_args()

    # A movie filename is mandatory; without one just show usage.
    if not args:
        opt_parser.print_help()
        return
    movie_filename = args[0]

    # On Windows and Mac, redirect wx's stdout/stderr into a log file.
    if sys.platform.startswith(('win', 'darwin')):
        app_kwargs = dict(redirect=True, filename='playfmf.log')
    else:
        app_kwargs = {}

    app = MyApp(**app_kwargs)
    movie = FlyMovieFormat.FlyMovie(movie_filename)
    app.OnNewMovie(movie,
                   corruption_fix=options.corruption_fix,
                   force_format=options.format,
                   )
    app.update_frame_offset(options.frame_offset)
    app.MainLoop()
# Run the player when executed as a script.
if __name__ == '__main__':
    main()
|
Medigate/cutiuta-server | refs/heads/master | cutiuta-server/env/lib/python3.4/site-packages/wheel/__main__.py | 565 | """
Wheel command line tool (enable python -m wheel syntax)
"""
import sys
def main():  # needed for console script
    """Entry point for ``python -m wheel``; delegates to wheel.tool.main()."""
    if __package__ == '':
        # Running as 'python wheel-0.9.whl/wheel': make the archive root
        # importable before importing the package.
        import os.path
        archive_root = os.path.dirname(os.path.dirname(__file__))
        sys.path.insert(0, archive_root)
    import wheel.tool
    sys.exit(wheel.tool.main())
# Allow direct execution of this module file as well.
if __name__ == "__main__":
    sys.exit(main())
|
akiss77/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/wptserve/wptserve/request.py | 44 | import base64
import cgi
import Cookie
import StringIO
import tempfile
from six.moves.urllib.parse import parse_qsl, urlsplit
from . import stash
from .utils import HTTPException
# Module-private sentinel distinguishing "no default supplied" from None.
missing = object()
class Server(object):
    """Data about the server environment

    .. attribute:: config

       Environment configuration information with information about the
       various servers running, their hostnames and ports.

    .. attribute:: stash

       Stash object holding state stored on the server between requests.
    """
    config = None

    def __init__(self, request):
        self._request = request
        self._stash = None  # created lazily on first access

    @property
    def stash(self):
        # Lazily construct the Stash on first use; reuse it afterwards.
        if self._stash is not None:
            return self._stash
        address, authkey = stash.load_env_config()
        self._stash = stash.Stash(self._request.url_parts.path, address, authkey)
        return self._stash
class InputFile(object):
    """File-like object providing a seekable view of the request body.

    Bytes read from the underlying (non-seekable) wire file are spooled
    into a buffer - in memory for small bodies, a temporary file for
    large ones - so earlier data can be re-read after seeking backwards.
    """
    # Bodies larger than this are spooled to disk instead of memory.
    max_buffer_size = 1024*1024

    def __init__(self, rfile, length):
        """File-like object used to provide a seekable view of request body data"""
        self._file = rfile
        self.length = length

        self._file_position = 0

        if length > self.max_buffer_size:
            # "w+b" is a valid binary read/write mode on all supported
            # Pythons; the previous "rw+b" is rejected (ValueError) by
            # Python 3's io layer and only worked by accident on Python 2.
            self._buf = tempfile.TemporaryFile(mode="w+b")
        else:
            self._buf = StringIO.StringIO()

    @property
    def _buf_position(self):
        rv = self._buf.tell()
        assert rv <= self._file_position
        return rv

    def read(self, bytes=-1):
        """Read up to *bytes* bytes (all remaining data if negative)."""
        assert self._buf_position <= self._file_position

        if bytes < 0:
            bytes = self.length - self._buf_position
        bytes_remaining = min(bytes, self.length - self._buf_position)
        if bytes_remaining == 0:
            return ""

        # First serve whatever is already buffered, then pull the rest
        # from the wire and append it to the buffer.
        if self._buf_position != self._file_position:
            buf_bytes = min(bytes_remaining, self._file_position - self._buf_position)
            old_data = self._buf.read(buf_bytes)
            bytes_remaining -= buf_bytes
        else:
            old_data = ""

        assert self._buf_position == self._file_position, (
            "Before reading buffer position (%i) didn't match file position (%i)" %
            (self._buf_position, self._file_position))
        new_data = self._file.read(bytes_remaining)
        self._buf.write(new_data)
        self._file_position += bytes_remaining
        assert self._buf_position == self._file_position, (
            "After reading buffer position (%i) didn't match file position (%i)" %
            (self._buf_position, self._file_position))

        return old_data + new_data

    def tell(self):
        """Return the current read position within the body."""
        return self._buf_position

    def seek(self, offset):
        """Move the read position to *offset*, reading ahead if necessary."""
        if offset > self.length or offset < 0:
            raise ValueError
        if offset <= self._file_position:
            self._buf.seek(offset)
        else:
            # Seeking past buffered data: read (and buffer) the gap.
            self.read(offset - self._file_position)

    def readline(self, max_bytes=None):
        """Read a single line, up to *max_bytes* bytes."""
        if max_bytes is None:
            max_bytes = self.length - self._buf_position

        # Fast path: the line may be fully buffered already.
        if self._buf_position < self._file_position:
            data = self._buf.readline(max_bytes)
            if data.endswith("\n") or len(data) == max_bytes:
                return data
        else:
            data = ""

        assert self._buf_position == self._file_position

        # Slow path: scan forward from the wire looking for the newline.
        initial_position = self._file_position
        found = False
        buf = []
        max_bytes -= len(data)
        while not found:
            readahead = self.read(min(2, max_bytes))
            max_bytes -= len(readahead)
            for i, c in enumerate(readahead):
                if c == "\n":
                    buf.append(readahead[:i+1])
                    found = True
                    break
            if not found:
                buf.append(readahead)
            if not readahead or not max_bytes:
                break
        new_data = "".join(buf)
        data += new_data
        # Rewind to just past the returned line so over-read bytes stay buffered.
        self.seek(initial_position + len(new_data))
        return data

    def readlines(self):
        """Return a list of all remaining lines."""
        rv = []
        while True:
            data = self.readline()
            if data:
                rv.append(data)
            else:
                break
        return rv

    def next(self):
        # Python 2 iterator protocol.
        data = self.readline()
        if data:
            return data
        else:
            raise StopIteration

    def __iter__(self):
        return self
class Request(object):
    """Object representing a HTTP request.

    .. attribute:: doc_root

       The local directory to use as a base when resolving paths

    .. attribute:: route_match

       Regexp match object from matching the request path to the route
       selected for the request.

    .. attribute:: protocol_version

       HTTP version specified in the request.

    .. attribute:: method

       HTTP method in the request.

    .. attribute:: request_path

       Request path as it appears in the HTTP request.

    .. attribute:: url_base

       The prefix part of the path; typically / unless the handler has a url_base set

    .. attribute:: url

       Absolute URL for the request.

    .. attribute:: url_parts

       Parts of the requested URL as obtained by urlparse.urlsplit(path)

    .. attribute:: request_line

       Raw request line

    .. attribute:: headers

       RequestHeaders object providing a dictionary-like representation of
       the request headers.

    .. attribute:: raw_headers

       Dictionary of non-normalized request headers.

    .. attribute:: body

       Request body as a string

    .. attribute:: raw_input

       File-like object representing the body of the request.

    .. attribute:: GET

       MultiDict representing the parameters supplied with the request.
       Note that these may be present on non-GET requests; the name is
       chosen to be familiar to users of other systems such as PHP.

    .. attribute:: POST

       MultiDict representing the request body parameters. Most parameters
       are present as string values, but file uploads have file-like
       values.

    .. attribute:: cookies

       Cookies object representing cookies sent with the request with a
       dictionary-like interface.

    .. attribute:: auth

       Object with username and password properties representing any
       credentials supplied using HTTP authentication.

    .. attribute:: server

       Server object containing information about the server environment.
    """

    def __init__(self, request_handler):
        self.doc_root = request_handler.server.router.doc_root
        self.route_match = None  # Set by the router

        self.protocol_version = request_handler.protocol_version
        self.method = request_handler.command

        # Reconstruct the absolute URL from scheme/Host header/server address.
        scheme = request_handler.server.scheme
        host = request_handler.headers.get("Host")
        port = request_handler.server.server_address[1]

        if host is None:
            host = request_handler.server.server_address[0]
        else:
            if ":" in host:
                host, port = host.split(":", 1)

        self.request_path = request_handler.path
        self.url_base = "/"

        if self.request_path.startswith(scheme + "://"):
            # Absolute-form request target (e.g. proxied requests).
            self.url = request_handler.path
        else:
            self.url = "%s://%s:%s%s" % (scheme,
                                         host,
                                         port,
                                         self.request_path)
        self.url_parts = urlsplit(self.url)

        self.raw_headers = request_handler.headers
        self.request_line = request_handler.raw_requestline

        self._headers = None

        self.raw_input = InputFile(request_handler.rfile,
                                   int(self.headers.get("Content-Length", 0)))
        # The attributes below are parsed lazily on first property access.
        self._body = None

        self._GET = None
        self._POST = None
        self._cookies = None
        self._auth = None

        self.server = Server(self)

    def __repr__(self):
        return "<Request %s %s>" % (self.method, self.url)

    @property
    def GET(self):
        # Lazily parse the query string into a MultiDict.
        if self._GET is None:
            params = parse_qsl(self.url_parts.query, keep_blank_values=True)
            self._GET = MultiDict()
            for key, value in params:
                self._GET.add(key, value)
        return self._GET

    @property
    def POST(self):
        if self._POST is None:
            #Work out the post parameters
            pos = self.raw_input.tell()
            self.raw_input.seek(0)
            fs = cgi.FieldStorage(fp=self.raw_input,
                                  environ={"REQUEST_METHOD": self.method},
                                  headers=self.headers,
                                  keep_blank_values=True)
            self._POST = MultiDict.from_field_storage(fs)
            # Restore the body read position for subsequent consumers.
            self.raw_input.seek(pos)
        return self._POST

    @property
    def cookies(self):
        if self._cookies is None:
            parser = Cookie.BaseCookie()
            cookie_headers = self.headers.get("cookie", "")
            parser.load(cookie_headers)
            cookies = Cookies()
            for key, value in parser.iteritems():
                cookies[key] = CookieValue(value)
            self._cookies = cookies
        return self._cookies

    @property
    def headers(self):
        if self._headers is None:
            self._headers = RequestHeaders(self.raw_headers)
        return self._headers

    @property
    def body(self):
        if self._body is None:
            pos = self.raw_input.tell()
            self.raw_input.seek(0)
            self._body = self.raw_input.read()
            self.raw_input.seek(pos)
        return self._body

    @property
    def auth(self):
        if self._auth is None:
            self._auth = Authentication(self.headers)
        return self._auth
class RequestHeaders(dict):
    """Dictionary-like API for accessing request headers.

    Keys are stored lower-cased; each key maps internally to the list of
    all values received under that (case-insensitive) header name.
    """

    def __init__(self, items):
        """Build the header map from an object with keys()/values().

        Repeated header names (e.g. multiple Cookie headers) are merged
        into a single entry holding every value.
        """
        for key, value in zip(items.keys(), items.values()):
            key = key.lower()
            if key in self:
                # Append to the raw value list via dict.__getitem__. Using
                # self[key] here went through the overridden __getitem__,
                # which returns a bare string for single-valued keys, so the
                # first duplicate header raised AttributeError on .append().
                dict.__getitem__(self, key).append(value)
            else:
                dict.__setitem__(self, key, [value])

    def __getitem__(self, key):
        """Get all headers of a certain (case-insensitive) name. If there is
        more than one, the values are returned comma separated"""
        values = dict.__getitem__(self, key.lower())
        if len(values) == 1:
            return values[0]
        else:
            return ", ".join(values)

    def __setitem__(self, name, value):
        # Headers are read-only once parsed.
        raise Exception

    def get(self, key, default=None):
        """Get a string representing all headers with a particular value,
        with multiple headers separated by a comma. If no header is found
        return a default value

        :param key: The header name to look up (case-insensitive)
        :param default: The value to return in the case of no match
        """
        try:
            return self[key]
        except KeyError:
            return default

    def get_list(self, key, default=missing):
        """Get all the header values for a particular field name as
        a list"""
        try:
            return dict.__getitem__(self, key.lower())
        except KeyError:
            if default is not missing:
                return default
            else:
                raise

    def __contains__(self, key):
        # Case-insensitive membership test.
        return dict.__contains__(self, key.lower())

    def iteritems(self):
        # Yields (name, comma-joined-value) pairs, mirroring __getitem__.
        for item in self:
            yield item, self[item]

    def itervalues(self):
        for item in self:
            yield self[item]
class CookieValue(object):
    """Representation of cookies.

    Note that cookies are considered read-only and the string value
    of the cookie will not change if you update the field values.
    However this is not enforced.

    .. attribute:: key

       The name of the cookie.

    .. attribute:: value

       The value of the cookie

    .. attribute:: expires

       The expiry date of the cookie

    .. attribute:: path

       The path of the cookie

    .. attribute:: comment

       The comment of the cookie.

    .. attribute:: domain

       The domain with which the cookie is associated

    .. attribute:: max_age

       The max-age value of the cookie.

    .. attribute:: secure

       Whether the cookie is marked as secure

    .. attribute:: httponly

       Whether the cookie is marked as httponly
    """

    # Morsel attributes copied onto the instance ("-" becomes "_").
    _COPIED_ATTRS = ("expires", "path",
                     "comment", "domain", "max-age",
                     "secure", "version", "httponly")

    def __init__(self, morsel):
        self.key = morsel.key
        self.value = morsel.value
        for morsel_attr in self._COPIED_ATTRS:
            setattr(self, morsel_attr.replace("-", "_"), morsel[morsel_attr])
        # Cache the serialized form once; it never changes afterwards.
        self._str = morsel.OutputString()

    def __str__(self):
        return self._str

    __repr__ = __str__

    def __eq__(self, other):
        """Equality comparison for cookies. Compares to other cookies
        based on value alone and on non-cookies based on the equality
        of self.value with the other object so that a cookie with value
        "ham" compares equal to the string "ham"
        """
        return self.value == getattr(other, "value", other)
class MultiDict(dict):
    """Dictionary type that holds multiple values for each
    key"""
    #TODO: this should perhaps also order the keys

    def __init__(self):
        pass

    def __setitem__(self, name, value):
        # Plain assignment replaces any previous values with a fresh list.
        dict.__setitem__(self, name, [value])

    def add(self, name, value):
        # Append to the existing value list, creating it on first use.
        try:
            dict.__getitem__(self, name).append(value)
        except KeyError:
            dict.__setitem__(self, name, [value])

    def __getitem__(self, key):
        """Get the first value with a given key"""
        #TODO: should this instead be the last value?
        return self.first(key)

    def first(self, key, default=missing):
        """Get the first value with a given key

        :param key: The key to lookup
        :param default: The default to return if key is
                        not found (throws if nothing is
                        specified)
        """
        values = dict.get(self, key)
        if values:
            return values[0]
        if default is not missing:
            return default
        raise KeyError

    def last(self, key, default=missing):
        """Get the last value with a given key

        :param key: The key to lookup
        :param default: The default to return if key is
                        not found (throws if nothing is
                        specified)
        """
        values = dict.get(self, key)
        if values:
            return values[-1]
        if default is not missing:
            return default
        raise KeyError

    def get_list(self, key):
        """Get all values with a given key as a list

        :param key: The key to lookup
        """
        return dict.__getitem__(self, key)

    @classmethod
    def from_field_storage(cls, fs):
        """Build a MultiDict from a cgi.FieldStorage instance."""
        self = cls()
        if fs.list is None:
            return self
        for key in fs:
            values = fs[key]
            if not isinstance(values, list):
                values = [values]
            for value in values:
                # File uploads keep the file-like object; plain fields
                # are stored as their string value.
                self.add(key, value if value.filename else value.value)
        return self
class Cookies(MultiDict):
    """MultiDict specialised for Cookie values.

    Plain lookup returns the most recently stored cookie for a name,
    mirroring how later cookies override earlier ones.
    """

    def __init__(self):
        pass

    def __getitem__(self, key):
        # Last-wins semantics, unlike the base class's first().
        return self.last(key)
class Authentication(object):
    """Parses the HTTP Authorization header into credentials.

    .. attribute:: username

        The username supplied in the HTTP Authorization
        header, or None

    .. attribute:: password

        The password supplied in the HTTP Authorization
        header, or None
    """
    def __init__(self, headers):
        self.username = None
        self.password = None
        # Map of supported scheme name -> decoder; only Basic is handled.
        auth_schemes = {"Basic": self.decode_basic}
        if "authorization" in headers:
            header = headers.get("authorization")
            # Header format is "<scheme> <payload>", e.g. "Basic dXNlcjpwdw==".
            auth_type, data = header.split(" ", 1)
            if auth_type in auth_schemes:
                self.username, self.password = auth_schemes[auth_type](data)
            else:
                raise HTTPException(400, "Unsupported authentication scheme %s" % auth_type)
    def decode_basic(self, data):
        """Decode a Basic auth payload into a (username, password) pair."""
        # NOTE(review): base64.decodestring is deprecated (removed in Python
        # 3.9); decodebytes is the modern name -- confirm target Python version.
        decoded_data = base64.decodestring(data)
        # NOTE(review): a payload without ':' yields a single-element list, so
        # the caller's tuple unpacking raises ValueError -- consider a 400 here.
        return decoded_data.split(":", 1)
|
JavaRabbit/CS496_capstone | refs/heads/master | appengine/standard/localtesting/task_queue_test.py | 9 | # Copyright 2015 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START taskqueue]
import operator
import os
import unittest
from google.appengine.api import taskqueue
from google.appengine.ext import deferred
from google.appengine.ext import testbed
class TaskQueueTestCase(unittest.TestCase):
    """Demonstrates unit-testing App Engine task queues via testbed stubs."""
    def setUp(self):
        """Activate a testbed and initialise the task-queue stub."""
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        # root_path must be set to the location of queue.yaml.
        # Otherwise, only the 'default' queue will be available.
        self.testbed.init_taskqueue_stub(
            root_path=os.path.join(os.path.dirname(__file__), 'resources'))
        self.taskqueue_stub = self.testbed.get_stub(
            testbed.TASKQUEUE_SERVICE_NAME)
    def tearDown(self):
        # Restores the stubbed APIs to their real implementations.
        self.testbed.deactivate()
    def testTaskAddedToQueue(self):
        """A task added through the API is visible via the stub."""
        taskqueue.Task(name='my_task', url='/url/of/my/task/').add()
        tasks = self.taskqueue_stub.get_filtered_tasks()
        self.assertEqual(len(tasks), 1)
        self.assertEqual(tasks[0].name, 'my_task')
    # [END taskqueue]
    # [START filtering]
    def testFiltering(self):
        """get_filtered_tasks supports filtering by name, URL and queue."""
        taskqueue.Task(name='task_one', url='/url/of/task/1/').add('queue-1')
        taskqueue.Task(name='task_two', url='/url/of/task/2/').add('queue-2')
        # All tasks
        tasks = self.taskqueue_stub.get_filtered_tasks()
        self.assertEqual(len(tasks), 2)
        # Filter by name
        tasks = self.taskqueue_stub.get_filtered_tasks(name='task_one')
        self.assertEqual(len(tasks), 1)
        self.assertEqual(tasks[0].name, 'task_one')
        # Filter by URL
        tasks = self.taskqueue_stub.get_filtered_tasks(url='/url/of/task/1/')
        self.assertEqual(len(tasks), 1)
        self.assertEqual(tasks[0].name, 'task_one')
        # Filter by queue
        tasks = self.taskqueue_stub.get_filtered_tasks(queue_names='queue-1')
        self.assertEqual(len(tasks), 1)
        self.assertEqual(tasks[0].name, 'task_one')
        # Multiple queues
        tasks = self.taskqueue_stub.get_filtered_tasks(
            queue_names=['queue-1', 'queue-2'])
        self.assertEqual(len(tasks), 2)
    # [END filtering]
    # [START deferred]
    def testTaskAddedByDeferred(self):
        """deferred.defer enqueues a task whose payload can be re-run."""
        deferred.defer(operator.add, 1, 2)
        tasks = self.taskqueue_stub.get_filtered_tasks()
        self.assertEqual(len(tasks), 1)
        # Executing the payload runs the deferred callable.
        result = deferred.run(tasks[0].payload)
        self.assertEqual(result, 3)
    # [END deferred]
if __name__ == '__main__':
unittest.main()
|
risicle/django | refs/heads/master | tests/auth_tests/test_basic.py | 328 | from __future__ import unicode_literals
from django.apps import apps
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.auth.tests.custom_user import CustomUser
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import receiver
from django.test import TestCase, override_settings
from django.test.signals import setting_changed
from django.utils import translation
@receiver(setting_changed)
def user_model_swapped(**kwargs):
    """Reset cached user-model state when AUTH_USER_MODEL is overridden.

    Connected to the setting_changed signal so tests that use
    override_settings(AUTH_USER_MODEL=...) observe a consistent registry.
    """
    if kwargs['setting'] == 'AUTH_USER_MODEL':
        from django.db.models.manager import ensure_default_manager
        # Reset User manager
        setattr(User, 'objects', User._default_manager)
        ensure_default_manager(User)
        # Invalidate the app registry's model caches.
        apps.clear_cache()
class BasicTestCase(TestCase):
    """Core behaviour of the default User model and of user-model swapping."""
    def test_user(self):
        "Check that users can be created and can set their password"
        u = User.objects.create_user('testuser', 'test@example.com', 'testpw')
        self.assertTrue(u.has_usable_password())
        self.assertFalse(u.check_password('bad'))
        self.assertTrue(u.check_password('testpw'))
        # Check we can manually set an unusable password
        u.set_unusable_password()
        u.save()
        self.assertFalse(u.check_password('testpw'))
        self.assertFalse(u.has_usable_password())
        u.set_password('testpw')
        self.assertTrue(u.check_password('testpw'))
        # Setting None also marks the password unusable
        u.set_password(None)
        self.assertFalse(u.has_usable_password())
        # Check username getter
        self.assertEqual(u.get_username(), 'testuser')
        # Check authentication/permissions
        self.assertTrue(u.is_authenticated())
        self.assertFalse(u.is_staff)
        self.assertTrue(u.is_active)
        self.assertFalse(u.is_superuser)
        # Check API-based user creation with no password
        u2 = User.objects.create_user('testuser2', 'test2@example.com')
        self.assertFalse(u2.has_usable_password())
    def test_user_no_email(self):
        "Check that users can be created without an email"
        u = User.objects.create_user('testuser1')
        self.assertEqual(u.email, '')
        u2 = User.objects.create_user('testuser2', email='')
        self.assertEqual(u2.email, '')
        u3 = User.objects.create_user('testuser3', email=None)
        self.assertEqual(u3.email, '')
    def test_anonymous_user(self):
        "Check the properties of the anonymous user"
        a = AnonymousUser()
        self.assertEqual(a.pk, None)
        self.assertEqual(a.username, '')
        self.assertEqual(a.get_username(), '')
        self.assertFalse(a.is_authenticated())
        self.assertFalse(a.is_staff)
        self.assertFalse(a.is_active)
        self.assertFalse(a.is_superuser)
        # Anonymous users expose empty relation sets rather than None
        self.assertEqual(a.groups.all().count(), 0)
        self.assertEqual(a.user_permissions.all().count(), 0)
    def test_superuser(self):
        "Check the creation and properties of a superuser"
        super = User.objects.create_superuser('super', 'super@example.com', 'super')
        self.assertTrue(super.is_superuser)
        self.assertTrue(super.is_active)
        self.assertTrue(super.is_staff)
    def test_get_user_model(self):
        "The current user model can be retrieved"
        self.assertEqual(get_user_model(), User)
    @override_settings(AUTH_USER_MODEL='auth.CustomUser')
    def test_swappable_user(self):
        "The current user model can be swapped out for another"
        self.assertEqual(get_user_model(), CustomUser)
        # The default manager is unavailable on the swapped-out model
        with self.assertRaises(AttributeError):
            User.objects.all()
    @override_settings(AUTH_USER_MODEL='badsetting')
    def test_swappable_user_bad_setting(self):
        "The alternate user setting must point to something in the format app.model"
        with self.assertRaises(ImproperlyConfigured):
            get_user_model()
    @override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
    def test_swappable_user_nonexistent_model(self):
        "The current user model must point to an installed model"
        with self.assertRaises(ImproperlyConfigured):
            get_user_model()
    def test_user_verbose_names_translatable(self):
        "Default User model verbose names are translatable (#19945)"
        with translation.override('en'):
            self.assertEqual(User._meta.verbose_name, 'user')
            self.assertEqual(User._meta.verbose_name_plural, 'users')
        with translation.override('es'):
            self.assertEqual(User._meta.verbose_name, 'usuario')
            self.assertEqual(User._meta.verbose_name_plural, 'usuarios')
|
Reagankm/KnockKnock | refs/heads/master | venv/lib/python3.4/site-packages/nltk/test/unit/test_seekable_unicode_stream_reader.py | 27 | # -*- coding: utf-8 -*-
"""
The following test performs a random series of reads, seeks, and
tells, and checks that the results are consistent.
"""
from __future__ import absolute_import, unicode_literals
import random
import functools
from io import BytesIO
from nltk.corpus.reader import SeekableUnicodeStreamReader
def check_reader(unicode_string, encoding, n=1000):
    """Randomised consistency check for SeekableUnicodeStreamReader.

    Runs ``n`` random tell/seek/read operations over *unicode_string*
    encoded with *encoding*, asserting that every read returns a prefix
    of the text known to start at the current offset.  Returns 'passed'.
    """
    encoded = unicode_string.encode(encoding)
    strlen = len(unicode_string)
    reader = SeekableUnicodeStreamReader(BytesIO(encoded), encoding)

    # Record the stream offset at which every character starts (plus EOF).
    chars = []
    while True:
        offset = reader.tell()
        chars.append((offset, reader.read(1)))
        if chars[-1][1] == '':
            break

    # strings[offset] -> the full remaining text starting at that offset.
    strings = dict((offset, '') for (offset, c) in chars)
    for off1, ch in chars:
        for off2, _ in chars:
            if off2 <= off1:
                strings[off2] += ch

    while True:
        op = random.choice('tsrr')  # reads twice as likely as tell/seek
        if op == 't':
            # Just exercise tell(); consistency is verified by the reads.
            reader.tell()
        elif op == 's':
            reader.seek(random.choice([offset for (offset, c) in chars]))
        elif op == 'r':
            pos = reader.tell() if random.random() < .3 else None
            if random.random() < .2:
                size = None
            elif random.random() < .8:
                size = random.randint(0, int(strlen / 6))
            else:
                size = random.randint(0, strlen + 20)
            if random.random() < .8:
                s = reader.read(size)
            else:
                s = reader.readline(size)
            # Whatever was read must be a prefix of the text at `pos`.
            if pos is not None:
                assert pos in strings
                assert strings[pos].startswith(s)
        n -= 1
        if n == 0:
            return 'passed'
# Call the randomized test function `check_reader` with a variety of
# input strings and encodings.
ENCODINGS = ['ascii', 'latin1', 'greek', 'hebrew', 'utf-16', 'utf-8']
STRINGS = [
"""
This is a test file.
It is fairly short.
""",
"This file can be encoded with latin1. \x83",
"""\
This is a test file.
Here's a blank line:
And here's some unicode: \xee \u0123 \uffe3
""",
"""\
This is a test file.
Unicode characters: \xf3 \u2222 \u3333\u4444 \u5555
""",
]
def test_reader():
    """Yield one check_reader case per representable (string, encoding) pair."""
    for text in STRINGS:
        for enc in ENCODINGS:
            try:
                text.encode(enc)
            except UnicodeEncodeError:
                # This string is not representable in this encoding; skip it.
                continue
            yield check_reader, text, enc
# nose shows the whole string arguments in a verbose mode; this is annoying,
# so large string test is separated.
LARGE_STRING = """\
This is a larger file. It has some lines that are longer \
than 72 characters. It's got lots of repetition. Here's \
some unicode chars: \xee \u0123 \uffe3 \ueeee \u2345
How fun! Let's repeat it twenty times.
"""*10
def test_reader_on_large_string():
    """Yield one check_reader case per encoding for the large fixture string."""
    for enc in ENCODINGS:
        try:
            LARGE_STRING.encode(enc)
        except UnicodeEncodeError:
            # Not representable in this encoding; skip it.
            continue

        def _check(encoding, n=1000):
            check_reader(LARGE_STRING, encoding, n)

        yield _check, enc
def teardown_module(module=None):
    """Force a garbage-collection pass once the module's tests finish."""
    import gc
    gc.collect()
|
DarthMaulware/EquationGroupLeaks | refs/heads/master | Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/status/cmd/devicequery/type_Params.py | 1 | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: type_Params.py
from types import *
import array
PARAMS_DEVICE_TYPE_USER_SPECIFIC = 0
PARAMS_DEVICE_TYPE_U1394 = 1
PARAMS_DEVICE_TYPE_ADAPTER = 2
PARAMS_DEVICE_TYPE_ALL = 255
PARAMS_DEVICE_TYPE_APM_SUPPORT = 3
PARAMS_DEVICE_TYPE_BATTERY = 4
PARAMS_DEVICE_TYPE_CDROM = 5
PARAMS_DEVICE_TYPE_COMPUTER = 6
PARAMS_DEVICE_TYPE_DECODER = 7
PARAMS_DEVICE_TYPE_DISK_DRIVE = 8
PARAMS_DEVICE_TYPE_DISPLAY = 9
PARAMS_DEVICE_TYPE_FDC = 10
PARAMS_DEVICE_TYPE_FLOPPY = 11
PARAMS_DEVICE_TYPE_GPS = 12
PARAMS_DEVICE_TYPE_HDC = 13
PARAMS_DEVICE_TYPE_HID_CLASS = 14
PARAMS_DEVICE_TYPE_IMAGE = 15
PARAMS_DEVICE_TYPE_INFRARED = 16
PARAMS_DEVICE_TYPE_KEYBOARD = 17
PARAMS_DEVICE_TYPE_LEGACY_DRIVER = 18
PARAMS_DEVICE_TYPE_MEDIA = 19
PARAMS_DEVICE_TYPE_MEDIUM_CHANGER = 20
PARAMS_DEVICE_TYPE_MODEM = 21
PARAMS_DEVICE_TYPE_MONITOR = 22
PARAMS_DEVICE_TYPE_MOUSE = 23
PARAMS_DEVICE_TYPE_MTD = 24
PARAMS_DEVICE_TYPE_MULTIFUNCTION = 25
PARAMS_DEVICE_TYPE_MULTIPORT_SERIAL = 26
PARAMS_DEVICE_TYPE_NET = 27
PARAMS_DEVICE_TYPE_NET_CLIENT = 28
PARAMS_DEVICE_TYPE_NET_SERVICE = 29
PARAMS_DEVICE_TYPE_NET_TRANS = 30
PARAMS_DEVICE_TYPE_NO_DRIVER = 31
PARAMS_DEVICE_TYPE_PARALLEL = 32
PARAMS_DEVICE_TYPE_PCMCIA = 33
PARAMS_DEVICE_TYPE_PORTS = 34
PARAMS_DEVICE_TYPE_PRINTER = 35
PARAMS_DEVICE_TYPE_PRINTER_UPGRADE = 36
PARAMS_DEVICE_TYPE_SCSI_ADAPTER = 37
PARAMS_DEVICE_TYPE_SMART_CARD_READER = 38
PARAMS_DEVICE_TYPE_SOUND = 39
PARAMS_DEVICE_TYPE_STILL_IMAGE = 40
PARAMS_DEVICE_TYPE_SYSTEM = 41
PARAMS_DEVICE_TYPE_TAPE_DRIVE = 42
PARAMS_DEVICE_TYPE_UNKNOWN = 43
PARAMS_DEVICE_TYPE_USB = 44
PARAMS_DEVICE_TYPE_VOLUME = 45
PARAMS_DEVICE_TYPE_U1394DEBUG = 46
PARAMS_DEVICE_TYPE_U61883 = 47
PARAMS_DEVICE_TYPE_AVC = 48
PARAMS_DEVICE_TYPE_BIOMETRIC = 49
PARAMS_DEVICE_TYPE_BLUETOOTH = 50
PARAMS_DEVICE_TYPE_DOT4 = 51
PARAMS_DEVICE_TYPE_DOT4PRINT = 52
PARAMS_DEVICE_TYPE_ENUM1394 = 53
PARAMS_DEVICE_TYPE_INFINIBAND = 54
PARAMS_DEVICE_TYPE_PNPPRINTERS = 55
PARAMS_DEVICE_TYPE_PROCESSOR = 56
PARAMS_DEVICE_TYPE_SBP2 = 57
PARAMS_DEVICE_TYPE_SECURITYACCELERATOR = 58
PARAMS_DEVICE_TYPE_VOLUMESNAPSHOT = 59
PARAMS_DEVICE_TYPE_WCEUSBS = 60
PARAMS_GUID_LEN = 16
class Params:
    """Parameter block for a device query: a device-type choice plus a GUID.

    NOTE(review): the MSG_KEY_PARAMS* constants used by Marshal/Demarshal
    are not defined in this (decompiled) module -- presumably lost during
    decompilation; confirm against the original source.
    """
    def __init__(self):
        # Default to the user-specific device type and an all-zero GUID.
        self.__dict__['choice'] = PARAMS_DEVICE_TYPE_USER_SPECIFIC
        self.__dict__['guid'] = array.array('B')
        i = 0
        while i < PARAMS_GUID_LEN:
            self.__dict__['guid'].append(0)
            i = i + 1
    def __getattr__(self, name):
        # Only 'choice' and 'guid' are readable attributes.
        if name == 'choice':
            return self.__dict__['choice']
        if name == 'guid':
            return self.__dict__['guid']
        raise AttributeError("Attribute '%s' not found" % name)
    def __setattr__(self, name, value):
        # Restrict writes to the two known attributes.
        if name == 'choice':
            self.__dict__['choice'] = value
        elif name == 'guid':
            self.__dict__['guid'] = value
        else:
            raise AttributeError("Attribute '%s' not found" % name)
    def Marshal(self, mmsg):
        """Serialise choice and guid into a sub-message appended to mmsg."""
        from mcl.object.Message import MarshalMessage
        submsg = MarshalMessage()
        submsg.AddU32(MSG_KEY_PARAMS_CHOICE, self.__dict__['choice'])
        submsg.AddData(MSG_KEY_PARAMS_GUID, self.__dict__['guid'])
        mmsg.AddMessage(MSG_KEY_PARAMS, submsg)
    def Demarshal(self, dmsg, instance=-1):
        """Populate choice and guid from a demarshalled message."""
        import mcl.object.Message
        msgData = dmsg.FindData(MSG_KEY_PARAMS, mcl.object.Message.MSG_TYPE_MSG, instance)
        submsg = mcl.object.Message.DemarshalMessage(msgData)
        self.__dict__['choice'] = submsg.FindU32(MSG_KEY_PARAMS_CHOICE)
        try:
            self.__dict__['guid'] = submsg.FindData(MSG_KEY_PARAMS_GUID)
        except:
            # NOTE(review): bare except silently keeps the default GUID when
            # the field is absent -- presumably intentional best-effort.
            pass
cs591B1-Project/Social-Media-Impact-on-Stock-Market-and-Price | refs/heads/master | data/24 toyota/parseJSON.py | 26 |
def getSocialData(post):
    """Return the total social-engagement count for a crawled post.

    Sums facebook likes/comments/shares with the share counts reported by
    gplus, pinterest, linkedin, stumbledupon and vk, all taken from
    post["thread"]["social"].
    """
    thread = post["thread"]
    domain_rank = thread["domain_rank"]  # read but not part of the total
    social = thread["social"]

    facebook = social["facebook"]
    counts = [
        facebook["likes"],
        facebook["comments"],
        facebook["shares"],
        social["gplus"]["shares"],
        social["pinterest"]["shares"],
        social["linkedin"]["shares"],
        social["stumbledupon"]["shares"],
        social["vk"]["shares"],
    ]
    return sum(counts)
sdgdsffdsfff/jumpserver | refs/heads/master | apps/assets/api/admin_user.py | 1 | # ~*~ coding: utf-8 ~*~
# Copyright (C) 2014-2018 Beijing DuiZhan Technology Co.,Ltd. All Rights Reserved.
#
# Licensed under the GNU General Public License v2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.gnu.org/licenses/gpl-2.0.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import transaction
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from orgs.mixins.api import OrgBulkModelViewSet
from orgs.mixins import generics
from common.utils import get_logger
from ..hands import IsOrgAdmin
from ..models import AdminUser, Asset
from .. import serializers
from ..tasks import test_admin_user_connectivity_manual
logger = get_logger(__file__)
__all__ = [
'AdminUserViewSet', 'ReplaceNodesAdminUserApi',
'AdminUserTestConnectiveApi', 'AdminUserAuthApi',
'AdminUserAssetsListView',
]
class AdminUserViewSet(OrgBulkModelViewSet):
    """
    Admin user API set: add, delete, update, list and retrieve resources.
    """
    model = AdminUser
    # Exact-match filtering and free-text search share the same fields.
    filter_fields = ("name", "username")
    search_fields = filter_fields
    serializer_class = serializers.AdminUserSerializer
    permission_classes = (IsOrgAdmin,)
class AdminUserAuthApi(generics.UpdateAPIView):
    """Update an admin user's authentication credentials (PUT/PATCH only)."""
    model = AdminUser
    serializer_class = serializers.AdminUserAuthSerializer
    permission_classes = (IsOrgAdmin,)
class ReplaceNodesAdminUserApi(generics.UpdateAPIView):
    """Replace the admin user on every asset under the submitted nodes."""
    model = AdminUser
    serializer_class = serializers.ReplaceNodeAdminUserSerializer
    permission_classes = (IsOrgAdmin,)

    def update(self, request, *args, **kwargs):
        admin_user = self.get_object()
        serializer = self.serializer_class(data=request.data)
        if not serializer.is_valid():
            return Response({'error': serializer.errors}, status=400)
        # Collect the ids of every asset beneath the submitted nodes.
        asset_ids = []
        for node in serializer.validated_data['nodes']:
            asset_ids.extend(asset.id for asset in node.get_all_assets())
        # Reassign them all to this admin user atomically.
        with transaction.atomic():
            Asset.objects.filter(id__in=asset_ids).update(admin_user=admin_user)
        return Response({"msg": "ok"})
class AdminUserTestConnectiveApi(generics.RetrieveAPIView):
    """
    Test asset admin user assets_connectivity.

    GET triggers an asynchronous connectivity check and returns the
    Celery task id instead of the object itself.
    """
    model = AdminUser
    permission_classes = (IsOrgAdmin,)
    serializer_class = serializers.TaskIDSerializer
    def retrieve(self, request, *args, **kwargs):
        admin_user = self.get_object()
        # Fire-and-forget: the caller polls the task id for the result.
        task = test_admin_user_connectivity_manual.delay(admin_user)
        return Response({"task": task.id})
class AdminUserAssetsListView(generics.ListAPIView):
    """List the assets associated with one admin user (read-only)."""
    permission_classes = (IsOrgAdmin,)
    serializer_class = serializers.AssetSimpleSerializer
    filter_fields = ("hostname", "ip")
    http_method_names = ['get']
    search_fields = filter_fields
    def get_object(self):
        # The admin user whose assets are listed, resolved from the URL pk.
        pk = self.kwargs.get('pk')
        return get_object_or_404(AdminUser, pk=pk)
    def get_queryset(self):
        admin_user = self.get_object()
        return admin_user.get_related_assets()
|
apple/llvm-project | refs/heads/llvm.org/main | lldb/test/API/tools/lldb-server/TestGdbRemoteAttach.py | 5 | import gdbremote_testcase
import lldbgdbserverutils
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteAttach(gdbremote_testcase.GdbRemoteTestCaseBase):
    """Exercises lldb-server's vAttach packet against a live inferior."""
    mydir = TestBase.compute_mydir(__file__)
    def test_attach_with_vAttach(self):
        """Attach to a running process with vAttach and verify its pid."""
        self.build()
        self.set_inferior_startup_attach_manually()
        # Start the inferior, start the debug monitor, nothing is attached yet.
        procs = self.prep_debug_monitor_and_inferior(
            inferior_args=["sleep:60"])
        self.assertIsNotNone(procs)
        # Make sure the target process has been launched.
        inferior = procs.get("inferior")
        self.assertIsNotNone(inferior)
        self.assertTrue(inferior.pid > 0)
        self.assertTrue(
            lldbgdbserverutils.process_is_running(
                inferior.pid, True))
        # Add attach packets.
        self.test_sequence.add_log_lines([
            # Do the attach.
            "read packet: $vAttach;{:x}#00".format(inferior.pid),
            # Expect a stop notification from the attach.
            {"direction": "send",
             "regex": r"^\$T([0-9a-fA-F]{2})[^#]*#[0-9a-fA-F]{2}$",
             "capture": {1: "stop_signal_hex"}},
        ], True)
        self.add_process_info_collection_packets()
        # Run the stream
        context = self.expect_gdbremote_sequence()
        self.assertIsNotNone(context)
        # Gather process info response
        process_info = self.parse_process_info_response(context)
        self.assertIsNotNone(process_info)
        # Ensure the process id matches what we expected.
        pid_text = process_info.get('pid', None)
        self.assertIsNotNone(pid_text)
        # The stub reports the pid in hexadecimal.
        reported_pid = int(pid_text, base=16)
        self.assertEqual(reported_pid, inferior.pid)
|
yaroslavprogrammer/django | refs/heads/master | django/core/urlresolvers.py | 30 | """
This module converts requested URLs to callback view functions.
RegexURLResolver is the main class here. Its resolve() method takes a URL (as
a string) and returns a tuple in this format:
(view_function, function_args, function_kwargs)
"""
from __future__ import unicode_literals
import re
from threading import local
from django.http import Http404
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_str, force_text, iri_to_uri
from django.utils.functional import memoize, lazy
from django.utils.http import urlquote
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
from django.utils.regex_helper import normalize
from django.utils import six
from django.utils.translation import get_language
_resolver_cache = {} # Maps URLconf modules to RegexURLResolver instances.
_ns_resolver_cache = {} # Maps namespaces to RegexURLResolver instances.
_callable_cache = {} # Maps view and url pattern names to their view functions.
# SCRIPT_NAME prefixes for each thread are stored here. If there's no entry for
# the current thread (which is the only one we ever access), it is assumed to
# be empty.
_prefixes = local()
# Overridden URLconfs for each thread are stored here.
_urlconfs = local()
class ResolverMatch(object):
    """The result of resolving a URL: the view plus its args and metadata.

    Supports tuple-style unpacking as (func, args, kwargs) for callers
    that expect the historical 3-tuple return value.
    """
    def __init__(self, func, args, kwargs, url_name=None, app_name=None, namespaces=None):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.app_name = app_name
        # Drop empty namespace entries so joining produces clean paths.
        self.namespaces = [ns for ns in (namespaces or []) if ns]
        if not url_name:
            if hasattr(func, '__name__'):
                # A plain function: dotted path of its module + name.
                url_name = '.'.join([func.__module__, func.__name__])
            else:
                # An instance of a callable class: use the class's path.
                url_name = '.'.join([func.__class__.__module__, func.__class__.__name__])
        self.url_name = url_name

    @property
    def namespace(self):
        return ':'.join(self.namespaces)

    @property
    def view_name(self):
        # Skip empty components so there is no leading/trailing colon.
        return ':'.join(part for part in [self.namespace, self.url_name] if part)

    def __getitem__(self, index):
        return (self.func, self.args, self.kwargs)[index]

    def __repr__(self):
        return "ResolverMatch(func=%s, args=%s, kwargs=%s, url_name='%s', app_name='%s', namespace='%s')" % (
            self.func, self.args, self.kwargs, self.url_name, self.app_name, self.namespace)
class Resolver404(Http404):
    """Raised when a path cannot be matched by any URL pattern."""
    pass
class NoReverseMatch(Exception):
    """Raised when reverse() finds no URL matching the given view and args."""
    pass
def get_callable(lookup_view, can_fail=False):
    """
    Convert a string version of a function name to the callable object.

    If the lookup_view is not an import path, it is assumed to be a URL pattern
    label and the original string is returned.

    If can_fail is True, lookup_view might be a URL pattern label, so errors
    during the import fail and the string is returned.
    """
    if not callable(lookup_view):
        mod_name, func_name = get_mod_func(lookup_view)
        if func_name == '':
            # No dot in the path: treat it as a URL pattern label.
            return lookup_view
        try:
            mod = import_module(mod_name)
        except ImportError:
            # Distinguish "module truly missing" from "importing it raised".
            parentmod, submod = get_mod_func(mod_name)
            if (not can_fail and submod != '' and
                    not module_has_submodule(import_module(parentmod), submod)):
                raise ViewDoesNotExist(
                    "Could not import %s. Parent module %s does not exist." %
                    (lookup_view, mod_name))
            if not can_fail:
                raise
        else:
            try:
                lookup_view = getattr(mod, func_name)
                if not callable(lookup_view):
                    raise ViewDoesNotExist(
                        "Could not import %s.%s. View is not callable." %
                        (mod_name, func_name))
            except AttributeError:
                if not can_fail:
                    raise ViewDoesNotExist(
                        "Could not import %s. View does not exist in module %s." %
                        (lookup_view, mod_name))
    return lookup_view
# Memoized on lookup_view; resolved callables are cached process-wide.
get_callable = memoize(get_callable, _callable_cache, 1)
def get_resolver(urlconf):
    """Return a RegexURLResolver for urlconf (default: settings.ROOT_URLCONF)."""
    if urlconf is None:
        from django.conf import settings
        urlconf = settings.ROOT_URLCONF
    return RegexURLResolver(r'^/', urlconf)
# Memoized on the urlconf argument so each URLconf is parsed only once.
get_resolver = memoize(get_resolver, _resolver_cache, 1)
def get_ns_resolver(ns_pattern, resolver):
    """Return a resolver wrapping ``resolver``'s patterns under ns_pattern."""
    # Build a namespaced resolver for the given parent urlconf pattern.
    # This makes it possible to have captured parameters in the parent
    # urlconf pattern.
    ns_resolver = RegexURLResolver(ns_pattern,
                                   resolver.url_patterns)
    return RegexURLResolver(r'^/', [ns_resolver])
get_ns_resolver = memoize(get_ns_resolver, _ns_resolver_cache, 2)
def get_mod_func(callback):
    """Split a dotted path at its last dot into (module path, attribute name).

    Converts 'django.views.news.stories.story_detail' into
    ('django.views.news.stories', 'story_detail').  A string without a
    dot is returned unchanged with an empty attribute name.
    """
    dot = callback.rfind('.')
    if dot == -1:
        return callback, ''
    return callback[:dot], callback[dot + 1:]
class LocaleRegexProvider(object):
    """
    A mixin to provide a default regex property which can vary by active
    language.
    """
    def __init__(self, regex):
        # regex is either a string representing a regular expression, or a
        # translatable string (using ugettext_lazy) representing a regular
        # expression.
        self._regex = regex
        # Compiled patterns keyed by language code, filled lazily.
        self._regex_dict = {}
    @property
    def regex(self):
        """
        Returns a compiled regular expression, depending upon the activated
        language-code.
        """
        language_code = get_language()
        if language_code not in self._regex_dict:
            if isinstance(self._regex, six.string_types):
                regex = self._regex
            else:
                # Lazy translation object: force to text for this language.
                regex = force_text(self._regex)
            try:
                compiled_regex = re.compile(regex, re.UNICODE)
            except re.error as e:
                raise ImproperlyConfigured(
                    '"%s" is not a valid regular expression: %s' %
                    (regex, six.text_type(e)))
            self._regex_dict[language_code] = compiled_regex
        return self._regex_dict[language_code]
class RegexURLPattern(LocaleRegexProvider):
    """A single URL pattern: a regex plus the view callback it maps to."""
    def __init__(self, regex, callback, default_args=None, name=None):
        LocaleRegexProvider.__init__(self, regex)
        # callback is either a string like 'foo.views.news.stories.story_detail'
        # which represents the path to a module and a view function name, or a
        # callable object (view).
        if callable(callback):
            self._callback = callback
        else:
            # Resolved lazily by the `callback` property on first access.
            self._callback = None
            self._callback_str = callback
        self.default_args = default_args or {}
        self.name = name
    def __repr__(self):
        return force_str('<%s %s %s>' % (self.__class__.__name__, self.name, self.regex.pattern))
    def add_prefix(self, prefix):
        """
        Adds the prefix string to a string-based callback.
        """
        # No-op for callable callbacks, which have no dotted path to prefix.
        if not prefix or not hasattr(self, '_callback_str'):
            return
        self._callback_str = prefix + '.' + self._callback_str
    def resolve(self, path):
        """Match path against this pattern; return a ResolverMatch or None."""
        match = self.regex.search(path)
        if match:
            # If there are any named groups, use those as kwargs, ignoring
            # non-named groups. Otherwise, pass all non-named arguments as
            # positional arguments.
            kwargs = match.groupdict()
            if kwargs:
                args = ()
            else:
                args = match.groups()
            # In both cases, pass any extra_kwargs as **kwargs.
            kwargs.update(self.default_args)
            return ResolverMatch(self.callback, args, kwargs, self.name)
    @property
    def callback(self):
        # Import the dotted-path callback on first access, then cache it.
        if self._callback is not None:
            return self._callback
        self._callback = get_callable(self._callback_str)
        return self._callback
class RegexURLResolver(LocaleRegexProvider):
def __init__(self, regex, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
LocaleRegexProvider.__init__(self, regex)
# urlconf_name is a string representing the module containing URLconfs.
self.urlconf_name = urlconf_name
if not isinstance(urlconf_name, six.string_types):
self._urlconf_module = self.urlconf_name
self.callback = None
self.default_kwargs = default_kwargs or {}
self.namespace = namespace
self.app_name = app_name
self._reverse_dict = {}
self._namespace_dict = {}
self._app_dict = {}
def __repr__(self):
if isinstance(self.urlconf_name, list) and len(self.urlconf_name):
# Don't bother to output the whole list, it can be huge
urlconf_repr = '<%s list>' % self.urlconf_name[0].__class__.__name__
else:
urlconf_repr = repr(self.urlconf_name)
return str('<%s %s (%s:%s) %s>') % (
self.__class__.__name__, urlconf_repr, self.app_name,
self.namespace, self.regex.pattern)
def _populate(self):
lookups = MultiValueDict()
namespaces = {}
apps = {}
language_code = get_language()
for pattern in reversed(self.url_patterns):
p_pattern = pattern.regex.pattern
if p_pattern.startswith('^'):
p_pattern = p_pattern[1:]
if isinstance(pattern, RegexURLResolver):
if pattern.namespace:
namespaces[pattern.namespace] = (p_pattern, pattern)
if pattern.app_name:
apps.setdefault(pattern.app_name, []).append(pattern.namespace)
else:
parent = normalize(pattern.regex.pattern)
for name in pattern.reverse_dict:
for matches, pat, defaults in pattern.reverse_dict.getlist(name):
new_matches = []
for piece, p_args in parent:
new_matches.extend([(piece + suffix, p_args + args) for (suffix, args) in matches])
lookups.appendlist(name, (new_matches, p_pattern + pat, dict(defaults, **pattern.default_kwargs)))
for namespace, (prefix, sub_pattern) in pattern.namespace_dict.items():
namespaces[namespace] = (p_pattern + prefix, sub_pattern)
for app_name, namespace_list in pattern.app_dict.items():
apps.setdefault(app_name, []).extend(namespace_list)
else:
bits = normalize(p_pattern)
lookups.appendlist(pattern.callback, (bits, p_pattern, pattern.default_args))
if pattern.name is not None:
lookups.appendlist(pattern.name, (bits, p_pattern, pattern.default_args))
self._reverse_dict[language_code] = lookups
self._namespace_dict[language_code] = namespaces
self._app_dict[language_code] = apps
@property
def reverse_dict(self):
language_code = get_language()
if language_code not in self._reverse_dict:
self._populate()
return self._reverse_dict[language_code]
@property
def namespace_dict(self):
language_code = get_language()
if language_code not in self._namespace_dict:
self._populate()
return self._namespace_dict[language_code]
@property
def app_dict(self):
language_code = get_language()
if language_code not in self._app_dict:
self._populate()
return self._app_dict[language_code]
def resolve(self, path):
tried = []
match = self.regex.search(path)
if match:
new_path = path[match.end():]
for pattern in self.url_patterns:
try:
sub_match = pattern.resolve(new_path)
except Resolver404 as e:
sub_tried = e.args[0].get('tried')
if sub_tried is not None:
tried.extend([[pattern] + t for t in sub_tried])
else:
tried.append([pattern])
else:
if sub_match:
sub_match_dict = dict(match.groupdict(), **self.default_kwargs)
sub_match_dict.update(sub_match.kwargs)
return ResolverMatch(sub_match.func, sub_match.args, sub_match_dict, sub_match.url_name, self.app_name or sub_match.app_name, [self.namespace] + sub_match.namespaces)
tried.append([pattern])
raise Resolver404({'tried': tried, 'path': new_path})
raise Resolver404({'path' : path})
@property
def urlconf_module(self):
try:
return self._urlconf_module
except AttributeError:
self._urlconf_module = import_module(self.urlconf_name)
return self._urlconf_module
@property
def url_patterns(self):
patterns = getattr(self.urlconf_module, "urlpatterns", self.urlconf_module)
try:
iter(patterns)
except TypeError:
raise ImproperlyConfigured("The included urlconf %s doesn't have any patterns in it" % self.urlconf_name)
return patterns
def _resolve_special(self, view_type):
callback = getattr(self.urlconf_module, 'handler%s' % view_type, None)
if not callback:
# No handler specified in file; use default
# Lazy import, since django.urls imports this file
from django.conf import urls
callback = getattr(urls, 'handler%s' % view_type)
return get_callable(callback), {}
def resolve400(self):
return self._resolve_special('400')
def resolve403(self):
return self._resolve_special('403')
def resolve404(self):
return self._resolve_special('404')
def resolve500(self):
return self._resolve_special('500')
    def reverse(self, lookup_view, *args, **kwargs):
        # Public entry point: reverse with an empty (no script) prefix.
        return self._reverse_with_prefix(lookup_view, '', *args, **kwargs)
    def _reverse_with_prefix(self, lookup_view, _prefix, *args, **kwargs):
        """
        Reverse ``lookup_view`` into a URL string, prepending ``_prefix`` (the
        script prefix). Captured URL parameters come from either positional
        ``args`` or ``kwargs`` -- never both. Raises NoReverseMatch when no
        candidate pattern fits the supplied arguments.
        """
        if args and kwargs:
            raise ValueError("Don't mix *args and **kwargs in call to reverse()!")
        # Coerce all arguments to text so they can be substituted into the
        # candidate URL patterns below.
        text_args = [force_text(v) for v in args]
        text_kwargs = dict((k, force_text(v)) for (k, v) in kwargs.items())
        try:
            lookup_view = get_callable(lookup_view, True)
        except (ImportError, AttributeError) as e:
            raise NoReverseMatch("Error importing '%s': %s." % (lookup_view, e))
        possibilities = self.reverse_dict.getlist(lookup_view)
        prefix_norm, prefix_args = normalize(urlquote(_prefix))[0]
        for possibility, pattern, defaults in possibilities:
            for result, params in possibility:
                if args:
                    # Positional args must cover the pattern's parameters plus
                    # any captured by the prefix, exactly.
                    if len(args) != len(params) + len(prefix_args):
                        continue
                    candidate_subs = dict(zip(prefix_args + params, text_args))
                else:
                    # Keyword args (plus defaults) must match exactly the set
                    # of parameters the pattern (and prefix) expects.
                    if set(kwargs.keys()) | set(defaults.keys()) != set(params) | set(defaults.keys()) | set(prefix_args):
                        continue
                    matches = True
                    for k, v in defaults.items():
                        # An explicitly-passed kwarg must agree with the
                        # pattern's default for that parameter.
                        if kwargs.get(k, v) != v:
                            matches = False
                            break
                    if not matches:
                        continue
                    candidate_subs = text_kwargs
                # WSGI provides decoded URLs, without %xx escapes, and the URL
                # resolver operates on such URLs. First substitute arguments
                # without quoting to build a decoded URL and look for a match.
                # Then, if we have a match, redo the substitution with quoted
                # arguments in order to return a properly encoded URL.
                candidate_pat = prefix_norm.replace('%', '%%') + result
                if re.search('^%s%s' % (prefix_norm, pattern), candidate_pat % candidate_subs, re.UNICODE):
                    candidate_subs = dict((k, urlquote(v)) for (k, v) in candidate_subs.items())
                    return candidate_pat % candidate_subs
        # lookup_view can be URL label, or dotted path, or callable, Any of
        # these can be passed in at the top, but callables are not friendly in
        # error messages.
        m = getattr(lookup_view, '__module__', None)
        n = getattr(lookup_view, '__name__', None)
        if m is not None and n is not None:
            lookup_view_s = "%s.%s" % (m, n)
        else:
            lookup_view_s = lookup_view
        patterns = [pattern for (possibility, pattern, defaults) in possibilities]
        raise NoReverseMatch("Reverse for '%s' with arguments '%s' and keyword "
                "arguments '%s' not found. %d pattern(s) tried: %s" %
                (lookup_view_s, args, kwargs, len(patterns), patterns))
class LocaleRegexURLResolver(RegexURLResolver):
    """
    URL resolver whose prefix is always the currently active language code.

    Instead of a fixed regex argument, ``regex`` is a property that compiles
    (and caches, per language) a pattern matching ``<language-code>/``.
    """
    def __init__(self, urlconf_name, default_kwargs=None, app_name=None, namespace=None):
        super(LocaleRegexURLResolver, self).__init__(
            None, urlconf_name, default_kwargs, app_name, namespace)
    @property
    def regex(self):
        lang = get_language()
        try:
            return self._regex_dict[lang]
        except KeyError:
            # First request in this language: compile and cache the prefix.
            compiled = re.compile('^%s/' % lang, re.UNICODE)
            self._regex_dict[lang] = compiled
            return compiled
def resolve(path, urlconf=None):
    """Resolve *path* using *urlconf*, defaulting to the thread's active URLconf."""
    active_urlconf = get_urlconf() if urlconf is None else urlconf
    return get_resolver(active_urlconf).resolve(path)
def reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None, current_app=None):
    """
    Reverse ``viewname`` (a URL name, dotted path or callable, optionally
    qualified with ``namespace:...`` parts) into a URL, resolving namespaces
    against ``current_app`` where given.
    """
    if urlconf is None:
        urlconf = get_urlconf()
    resolver = get_resolver(urlconf)
    args = args or []
    kwargs = kwargs or {}
    if prefix is None:
        prefix = get_script_prefix()
    if not isinstance(viewname, six.string_types):
        view = viewname
    else:
        # Split "ns1:ns2:view" into the view name (last part) and the
        # namespace path, which is walked outermost-first below.
        parts = viewname.split(':')
        parts.reverse()
        view = parts[0]
        path = parts[1:]
        resolved_path = []
        ns_pattern = ''
        while path:
            ns = path.pop()
            # Lookup the name to see if it could be an app identifier
            try:
                app_list = resolver.app_dict[ns]
                # Yes! Path part matches an app in the current Resolver
                if current_app and current_app in app_list:
                    # If we are reversing for a particular app,
                    # use that namespace
                    ns = current_app
                elif ns not in app_list:
                    # The name isn't shared by one of the instances
                    # (i.e., the default) so just pick the first instance
                    # as the default.
                    ns = app_list[0]
            except KeyError:
                pass
            try:
                # Descend into the namespaced resolver, accumulating its
                # URL prefix pattern.
                extra, resolver = resolver.namespace_dict[ns]
                resolved_path.append(ns)
                ns_pattern = ns_pattern + extra
            except KeyError as key:
                if resolved_path:
                    raise NoReverseMatch(
                        "%s is not a registered namespace inside '%s'" %
                        (key, ':'.join(resolved_path)))
                else:
                    raise NoReverseMatch("%s is not a registered namespace" %
                        key)
        if ns_pattern:
            resolver = get_ns_resolver(ns_pattern, resolver)
    return iri_to_uri(resolver._reverse_with_prefix(view, prefix, *args, **kwargs))
# Lazy variant of reverse() for use at import time (e.g. in class attributes),
# evaluated only when the result is first needed as a string.
reverse_lazy = lazy(reverse, str)
def clear_url_caches():
    """Empty the module-level resolver and callable caches."""
    global _resolver_cache
    global _ns_resolver_cache
    global _callable_cache
    _resolver_cache.clear()
    _ns_resolver_cache.clear()
    _callable_cache.clear()
def set_script_prefix(prefix):
    """Store the script prefix for the current thread, normalised to end in '/'."""
    _prefixes.value = prefix if prefix.endswith('/') else prefix + '/'
def get_script_prefix():
    """
    Return the script prefix active for the current thread. Handy for client
    code that builds URLs by hand rather than going through the request
    object.
    """
    try:
        return _prefixes.value
    except AttributeError:
        # No prefix recorded for this thread yet; the root prefix applies.
        return '/'
def clear_script_prefix():
    """Remove any script prefix stored for the current thread."""
    if hasattr(_prefixes, "value"):
        del _prefixes.value
def set_urlconf(urlconf_name):
    """
    Install *urlconf_name* as the URLconf for the current thread, overriding
    the default from settings. A falsy value reverts to the default.
    """
    if urlconf_name:
        _urlconfs.value = urlconf_name
    elif hasattr(_urlconfs, "value"):
        del _urlconfs.value
def get_urlconf(default=None):
    """
    Returns the root URLconf to use for the current thread if it has been
    changed from the default one.
    """
    # Falls back to ``default`` when no per-thread override has been set.
    return getattr(_urlconfs, "value", default)
def is_valid_path(path, urlconf=None):
    """
    Report whether *path* resolves against *urlconf* (or the default URL
    resolver). A convenience wrapper that hides the try/except dance around
    resolve() for simple "does this match?" checks.
    """
    try:
        resolve(path, urlconf)
    except Resolver404:
        return False
    return True
|
WholeGrainGoats/servo | refs/heads/master | tests/wpt/css-tests/tools/html5lib/html5lib/tests/test_sanitizer.py | 430 | from __future__ import absolute_import, division, unicode_literals
try:
import json
except ImportError:
import simplejson as json
from html5lib import html5parser, sanitizer, constants, treebuilders
def toxmlFactory():
    """Build a serializer turning an etree element into a unicode string."""
    tree = treebuilders.getTreeBuilder("etree")
    def toxml(element):
        # encode/decode roundtrip required for Python 2.6 compatibility
        result_bytes = tree.implementation.tostring(element, encoding="utf-8")
        return result_bytes.decode("utf-8")
    return toxml
def runSanitizerTest(name, expected, input, toxml=None):
    """
    Assert that sanitizing ``input`` produces ``expected``.
    ``name`` only labels the generated test; ``expected`` is parsed and
    re-serialized so both sides are compared in the same form. (``input``
    shadows the builtin but is part of the established call signature.)
    """
    if toxml is None:
        toxml = toxmlFactory()
    expected = ''.join([toxml(token) for token in html5parser.HTMLParser().
                        parseFragment(expected)])
    # NOTE(review): the JSON round-trip appears to normalise the text type
    # across Python versions -- confirm before removing.
    expected = json.loads(json.dumps(expected))
    assert expected == sanitize_html(input)
def sanitize_html(stream, toxml=None):
    """Parse ``stream`` with the sanitizing tokenizer and re-serialize it."""
    if toxml is None:
        toxml = toxmlFactory()
    return ''.join([toxml(token) for token in
                    html5parser.HTMLParser(tokenizer=sanitizer.HTMLSanitizer).
                    parseFragment(stream)])
def test_should_handle_astral_plane_characters():
    # Astral-plane (non-BMP) characters must pass through the sanitizer
    # unmangled.
    assert '<html:p xmlns:html="http://www.w3.org/1999/xhtml">\U0001d4b5 \U0001d538</html:p>' == sanitize_html("<p>𝒵 𝔸</p>")
def test_sanitizer():
    """Generate one sub-test per allowed element, attribute and URI protocol."""
    toxml = toxmlFactory()
    # Allowed (lowercase) elements are preserved; disallowed content inside
    # them is escaped. Void elements and a couple of special cases serialize
    # differently, hence the branches.
    for tag_name in sanitizer.HTMLSanitizer.allowed_elements:
        if tag_name in ['caption', 'col', 'colgroup', 'optgroup', 'option', 'table', 'tbody', 'td', 'tfoot', 'th', 'thead', 'tr']:
            continue # TODO
        if tag_name != tag_name.lower():
            continue # TODO
        if tag_name == 'image':
            yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
                "<img title=\"1\"/>foo <bad>bar</bad> baz",
                "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
                toxml)
        elif tag_name == 'br':
            yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
                "<br title=\"1\"/>foo <bad>bar</bad> baz<br/>",
                "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
                toxml)
        elif tag_name in constants.voidElements:
            yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
                "<%s title=\"1\"/>foo <bad>bar</bad> baz" % tag_name,
                "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
                toxml)
        else:
            yield (runSanitizerTest, "test_should_allow_%s_tag" % tag_name,
                "<%s title=\"1\">foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
                "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
                toxml)
    # Upper-cased element names must be rejected (escaped into text).
    for tag_name in sanitizer.HTMLSanitizer.allowed_elements:
        tag_name = tag_name.upper()
        yield (runSanitizerTest, "test_should_forbid_%s_tag" % tag_name,
            "<%s title=\"1\">foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
            "<%s title='1'>foo <bad>bar</bad> baz</%s>" % (tag_name, tag_name),
            toxml)
    # Allowed (lowercase) attributes are preserved on an allowed element.
    for attribute_name in sanitizer.HTMLSanitizer.allowed_attributes:
        if attribute_name != attribute_name.lower():
            continue # TODO
        if attribute_name == 'style':
            continue
        yield (runSanitizerTest, "test_should_allow_%s_attribute" % attribute_name,
            "<p %s=\"foo\">foo <bad>bar</bad> baz</p>" % attribute_name,
            "<p %s='foo'>foo <bad>bar</bad> baz</p>" % attribute_name,
            toxml)
    # Upper-cased attribute names must be stripped entirely.
    for attribute_name in sanitizer.HTMLSanitizer.allowed_attributes:
        attribute_name = attribute_name.upper()
        yield (runSanitizerTest, "test_should_forbid_%s_attribute" % attribute_name,
            "<p>foo <bad>bar</bad> baz</p>",
            "<p %s='display: none;'>foo <bad>bar</bad> baz</p>" % attribute_name,
            toxml)
    # Allowed URI protocols pass through in href values.
    for protocol in sanitizer.HTMLSanitizer.allowed_protocols:
        yield (runSanitizerTest, "test_should_allow_%s_uris" % protocol,
            "<a href=\"%s\">foo</a>" % protocol,
            """<a href="%s">foo</a>""" % protocol,
            toxml)
    # NOTE(review): despite the test name, this loop feeds the protocol
    # unchanged (not upper-cased) -- looks like an upstream oversight.
    for protocol in sanitizer.HTMLSanitizer.allowed_protocols:
        yield (runSanitizerTest, "test_should_allow_uppercase_%s_uris" % protocol,
            "<a href=\"%s\">foo</a>" % protocol,
            """<a href="%s">foo</a>""" % protocol,
            toxml)
|
Ziqi-Li/bknqgis | refs/heads/master | pandas/scripts/pypistats.py | 7 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Calculates the total number of downloads that a particular PyPI package has
received across all versions tracked by PyPI
"""
from datetime import datetime
import locale
import sys
import xmlrpclib
import pandas as pd
locale.setlocale(locale.LC_ALL, '')
class PyPIDownloadAggregator(object):
    """Aggregates per-release download counts for one PyPI package via the
    (legacy) PyPI XML-RPC API. Python 2 only (uses ``xmlrpclib``)."""
    def __init__(self, package_name, include_hidden=True):
        self.package_name = package_name
        self.include_hidden = include_hidden
        # XML-RPC proxy for the PyPI API endpoint.
        self.proxy = xmlrpclib.Server('http://pypi.python.org/pypi')
        self._downloads = {}
    @property
    def releases(self):
        """Retrieves the release number for each uploaded release"""
        result = self.proxy.package_releases(self.package_name,
                                             self.include_hidden)
        if len(result) == 0:
            # no matching package--search for possibles, and limit to 15
            # results
            results = self.proxy.search({
                'name': self.package_name,
                'description': self.package_name
            }, 'or')[:15]
            # make sure we only get unique package names
            matches = []
            for match in results:
                name = match['name']
                if name not in matches:
                    matches.append(name)
            # if only one package was found, return it
            if len(matches) == 1:
                self.package_name = matches[0]
                return self.releases
            error = """No such package found: %s
Possible matches include:
%s
""" % (self.package_name, '\n'.join('\t- %s' % n for n in matches))
            sys.exit(error)
        return result
    def get_downloads(self):
        """Calculate the total number of downloads for the package"""
        downloads = {}
        for release in self.releases:
            # One DataFrame of file-level download rows per release, tagged
            # with its version string.
            urls = self.proxy.release_urls(self.package_name, release)
            urls = pd.DataFrame(urls)
            urls['version'] = release
            downloads[release] = urls
        return pd.concat(downloads, ignore_index=True)
if __name__ == '__main__':
    # Pull per-file download stats for pandas and build a per-release summary.
    agg = PyPIDownloadAggregator('pandas')
    data = agg.get_downloads()
    to_omit = ['0.2b1', '0.2beta']
    # NOTE(review): '.value' suggests upload_time arrives as a boxed xmlrpc
    # DateTime -- confirm against the XML-RPC response type.
    isostrings = data['upload_time'].map(lambda x: x.value)
    data['upload_time'] = pd.to_datetime(isostrings)
    totals = data.groupby('version').downloads.sum()
    # Fold pre-releases into their corresponding final release.
    rollup = {'0.8.0rc1': '0.8.0',
              '0.8.0rc2': '0.8.0',
              '0.3.0.beta': '0.3.0',
              '0.3.0.beta2': '0.3.0'}
    downloads = totals.groupby(lambda x: rollup.get(x, x)).sum()
    first_upload = data.groupby('version').upload_time.min()
    # NOTE(review): `result` is built from the un-rolled-up `totals`; the
    # rolled-up `downloads` series computed above is never used -- verify
    # whether that is intentional.
    result = pd.DataFrame({'downloads': totals,
                           'release_date': first_upload})
    result = result.sort('release_date')
    result = result.drop(to_omit + list(rollup.keys()))
    result.index.name = 'release'
    by_date = result.reset_index().set_index('release_date').downloads
    # NOTE(review): a sentinel date is appended and the series shifted by one
    # -- presumably aligning each count with the following release's date;
    # confirm intent.
    dummy = pd.Series(index=pd.DatetimeIndex([datetime(2012, 12, 27)]))
    by_date = by_date.append(dummy).shift(1).fillna(0)
|
aselle/tensorflow | refs/heads/master | tensorflow/contrib/distributions/python/kernel_tests/autoregressive_test.py | 21 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import autoregressive as autoregressive_lib
from tensorflow.contrib.distributions.python.ops import independent as independent_lib
from tensorflow.contrib.distributions.python.ops import test_util
from tensorflow.contrib.distributions.python.ops.bijectors.affine import Affine
from tensorflow.contrib.distributions.python.ops.bijectors.masked_autoregressive import MaskedAutoregressiveFlow
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import normal as normal_lib
from tensorflow.python.ops.distributions import transformed_distribution as transformed_distribution_lib
from tensorflow.python.ops.distributions import util as distribution_util
from tensorflow.python.platform import test
class AutogressiveTest(test_util.VectorDistributionTestHelpers, test.TestCase):
    """Tests the Autoregressive distribution."""
    def setUp(self):
        # Fixed seed keeps the random fixtures reproducible across runs.
        self._rng = np.random.RandomState(42)
    def _random_scale_tril(self, event_size):
        """Draw a random lower-triangular float32 scale matrix of the given size."""
        n = np.int32(event_size * (event_size + 1) // 2)
        p = 2. * self._rng.random_sample(n).astype(np.float32) - 1.
        return distribution_util.fill_triangular(0.25 * p)
    def _normal_fn(self, affine_bijector):
        """Return a fn mapping samples to an Independent Normal whose scale is
        exp(affine_bijector.forward(samples))."""
        def _fn(samples):
            scale = math_ops.exp(affine_bijector.forward(samples))
            return independent_lib.Independent(
                normal_lib.Normal(loc=0., scale=scale, validate_args=True),
                reinterpreted_batch_ndims=1)
        return _fn
    def testSampleAndLogProbConsistency(self):
        batch_shape = []
        event_size = 2
        with self.test_session() as sess:
            batch_event_shape = np.concatenate([batch_shape, [event_size]], axis=0)
            sample0 = array_ops.zeros(batch_event_shape)
            affine = Affine(scale_tril=self._random_scale_tril(event_size))
            ar = autoregressive_lib.Autoregressive(
                self._normal_fn(affine), sample0, validate_args=True)
            # Monte-Carlo check (from VectorDistributionTestHelpers) that
            # sample() and log_prob() agree.
            self.run_test_sample_consistent_log_prob(
                sess.run, ar, radius=1., center=0., rtol=0.01)
    def testCompareToBijector(self):
        """Demonstrates equivalence between TD, Bijector approach and AR dist."""
        sample_shape = np.int32([4, 5])
        batch_shape = np.int32([])
        event_size = np.int32(2)
        with self.test_session() as sess:
            batch_event_shape = np.concatenate([batch_shape, [event_size]], axis=0)
            sample0 = array_ops.zeros(batch_event_shape)
            affine = Affine(scale_tril=self._random_scale_tril(event_size))
            ar = autoregressive_lib.Autoregressive(
                self._normal_fn(affine), sample0, validate_args=True)
            # The same affine transform expressed as a masked autoregressive
            # flow over a standard Normal base distribution.
            ar_flow = MaskedAutoregressiveFlow(
                is_constant_jacobian=True,
                shift_and_log_scale_fn=lambda x: [None, affine.forward(x)],
                validate_args=True)
            td = transformed_distribution_lib.TransformedDistribution(
                distribution=normal_lib.Normal(loc=0., scale=1.),
                bijector=ar_flow,
                event_shape=[event_size],
                batch_shape=batch_shape,
                validate_args=True)
            x_shape = np.concatenate(
                [sample_shape, batch_shape, [event_size]], axis=0)
            x = 2. * self._rng.random_sample(x_shape).astype(np.float32) - 1.
            # Log-probs from both constructions must agree pointwise.
            td_log_prob_, ar_log_prob_ = sess.run([td.log_prob(x), ar.log_prob(x)])
            self.assertAllClose(td_log_prob_, ar_log_prob_, atol=0., rtol=1e-6)
if __name__ == "__main__":
    test.main()
|
openhatch/oh-mainline | refs/heads/master | mysite/profile/migrations/0030_remove_dead_columns_from_profile_person.py | 17 | # This file is part of OpenHatch.
# Copyright (C) 2009 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from south.db import db
from django.db import models
from mysite.profile.models import *
class Migration:
    """South schema migration: drops dead polling/timestamp columns from
    ``profile_person`` and refreshes the frozen ``DateTimeField`` defaults on
    the tag-link tables."""
    def forwards(self, orm):
        # Deleting field 'Person.last_touched'
        db.delete_column('profile_person', 'last_touched')
        # Deleting field 'Person.last_polled'
        db.delete_column('profile_person', 'last_polled')
        # Deleting field 'Person.time_record_was_created'
        db.delete_column('profile_person', 'time_record_was_created')
        # Changing field 'Link_Person_Tag.time_record_was_created'
        db.alter_column('profile_link_person_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 7, 10, 15, 24, 42, 529130)))
        # Changing field 'Link_ProjectExp_Tag.time_record_was_created'
        db.alter_column('profile_link_projectexp_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 7, 10, 15, 24, 42, 87296)))
        # Changing field 'Link_Project_Tag.time_record_was_created'
        db.alter_column('profile_link_project_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 7, 10, 15, 24, 42, 324483)))
    def backwards(self, orm):
        # Re-create the dropped Person columns and restore the earlier
        # (migration-generation-time) defaults.
        # Adding field 'Person.last_touched'
        db.add_column('profile_person', 'last_touched', models.DateTimeField(null=True))
        # Adding field 'Person.last_polled'
        db.add_column('profile_person', 'last_polled', models.DateTimeField(null=True, blank=True))
        # Adding field 'Person.time_record_was_created'
        db.add_column('profile_person', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 7, 10, 15, 21, 2, 936515)))
        # Changing field 'Link_Person_Tag.time_record_was_created'
        db.alter_column('profile_link_person_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 7, 10, 15, 21, 3, 309384)))
        # Changing field 'Link_ProjectExp_Tag.time_record_was_created'
        db.alter_column('profile_link_projectexp_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 7, 10, 15, 21, 0, 929969)))
        # Changing field 'Link_Project_Tag.time_record_was_created'
        db.alter_column('profile_link_project_tag', 'time_record_was_created', models.DateTimeField(default=datetime.datetime(2009, 7, 10, 15, 21, 1, 457391)))
    # Autogenerated frozen ORM snapshot used by South at migration time; do
    # not edit by hand.
    models = {
        'profile.person': {
            'gotten_name_from_ohloh': ('models.BooleanField', [], {'default': 'False'}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'interested_in_working_on': ('models.CharField', [], {'default': "''", 'max_length': '1024'}),
            'ohloh_grab_completed': ('models.BooleanField', [], {'default': 'False'}),
            'poll_on_next_web_view': ('models.BooleanField', [], {'default': 'True'}),
            'user': ('models.ForeignKey', ["orm['auth.User']"], {'unique': 'True'})
        },
        'profile.link_person_tag': {
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'person': ('models.ForeignKey', ["orm['profile.Person']"], {}),
            'source': ('models.CharField', [], {'max_length': '200'}),
            'tag': ('models.ForeignKey', ["orm['profile.Tag']"], {}),
            'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 10, 15, 24, 44, 730680)'})
        },
        'profile.tag': {
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'tag_type': ('models.ForeignKey', ["orm['profile.TagType']"], {}),
            'text': ('models.CharField', [], {'max_length': '50'})
        },
        'profile.link_projectexp_tag': {
            'Meta': {'unique_together': "[('tag','project_exp','source'),]"},
            'favorite': ('models.BooleanField', [], {'default': 'False'}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'project_exp': ('models.ForeignKey', ["orm['profile.ProjectExp']"], {}),
            'source': ('models.CharField', [], {'max_length': '200'}),
            'tag': ('models.ForeignKey', ["orm['profile.Tag']"], {}),
            'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 10, 15, 24, 43, 964291)'})
        },
        'profile.sourceforgeperson': {
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'username': ('models.CharField', [], {'max_length': '200'})
        },
        'profile.link_project_tag': {
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'project': ('models.ForeignKey', ["orm['search.Project']"], {}),
            'source': ('models.CharField', [], {'max_length': '200'}),
            'tag': ('models.ForeignKey', ["orm['profile.Tag']"], {}),
            'time_record_was_created': ('models.DateTimeField', [], {'default': 'datetime.datetime(2009, 7, 10, 15, 24, 45, 653482)'})
        },
        'profile.sourceforgeproject': {
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'unixname': ('models.CharField', [], {'max_length': '200'})
        },
        'search.project': {
            '_stub': True,
            'id': ('models.AutoField', [], {'primary_key': 'True'})
        },
        'auth.user': {
            '_stub': True,
            'id': ('models.AutoField', [], {'primary_key': 'True'})
        },
        'profile.link_sf_proj_dude_fm': {
            'Meta': {'unique_together': "[('person','project'),]"},
            'date_collected': ('models.DateTimeField', [], {}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'is_admin': ('models.BooleanField', [], {'default': 'False'}),
            'person': ('models.ForeignKey', ["orm['profile.SourceForgePerson']"], {}),
            'position': ('models.CharField', [], {'max_length': '200'}),
            'project': ('models.ForeignKey', ["orm['profile.SourceForgeProject']"], {})
        },
        'profile.tagtype': {
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'name': ('models.CharField', [], {'max_length': '100'}),
            'prefix': ('models.CharField', [], {'max_length': '20'})
        },
        'profile.projectexp': {
            'description': ('models.TextField', [], {}),
            'favorite': ('models.BooleanField', [], {'default': '0'}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'last_touched': ('models.DateTimeField', [], {'null': 'True'}),
            'man_months': ('models.PositiveIntegerField', [], {'null': 'True'}),
            'person': ('models.ForeignKey', ["orm['profile.Person']"], {}),
            'person_role': ('models.CharField', [], {'max_length': '200'}),
            'primary_language': ('models.CharField', [], {'max_length': '200', 'null': 'True'}),
            'project': ('models.ForeignKey', ["orm['search.Project']"], {}),
            'source': ('models.CharField', [], {'max_length': '100', 'null': 'True'}),
            'time_record_was_created': ('models.DateTimeField', [], {'null': 'True'}),
            'url': ('models.URLField', [], {'max_length': '200', 'null': 'True'})
        }
    }
    complete_apps = ['profile']
|
lily-seabreeze/sappho | refs/heads/master | demo/config.py | 3 | """Configuration constants for Sappho demo."""
import pkg_resources
# Constants/game config
# The path to the file that's being used to represent the player
ANIMATED_SPRITE_PATH = pkg_resources.resource_filename("test_scene", "test.gif")
# The path to the file being used as the tilesheet
TILESHEET_PATH = pkg_resources.resource_filename("test_scene", "tilesheet.png")
# The Tiled Map Editor file which the player explores
TMX_PATH = pkg_resources.resource_filename("test_scene", "test.tmx")
MAX_SPEED = 2
RESOLUTION = [700, 500]
"""tuple(int, int): This demo will be ran in a window of the
dimensions (x, y) pixels (width, height).
"""
VIEWPORT = (80, 80)
"""tuple(int, int): ..."""
WINDOW_TITLE = "Sappho Engine Test"
"""str: The title of the window running the demo.
The text which appears in the titlebar of the window.
"""
ANIMATED_SPRITE_Z_INDEX = 0
"""int: The layer the player's sprite will be rendered on.
0 is farthest back, and higher numbers increase toward the
foreground. The number of layers will correspond with the
map that's being loaded.
"""
START_POSITION = (10, 10)
"""tuple(int, int): The absolute pixel coordinate
of the player's starting position on the map.
"""
|
renzoolivares/android_kernel_htc_monarudo | refs/heads/master | scripts/tracing/draw_functrace.py | 14679 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulted trace is processed into a tree to produce a more human
view of the call stack by drawing textual but hierarchical tree of
calls. Only the functions's names and the the call time are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some times but not too much, the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
    """Tree representation of a traced call stack.

    Functions with no traced parent (interrupts, syscalls, kernel
    threads, ...) hang off a single virtual node stored in the class
    attribute ``ROOT``.
    """

    ROOT = None

    def __init__(self, func, time=None, parent=None):
        self._func = func
        self._time = time
        # A missing parent means this node attaches to the virtual root.
        self._parent = parent if parent is not None else CallTree.ROOT
        self._children = []

    def calls(self, func, calltime):
        """Record that this node called ``func`` at ``calltime``.

        Returns the newly created child node.
        """
        node = CallTree(func, calltime, self)
        self._children.append(node)
        return node

    def getParent(self, func):
        """Find the nearest ancestor named ``func``.

        Walks up from this node; if no ancestor matches, a new child named
        ``func`` is created under ROOT and returned instead.
        """
        node = self
        while node != CallTree.ROOT and node._func != func:
            node = node._parent
        if node == CallTree.ROOT:
            return CallTree.ROOT.calls(func, None)
        return node

    def __repr__(self):
        return self.__toString("", True)

    def __toString(self, branch, lastChild):
        # Render this node, then recurse into children with an extended
        # branch prefix; the last child drops the parent's vertical bar.
        if self._time is None:
            text = "%s----%s\n" % (branch, self._func)
        else:
            text = "%s----%s (%s)\n" % (branch, self._func, self._time)
        if lastChild:
            branch = branch[:-1] + " "
        last_index = len(self._children) - 1
        for index, child in enumerate(self._children):
            text += child.__toString(branch + " |", index == last_index)
        return text
class BrokenLineException(Exception):
    """Raised for a line that does not match the trace format -- typically
    the final, truncated line left behind when the pipe is broken -- so the
    caller can stop processing and ignore it."""


class CommentLineException(Exception):
    """Raised for comment lines (such as the header at the beginning of the
    trace file) so they can simply be skipped."""
def parseLine(line):
    """Parse one line of a function-tracer dump.

    Returns a ``(calltime, callee, caller)`` tuple of strings. Lines starting
    with "#" raise CommentLineException; lines that do not match the trace
    format raise BrokenLineException.
    """
    stripped = line.strip()
    if stripped.startswith("#"):
        raise CommentLineException
    fields = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", stripped)
    if fields is None:
        raise BrokenLineException
    return (fields.group(1), fields.group(2), fields.group(3))
def main():
    """Read a raw function trace on stdin and print the rendered call tree."""
    CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
    tree = CallTree.ROOT
    for line in sys.stdin:
        try:
            calltime, callee, caller = parseLine(line)
        except BrokenLineException:
            # A truncated final line (broken pipe) ends the useful input.
            break
        except CommentLineException:
            # Skip header/comment lines.
            continue
        # Attach the call under the nearest occurrence of the caller.
        tree = tree.getParent(caller)
        tree = tree.calls(callee, calltime)
    # Fix: the original "print CallTree.ROOT" is Python-2-only syntax; the
    # parenthesized form prints identically under Python 2 (single argument)
    # and is valid Python 3.
    print(CallTree.ROOT)


if __name__ == "__main__":
    main()
|
Ballz0fSteel/Umeko | refs/heads/master | lib/youtube_dl/extractor/aenetworks.py | 23 | from __future__ import unicode_literals
import re
from .theplatform import ThePlatformIE
from ..utils import (
smuggle_url,
update_url_query,
unescapeHTML,
extract_attributes,
get_element_by_attribute,
)
from ..compat import (
compat_urlparse,
)
class AENetworksBaseIE(ThePlatformIE):
    # ThePlatform URL-signing credentials shared by all A+E Networks
    # extractors in this module.
    _THEPLATFORM_KEY = 'crazyjava'
    _THEPLATFORM_SECRET = 's3cr3t'
class AENetworksIE(AENetworksBaseIE):
    IE_NAME = 'aenetworks'
    IE_DESC = 'A+E Networks: A&E, Lifetime, History.com, FYI Network'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:www\.)?
                        (?P<domain>
                            (?:history|aetv|mylifetime|lifetimemovieclub)\.com|
                            fyi\.tv
                        )/
                        (?:
                            shows/(?P<show_path>[^/]+(?:/[^/]+){0,2})|
                            movies/(?P<movie_display_id>[^/]+)(?:/full-movie)?|
                            specials/(?P<special_display_id>[^/]+)/full-special
                        )
                    '''
    _TESTS = [{
        'url': 'http://www.history.com/shows/mountain-men/season-1/episode-1',
        'md5': 'a97a65f7e823ae10e9244bc5433d5fe6',
        'info_dict': {
            'id': '22253814',
            'ext': 'mp4',
            'title': 'Winter Is Coming',
            'description': 'md5:641f424b7a19d8e24f26dea22cf59d74',
            'timestamp': 1338306241,
            'upload_date': '20120529',
            'uploader': 'AENE-NEW',
        },
        'add_ie': ['ThePlatform'],
    }, {
        'url': 'http://www.history.com/shows/ancient-aliens/season-1',
        'info_dict': {
            'id': '71889446852',
        },
        'playlist_mincount': 5,
    }, {
        'url': 'http://www.mylifetime.com/shows/atlanta-plastic',
        'info_dict': {
            'id': 'SERIES4317',
            'title': 'Atlanta Plastic',
        },
        'playlist_mincount': 2,
    }, {
        'url': 'http://www.aetv.com/shows/duck-dynasty/season-9/episode-1',
        'only_matching': True
    }, {
        'url': 'http://www.fyi.tv/shows/tiny-house-nation/season-1/episode-8',
        'only_matching': True
    }, {
        'url': 'http://www.mylifetime.com/shows/project-runway-junior/season-1/episode-6',
        'only_matching': True
    }, {
        'url': 'http://www.mylifetime.com/movies/center-stage-on-pointe/full-movie',
        'only_matching': True
    }, {
        'url': 'https://www.lifetimemovieclub.com/movies/a-killer-among-us',
        'only_matching': True
    }, {
        'url': 'http://www.history.com/specials/sniper-into-the-kill-zone/full-special',
        'only_matching': True
    }]
    # Maps a site domain to the requestor id used for TV-Everywhere
    # (adobe-pass) authentication.
    _DOMAIN_TO_REQUESTOR_ID = {
        'history.com': 'HISTORY',
        'aetv.com': 'AETV',
        'mylifetime.com': 'LIFETIME',
        'lifetimemovieclub.com': 'LIFETIMEMOVIECLUB',
        'fyi.tv': 'FYI',
    }
    def _real_extract(self, url):
        domain, show_path, movie_display_id, special_display_id = re.match(self._VALID_URL, url).groups()
        display_id = show_path or movie_display_id or special_display_id
        webpage = self._download_webpage(url, display_id)
        # A show URL may point at a series root, a single season, or one
        # episode, distinguished by the number of path components.
        if show_path:
            url_parts = show_path.split('/')
            url_parts_len = len(url_parts)
            if url_parts_len == 1:
                # Series root page: one playlist entry per season link.
                entries = []
                for season_url_path in re.findall(r'(?s)<li[^>]+data-href="(/shows/%s/season-\d+)"' % url_parts[0], webpage):
                    entries.append(self.url_result(
                        compat_urlparse.urljoin(url, season_url_path), 'AENetworks'))
                if entries:
                    return self.playlist_result(
                        entries, self._html_search_meta('aetn:SeriesId', webpage),
                        self._html_search_meta('aetn:SeriesTitle', webpage))
                else:
                    # single season
                    url_parts_len = 2
            if url_parts_len == 2:
                # Season page: one playlist entry per episode/program item.
                entries = []
                for episode_item in re.findall(r'(?s)<[^>]+class="[^"]*(?:episode|program)-item[^"]*"[^>]*>', webpage):
                    episode_attributes = extract_attributes(episode_item)
                    episode_url = compat_urlparse.urljoin(
                        url, episode_attributes['data-canonical'])
                    entries.append(self.url_result(
                        episode_url, 'AENetworks',
                        episode_attributes.get('data-videoid') or episode_attributes.get('data-video-id')))
                return self.playlist_result(
                    entries, self._html_search_meta('aetn:SeasonId', webpage))
        query = {
            'mbr': 'true',
            'assetTypes': 'high_video_s3'
        }
        video_id = self._html_search_meta('aetn:VideoID', webpage)
        media_url = self._search_regex(
            [r"media_url\s*=\s*'(?P<url>[^']+)'",
             r'data-media-url=(?P<url>(?:https?:)?//[^\s>]+)',
             r'data-media-url=(["\'])(?P<url>(?:(?!\1).)+?)\1'],
            webpage, 'video url', group='url')
        theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
            r'https?://link.theplatform.com/s/([^?]+)', media_url, 'theplatform_path'), video_id)
        info = self._parse_theplatform_metadata(theplatform_metadata)
        if theplatform_metadata.get('AETN$isBehindWall'):
            # TV-Everywhere protected video: obtain an adobe-pass auth token
            # for the owning network before requesting the media.
            requestor_id = self._DOMAIN_TO_REQUESTOR_ID[domain]
            resource = self._get_mvpd_resource(
                requestor_id, theplatform_metadata['title'],
                theplatform_metadata.get('AETN$PPL_pplProgramId') or theplatform_metadata.get('AETN$PPL_pplProgramId_OLD'),
                theplatform_metadata['ratings'][0]['rating'])
            query['auth'] = self._extract_mvpd_auth(
                url, video_id, requestor_id, resource)
        info.update(self._search_json_ld(webpage, video_id, fatal=False))
        # Sign the media URL with the shared ThePlatform key/secret, then
        # fetch the SMIL manifest to enumerate formats/subtitles.
        media_url = update_url_query(media_url, query)
        media_url = self._sign_url(media_url, self._THEPLATFORM_KEY, self._THEPLATFORM_SECRET)
        formats, subtitles = self._extract_theplatform_smil(media_url, video_id)
        self._sort_formats(formats)
        info.update({
            'id': video_id,
            'formats': formats,
            'subtitles': subtitles,
        })
        return info
class HistoryTopicIE(AENetworksBaseIE):
    """Extract videos (or playlists of videos) from History.com topic pages."""

    IE_NAME = 'history:topic'
    IE_DESC = 'History.com Topic'
    _VALID_URL = r'https?://(?:www\.)?history\.com/topics/(?:[^/]+/)?(?P<topic_id>[^/]+)(?:/[^/]+(?:/(?P<video_display_id>[^/?#]+))?)?'
    _TESTS = [{
        'url': 'http://www.history.com/topics/valentines-day/history-of-valentines-day/videos/bet-you-didnt-know-valentines-day?m=528e394da93ae&s=undefined&f=1&free=false',
        'info_dict': {
            'id': '40700995724',
            'ext': 'mp4',
            'title': "Bet You Didn't Know: Valentine's Day",
            'description': 'md5:7b57ea4829b391995b405fa60bd7b5f7',
            'timestamp': 1375819729,
            'upload_date': '20130806',
            'uploader': 'AENE-NEW',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'add_ie': ['ThePlatform'],
    }, {
        'url': 'http://www.history.com/topics/world-war-i/world-war-i-history/videos',
        'info_dict':
        {
            'id': 'world-war-i-history',
            'title': 'World War I History',
        },
        'playlist_mincount': 23,
    }, {
        'url': 'http://www.history.com/topics/world-war-i-history/videos',
        'only_matching': True,
    }, {
        'url': 'http://www.history.com/topics/world-war-i/world-war-i-history',
        'only_matching': True,
    }, {
        'url': 'http://www.history.com/topics/world-war-i/world-war-i-history/speeches',
        'only_matching': True,
    }]

    def theplatform_url_result(self, theplatform_url, video_id, query):
        # Delegate playback to the ThePlatform extractor via a transparent
        # URL result; the smuggled 'sig' payload lets that extractor sign
        # the media URL with this extractor's key/secret pair.
        return {
            '_type': 'url_transparent',
            'id': video_id,
            'url': smuggle_url(
                update_url_query(theplatform_url, query),
                {
                    'sig': {
                        'key': self._THEPLATFORM_KEY,
                        'secret': self._THEPLATFORM_SECRET,
                    },
                    'force_smil_url': True
                }),
            'ie_key': 'ThePlatform',
        }

    def _real_extract(self, url):
        topic_id, video_display_id = re.match(self._VALID_URL, url).groups()
        if video_display_id:
            # Single-video page: pull the release URL and numeric video id
            # out of the inline _videoPlayer.play(...) invocation.
            webpage = self._download_webpage(url, video_display_id)
            release_url, video_id = re.search(r"_videoPlayer.play\('([^']+)'\s*,\s*'[^']+'\s*,\s*'(\d+)'\)", webpage).groups()
            release_url = unescapeHTML(release_url)

            return self.theplatform_url_result(
                release_url, video_id, {
                    'mbr': 'true',
                    'switch': 'hls',
                    'assetTypes': 'high_video_ak',
                })
        else:
            # Topic listing page: collect every anchor carrying a
            # data-release-url attribute into a playlist.
            webpage = self._download_webpage(url, topic_id)
            entries = []
            for episode_item in re.findall(r'<a.+?data-release-url="[^"]+"[^>]*>', webpage):
                video_attributes = extract_attributes(episode_item)
                entries.append(self.theplatform_url_result(
                    video_attributes['data-release-url'], video_attributes['data-id'], {
                        'mbr': 'true',
                        'switch': 'hls',
                        'assetTypes': 'high_video_ak',
                    }))
            return self.playlist_result(entries, topic_id, get_element_by_attribute('class', 'show-title', webpage))
|
kiran/bart-sign | refs/heads/master | venv/lib/python2.7/site-packages/numpy/lib/type_check.py | 72 | """Automatically adapted for numpy Sep 19, 2005 by convertcode.py
"""
from __future__ import division, absolute_import, print_function
__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
'isreal', 'nan_to_num', 'real', 'real_if_close',
'typename', 'asfarray', 'mintypecode', 'asscalar',
'common_type']
import numpy.core.numeric as _nx
from numpy.core.numeric import asarray, asanyarray, array, isnan, \
obj2sctype, zeros
from .ufunclike import isneginf, isposinf
# Dtype characters ordered from largest element size to smallest.
_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'


def mintypecode(typechars, typeset='GDFgdf', default='d'):
    """Return the character for the minimum-size type to which the given
    types can all be safely cast.

    Parameters
    ----------
    typechars : list of str or array_like
        Dtype characters, or array_likes whose dtype characters are used.
    typeset : str or list of str, optional
        The set of characters the returned character is chosen from.
    default : str, optional
        Returned when no character of `typechars` appears in `typeset`.

    Returns
    -------
    typechar : str
        The character representing the minimum-size type that was found.

    See Also
    --------
    dtype, sctype2char, maximum_sctype
    """
    codes = [(isinstance(t, str) and t) or asarray(t).dtype.char
             for t in typechars]
    candidates = [c for c in codes if c in typeset]
    if not candidates:
        return default
    # csingle ('F') cannot hold a double ('d'); promote to cdouble ('D').
    if 'F' in candidates and 'd' in candidates:
        return 'D'
    # Smallest type is the one appearing latest in the elsize ordering... the
    # ordering string runs large-to-small, so the *earliest* index wins here,
    # matching the original sort-ascending-take-first behaviour.
    return min(candidates, key=_typecodes_by_elsize.index)
def asfarray(a, dtype=_nx.float_):
    """Return an array converted to a float type.

    Parameters
    ----------
    a : array_like
        The input array.
    dtype : str or dtype object, optional
        Float type code to coerce input array `a`.  If `dtype` is one of
        the 'int' (or any other non-inexact) dtypes, it is replaced with
        float64.

    Returns
    -------
    out : ndarray
        The input `a` as a float ndarray.
    """
    requested = _nx.obj2sctype(dtype)
    if not issubclass(requested, _nx.inexact):
        # Non-float requests (ints, bools, strings) fall back to float64.
        requested = _nx.float_
    return asarray(a, dtype=requested)
def real(val):
    """Return the real part of the elements of the array.

    Parameters
    ----------
    val : array_like
        Input array.

    Returns
    -------
    out : ndarray
        The real component of each element.  If `val` is real, the type of
        `val` is used for the output; if `val` has complex elements, the
        returned type is float.

    See Also
    --------
    real_if_close, imag, angle
    """
    arr = asanyarray(val)
    return arr.real
def imag(val):
    """Return the imaginary part of the elements of the array.

    Parameters
    ----------
    val : array_like
        Input array.

    Returns
    -------
    out : ndarray
        The imaginary component of each element.  If `val` is real, the
        result is zero-valued with the type of `val`; if `val` has complex
        elements, the returned type is float.

    See Also
    --------
    real, angle, real_if_close
    """
    arr = asanyarray(val)
    return arr.imag
def iscomplex(x):
    """Return a bool array, True where the element has a nonzero imaginary
    part.

    What is tested is whether the input has a non-zero imaginary part, not
    whether the input *type* is complex.

    Parameters
    ----------
    x : array_like
        Input array.

    Returns
    -------
    out : ndarray of bools
        Output array.

    See Also
    --------
    isreal
    iscomplexobj : Return True if x is a complex type or an array of complex
                   numbers.
    """
    arr = asanyarray(x)
    if not issubclass(arr.dtype.type, _nx.complexfloating):
        # Real-typed input can never have an imaginary component; the
        # unary + converts a 0-d result to an array scalar if needed.
        return +zeros(arr.shape, bool)
    return arr.imag != 0
def isreal(x):
    """Return a bool array, True where the element is real.

    An element of complex type with zero imaginary part is considered real.

    Parameters
    ----------
    x : array_like
        Input array.

    Returns
    -------
    out : ndarray, bool
        Boolean array of same shape as `x`.

    See Also
    --------
    iscomplex
    isrealobj : Return True if x is not a complex type.
    """
    # An element is real exactly when its imaginary component is zero.
    return asanyarray(x).imag == 0
def iscomplexobj(x):
    """Check for a complex type or an array of complex numbers.

    Only the type of the input is inspected, never the values: an array of
    complex dtype whose imaginary parts are all zero still counts as
    complex.

    Parameters
    ----------
    x : any
        The input can be of any type and shape.

    Returns
    -------
    iscomplexobj : bool
        True if `x` is of a complex type or has at least one complex
        element.

    See Also
    --------
    isrealobj, iscomplex
    """
    scalar_type = asarray(x).dtype.type
    return issubclass(scalar_type, _nx.complexfloating)
def isrealobj(x):
    """Return True if x is not of a complex type.

    Only the type of the input is inspected, never the values: an array of
    complex dtype evaluates to False even when every imaginary part is
    zero.

    Parameters
    ----------
    x : any
        The input can be of any type and shape.

    Returns
    -------
    y : bool
        False if `x` is of a complex type, True otherwise.

    See Also
    --------
    iscomplexobj, isreal
    """
    scalar_type = asarray(x).dtype.type
    return not issubclass(scalar_type, _nx.complexfloating)
#-----------------------------------------------------------------------------
def _getmaxmin(t):
    """Return the (max, min) finite values representable by float type *t*."""
    # Local import avoids paying the getlimits import cost at module load.
    from numpy.core import getlimits
    info = getlimits.finfo(t)
    return info.max, info.min
def nan_to_num(x):
    """Replace nan with zero and inf with finite numbers.

    Returns an array or scalar in which NaN is replaced by zero, positive
    infinity by the largest finite value of the dtype, and negative
    infinity by the smallest (most negative) finite value of the dtype.

    Parameters
    ----------
    x : array_like
        Input data.

    Returns
    -------
    out : ndarray
        Array with the same shape as `x`.  If `x` is not of inexact
        (floating) type, a copy of `x` is returned unchanged.

    See Also
    --------
    isinf, isneginf, isposinf, isnan, isfinite
    """
    out = _nx.array(x, subok=True)
    kind = out.dtype.type
    if not issubclass(kind, _nx.inexact):
        # Integer/bool data cannot hold NaN or inf; nothing to replace.
        return out

    was_scalar = out.ndim == 0
    if was_scalar:
        # Promote 0-d to 1-d so the in-place copyto calls work uniformly.
        out = out[None]

    if issubclass(kind, _nx.complexfloating):
        components = (out.real, out.imag)
    else:
        components = (out,)

    # Largest/smallest finite values of the real component type.
    from numpy.core import getlimits
    finfo = getlimits.finfo(out.real.dtype)
    maxf, minf = finfo.max, finfo.min

    for comp in components:
        _nx.copyto(comp, 0.0, where=isnan(comp))
        _nx.copyto(comp, maxf, where=isposinf(comp))
        _nx.copyto(comp, minf, where=isneginf(comp))

    return out[0] if was_scalar else out
#-----------------------------------------------------------------------------
def real_if_close(a, tol=100):
    """If complex input returns a real array if complex parts are close to
    zero.

    Parameters
    ----------
    a : array_like
        Input array.
    tol : float
        Tolerance for the imaginary parts.  Values greater than 1 are
        interpreted as multiples of the machine epsilon of `a`'s dtype;
        values <= 1 are used directly as an absolute tolerance.

    Returns
    -------
    out : ndarray
        ``a.real`` when every imaginary component is within tolerance,
        otherwise `a` unchanged.

    See Also
    --------
    real, imag, angle
    """
    arr = asanyarray(a)
    if not issubclass(arr.dtype.type, _nx.complexfloating):
        # Already real: nothing to strip.
        return arr
    if tol > 1:
        # Interpret tol as a multiple of machine epsilon for arr's dtype.
        from numpy.core import getlimits
        tol = getlimits.finfo(arr.dtype.type).eps * tol
    if _nx.allclose(arr.imag, 0, atol=tol):
        return arr.real
    return arr
def asscalar(a):
    """Convert an array of size 1 to its scalar equivalent.

    Parameters
    ----------
    a : ndarray
        Input array of size 1.

    Returns
    -------
    out : scalar
        Scalar representation of `a`; the output type is whatever the
        input's `item` method returns.
    """
    # ndarray.item() already performs the size check and the conversion.
    return a.item()
#-----------------------------------------------------------------------------
# Human-readable descriptions keyed by single-character dtype codes.
_namefromtype = {
    'S1': 'character',
    '?': 'bool',
    'b': 'signed char',
    'B': 'unsigned char',
    'h': 'short',
    'H': 'unsigned short',
    'i': 'integer',
    'I': 'unsigned integer',
    'l': 'long integer',
    'L': 'unsigned long integer',
    'q': 'long long integer',
    'Q': 'unsigned long long integer',
    'f': 'single precision',
    'd': 'double precision',
    'g': 'long precision',
    'F': 'complex single precision',
    'D': 'complex double precision',
    'G': 'complex long double precision',
    'S': 'string',
    'U': 'unicode',
    'V': 'void',
    'O': 'object',
}


def typename(char):
    """Return a description for the given data type code.

    Parameters
    ----------
    char : str
        Data type code (a single dtype character such as ``'d'``).

    Returns
    -------
    out : str
        Description of the input data type code.

    See Also
    --------
    dtype, typecodes

    Examples
    --------
    >>> np.typename('d')
    'double precision'
    >>> np.typename('?')
    'bool'
    """
    return _namefromtype[char]
# -----------------------------------------------------------------------------
# Determine the "minimum common type" for a group of arrays.

# array_type[is_complex][precision] -> scalar type.
array_type = [[_nx.half, _nx.single, _nx.double, _nx.longdouble],
              [None, _nx.csingle, _nx.cdouble, _nx.clongdouble]]

# Precision rank of each supported inexact scalar type.
array_precision = {_nx.half: 0,
                   _nx.single: 1,
                   _nx.double: 2,
                   _nx.longdouble: 3,
                   _nx.csingle: 1,
                   _nx.cdouble: 2,
                   _nx.clongdouble: 3}


def common_type(*arrays):
    """Return a scalar type which is common to the input arrays.

    The return type is always an inexact (i.e. floating point) scalar
    type, even if all the arrays are integer arrays.  If one of the
    inputs is an integer array, the minimum precision type returned is a
    64-bit floating point dtype.  All input arrays can be safely cast to
    the returned dtype without loss of information.

    Parameters
    ----------
    array1, array2, ... : ndarrays
        Input arrays.

    Returns
    -------
    out : data type code
        Data type code.

    Raises
    ------
    TypeError
        If an array has a non-numeric (non-inexact, non-integer) dtype.

    See Also
    --------
    dtype, mintypecode
    """
    any_complex = False
    max_precision = 0
    for arr in arrays:
        scalar_type = arr.dtype.type
        if iscomplexobj(arr):
            any_complex = True
        if issubclass(scalar_type, _nx.integer):
            # Integers require at least double precision to be represented.
            prec = array_precision[_nx.double]
        else:
            prec = array_precision.get(scalar_type, None)
            if prec is None:
                raise TypeError("can't get common type for non-numeric array")
        max_precision = max(max_precision, prec)
    return array_type[1 if any_complex else 0][max_precision]
|
pieterdp/helptux | refs/heads/master | db_repository/manage.py | 263 | #!/usr/bin/env python
# Command-line entry point for sqlalchemy-migrate's repository management
# commands (version control of database schema migrations).
from migrate.versioning.shell import main

if __name__ == '__main__':
    main()
|
alfredtofu/thrift | refs/heads/master | lib/py/src/server/TServer.py | 50 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import Queue
import os
import sys
import threading
import traceback
import logging
logger = logging.getLogger(__name__)
from thrift.Thrift import TProcessor
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
class TServer:
    """Base interface for a server, which must have a serve() method.

    Three constructors for all servers:
    1) (processor, serverTransport)
    2) (processor, serverTransport, transportFactory, protocolFactory)
    3) (processor, serverTransport,
       inputTransportFactory, outputTransportFactory,
       inputProtocolFactory, outputProtocolFactory)
    """

    def __init__(self, *args):
        if len(args) == 2:
            # Processor + transport only: default to plain transports and
            # the binary protocol on both directions.
            self.__initArgs__(args[0], args[1],
                              TTransport.TTransportFactoryBase(),
                              TTransport.TTransportFactoryBase(),
                              TBinaryProtocol.TBinaryProtocolFactory(),
                              TBinaryProtocol.TBinaryProtocolFactory())
        elif len(args) == 4:
            # Shared transport/protocol factories for input and output.
            self.__initArgs__(args[0], args[1], args[2], args[2],
                              args[3], args[3])
        elif len(args) == 6:
            # Fully explicit input/output factories.
            self.__initArgs__(*args)

    def __initArgs__(self, processor, serverTransport,
                     inputTransportFactory, outputTransportFactory,
                     inputProtocolFactory, outputProtocolFactory):
        self.processor = processor
        self.serverTransport = serverTransport
        self.inputTransportFactory = inputTransportFactory
        self.outputTransportFactory = outputTransportFactory
        self.inputProtocolFactory = inputProtocolFactory
        self.outputProtocolFactory = outputProtocolFactory

    def serve(self):
        # Subclasses implement the actual accept/dispatch loop.
        pass
class TSimpleServer(TServer):
    """Simple single-threaded server that just pumps around one transport."""

    def __init__(self, *args):
        TServer.__init__(self, *args)

    def serve(self):
        """Accept and service one client at a time, forever."""
        self.serverTransport.listen()
        while True:
            client = self.serverTransport.accept()
            if not client:
                continue
            # Wrap the raw connection in the configured transport/protocol
            # stacks for each direction.
            itrans = self.inputTransportFactory.getTransport(client)
            otrans = self.outputTransportFactory.getTransport(client)
            iprot = self.inputProtocolFactory.getProtocol(itrans)
            oprot = self.outputProtocolFactory.getProtocol(otrans)
            try:
                # Service requests until the peer disconnects.
                while True:
                    self.processor.process(iprot, oprot)
            except TTransport.TTransportException as tx:
                # A transport exception signals normal end of a connection.
                pass
            except Exception as x:
                logger.exception(x)

            itrans.close()
            otrans.close()
class TThreadedServer(TServer):
    """Threaded server that spawns a new thread per each connection."""

    def __init__(self, *args, **kwargs):
        TServer.__init__(self, *args)
        # With daemon=True, connection threads do not keep the process alive.
        self.daemon = kwargs.get("daemon", False)

    def serve(self):
        """Accept connections forever, handling each in its own thread."""
        self.serverTransport.listen()
        while True:
            try:
                client = self.serverTransport.accept()
                if not client:
                    continue
                t = threading.Thread(target=self.handle, args=(client,))
                t.setDaemon(self.daemon)
                t.start()
            except KeyboardInterrupt:
                # Let Ctrl-C terminate the accept loop.
                raise
            except Exception as x:
                logger.exception(x)

    def handle(self, client):
        """Service a single client connection until it disconnects."""
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(otrans)
        try:
            while True:
                self.processor.process(iprot, oprot)
        except TTransport.TTransportException as tx:
            # A transport exception signals normal end of a connection.
            pass
        except Exception as x:
            logger.exception(x)

        itrans.close()
        otrans.close()
class TThreadPoolServer(TServer):
    """Server with a fixed size pool of threads which service requests."""

    def __init__(self, *args, **kwargs):
        TServer.__init__(self, *args)
        # Accepted connections waiting for a free worker thread.
        self.clients = Queue.Queue()
        # Default pool size; override with setNumThreads() before serve().
        self.threads = 10
        self.daemon = kwargs.get("daemon", False)

    def setNumThreads(self, num):
        """Set the number of worker threads that should be created"""
        self.threads = num

    def serveThread(self):
        """Loop around getting clients from the shared queue and process them."""
        while True:
            try:
                client = self.clients.get()
                self.serveClient(client)
            except Exception as x:
                logger.exception(x)

    def serveClient(self, client):
        """Process input/output from a client for as long as possible"""
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(otrans)
        try:
            while True:
                self.processor.process(iprot, oprot)
        except TTransport.TTransportException as tx:
            # A transport exception signals normal end of a connection.
            pass
        except Exception as x:
            logger.exception(x)

        itrans.close()
        otrans.close()

    def serve(self):
        """Start a fixed number of worker threads and put client into a queue"""
        for i in range(self.threads):
            try:
                t = threading.Thread(target=self.serveThread)
                t.setDaemon(self.daemon)
                t.start()
            except Exception as x:
                logger.exception(x)

        # Pump the socket for clients
        self.serverTransport.listen()
        while True:
            try:
                client = self.serverTransport.accept()
                if not client:
                    continue
                self.clients.put(client)
            except Exception as x:
                logger.exception(x)
class TForkingServer(TServer):
    """A Thrift server that forks a new process for each request

    This is more scalable than the threaded server as it does not cause
    GIL contention.

    Note that this has different semantics from the threading server.
    Specifically, updates to shared variables will no longer be shared.
    It will also not work on windows.

    This code is heavily inspired by SocketServer.ForkingMixIn in the
    Python stdlib.
    """

    def __init__(self, *args):
        TServer.__init__(self, *args)
        # PIDs of forked children that have not yet been reaped.
        self.children = []

    def serve(self):
        def try_close(file):
            # Best-effort close: a failed close must not kill the server.
            try:
                file.close()
            except IOError as e:
                logger.warning(e, exc_info=True)

        self.serverTransport.listen()
        while True:
            client = self.serverTransport.accept()
            if not client:
                continue
            try:
                pid = os.fork()

                if pid:  # parent
                    # add before collect, otherwise you race w/ waitpid
                    self.children.append(pid)
                    self.collect_children()

                    # Parent must close socket or the connection may not get
                    # closed promptly
                    itrans = self.inputTransportFactory.getTransport(client)
                    otrans = self.outputTransportFactory.getTransport(client)
                    try_close(itrans)
                    try_close(otrans)
                else:
                    # Child process: service this one connection, then exit.
                    itrans = self.inputTransportFactory.getTransport(client)
                    otrans = self.outputTransportFactory.getTransport(client)

                    iprot = self.inputProtocolFactory.getProtocol(itrans)
                    oprot = self.outputProtocolFactory.getProtocol(otrans)

                    ecode = 0
                    try:
                        try:
                            while True:
                                self.processor.process(iprot, oprot)
                        except TTransport.TTransportException as tx:
                            # Normal end of connection.
                            pass
                        except Exception as e:
                            logger.exception(e)
                            ecode = 1
                    finally:
                        try_close(itrans)
                        try_close(otrans)

                    # _exit skips cleanup handlers inherited from the parent,
                    # which must not run in the child.
                    os._exit(ecode)

            except TTransport.TTransportException as tx:
                pass
            except Exception as x:
                logger.exception(x)

    def collect_children(self):
        """Reap any finished child processes without blocking."""
        while self.children:
            try:
                pid, status = os.waitpid(0, os.WNOHANG)
            except os.error:
                pid = None

            if pid:
                self.children.remove(pid)
            else:
                break
|
andymckay/zamboni | refs/heads/master | mkt/prices/admin.py | 21 | from django.contrib import admin
from mkt.prices.models import Price, PriceCurrency
admin.site.register(Price)
admin.site.register(PriceCurrency)
|
tmerrick1/spack | refs/heads/develop | var/spack/repos/builtin/packages/json-glib/package.py | 5 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class JsonGlib(AutotoolsPackage):
    """JSON-GLib is a library for reading and parsing JSON using GLib and
    GObject data types and API."""

    homepage = "https://developer.gnome.org/json-glib"
    url = "https://ftp.gnome.org/pub/gnome/sources/json-glib/1.2/json-glib-1.2.8.tar.xz"

    # Second argument is the md5 checksum of the release tarball.
    version('1.2.8', 'ff31e7d0594df44318e12facda3d086e')

    depends_on('glib')
|
orbitfp7/nova | refs/heads/master | nova/spice/__init__.py | 72 | #!/usr/bin/env python
# Copyright (c) 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for SPICE Proxying."""
from oslo_config import cfg
# Configuration options controlling SPICE remote-console support.
spice_opts = [
    cfg.StrOpt('html5proxy_base_url',
               default='http://127.0.0.1:6082/spice_auto.html',
               help='Location of spice HTML5 console proxy, in the form '
                    '"http://127.0.0.1:6082/spice_auto.html"'),
    cfg.StrOpt('server_listen',
               default='127.0.0.1',
               help='IP address on which instance spice server should listen'),
    cfg.StrOpt('server_proxyclient_address',
               default='127.0.0.1',
               help='The address to which proxy clients '
                    '(like nova-spicehtml5proxy) should connect'),
    cfg.BoolOpt('enabled',
                default=False,
                help='Enable spice related features'),
    cfg.BoolOpt('agent_enabled',
                default=True,
                help='Enable spice guest agent support'),
    cfg.StrOpt('keymap',
               default='en-us',
               help='Keymap for spice'),
]

CONF = cfg.CONF
# All options above live under the [spice] section of the config file.
CONF.register_opts(spice_opts, group='spice')
|
alquerci/cyg-apt | refs/heads/master | src/cygapt/test/case/exception.py | 2 | # -*- coding: utf-8 -*-
######################## BEGIN LICENSE BLOCK ########################
# This file is part of the cygapt package.
#
# Copyright (C) 2002-2009 Jan Nieuwenhuizen <janneke@gnu.org>
# 2002-2009 Chris Cormie <cjcormie@gmail.com>
# 2012 James Nylen <jnylen@gmail.com>
# 2012-2014 Alexandre Quercia <alquerci@email.com>
#
# For the full copyright and license information, please view the
# LICENSE file that was distributed with this source code.
######################### END LICENSE BLOCK #########################
from __future__ import absolute_import;

import sys;

# On Python 3, alias the stdlib unittest.SkipTest; on Python 2, use the
# project's bundled backport so callers always import SkipTestException
# from this module regardless of interpreter version.
if sys.version_info < (3, ):
    from .py2.exception import SkipTestException as SkipTestException;
else:
    from unittest import SkipTest as SkipTestException;
|
fusion809/fusion809.github.io-old | refs/heads/master | vendor/bundle/ruby/2.2.0/gems/pygments.rb-0.6.3/vendor/simplejson/simplejson/tests/test_float.py | 136 | import math
from unittest import TestCase
import simplejson as json
class TestFloat(TestCase):
    """Round-trip encode/decode tests for float and integer values."""

    def test_floats(self):
        # Each float must survive dumps -> loads exactly, both for str and
        # unicode JSON input.
        for num in [1617161771.7650001, math.pi, math.pi**100,
                    math.pi**-100, 3.1]:
            self.assertEquals(float(json.dumps(num)), num)
            self.assertEquals(json.loads(json.dumps(num)), num)
            self.assertEquals(json.loads(unicode(json.dumps(num))), num)

    def test_ints(self):
        # Ints (including longs and values past 2**32 / 2**64) must encode
        # as their plain decimal representation and round-trip exactly.
        for num in [1, 1L, 1<<32, 1<<64]:
            self.assertEquals(json.dumps(num), str(num))
            self.assertEquals(int(json.dumps(num)), num)
            self.assertEquals(json.loads(json.dumps(num)), num)
            self.assertEquals(json.loads(unicode(json.dumps(num))), num)
|
jmanday/Master | refs/heads/master | TFM/library/opencv-3.2.0/samples/python/houghcircles.py | 5 | #!/usr/bin/python
'''
This example illustrates how to use cv2.HoughCircles() function.
Usage:
houghcircles.py [<image_name>]
image argument defaults to ../data/board.jpg
'''
# Python 2/3 compatibility
from __future__ import print_function
import cv2
import numpy as np
import sys
if __name__ == '__main__':
    print(__doc__)

    try:
        fn = sys.argv[1]
    except IndexError:
        # No image given on the command line; fall back to the sample board.
        fn = "../data/board.jpg"

    src = cv2.imread(fn, 1)
    img = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    # Median blur suppresses noise that would otherwise yield false circles.
    img = cv2.medianBlur(img, 5)
    cimg = src.copy()  # numpy function

    circles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, 1, 10, np.array([]), 100, 30, 1, 30)

    if circles is not None:  # Check if circles have been found and only then iterate over these and add them to the image
        a, b, c = circles.shape
        for i in range(b):
            # Outline each detected circle in red at its detected radius.
            cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), circles[0][i][2], (0, 0, 255), 3, cv2.LINE_AA)
            cv2.circle(cimg, (circles[0][i][0], circles[0][i][1]), 2, (0, 255, 0), 3, cv2.LINE_AA)  # draw center of circle

        cv2.imshow("detected circles", cimg)

    cv2.imshow("source", src)
    cv2.waitKey(0)
|
tallstreet/jaikuenginepatch | refs/heads/master | api/tests.py | 1 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import xmlrpclib
from oauth import oauth
from django.conf import settings
from api import xmlrpc
from common import api
from common import clean
from common import legacy
from common import oauth_util
from common import profile
from common import util
from common.protocol import xmpp
from common.protocol import sms
from common.test import base
from common.test import util as test_util
class ImTestCase(base.ViewTestCase):
    """Base class for IM tests; simulates inbound XMPP messages to the bot."""

    # App Engine inbound-XMPP webhook the tests post to.
    endpoint = '/_ah/xmpp/message'

    def setUp(self):
        super(ImTestCase, self).setUp()
        # Force IM support on for the duration of each test.
        self.overrides = test_util.override(IM_ENABLED=True)

    def tearDown(self):
        super(ImTestCase, self).tearDown()
        self.overrides.reset()

    def sign_in(self, from_jid, nick, password=None):
        """Send a SIGN IN command; password defaults to the fixture's."""
        if not password:
            password = self.passwords[clean.nick(nick)]
        message = 'SIGN IN %s %s' % (nick, password)
        return self.send(from_jid, message)

    def send(self, from_jid, message):
        """POST a fake inbound XMPP message from `from_jid` to the bot."""
        r = self.client.post(
            self.endpoint,
            {
                'from': from_jid.full(),
                'to': settings.IM_BOT,
                'body': message
            }
        )
        return r
class SignInTest(ImTestCase):
    """Signing in over IM should produce exactly one outbound XMPP reply."""

    from_jid = xmpp.JID.from_uri('test@example.com/demo')

    def test_sign_in(self):
        self.assertEqual(len(xmpp.outbox), 0)
        r = self.sign_in(self.from_jid, 'hermit')
        # The bot replies once to a successful sign-in.
        self.assertEqual(len(xmpp.outbox), 1)
class NotificationsTest(ImTestCase):
    """IM notification start/stop commands and outbound notification counts."""

    from_jid = xmpp.JID.from_uri('test@example.com/demo')

    def test_start_stop(self):
        # im_notify defaults to off, is enabled by 'start', disabled by 'stop'.
        actor_pre_ref = api.actor_get(api.ROOT, 'hermit')
        self.assertEqual(actor_pre_ref.extra.get('im_notify', False), False)

        r = self.sign_in(self.from_jid, 'hermit')

        self.send(self.from_jid, 'start')
        actor_post_ref = api.actor_get(api.ROOT, 'hermit')
        self.assertEqual(actor_post_ref.extra.get('im_notify', False), True)

        self.send(self.from_jid, 'stop')
        actor_last_ref = api.actor_get(api.ROOT, 'hermit')
        self.assertEqual(actor_last_ref.extra.get('im_notify', False), False)

    def test_notify_on_post(self):
        api.post(api.ROOT, nick='popular', message='la la la')
        self.exhaust_queue('popular')

        # should notify popular and unpopular
        self.assertEqual(len(xmpp.outbox), 2)

    def test_notify_on_comment(self):
        api.entry_add_comment(api.ROOT,
                              stream='stream/popular@example.com/presence',
                              entry='stream/popular@example.com/presence/12345',
                              nick='popular',
                              content='la la la')
        self.exhaust_queue_any()

        # should notify popular and unpopular
        self.assertEqual(len(xmpp.outbox), 2)

    def test_notify_on_restricted_comment(self):
        api.subscription_request(api.ROOT,
                                 'stream/hermit@example.com/comments',
                                 'inbox/unpopular@example.com/overview')
        api.entry_add_comment(api.ROOT,
                              stream='stream/popular@example.com/presence',
                              entry='stream/popular@example.com/presence/12347',
                              nick='hermit',
                              content='la la la')
        self.exhaust_queue_any()

        # only one notification is expected for the restricted comment
        self.assertEqual(len(xmpp.outbox), 1)

    def test_notify_on_channel_post(self):
        api.channel_post(api.ROOT,
                         nick='popular',
                         channel="#popular",
                         message='la la la')

        # should notify popular and unpopular
        self.assertEqual(len(xmpp.outbox), 2)
class PostTest(ImTestCase):
    """Tests for posting a plain status message over IM."""
    from_jid = xmpp.JID.from_uri('test@example.com/demo')

    def test_post_signed_in(self):
        # After signing in, a bare message becomes a post.
        self.sign_in(self.from_jid, 'hermit')
        self.send(self.from_jid, "test post")
class ChannelPostTest(ImTestCase):
    """Tests for posting to a channel over IM."""
    from_jid = xmpp.JID.from_uri('test@example.com/demo')
    message = "test post from jabber"
    channel = "#popular@example.com"

    def _channel_page(self):
        # Fetch the channel page as a signed-in web user.
        self.login('popular')
        return self.client.get('/channel/popular')

    def verify_post_present(self):
        # Verify that the channel was updated with the message.
        self.assertContains(self._channel_page(), self.message)

    def verify_post_not_present(self):
        # Verify that the channel was NOT updated with the message.
        self.assertNotContains(self._channel_page(), self.message)

    def test_post_signed_in(self):
        self.sign_in(self.from_jid, 'popular')
        self.send(self.from_jid, '%s %s' % (self.channel, self.message))
        self.verify_post_present()

    def test_post_not_signed_in(self):
        self.send(self.from_jid, '%s %s' % (self.channel, self.message))
        self.verify_post_not_present()

    def test_post_not_member(self):
        # Posting to a channel where the user is not yet a member
        # (the user is automatically joined).
        self.sign_in(self.from_jid, 'hermit')
        self.send(self.from_jid, '%s %s' % (self.channel, self.message))
        self.verify_post_present()

    # TODO(tyler): Add test to verify the user is now a member and following
    #              the channel via jabber.
class OAuthTest(base.ViewTestCase):
    """Tests for the OAuth token endpoints and the API-key management views."""

    def setUp(self):
        super(OAuthTest, self).setUp()
        self.desktop_consumer = oauth.OAuthConsumer("TESTDESKTOPCONSUMER", "secret")
        self.sig_hmac = oauth.OAuthSignatureMethod_HMAC_SHA1()

    def test_tokens(self):
        # Full dance: request token -> (backdoor) authorize -> access token.
        request_request = oauth.OAuthRequest.from_consumer_and_token(
            self.desktop_consumer,
            http_url="http://%s/api/request_token" % settings.DOMAIN,
        )
        request_request.sign_request(self.sig_hmac, self.desktop_consumer, None)
        response = self.client.get("/api/request_token", request_request.parameters)
        request_token = oauth.OAuthToken.from_string(response.content)

        # cheat and authorize this token using the backend
        api.oauth_authorize_request_token(api.ROOT,
                                          request_token.key,
                                          actor='popular@example.com',
                                          perms="read")

        access_request = oauth.OAuthRequest.from_consumer_and_token(
            self.desktop_consumer,
            request_token,
            http_url="http://%s/api/access_token" % (settings.DOMAIN),
        )
        access_request.sign_request(self.sig_hmac, self.desktop_consumer,
                                    request_token)
        response = self.client.get("/api/access_token", access_request.parameters)
        # Parsing doubles as validation of the response body.
        access_token = oauth.OAuthToken.from_string(response.content)

    def test_update_bad_type(self):
        """Verify that sending a bad auth mode fails"""
        self.login('popular')
        response = self.client.post('/api/keys/TESTDESKTOPCONSUMER', {
            'nick': 'popular@example.com',
            '_nonce': util.create_nonce('popular', 'oauth_consumer_update'),
            'oauth_consumer_update': '',
            'app_name': 'Foo',
            'consumer_type': 'Bad Consumer Type',
            'consumer_key': 'TESTDESKTOPCONSUMER',
        })
        # TODO(tyler): I think I'm smoking crack, but there should be a better
        #              error, and I don't know why it isn't happening.  The
        #              validation is failing (as it should), but there doesn't
        #              seem to be any error page or message to the user.
        self.assertWellformed(response)

    def test_update(self):
        """Verify that sending a good auth mode succeeds"""
        self.login('popular')
        response = self.client.post('/api/keys/TESTDESKTOPCONSUMER', {
            'nick': 'popular@example.com',
            '_nonce': util.create_nonce('popular', 'oauth_consumer_update'),
            'oauth_consumer_update': '',
            'app_name': 'New App Name',
            'consumer_type': 'web',
            'consumer_key': 'TESTDESKTOPCONSUMER',
        })
        response = self.assertRedirectsPrefix(response, '/api/keys/TESTDESKTOPCONSUMER')
        self.assertTemplateUsed(response, 'key.html')
        self.assertContains(response, 'API Key information updated')
        self.assertWellformed(response)
        self.assertContains(response, 'New App Name')

    def test_delete(self):
        self.login('popular')
        response = self.client.get('/api/keys/TESTDESKTOPCONSUMER', {
            'nick': 'popular@example.com',
            '_nonce': util.create_nonce('popular', 'oauth_consumer_delete'),
            'oauth_consumer_delete': '',
            'consumer_key': 'TESTDESKTOPCONSUMER',
            'confirm': '1',
        })
        response = self.assertRedirectsPrefix(response, '/api/keys')
        self.assertTemplateUsed(response, 'keys.html')
        self.assertContains(response, 'API Key deleted')
        self.assertNotContains(response, 'TESTDESKTOPCONSUMER')
        self.assertWellformed(response)

    def test_revoke_access_token(self):
        self.login('popular')
        response = self.client.get('/api/tokens', {
            '_nonce': util.create_nonce('popular', 'oauth_revoke_access_token'),
            'oauth_revoke_access_token': '',
            'key': 'POPULARDESKTOPACCESSTOKEN',
            'confirm': 1,
        })
        response = self.assertRedirectsPrefix(response, '/api/tokens')
        self.assertTemplateUsed(response, 'tokens.html')
        self.assertContains(response, 'token revoked')
        self.assertNotContains(response, 'POPULARDESKTOPACCESSTOKEN')
        self.assertWellformed(response)
class SmsTestCase(base.ViewTestCase):
    """Base class for SMS endpoint tests."""
    endpoint = '/api/sms_receive/%s' % settings.SMS_VENDOR_SECRET
    popular = '+14084900694'

    def sign_in(self, from_mobile, nick, password=None):
        """Send a SIGN IN command; defaults to the fixture password for nick."""
        if not password:
            password = self.passwords[clean.nick(nick)]
        return self.send(from_mobile, 'SIGN IN %s %s' % (nick, password))

    def send(self, from_mobile, message):
        """Simulate an inbound SMS delivered by the vendor gateway."""
        return self.client.post(
            self.endpoint,
            {'sender': from_mobile,
             'target': settings.SMS_TARGET,
             'message': message}
        )
class SmsPostTest(SmsTestCase):
    """Tests for posting a status message over SMS."""

    def test_post_signed_in(self):
        message = "test post"
        self.sign_in(self.popular, 'popular')
        self.send(self.popular, message)
        self.exhaust_queue_any()
        # The newest entry in the overview inbox should be the posted message.
        inbox = api.inbox_get_actor_overview(api.ROOT, 'popular@example.com')
        entry_ref = api.entry_get(api.ROOT, inbox[0])
        self.assertEqual(entry_ref.title(), message)
class XmlRpcTest(base.FixturesTestCase):
    """Tests for the XML-RPC API endpoint and its authentication schemes."""

    def setUp(self):
        super(XmlRpcTest, self).setUp()
        self.overrides = None

    def tearDown(self):
        if self.overrides:
            self.overrides.reset()
        super(XmlRpcTest, self).tearDown()

    def _post_actor_get(self, params):
        # POST an 'actor_get' XML-RPC call with the given parameters.
        xml = xmlrpclib.dumps((params,), 'actor_get')
        return self.client.post('/api/xmlrpc', xml, 'text/xml')

    def assert_valid_actor_get_response(self, params):
        """Assert that an actor_get call with params returns the fixture actor."""
        response = self._post_actor_get(params)
        rv = xmlrpclib.loads(response.content)
        # Fetched for parity with the backend state (value itself unused).
        actor = api.actor_get(api.ROOT, 'popular')
        expected = {
            'actor': {'avatar_updated_at': '2001-01-01 00:00:00',
                      'extra': {'follower_count': 4,
                                'contact_count': 2,
                                'icon': 'default/animal_3'},
                      'privacy': 3,
                      'nick': 'popular@example.com',
                      'deleted_at': None,
                      'type': 'user'}
        }
        self.assertEquals(expected, rv[0][0])

    def oauth_request(self):
        """Build a signed OAuth actor_get request for the fixture consumer."""
        consumer = oauth.OAuthConsumer('TESTDESKTOPCONSUMER', 'secret')
        access_token = oauth.OAuthToken('POPULARDESKTOPACCESSTOKEN', 'secret')
        request = oauth.OAuthRequest.from_consumer_and_token(
            oauth_consumer=consumer,
            token=access_token,
            http_url=xmlrpc.URL,
            http_method='POST',
            parameters={'nick': 'popular'})
        request.sign_request(oauth_util.HMAC_SHA1, consumer, access_token)
        return request

    def test_xmlrpc_with_legacy_key(self):
        self.overrides = test_util.override(API_ALLOW_LEGACY_AUTH=True)
        popular_ref = api.actor_get(api.ROOT, 'popular')
        personal_key = legacy.generate_personal_key(popular_ref)
        self.assert_valid_actor_get_response({'user': 'popular',
                                              'personal_key': personal_key,
                                              'nick': 'popular'})

    def test_xmlrpc_with_disabled_legacy_key(self):
        self.overrides = test_util.override(API_ALLOW_LEGACY_AUTH=False)
        popular_ref = api.actor_get(api.ROOT, 'popular')
        personal_key = legacy.generate_personal_key(popular_ref)
        response = self._post_actor_get({'user': 'popular',
                                         'personal_key': personal_key,
                                         'nick': 'popular'})
        self.assertContains(response, 'Parameter not found')

    def test_xmlrpc_bad_legacy_key(self):
        self.overrides = test_util.override(API_ALLOW_LEGACY_AUTH=True)
        response = self._post_actor_get({'nick': 'popular',
                                         'user': 'popular',
                                         'personal_key': 'INVALID PERSONAL KEY!'})
        self.assertContains(response, 'Invalid API user')

    def test_xmlrpc_with_oauth(self):
        self.assert_valid_actor_get_response(self.oauth_request().parameters)

    def test_xmlrpc_bad_oauth(self):
        params = dict(self.oauth_request().parameters)
        params['oauth_key'] = 'INVALID OAUTH KEY!'
        response = self._post_actor_get(params)
        self.assertContains(response, 'Invalid signature')

    def test_xmlrpc_no_auth(self):
        response = self._post_actor_get({'nick': 'popular'})
        self.assertContains(response, 'Parameter not found')

    def test_get_request(self):
        response = self.client.get('/api/xmlrpc')
        self.assertContains(response, 'XML-RPC message must be an HTTP-POST request')
|
ctk3b/mbuild | refs/heads/master | docs/conf.py | 3 | # -*- coding: utf-8 -*-
#
# mbuild documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 1 08:59:12 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import pip
import sys

import mock

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.

# Heavy scientific dependencies are mocked out so that autodoc can import
# mbuild on machines (e.g. readthedocs) where they are not installed.
MOCK_MODULES = [
    'numpy',
    'mdtraj',
    'nglview',
    'oset',
    'parmed',
    'parmed.periodic_table',
    'scipy',
    'scipy.spatial',
    'numpy.linalg',
]
sys.modules.update((name, mock.Mock()) for name in MOCK_MODULES)

sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('sphinxext'))

import mbuild
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Sphinx extension modules.  Note: 'sphinx.ext.viewcode' was previously
# listed twice; duplicates are redundant, so each extension appears once.
extensions = [
    # 'nbsphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
    'sphinx.ext.intersphinx',
    'sphinx.ext.autosummary',
    'notebook_sphinxext',
    'numpydoc',
]

nbsphinx_execute = 'always'

autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']

numpydoc_class_members_toctree = False
# stackoverflow.com/questions/12206334
numpydoc_show_class_members = False
numpydoc_show_inherited_class_members = False

_python_doc_base = 'http://docs.python.org/3.4'
intersphinx_mapping = {
    _python_doc_base: None,
    'http://docs.scipy.org/doc/numpy': None,
    'http://docs.scipy.org/doc/scipy/reference': None,
    'http://scikit-learn.org/stable': None,
}
# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'mbuild'
copyright = u'2014-2017, Vanderbilt University'

# Version info for the project being documented; used as replacement for
# |version| and |release| throughout the built documents.  mbuild exposes a
# single version attribute, used for both the short and the full form.
version = mbuild.version
release = mbuild.version

# The language for content autogenerated by Sphinx (None -> English).
#language = None

# Replacement for |today|: either set 'today' directly, or set 'today_fmt'
# as the format for a strftime call.
#today = ''
#today_fmt = '%B %d, %Y'

# Patterns (relative to the source directory) to ignore when looking
# for source files.
exclude_patterns = ['_build', '**.ipynb_checkpoints']

# The reST default role (used for this markup: `text`).
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name is prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives are shown in the output.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.
#html_theme = 'default'
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
# BUG FIX: this variable was previously misspelled 'hhtml_theme_path', so
# Sphinx never saw it; 'html_theme' only resolved because sphinx_rtd_theme
# is also installed as a package.
html_theme_path = [
    sphinx_rtd_theme.get_html_theme_path()
]

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico), 16x16 or 32x32 pixels.
#html_favicon = None

# Paths that contain custom static files (such as style sheets), copied after
# the builtin static files, so "default.css" here overrides the builtin one.
html_static_path = ['_static']

# Extra paths with custom files (robots.txt, .htaccess, ...) copied directly
# to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants converts quotes and dashes to typographically
# correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# BUG FIX: 'sourcelink.html' was previously listed twice; each template is
# rendered per occurrence, so list it once.
html_sidebars = {
    '**': ['globaltoc.html', 'sourcelink.html', 'searchbox.html',
           'relations.html'],
}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer.  Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer.  Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'mbuilddoc'
# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files.  List of tuples
# (source start file, target name, title, author,
#  documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'mbuild.tex', u'mbuild Documentation',
     u'Author', 'manual'),
]

# Image file (relative to this directory) to place at the top of the
# title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page.  List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'mbuild', u'mbuild Documentation',
     [u'Author'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files.  List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category).
texinfo_documents = [
    ('index', 'mbuild', u'mbuild Documentation',
     u'Author', 'mbuild', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False


# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = u'mbuild'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2017, Author'

# The basename for the epub file.  It defaults to the project name.
#epub_basename = u'mbuild'

# The HTML theme for the epub output; the default themes are not optimized
# for small screen space.
#epub_theme = 'epub'

# The language of the text (defaults to the 'language' option, or 'en').
#epub_language = ''

# The scheme of the identifier (ISBN, URL, ...).
#epub_scheme = ''

# The unique identifier of the text (ISBN number or project homepage).
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()

# HTML files inserted before/after the pages created by sphinx, as lists of
# (path, title) tuples.
#epub_pre_files = []
#epub_post_files = []

# Files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'

# Fix unsupported image types using the PIL.
#epub_fix_images = False

# Scale large images.
#epub_max_image_width = 0

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'

# If false, no index is generated.
#epub_use_index = True
|
incaser/odoo-odoo | refs/heads/8.0 | addons/payment_transfer/models/__init__.py | 437 | # -*- coding: utf-8 -*-
import payment_acquirer
|
malishevg/edugraph | refs/heads/master | common/lib/xmodule/xmodule/tabs.py | 10 | """
Implement CourseTab
"""
# pylint: disable=incomplete-protocol
# Note: pylint complains that we do not implement __delitem__ and __len__, although we implement __setitem__
# and __getitem__. However, the former two do not apply to the CourseTab class so we do not implement them.
# The reason we implement the latter two is to enable callers to continue to use the CourseTab object with
# dict-type accessors.
from abc import ABCMeta, abstractmethod
from xblock.fields import List
# We should only scrape strings for i18n in this file, since the target language is known only when
# they are rendered in the template. So ugettext gets called in the template.
def _(text):
    """Identity i18n marker: lets extraction tools scrape the string while the
    real ugettext is applied later, at template render time.

    (Was previously ``_ = lambda text: text``; a ``def`` is the idiomatic
    form per PEP 8 E731 and gives the callable a useful name in tracebacks.)
    """
    return text
class CourseTab(object):  # pylint: disable=incomplete-protocol
    """
    The Course Tab class is a data abstraction for all tabs (i.e., course navigation links) within a course.
    It is an abstract class - to be inherited by various tab types.
    Derived classes are expected to override methods as needed.
    When a new tab class is created, it should define the type and add it in this class' factory method.

    Instances also support a subset of the dict protocol (``get``, ``[]`` read/write)
    so that pre-existing callers that treated tabs as dicts keep working.
    """
    __metaclass__ = ABCMeta

    # Class property that specifies the type of the tab.  It is generally a constant value for a
    # subclass, shared by all instances of the subclass.  Also the discriminator used by from_json.
    type = ''

    # Class property that specifies whether the tab can be hidden for a particular course
    is_hideable = False

    # Class property that specifies whether the tab can be moved within a course's list of tabs
    is_movable = True

    # Class property that specifies whether the tab is a collection of other tabs
    is_collection = False

    def __init__(self, name, tab_id, link_func):
        """
        Initializes class members with values passed in by subclasses.

        Args:
            name: The name of the tab

            tab_id: Intended to be a unique id for this tab, although it is currently not enforced
                within this module.  It is used by the UI to determine which page is active.

            link_func: A function that computes the link for the tab,
                given the course and a reverse-url function as input parameters
        """
        self.name = name
        self.tab_id = tab_id
        self.link_func = link_func

    def can_display(self, course, settings, is_user_authenticated, is_user_staff):  # pylint: disable=unused-argument
        """
        Determines whether the tab should be displayed in the UI for the given course and a particular user.
        This method is to be overridden by subclasses when applicable.  The base class implementation
        always returns True.

        Args:
            course: An xModule CourseDescriptor

            settings: The configuration settings, including values for:
                WIKI_ENABLED
                FEATURES['ENABLE_DISCUSSION_SERVICE']
                FEATURES['ENABLE_STUDENT_NOTES']
                FEATURES['ENABLE_TEXTBOOK']

            is_user_authenticated: Indicates whether the user is authenticated.  If the tab is of
                type AuthenticatedCourseTab and this value is False, then can_display will return False.

            is_user_staff: Indicates whether the user has staff access to the course.  If the tab is of
                type StaffTab and this value is False, then can_display will return False.

        Returns:
            A boolean value to indicate whether this instance of the tab should be displayed to a
            given user for the given course.
        """
        return True

    def get(self, key, default=None):
        """
        Akin to the get method on Python dictionary objects, gracefully returns the value associated with the
        given key, or the default if key does not exist.
        """
        # EAFP: delegate to __getitem__ and translate a missing key into the default.
        try:
            return self[key]
        except KeyError:
            return default

    def __getitem__(self, key):
        """
        This method allows callers to access CourseTab members with the d[key] syntax as is done with
        Python dictionary objects.

        Only 'name', 'type' and 'tab_id' are readable this way; any other key raises KeyError.
        """
        if key == 'name':
            return self.name
        elif key == 'type':
            return self.type
        elif key == 'tab_id':
            return self.tab_id
        else:
            raise KeyError('Key {0} not present in tab {1}'.format(key, self.to_json()))

    def __setitem__(self, key, value):
        """
        This method allows callers to change CourseTab members with the d[key]=value syntax as is done with
        Python dictionary objects.  For example: course_tab['name'] = new_name

        Note: the 'type' member can be 'get', but not 'set' (it is a class-level constant).
        """
        if key == 'name':
            self.name = value
        elif key == 'tab_id':
            self.tab_id = value
        else:
            raise KeyError('Key {0} cannot be set in tab {1}'.format(key, self.to_json()))

    def __eq__(self, other):
        """
        Overrides the equal operator to check equality of member variables rather than the object's address.
        Also allows comparison with dict-type tabs (needed to support callers implemented before this class
        was implemented).
        """
        if type(other) is dict and not self.validate(other, raise_error=False):
            # 'other' is a dict-type tab and did not validate
            return False

        # allow tabs without names; if a name is required, its presence was checked in the validator.
        name_is_eq = (other.get('name') is None or self.name == other['name'])

        # only compare the persisted/serialized members: 'type' and 'name'
        return self.type == other.get('type') and name_is_eq

    def __ne__(self, other):
        """
        Overrides the not equal operator as a partner to the equal operator.
        """
        return not (self == other)

    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        """
        Validates the given dict-type tab object to ensure it contains the expected keys.
        This method should be overridden by subclasses that require certain keys to be persisted in the tab.
        """
        return key_checker(['type'])(tab_dict, raise_error)

    def to_json(self):
        """
        Serializes the necessary members of the CourseTab object to a json-serializable representation.
        This method is overridden by subclasses that have more members to serialize.

        Returns:
            a dictionary with keys for the properties of the CourseTab object.
        """
        return {'type': self.type, 'name': self.name}

    @staticmethod
    def from_json(tab_dict):
        """
        Deserializes a CourseTab from a json-like representation.

        The subclass that is instantiated is determined by the value of the 'type' key in the
        given dict-type tab.  The given dict-type tab is validated before instantiating the CourseTab object.

        Args:
            tab_dict: a dictionary with keys for the properties of the tab.

        Raises:
            InvalidTabsException if the given tab doesn't have the right keys.
        """
        # Registry mapping persisted 'type' strings to tab classes.
        sub_class_types = {
            'courseware': CoursewareTab,
            'course_info': CourseInfoTab,
            'wiki': WikiTab,
            'discussion': DiscussionTab,
            'external_discussion': ExternalDiscussionTab,
            'external_link': ExternalLinkTab,
            'textbooks': TextbookTabs,
            'pdf_textbooks': PDFTextbookTabs,
            'html_textbooks': HtmlTextbookTabs,
            'progress': ProgressTab,
            'static_tab': StaticTab,
            'peer_grading': PeerGradingTab,
            'staff_grading': StaffGradingTab,
            'open_ended': OpenEndedGradingTab,
            'notes': NotesTab,
            'syllabus': SyllabusTab,
            'instructor': InstructorTab,  # not persisted
        }

        tab_type = tab_dict.get('type')
        if tab_type not in sub_class_types:
            raise InvalidTabsException(
                'Unknown tab type {0}. Known types: {1}'.format(tab_type, sub_class_types)
            )

        tab_class = sub_class_types[tab_dict['type']]
        tab_class.validate(tab_dict)
        return tab_class(tab_dict=tab_dict)
class AuthenticatedCourseTab(CourseTab):
    """Abstract base for tabs that are shown only to logged-in users."""

    def can_display(self, course, settings, is_user_authenticated, is_user_staff):
        """Display the tab exactly when the requesting user is authenticated."""
        return is_user_authenticated
class StaffTab(AuthenticatedCourseTab):
    """Abstract base for tabs that are shown only to users with staff access."""

    def can_display(self, course, settings, is_user_authenticated, is_user_staff):  # pylint: disable=unused-argument
        """Display the tab exactly when the user has staff access to the course."""
        return is_user_staff
class HideableTab(CourseTab):
    """Abstract base for tabs whose visibility can be toggled per course.

    Adds an 'is_hidden' flag that round-trips through the dict protocol and
    the JSON serialization.
    """
    is_hideable = True

    def __init__(self, name, tab_id, link_func, tab_dict):
        super(HideableTab, self).__init__(
            name=name,
            tab_id=tab_id,
            link_func=link_func,
        )
        # Persisted flag; defaults to False when absent or when no dict given.
        self.is_hidden = tab_dict.get('is_hidden', False) if tab_dict else False

    def __getitem__(self, key):
        if key == 'is_hidden':
            return self.is_hidden
        return super(HideableTab, self).__getitem__(key)

    def __setitem__(self, key, value):
        if key == 'is_hidden':
            self.is_hidden = value
        else:
            super(HideableTab, self).__setitem__(key, value)

    def to_json(self):
        # Only persist the flag when it is actually set, keeping JSON minimal.
        serialized = super(HideableTab, self).to_json()
        if self.is_hidden:
            serialized.update({'is_hidden': True})
        return serialized

    def __eq__(self, other):
        return (super(HideableTab, self).__eq__(other)
                and self.is_hidden == other.get('is_hidden', False))
class CoursewareTab(CourseTab):
    """The fixed, always-first tab that links to the course content itself."""

    type = 'courseware'
    is_movable = False

    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        super(CoursewareTab, self).__init__(
            # Translators: 'Courseware' refers to the tab in the courseware that leads to the content of a course
            name=_('Courseware'),  # support fixed name for the courseware tab
            tab_id=self.type,
            link_func=link_reverse_func(self.type),
        )
class CourseInfoTab(CourseTab):
    """The tab that shows course information and updates."""

    type = 'course_info'
    is_movable = False

    def __init__(self, tab_dict=None):
        super(CourseInfoTab, self).__init__(
            # Translators: "Course Info" is the name of the course's information and updates page
            name=tab_dict['name'] if tab_dict else _('Course Info'),
            tab_id='info',
            link_func=link_reverse_func('info'),
        )

    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        """A course-info tab must additionally carry a 'name'."""
        return (super(CourseInfoTab, cls).validate(tab_dict, raise_error)
                and need_name(tab_dict, raise_error))
class ProgressTab(AuthenticatedCourseTab):
    """The tab showing the authenticated user's progress through the course."""

    type = 'progress'

    def __init__(self, tab_dict=None):
        super(ProgressTab, self).__init__(
            # Translators: "Progress" is the name of the student's course progress page
            name=tab_dict['name'] if tab_dict else _('Progress'),
            tab_id=self.type,
            link_func=link_reverse_func(self.type),
        )

    def can_display(self, course, settings, is_user_authenticated, is_user_staff):
        # A course may opt out of showing the progress tab entirely.
        return not course.hide_progress_tab

    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        """A progress tab must additionally carry a 'name'."""
        return (super(ProgressTab, cls).validate(tab_dict, raise_error)
                and need_name(tab_dict, raise_error))
class WikiTab(HideableTab):
    """The (hideable) tab linking to the course wiki."""

    type = 'wiki'

    def __init__(self, tab_dict=None):
        super(WikiTab, self).__init__(
            # Translators: "Wiki" is the name of the course's wiki page
            name=tab_dict['name'] if tab_dict else _('Wiki'),
            tab_id=self.type,
            link_func=link_reverse_func('course_wiki'),
            tab_dict=tab_dict,
        )

    def can_display(self, course, settings, is_user_authenticated, is_user_staff):
        # Visible only when the wiki feature is enabled platform-wide.
        return settings.WIKI_ENABLED

    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        """A wiki tab must additionally carry a 'name'."""
        return (super(WikiTab, cls).validate(tab_dict, raise_error)
                and need_name(tab_dict, raise_error))
class DiscussionTab(CourseTab):
    """The tab for the built-in (Berkeley) discussion forums."""

    type = 'discussion'

    def __init__(self, tab_dict=None):
        super(DiscussionTab, self).__init__(
            # Translators: "Discussion" is the title of the course forum page
            name=tab_dict['name'] if tab_dict else _('Discussion'),
            tab_id=self.type,
            link_func=link_reverse_func('django_comment_client.forum.views.forum_form_discussion'),
        )

    def can_display(self, course, settings, is_user_authenticated, is_user_staff):
        # Visible only when the discussion service feature flag is enabled.
        return settings.FEATURES.get('ENABLE_DISCUSSION_SERVICE')

    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        """A discussion tab must additionally carry a 'name'."""
        return (super(DiscussionTab, cls).validate(tab_dict, raise_error)
                and need_name(tab_dict, raise_error))
class LinkTab(CourseTab):
    """
    Abstract class for tabs that contain external links.

    The link target is stored on the instance (``link_value``) and is
    round-tripped through the virtual 'link' key in the dict/JSON
    representations implemented below.
    """
    link_value = ''
    def __init__(self, name, tab_id, link_value):
        # Remember the raw link so __getitem__/to_json can expose it later.
        self.link_value = link_value
        super(LinkTab, self).__init__(
            name=name,
            tab_id=tab_id,
            link_func=link_value_func(self.link_value),
        )
    def __getitem__(self, key):
        # Expose 'link' as a virtual dict key backed by self.link_value.
        if key == 'link':
            return self.link_value
        else:
            return super(LinkTab, self).__getitem__(key)
    def __setitem__(self, key, value):
        # Keep the 'link' dict key and self.link_value in sync.
        if key == 'link':
            self.link_value = value
        else:
            super(LinkTab, self).__setitem__(key, value)
    def to_json(self):
        # Serialize the base fields, then add this tab's link.
        to_json_val = super(LinkTab, self).to_json()
        to_json_val.update({'link': self.link_value})
        return to_json_val
    def __eq__(self, other):
        # Equal only when the base fields match and the links agree.
        if not super(LinkTab, self).__eq__(other):
            return False
        return self.link_value == other.get('link')
    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        # A link tab dict must also provide a 'link' entry.
        return super(LinkTab, cls).validate(tab_dict, raise_error) and key_checker(['link'])(tab_dict, raise_error)
class ExternalDiscussionTab(LinkTab):
    """
    A tab that links to an external discussion service.
    """
    type = 'external_discussion'
    def __init__(self, tab_dict=None, link_value=None):
        # Prefer the link stored in the serialized dict; otherwise use the
        # explicitly supplied link_value.
        link = tab_dict['link'] if tab_dict else link_value
        super(ExternalDiscussionTab, self).__init__(
            # Translators: 'Discussion' refers to the tab in the courseware that leads to the discussion forums
            name=_('Discussion'),
            tab_id='discussion',
            link_value=link,
        )
class ExternalLinkTab(LinkTab):
    """
    A tab containing an external link.
    """
    type = 'external_link'
    def __init__(self, tab_dict):
        # External links are never active, so they carry no tab id.
        super(ExternalLinkTab, self).__init__(
            name=tab_dict['name'],
            tab_id=None,
            link_value=tab_dict['link'],
        )
class StaticTab(CourseTab):
    """
    A custom tab.

    Identified by a URL slug; the tab id is derived from the slug, and the
    slug is round-tripped through the virtual 'url_slug' dict key below.
    """
    type = 'static_tab'
    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        # Static tabs additionally require a display name and a URL slug.
        return super(StaticTab, cls).validate(tab_dict, raise_error) and key_checker(['name', 'url_slug'])(tab_dict, raise_error)
    def __init__(self, tab_dict=None, name=None, url_slug=None):
        # Construct either from a serialized dict or from explicit name/url_slug.
        self.url_slug = tab_dict['url_slug'] if tab_dict else url_slug
        super(StaticTab, self).__init__(
            name=tab_dict['name'] if tab_dict else name,
            tab_id='static_tab_{0}'.format(self.url_slug),
            # The slug is read from self at call time, so later updates via
            # __setitem__('url_slug', ...) are reflected in generated links.
            link_func=lambda course, reverse_func: reverse_func(self.type, args=[course.id, self.url_slug]),
        )
    def __getitem__(self, key):
        # Expose 'url_slug' as a virtual dict key backed by the attribute.
        if key == 'url_slug':
            return self.url_slug
        else:
            return super(StaticTab, self).__getitem__(key)
    def __setitem__(self, key, value):
        # Keep the 'url_slug' dict key and the attribute in sync.
        if key == 'url_slug':
            self.url_slug = value
        else:
            super(StaticTab, self).__setitem__(key, value)
    def to_json(self):
        # Serialize the base fields, then add this tab's slug.
        to_json_val = super(StaticTab, self).to_json()
        to_json_val.update({'url_slug': self.url_slug})
        return to_json_val
    def __eq__(self, other):
        # Equal only when the base fields match and the slugs agree.
        if not super(StaticTab, self).__eq__(other):
            return False
        return self.url_slug == other.get('url_slug')
class SingleTextbookTab(CourseTab):
    """
    A tab representing a single textbook. It is created temporarily when enumerating all textbooks within a
    Textbook collection tab. It should not be serialized or persisted.
    """
    type = 'single_textbook'
    # Transient item tabs cannot be reordered and belong to a collection tab.
    is_movable = False
    is_collection_item = True
    def to_json(self):
        # Transient tabs must never be persisted; fail loudly if attempted.
        raise NotImplementedError('SingleTextbookTab should not be serialized.')
class TextbookTabsBase(AuthenticatedCourseTab):
    """
    Abstract class for textbook collection tabs classes.
    """
    # Collection tabs expand into multiple SingleTextbookTab items on display.
    is_collection = True
    def __init__(self, tab_id):
        # Translators: 'Textbooks' refers to the tab in the course that leads to the course' textbooks
        super(TextbookTabsBase, self).__init__(
            name=_("Textbooks"),
            tab_id=tab_id,
            link_func=None,
        )
    @abstractmethod
    def items(self, course):
        """
        A generator for iterating through all the SingleTextbookTab book objects associated with this
        collection of textbooks.
        """
        pass
class TextbookTabs(TextbookTabsBase):
    """
    A tab representing the collection of all textbook tabs.
    """
    type = 'textbooks'
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        super(TextbookTabs, self).__init__(
            tab_id=self.type,
        )
    def can_display(self, course, settings, is_user_authenticated, is_user_staff):
        """Shown only when the textbook feature flag is enabled in settings."""
        return settings.FEATURES.get('ENABLE_TEXTBOOK')
    def items(self, course):
        """
        Yield a transient SingleTextbookTab for each textbook in the course.
        """
        for index, textbook in enumerate(course.textbooks):
            yield SingleTextbookTab(
                name=textbook.title,
                tab_id='textbook/{0}'.format(index),
                # Bind 'index' as a default argument: a plain closure would be
                # late-bound, so every tab's link would use the final index.
                link_func=lambda course, reverse_func, index=index: reverse_func(
                    'book', args=[course.id, index]
                ),
            )
class PDFTextbookTabs(TextbookTabsBase):
    """
    A tab representing the collection of all PDF textbook tabs.
    """
    type = 'pdf_textbooks'
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        super(PDFTextbookTabs, self).__init__(
            tab_id=self.type,
        )
    def items(self, course):
        """
        Yield a transient SingleTextbookTab for each PDF textbook in the course.
        """
        for index, textbook in enumerate(course.pdf_textbooks):
            yield SingleTextbookTab(
                name=textbook['tab_title'],
                tab_id='pdftextbook/{0}'.format(index),
                # Bind 'index' as a default argument: a plain closure would be
                # late-bound, so every tab's link would use the final index.
                link_func=lambda course, reverse_func, index=index: reverse_func(
                    'pdf_book', args=[course.id, index]
                ),
            )
class HtmlTextbookTabs(TextbookTabsBase):
    """
    A tab representing the collection of all Html textbook tabs.
    """
    type = 'html_textbooks'
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        super(HtmlTextbookTabs, self).__init__(
            tab_id=self.type,
        )
    def items(self, course):
        """
        Yield a transient SingleTextbookTab for each HTML textbook in the course.
        """
        for index, textbook in enumerate(course.html_textbooks):
            yield SingleTextbookTab(
                name=textbook['tab_title'],
                tab_id='htmltextbook/{0}'.format(index),
                # Bind 'index' as a default argument: a plain closure would be
                # late-bound, so every tab's link would use the final index.
                link_func=lambda course, reverse_func, index=index: reverse_func(
                    'html_book', args=[course.id, index]
                ),
            )
class GradingTab(object):
    """
    Abstract class for tabs that involve Grading.

    Pure marker mixin: carries no behavior, it only identifies
    grading-related tab classes.
    """
    pass
class StaffGradingTab(StaffTab, GradingTab):
    """
    A tab for staff grading.
    """
    type = 'staff_grading'
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        # Translators: "Staff grading" appears on a tab that allows
        # staff to view open-ended problems that require staff grading
        tab_name = _("Staff grading")
        super(StaffGradingTab, self).__init__(
            name=tab_name,
            tab_id=self.type,
            link_func=link_reverse_func(self.type),
        )
class PeerGradingTab(AuthenticatedCourseTab, GradingTab):
    """
    A tab for peer grading.
    """
    type = 'peer_grading'
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        # Translators: "Peer grading" appears on a tab that allows
        # students to view open-ended problems that require grading
        tab_name = _("Peer grading")
        super(PeerGradingTab, self).__init__(
            name=tab_name,
            tab_id=self.type,
            link_func=link_reverse_func(self.type),
        )
class OpenEndedGradingTab(AuthenticatedCourseTab, GradingTab):
    """
    A tab for open ended grading.
    """
    type = 'open_ended'
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        # Translators: "Open Ended Panel" appears on a tab that, when clicked, opens up a panel that
        # displays information about open-ended problems that a user has submitted or needs to grade
        tab_name = _("Open Ended Panel")
        super(OpenEndedGradingTab, self).__init__(
            name=tab_name,
            tab_id=self.type,
            link_func=link_reverse_func('open_ended_notifications'),
        )
class SyllabusTab(CourseTab):
    """
    A tab for the course syllabus.
    """
    type = 'syllabus'
    def can_display(self, course, settings, is_user_authenticated, is_user_staff):
        """Shown only for courses that declare a syllabus."""
        return getattr(course, 'syllabus_present', False)
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        super(SyllabusTab, self).__init__(
            # Translators: "Syllabus" appears on a tab that, when clicked, opens the syllabus of the course.
            name=_('Syllabus'),
            tab_id=self.type,
            link_func=link_reverse_func(self.type),
        )
class NotesTab(AuthenticatedCourseTab):
    """
    A tab for the course notes.
    """
    type = 'notes'
    def can_display(self, course, settings, is_user_authenticated, is_user_staff):
        """Shown only when the student-notes feature flag is enabled in settings."""
        return settings.FEATURES.get('ENABLE_STUDENT_NOTES')
    def __init__(self, tab_dict=None):
        # Fall back to a default name when no dict is given, matching the
        # other tab types; previously NotesTab() raised TypeError because
        # tab_dict=None was subscripted unconditionally.
        super(NotesTab, self).__init__(
            # Translators: "Notes" is the name of the course notes page
            name=tab_dict['name'] if tab_dict else _('Notes'),
            tab_id=self.type,
            link_func=link_reverse_func(self.type),
        )
    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        """A notes tab dict must carry a 'name' in addition to the base requirements."""
        return super(NotesTab, cls).validate(tab_dict, raise_error) and need_name(tab_dict, raise_error)
class InstructorTab(StaffTab):
    """
    A tab for the course instructors.
    """
    type = 'instructor'
    def __init__(self, tab_dict=None):  # pylint: disable=unused-argument
        # Translators: 'Instructor' appears on the tab that leads to the instructor dashboard, which is
        # a portal where an instructor can get data and perform various actions on their course
        tab_name = _('Instructor')
        super(InstructorTab, self).__init__(
            name=tab_name,
            tab_id=self.type,
            link_func=link_reverse_func('instructor_dashboard'),
        )
class CourseTabList(List):
    """
    An XBlock field class that encapsulates a collection of Tabs in a course.
    It is automatically created and can be retrieved through a CourseDescriptor object: course.tabs
    """
    @staticmethod
    def initialize_default(course):
        """
        An explicit initialize method is used to set the default values, rather than implementing an
        __init__ method. This is because the default values are dependent on other information from
        within the course.
        """
        # Courseware and course info always come first, in this order (enforced
        # by validate_tabs below).
        course.tabs.extend([
            CoursewareTab(),
            CourseInfoTab(),
        ])
        # Presence of syllabus tab is indicated by a course attribute
        if hasattr(course, 'syllabus_present') and course.syllabus_present:
            course.tabs.append(SyllabusTab())
        # If the course has a discussion link specified, use that even if we feature
        # flag discussions off. Disabling that is mostly a server safety feature
        # at this point, and we don't need to worry about external sites.
        if course.discussion_link:
            discussion_tab = ExternalDiscussionTab(link_value=course.discussion_link)
        else:
            discussion_tab = DiscussionTab()
        course.tabs.extend([
            TextbookTabs(),
            discussion_tab,
            WikiTab(),
            ProgressTab(),
        ])
    @staticmethod
    def get_discussion(course):
        """
        Returns the discussion tab for the given course. It can be either of type DiscussionTab
        or ExternalDiscussionTab. The returned tab object is self-aware of the 'link' that it corresponds to.
        """
        # the discussion_link setting overrides everything else, even if there is a discussion tab in the course tabs
        if course.discussion_link:
            return ExternalDiscussionTab(link_value=course.discussion_link)
        # find one of the discussion tab types in the course tabs
        for tab in course.tabs:
            if isinstance(tab, DiscussionTab) or isinstance(tab, ExternalDiscussionTab):
                return tab
        return None
    @staticmethod
    def get_tab_by_slug(tab_list, url_slug):
        """
        Look for a tab with the specified 'url_slug'.  Returns the tab or None if not found.
        """
        return next((tab for tab in tab_list if tab.get('url_slug') == url_slug), None)
    @staticmethod
    def get_tab_by_type(tab_list, tab_type):
        """
        Look for a tab with the specified type.  Returns the first matching tab.
        """
        return next((tab for tab in tab_list if tab.type == tab_type), None)
    @staticmethod
    def get_tab_by_id(tab_list, tab_id):
        """
        Look for a tab with the specified tab_id.  Returns the first matching tab.
        """
        return next((tab for tab in tab_list if tab.tab_id == tab_id), None)
    @staticmethod
    def iterate_displayable(
            course,
            settings,
            is_user_authenticated=True,
            is_user_staff=True,
    ):
        """
        Generator method for iterating through all tabs that can be displayed for the given course and
        the given user with the provided access settings.
        """
        for tab in course.tabs:
            if tab.can_display(
                    course, settings, is_user_authenticated, is_user_staff
            ) and (not tab.is_hideable or not tab.is_hidden):
                if tab.is_collection:
                    # Collection tabs (e.g. textbooks) expand into their items.
                    for item in tab.items(course):
                        yield item
                else:
                    yield tab
        # The instructor tab is not stored in course.tabs; it is appended
        # dynamically for users who are allowed to see it.
        instructor_tab = InstructorTab()
        if instructor_tab.can_display(course, settings, is_user_authenticated, is_user_staff):
            yield instructor_tab
    @staticmethod
    def iterate_displayable_cms(
            course,
            settings
    ):
        """
        Generator method for iterating through all tabs that can be displayed for the given course
        with the provided settings.
        """
        for tab in course.tabs:
            if tab.can_display(course, settings, is_user_authenticated=True, is_user_staff=True):
                if tab.is_collection and not len(list(tab.items(course))):
                    # do not yield collections that have no items
                    continue
                yield tab
    @classmethod
    def validate_tabs(cls, tabs):
        """
        Check that the tabs set for the specified course is valid.  If it
        isn't, raise InvalidTabsException with the complaint.
        Specific rules checked:
        - if no tabs specified, that's fine
        - if tabs specified, first two must have type 'courseware' and 'course_info', in that order.
        """
        if tabs is None or len(tabs) == 0:
            return
        if len(tabs) < 2:
            raise InvalidTabsException("Expected at least two tabs.  tabs: '{0}'".format(tabs))
        if tabs[0].get('type') != CoursewareTab.type:
            raise InvalidTabsException(
                "Expected first tab to have type 'courseware'.  tabs: '{0}'".format(tabs))
        if tabs[1].get('type') != CourseInfoTab.type:
            raise InvalidTabsException(
                "Expected second tab to have type 'course_info'.  tabs: '{0}'".format(tabs))
        # the following tabs should appear only once
        for tab_type in [
                CoursewareTab.type,
                CourseInfoTab.type,
                NotesTab.type,
                TextbookTabs.type,
                PDFTextbookTabs.type,
                HtmlTextbookTabs.type,
        ]:
            cls._validate_num_tabs_of_type(tabs, tab_type, 1)
    @staticmethod
    def _validate_num_tabs_of_type(tabs, tab_type, max_num):
        """
        Check that the number of times that the given 'tab_type' appears in 'tabs' is less than or equal to 'max_num'.
        """
        count = sum(1 for tab in tabs if tab.get('type') == tab_type)
        if count > max_num:
            raise InvalidTabsException(
                "Tab of type '{0}' appears {1} time(s). Expected maximum of {2} time(s).".format(
                    tab_type, count, max_num
                ))
    def to_json(self, values):
        """
        Overrides the to_json method to serialize all the CourseTab objects to a json-serializable representation.
        """
        json_data = []
        if values:
            for val in values:
                if isinstance(val, CourseTab):
                    json_data.append(val.to_json())
                elif isinstance(val, dict):
                    json_data.append(val)
                else:
                    # Silently drop values of any other type.
                    continue
        return json_data
    def from_json(self, values):
        """
        Overrides the from_json method to de-serialize the CourseTab objects from a json-like representation.
        """
        # Raises InvalidTabsException when the serialized list is malformed.
        self.validate_tabs(values)
        return [CourseTab.from_json(tab_dict) for tab_dict in values]
#### Link Functions
def link_reverse_func(reverse_name):
    """
    Build a link function for a reversible URL.

    The returned callable takes (course, reverse_url_func) and invokes
    reverse_url_func with the given reverse_name and the course's ID.
    """
    def link_func(course, reverse_url_func):
        return reverse_url_func(reverse_name, args=[course.id])
    return link_func
def link_value_func(value):
    """
    Build a link function that ignores its arguments and always returns *value*.
    """
    def link_func(course, reverse_url_func):
        return value
    return link_func
#### Validators
# A validator takes a dict and raises InvalidTabsException if required fields are missing or otherwise wrong.
# (e.g. "is there a 'name' field?). Validators can assume that the type field is valid.
def key_checker(expected_keys):
    """
    Returns a function that checks that specified keys are present in a dict.
    """
    def check(actual_dict, raise_error=True):
        """
        Return True when every expected key is present in actual_dict.

        When keys are missing, raise InvalidTabsException if raise_error
        is set; otherwise return False.
        """
        if all(key in actual_dict for key in expected_keys):
            return True
        if raise_error:
            raise InvalidTabsException(
                "Expected keys '{0}' are not present in the given dict: {1}".format(expected_keys, actual_dict)
            )
        return False
    return check
def need_name(dictionary, raise_error=True):
    """
    Returns whether the 'name' key exists in the given dictionary.
    """
    check_name = key_checker(['name'])
    return check_name(dictionary, raise_error)
class InvalidTabsException(Exception):
    """
    A complaint about invalid tabs.

    Raised when a tab dict is missing required keys or a tab list violates
    the ordering/uniqueness rules.
    """
    pass
class UnequalTabsException(Exception):
    """
    A complaint about tab lists being unequal

    Signals that two tab lists expected to match do not.
    """
    pass
|
Jumpers/MysoftAutoTest | refs/heads/master | Step1-PythonBasic/Practices/yuxq/1-5/ex1.py | 1 | print "Hello World!"
# "Learn Python the Hard Way" exercise 1: a flat script of Python 2
# print statements (py2-only syntax; under Python 3 these would need parentheses).
print "Hello Again"
print "I Like typing this."
print "This is fun."
print "Yay! Printing."
print "I'd much rather you 'not'."
print 'I "said" do not touch this.' |
NeCTAR-RC/nova | refs/heads/nectar/mitaka | nova/api/openstack/compute/hosts.py | 10 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
from oslo_log import log as logging
import six
import webob.exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas import hosts
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova import exception
from nova.i18n import _LI
from nova import objects
LOG = logging.getLogger(__name__)
ALIAS = 'os-hosts'
authorize = extensions.os_compute_authorizer(ALIAS)
class HostController(wsgi.Controller):
    """The Hosts API controller for the OpenStack API.

    Exposes the admin-only os-hosts resource: listing hosts, toggling a
    host's enabled/maintenance state, power actions, and per-host resource
    usage reports.
    """
    def __init__(self):
        # Delegate all host operations to the compute-layer Host API.
        self.api = compute.HostAPI()
        super(HostController, self).__init__()
    @extensions.expected_errors(())
    def index(self, req):
        """Returns a dict in the format
        |   {'hosts': [{'host_name': 'some.host.name',
        |               'service': 'cells',
        |               'zone': 'internal'},
        |              {'host_name': 'some.other.host.name',
        |               'service': 'cells',
        |               'zone': 'internal'},
        |              {'host_name': 'some.celly.host.name',
        |               'service': 'cells',
        |               'zone': 'internal'},
        |              {'host_name': 'console1.host.com',
        |               'service': 'consoleauth',
        |               'zone': 'internal'},
        |              {'host_name': 'network1.host.com',
        |               'service': 'network',
        |               'zone': 'internal'},
        |              {'host_name': 'netwwork2.host.com',
        |               'service': 'network',
        |               'zone': 'internal'},
        |              {'host_name': 'compute1.host.com',
        |               'service': 'compute',
        |               'zone': 'nova'},
        |              {'host_name': 'compute2.host.com',
        |               'service': 'compute',
        |               'zone': 'nova'},
        |              {'host_name': 'sched1.host.com',
        |               'service': 'scheduler',
        |               'zone': 'internal'},
        |              {'host_name': 'sched2.host.com',
        |               'service': 'scheduler',
        |               'zone': 'internal'},
        |              {'host_name': 'vol1.host.com',
        |               'service': 'volume',
        |               'zone': 'internal'}]}
        """
        context = req.environ['nova.context']
        authorize(context)
        # Only enabled services are listed; optionally narrowed by ?zone=.
        filters = {'disabled': False}
        zone = req.GET.get('zone', None)
        if zone:
            filters['availability_zone'] = zone
        services = self.api.service_get_all(context, filters=filters,
                                            set_zones=True)
        hosts = []
        # API-tier services are excluded from the host listing.
        api_services = ('nova-osapi_compute', 'nova-ec2', 'nova-metadata')
        for service in services:
            if service.binary not in api_services:
                hosts.append({'host_name': service['host'],
                              'service': service['topic'],
                              'zone': service['availability_zone']})
        return {'hosts': hosts}
    @extensions.expected_errors((400, 404, 501))
    @validation.schema(hosts.update)
    def update(self, req, id, body):
        """Return booleanized version of body dict.
        :param Request req: The request object (containing 'nova-context'
                            env var).
        :param str id: The host name.
        :param dict body: example format {'host': {'status': 'enable',
                                                   'maintenance_mode': 'enable'}}
        :return: Same dict as body but 'enable' strings for 'status' and
                 'maintenance_mode' are converted into True, else False.
        :rtype: dict
        """
        def read_enabled(orig_val):
            # Convert enable/disable str to a bool.
            val = orig_val.strip().lower()
            return val == "enable"
        context = req.environ['nova.context']
        authorize(context)
        # See what the user wants to 'update'
        status = body.get('status')
        maint_mode = body.get('maintenance_mode')
        if status is not None:
            status = read_enabled(status)
        if maint_mode is not None:
            maint_mode = read_enabled(maint_mode)
        # Make the calls and merge the results
        result = {'host': id}
        if status is not None:
            result['status'] = self._set_enabled_status(context, id, status)
        if maint_mode is not None:
            result['maintenance_mode'] = self._set_host_maintenance(context,
                                                                    id,
                                                                    maint_mode)
        return result
    def _set_host_maintenance(self, context, host_name, mode=True):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation.
        """
        LOG.info(_LI("Putting host %(host_name)s in maintenance mode "
                     "%(mode)s."),
                 {'host_name': host_name, 'mode': mode})
        try:
            result = self.api.set_host_maintenance(context, host_name, mode)
        except NotImplementedError:
            # The driver for this host does not support maintenance mode.
            common.raise_feature_not_supported()
        except exception.HostNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        except exception.ComputeServiceUnavailable as e:
            raise webob.exc.HTTPBadRequest(explanation=e.format_message())
        if result not in ("on_maintenance", "off_maintenance"):
            # Any other string is treated as a driver-reported error message.
            raise webob.exc.HTTPBadRequest(explanation=result)
        return result
    def _set_enabled_status(self, context, host_name, enabled):
        """Sets the specified host's ability to accept new instances.
        :param enabled: a boolean - if False no new VMs will be able to start
                        on the host.
        """
        if enabled:
            LOG.info(_LI("Enabling host %s."), host_name)
        else:
            LOG.info(_LI("Disabling host %s."), host_name)
        try:
            result = self.api.set_host_enabled(context, host_name=host_name,
                                               enabled=enabled)
        except NotImplementedError:
            common.raise_feature_not_supported()
        except exception.HostNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        except exception.ComputeServiceUnavailable as e:
            raise webob.exc.HTTPBadRequest(explanation=e.format_message())
        if result not in ("enabled", "disabled"):
            # Any other string is treated as a driver-reported error message.
            raise webob.exc.HTTPBadRequest(explanation=result)
        return result
    def _host_power_action(self, req, host_name, action):
        """Reboots, shuts down or powers up the host."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            result = self.api.host_power_action(context, host_name=host_name,
                                                action=action)
        except NotImplementedError:
            common.raise_feature_not_supported()
        except exception.HostNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        except exception.ComputeServiceUnavailable as e:
            raise webob.exc.HTTPBadRequest(explanation=e.format_message())
        return {"host": host_name, "power_action": result}
    @extensions.expected_errors((400, 404, 501))
    def startup(self, req, id):
        """GET member action: power the host up."""
        return self._host_power_action(req, host_name=id, action="startup")
    @extensions.expected_errors((400, 404, 501))
    def shutdown(self, req, id):
        """GET member action: shut the host down."""
        return self._host_power_action(req, host_name=id, action="shutdown")
    @extensions.expected_errors((400, 404, 501))
    def reboot(self, req, id):
        """GET member action: reboot the host."""
        return self._host_power_action(req, host_name=id, action="reboot")
    @staticmethod
    def _get_total_resources(host_name, compute_node):
        # Total capacity of the host as reported by its compute node record.
        return {'resource': {'host': host_name,
                             'project': '(total)',
                             'cpu': compute_node['vcpus'],
                             'memory_mb': compute_node['memory_mb'],
                             'disk_gb': compute_node['local_gb']}}
    @staticmethod
    def _get_used_now_resources(host_name, compute_node):
        # Resources currently in use according to the compute node record.
        return {'resource': {'host': host_name,
                             'project': '(used_now)',
                             'cpu': compute_node['vcpus_used'],
                             'memory_mb': compute_node['memory_mb_used'],
                             'disk_gb': compute_node['local_gb_used']}}
    @staticmethod
    def _get_resource_totals_from_instances(host_name, instances):
        # Maximum usage implied by the flavors of all instances on the host.
        cpu_sum = 0
        mem_sum = 0
        hdd_sum = 0
        for instance in instances:
            cpu_sum += instance['vcpus']
            mem_sum += instance['memory_mb']
            hdd_sum += instance['root_gb'] + instance['ephemeral_gb']
        return {'resource': {'host': host_name,
                             'project': '(used_max)',
                             'cpu': cpu_sum,
                             'memory_mb': mem_sum,
                             'disk_gb': hdd_sum}}
    @staticmethod
    def _get_resources_by_project(host_name, instances):
        # Getting usage resource per project
        project_map = {}
        for instance in instances:
            resource = project_map.setdefault(instance['project_id'],
                                              {'host': host_name,
                                               'project': instance['project_id'],
                                               'cpu': 0,
                                               'memory_mb': 0,
                                               'disk_gb': 0})
            resource['cpu'] += instance['vcpus']
            resource['memory_mb'] += instance['memory_mb']
            resource['disk_gb'] += (instance['root_gb'] +
                                    instance['ephemeral_gb'])
        return project_map
    @extensions.expected_errors(404)
    def show(self, req, id):
        """Shows the physical/usage resource given by hosts.
        :param id: hostname
        :returns: expected to use HostShowTemplate.
            ex.::
                {'host': {'resource':D},..}
                D: {'host': 'hostname','project': 'admin',
                    'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
        """
        context = req.environ['nova.context']
        authorize(context)
        host_name = id
        try:
            compute_node = (
                objects.ComputeNode.get_first_node_by_host_for_old_compat(
                    context, host_name))
        except exception.ComputeHostNotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.format_message())
        instances = self.api.instance_get_all_by_host(context, host_name)
        # Report totals, current usage, flavor-implied maximum usage, and
        # then a per-project breakdown.
        resources = [self._get_total_resources(host_name, compute_node)]
        resources.append(self._get_used_now_resources(host_name,
                                                      compute_node))
        resources.append(self._get_resource_totals_from_instances(host_name,
                                                                  instances))
        by_proj_resources = self._get_resources_by_project(host_name,
                                                           instances)
        for resource in six.itervalues(by_proj_resources):
            resources.append({'resource': resource})
        return {'host': resources}
class Hosts(extensions.V21APIExtensionBase):
    """Admin-only host administration."""
    name = "Hosts"
    alias = ALIAS
    version = 1
    def get_resources(self):
        # Expose /os-hosts plus the GET member actions for power management.
        member_actions = {
            "startup": "GET",
            "shutdown": "GET",
            "reboot": "GET",
        }
        resource = extensions.ResourceExtension(
            ALIAS, HostController(), member_actions=member_actions)
        return [resource]
    def get_controller_extensions(self):
        # This extension adds no extra controller behavior.
        return []
|
tkf/emacs-ipython-notebook | refs/heads/master | tools/testein.py | 1 | #!/usr/bin/env python
"""
Run EIN test suite
"""
import sys
import os
import glob
from subprocess import Popen, PIPE, STDOUT
import itertools
EIN_ROOT = os.path.normpath(
os.path.join(os.path.dirname(__file__), os.path.pardir))
def has_library(emacs, library):
    """
    Return True when `emacs` has build-in `library`.

    Runs a batch Emacs that asserts the library can be located; a zero exit
    status means the library exists.
    """
    eval_form = '(assert (locate-library "{0}"))'.format(library)
    with open(os.devnull, 'w') as devnull:
        checker = Popen(
            [emacs, '-Q', '-batch', '-l', 'cl', '--eval', eval_form],
            stdout=devnull, stderr=devnull)
    return checker.wait() == 0
def eindir(*path):
    """Return a path under the EIN repository root."""
    return os.path.join(EIN_ROOT, *path)
def einlispdir(*path):
    """Return a path under the repository's lisp/ directory."""
    return eindir('lisp', *path)
def eintestdir(*path):
    """Return a path under the repository's tests/ directory."""
    return eindir('tests', *path)
def einlibdir(*path):
    """Return a path under the repository's lib/ (bundled submodules) directory."""
    return eindir('lib', *path)
def show_nonprinting(string, stream=sys.stdout):
    """Emulate ``cat -v`` (``--show-nonprinting``)."""
    # NOTE(review): itertools.imap is Python 2 only; this function will not
    # run under Python 3 as written.
    stream.writelines(itertools.imap(chr, convert_nonprinting(string)))
def convert_nonprinting(string):
    """
    Convert non-printing characters in `string`.

    Output is iterable of int. So for Python 2, you need to
    convert it into string using `chr`.

    Tab (0x09) and newline (0x0a) pass through unchanged; other control
    characters become '^X'; DEL becomes '^?'; bytes above 0x7f are
    prefixed with 'M-' and folded into the ASCII range.

    Adapted from: http://stackoverflow.com/a/437542/727827
    """
    # Use a generator expression instead of itertools.imap so this also
    # works on Python 3, where imap no longer exists.
    for b in (ord(c) for c in string):
        assert 0 <= b < 0x100
        if b in (0x09, 0x0a):  # '\t\n'
            yield b
            continue
        if b > 0x7f:  # not ascii
            yield 0x4d  # 'M'
            yield 0x2d  # '-'
            b &= 0x7f
        if b < 0x20:  # control char
            yield 0x5e  # '^'
            b |= 0x40
        elif b == 0x7f:
            yield 0x5e  # '^'
            yield 0x3f  # '?'
            continue
        yield b
class BaseRunner(object):
    """
    Common machinery for test/server runners.

    All configuration is injected as keyword arguments and copied straight
    onto the instance; subclasses provide `command` and `do_run`.
    """
    def __init__(self, **kwds):
        self.__dict__.update(kwds)
        # Interactive debugging (debug-on-error) forces non-batch mode.
        self.batch = self.batch and not self.debug_on_error
    def logpath(self, name, ext='log'):
        """Return a log file path encoding test name, mode, and Emacs binary."""
        return os.path.join(
            self.log_dir,
            "{testname}_{logname}_{modename}_{emacsname}.{ext}".format(
                ext=ext,
                logname=name,
                emacsname=os.path.basename(self.emacs),
                testname=os.path.splitext(self.testfile)[0],
                modename='batch' if self.batch else 'interactive',
            ))
    @property
    def command(self):
        # Subclasses must supply the command to execute (list or string).
        raise NotImplementedError
    def do_run(self):
        # Subclasses implement the actual execution.
        raise NotImplementedError
    def run(self):
        """Print the command in dry-run mode; otherwise execute via do_run."""
        if self.dry_run:
            command = self.command
            if isinstance(command, basestring):
                print command
            else:
                print construct_command(command)
            return 0
        else:
            mkdirp(self.log_dir)
            return self.do_run()
class TestRunner(BaseRunner):
    """
    Runs one ERT test file in Emacs (batch or interactive) and reports results.
    """
    def __init__(self, **kwds):
        super(TestRunner, self).__init__(**kwds)
        fmtdata = self.__dict__.copy()
        fmtdata.update(
            emacsname=os.path.basename(self.emacs),
            testname=os.path.splitext(self.testfile)[0],
            modename='batch' if self.batch else 'interactive',
        )
        quote = '"{0}"'.format
        self.logpath_log = self.logpath('log')
        self.logpath_messages = self.logpath('messages')
        # Elisp variables to bind on the Emacs command line (see bind_lispvars).
        self.lispvars = {
            'ein:testing-dump-file-log': quote(self.logpath_log),
            'ein:testing-dump-file-messages': quote(self.logpath_messages),
            'ein:log-level': self.ein_log_level,
            'ein:log-message-level': self.ein_message_level,
        }
        if self.ein_debug:
            self.lispvars['ein:debug'] = "'t"
    def setq(self, sym, val):
        """Add/override an Elisp variable binding for the Emacs invocation."""
        self.lispvars[sym] = val
    def bind_lispvars(self):
        """Render self.lispvars as a list of --eval (setq ...) arguments."""
        command = []
        for (k, v) in self.lispvars.iteritems():
            if v is not None:
                command.extend([
                    '--eval', '(setq {0} {1})'.format(k, v)])
        return command
    @property
    def base_command(self):
        """Emacs command line with load paths and the test file, minus the entry point."""
        command = [self.emacs, '-Q'] + self.bind_lispvars()
        if self.batch:
            command.append('-batch')
        if self.debug_on_error:
            command.extend(['-f', 'toggle-debug-on-error'])
        # load modules
        if self.need_ert():
            ertdir = einlibdir('ert', 'lisp', 'emacs-lisp')
            command.extend([
                '-L', ertdir,
                # Load `ert-run-tests-batch-and-exit`:
                '-l', os.path.join(ertdir, 'ert-batch.el'),
                # Load `ert-run-tests-interactively`:
                '-l', os.path.join(ertdir, 'ert-ui.el'),
            ])
        for path in self.load_path:
            command.extend(['-L', path])
        for path in self.load:
            command.extend(['-l', path])
        command.extend(['-L', einlispdir(),
                        '-L', einlibdir('websocket'),
                        '-L', einlibdir('request'),
                        '-L', einlibdir('auto-complete'),
                        '-L', einlibdir('popup'),
                        '-L', eintestdir(),
                        '-l', eintestdir(self.testfile)])
        return command
    @property
    def command(self):
        """Full Emacs command line including the ERT entry point."""
        command = self.base_command[:]
        if self.batch:
            command.extend(['-f', 'ert-run-tests-batch-and-exit'])
        else:
            command.extend(['--eval', "(ert 't)"])
        return command
    def show_sys_info(self):
        """Print EIN/Emacs system information before running the tests."""
        print "*" * 50
        command = self.base_command + [
            '-batch', '-l', 'ein-dev', '-f', 'ein:dev-print-sys-info']
        proc = Popen(command, stderr=PIPE)
        err = proc.stderr.read()
        proc.wait()
        if proc.returncode != 0:
            print "Error with return code {0} while running {1}".format(
                proc.returncode, command)
            print err
            pass
        print "*" * 50
    def need_ert(self):
        """Return True when ERT must be loaded from the bundled git submodule."""
        if self.load_ert:
            return True
        if self.auto_ert:
            if has_library(self.emacs, 'ert'):
                print "{0} has ERT module.".format(self.emacs)
                return False
            else:
                print "{0} has no ERT module.".format(self.emacs),
                print "ERT is going to be loaded from git submodule."
                return True
        return False
    def make_process(self):
        """Spawn the Emacs test process with merged stdout/stderr."""
        print "Start test {0}".format(self.testfile)
        self.proc = Popen(self.command, stdout=PIPE, stderr=STDOUT)
        return self.proc
    def report(self):
        """Wait for the test process, dump logs on failure, and return 0/1."""
        (stdout, _) = self.proc.communicate()
        self.stdout = stdout
        self.failed = self.proc.returncode != 0
        if self.failed:
            print "*" * 50
            print "Showing {0}:".format(self.logpath_log)
            print open(self.logpath_log).read()
            print
            print "*" * 50
            print "Showing STDOUT/STDERR:"
            show_nonprinting(stdout)
            print
            print "{0} failed".format(self.testfile)
        else:
            print "{0} OK".format(self.testfile)
            # Echo ERT's one-line summary ("Ran N tests ...").
            for line in reversed(stdout.splitlines()):
                if line.startswith('Ran'):
                    print line
                    break
        return int(self.failed)
    def do_run(self):
        """Show system info, run the tests, and report the exit status."""
        self.show_sys_info()
        self.make_process()
        return self.report()
    def is_known_failure(self):
        """
        Check if failures are known, based on STDOUT from ERT.
        """
        import re
        lines = iter(self.stdout.splitlines())
        for l in lines:
            if re.match("[0-9]+ unexpected results:.*", l):
                break
        else:
            return True  # no failure
        # Check "FAILED <test-name>" lines
        for l in lines:
            if not l:
                break  # end with an empty line
            for f in self.known_failures:
                if re.search(f, l):
                    break
            else:
                return False
        return True
    known_failures = [
        "ein:notebook-execute-current-cell-pyout-image$",
    ]
    """
    A list of regexp which matches to test that is known to fail (sometimes).
    This is a workaround for ##74.
    """
def mkdirp(path):
    """Do ``mkdir -p {path}``"""
    if os.path.isdir(path):
        # Already there; nothing to create.
        return
    os.makedirs(path)
def remove_elc():
    """Delete all compiled *.elc files under lisp/ and tests/."""
    files = glob.glob(einlispdir("*.elc")) + glob.glob(eintestdir("*.elc"))
    map(os.remove, files)
    print "Removed {0} elc files".format(len(files))
class ServerRunner(BaseRunner):
    """
    Starts an IPython notebook server for functional tests.

    Usable as a context manager: ``with ServerRunner(...) as port: ...``
    starts the server on enter and stops it on exit.
    """
    # Detected lazily from the server's first stdout line.
    port = None
    notebook_dir = os.path.join(EIN_ROOT, "tests", "notebook")
    def __enter__(self):
        self.run()
        return self.port
    def __exit__(self, type, value, traceback):
        self.stop()
    def do_run(self):
        self.clear_notebook_dir()
        self.start()
        self.get_port()
        print "Server running at", self.port
    def clear_notebook_dir(self):
        """Remove notebooks left behind by previous test runs."""
        files = glob.glob(os.path.join(self.notebook_dir, '*.ipynb'))
        map(os.remove, files)
        print "Removed {0} ipynb files".format(len(files))
    @staticmethod
    def _parse_port_line(line):
        # Extract the port from "... running at http://127.0.0.1:8888/".
        return line.strip().rsplit(':', 1)[-1].strip('/')
    def get_port(self):
        """Read (once) and cache the server port from its stdout."""
        if self.port is None:
            self.port = self._parse_port_line(self.proc.stdout.readline())
        return self.port
    def start(self):
        """Launch the notebook server via the shell pipeline in command_template."""
        from subprocess import Popen, PIPE, STDOUT
        self.proc = Popen(
            self.command, stdout=PIPE, stderr=STDOUT, stdin=PIPE,
            shell=True)
        # Answer "y" to the prompt: Shutdown Notebook Server (y/[n])?
        self.proc.stdin.write('y\n')
    def stop(self):
        """Terminate the server, or dump its log if it already died."""
        print "Stopping server", self.port
        returncode = self.proc.poll()
        if returncode is not None:
            logpath = self.logpath('server')
            print "Server process was already dead by exit code", returncode
            print "*" * 50
            print "Showing {0}:".format(logpath)
            print open(logpath).read()
            print
            return
        if not self.dry_run:
            try:
                # The shell pipeline spawns ipython as a child; kill it first.
                kill_subprocesses(self.proc.pid, lambda x: 'ipython' in x)
            finally:
                self.proc.terminate()
    @property
    def command(self):
        fmtdata = dict(
            notebook_dir=self.notebook_dir,
            ipython=self.ipython,
            server_log=self.logpath('server'),
        )
        return self.command_template.format(**fmtdata)
    command_template = r"""
    {ipython} notebook \
    --notebook-dir {notebook_dir} \
    --debug \
    --no-browser 2>&1 \
    | tee {server_log} \
    | grep --line-buffered 'The IPython Notebook is running at' \
    | head -n1
    """
def kill_subprocesses(pid, include=lambda x: True):
    """Send SIGINT to direct children of `pid` whose command line
    satisfies `include`.

    Children are discovered by parsing `ps -e -o ppid,pid,command`;
    raises RuntimeError when ps itself fails.
    """
    from subprocess import Popen, PIPE
    import signal
    command = ['ps', '-e', '-o', 'ppid,pid,command']
    proc = Popen(command, stdout=PIPE, stderr=PIPE)
    (stdout, stderr) = proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError(
            'Command {0} failed with code {1} and following error message:\n'
            '{2}'.format(command, proc.returncode, stderr))
    for line in map(str.strip, stdout.splitlines()):
        # Columns: parent pid, pid, then the full command (may contain
        # spaces, hence maxsplit=2).
        (cmd_ppid, cmd_pid, cmd) = line.split(None, 2)
        if cmd_ppid == str(pid) and include(cmd):
            print "Killing PID={0} COMMAND={1}".format(cmd_pid, cmd)
            os.kill(int(cmd_pid), signal.SIGINT)
def construct_command(args):
    """
    Construct command as a string given a list of arguments.

    Arguments containing spaces or parentheses are quoted via repr()
    (a hackish but sufficient escape for these scripts); all other
    arguments are passed through untouched.
    """
    needs_escape = set(' ()')
    quoted = [repr(str(arg)) if set(arg) & needs_escape else arg
              for arg in args]
    return " ".join(quoted)
def run_ein_test(unit_test, func_test, func_test_max_retries,
                 no_skip, clean_elc, **kwds):
    """Run the EIN unit and/or functional test suites.

    Returns 0 on success, 1 on failure (suitable for sys.exit()).
    The functional test is retried up to `func_test_max_retries` times
    as a workaround for the flaky issue #74; unless `no_skip` is set, a
    run whose failures all match TestRunner.known_failures counts as
    success.
    """
    if clean_elc and not kwds['dry_run']:
        remove_elc()
    if unit_test:
        unit_test_runner = TestRunner(testfile='test-load.el', **kwds)
        if unit_test_runner.run() != 0:
            return 1
    if func_test:
        for i in range(func_test_max_retries + 1):
            func_test_runner = TestRunner(testfile='func-test.el', **kwds)
            # The notebook server lives only for this attempt; its port
            # is handed to Emacs through the ein:testing-port variable.
            with ServerRunner(testfile='func-test.el', **kwds) as port:
                func_test_runner.setq('ein:testing-port', port)
                if func_test_runner.run() == 0:
                    print "Functional test succeeded after {0} retries." \
                        .format(i)
                    return 0
            if not no_skip and func_test_runner.is_known_failure():
                print "All failures are known. Ending functional test."
                return 0
        print "Functional test failed after {0} retries.".format(i)
        return 1
    return 0
def main():
    """Parse command line options and run the EIN test suites,
    exiting with run_ein_test()'s status code."""
    import sys
    from argparse import ArgumentParser
    # The module docstring's second line doubles as the CLI description.
    parser = ArgumentParser(description=__doc__.splitlines()[1])
    parser.add_argument('--emacs', '-e', default='emacs',
                        help='Emacs executable.')
    parser.add_argument('--load-path', '-L', default=[], action='append',
                        help="add a directory to load-path. "
                        "can be specified multiple times.")
    parser.add_argument('--load', '-l', default=[], action='append',
                        help="load lisp file before tests. "
                        "can be specified multiple times.")
    parser.add_argument('--load-ert', default=False, action='store_true',
                        help="load ERT from git submodule. "
                        "you need to update git submodule manually "
                        "if ert/ directory does not exist yet.")
    parser.add_argument('--no-auto-ert', default=True,
                        dest='auto_ert', action='store_false',
                        help="load ERT from git submodule. "
                        "if this Emacs has no build-in ERT module.")
    parser.add_argument('--no-batch', '-B', default=True,
                        dest='batch', action='store_false',
                        help="start interactive session.")
    parser.add_argument('--debug-on-error', '-d', default=False,
                        action='store_true',
                        help="set debug-on-error to t and start "
                        "interactive session.")
    parser.add_argument('--func-test-max-retries', default=4, type=int,
                        help="""
                        Specify number of retries for functional test
                        before failing with error. This is workaround
                        for the issue #74.
                        """)
    parser.add_argument('--no-skip', default=False, action='store_true',
                        help="""
                        Do no skip known failures. Known failures
                        are implemented as another workaround for the
                        issue #74.
                        """)
    parser.add_argument('--no-func-test', '-F', default=True,
                        dest='func_test', action='store_false',
                        help="do not run functional test.")
    parser.add_argument('--no-unit-test', '-U', default=True,
                        dest='unit_test', action='store_false',
                        help="do not run unit test.")
    parser.add_argument('--clean-elc', '-c', default=False,
                        action='store_true',
                        help="remove *.elc files in ein/lisp and "
                        "ein/tests directories.")
    parser.add_argument('--dry-run', default=False,
                        action='store_true',
                        help="Print commands to be executed.")
    parser.add_argument('--ipython', default='ipython',
                        help="""
                        ipython executable to use to run notebook server.
                        """)
    parser.add_argument('--ein-log-level', default=40)
    parser.add_argument('--ein-message-level', default=30)
    parser.add_argument('--ein-debug', default=False, action='store_true',
                        help="(setq ein:debug t) when given.")
    parser.add_argument('--log-dir', default="log",
                        help="Directory to store log (default: %(default)s)")
    args = parser.parse_args()
    # All parsed options are forwarded to run_ein_test as keyword args.
    sys.exit(run_ein_test(**vars(args)))
# Run the CLI only when executed as a script, not when imported.
if __name__ == '__main__':
    main()
|
peterm-itr/edx-platform | refs/heads/master | common/djangoapps/util/bad_request_rate_limiter.py | 200 | """
A utility class which wraps the RateLimitMixin 3rd party class to do bad request counting
which can be used for rate limiting
"""
from ratelimitbackend.backends import RateLimitMixin
class BadRequestRateLimiter(RateLimitMixin):
    """
    Thin wrapper around the third-party RateLimitMixin used to rate
    limit flows such as password reset, based on bad-request counts.
    """
    def is_rate_limit_exceeded(self, request):
        """
        Return True when the client behind ``request`` has already made
        at least as many bad requests as the configured limit allows.
        """
        total_bad_requests = sum(self.get_counters(request).values())
        return total_bad_requests >= self.requests

    def tick_bad_request_counter(self, request):
        """
        Increment the cached bad-request counter for this client.
        """
        cache_key = self.get_cache_key(request)
        self.cache_incr(cache_key)
|
timeyyy/apptools | refs/heads/master | peasoup/util.py | 1 | """
peasoup.util: misc utility functions for peasoup
"""
import sys
import os
# Since peasoup will be imported on every invocation, we want as little
# overhead as possible when importing the main module. We therefore use
# a simple lazy-loading scheme for many of our imports, built from the
# functions below.
def lazy_import(func):
    """Decorator for declaring a lazy import.
    This decorator turns a function into an object that will act as a lazy
    importer.  Whenever the object's attributes are accessed, the function
    is called and its return value used in place of the object.  So you
    can declare lazy imports like this:
        @lazy_import
        def socket():
            import socket
            return socket
    The name "socket" will then be bound to a transparent object proxy which
    will import the socket module upon first use.
    The syntax here is slightly more verbose than other lazy import recipes,
    but it's designed not to hide the actual "import" statements from tools
    like py2exe or grep.
    """
    try:
        # Grab the caller's local namespace so the proxy can replace
        # itself with the real module once loaded (best effort only).
        f = sys._getframe(1)
    except Exception:
        namespace = None
    else:
        namespace = f.f_locals
    return _LazyImport(func.__name__,func,namespace)
class _LazyImport(object):
    """Class representing a lazy import.

    Acts as a transparent proxy: the first attribute access (or truth
    test) triggers the loader, after which all access is forwarded to
    the loaded object.  Attribute names are prefixed with _esky_lazy_
    to minimise collisions with the proxied object's own attributes.
    """
    def __init__(self,name,loader,namespace=None):
        # The class itself is used as the "not yet loaded" sentinel.
        self._esky_lazy_target = _LazyImport
        self._esky_lazy_name = name
        self._esky_lazy_loader = loader
        self._esky_lazy_namespace = namespace
    def _esky_lazy_load(self):
        # Load once, then try to replace the proxy with the real object
        # in the namespace it was declared in (if it is still bound).
        if self._esky_lazy_target is _LazyImport:
            self._esky_lazy_target = self._esky_lazy_loader()
            ns = self._esky_lazy_namespace
            if ns is not None:
                try:
                    if ns[self._esky_lazy_name] is self:
                        ns[self._esky_lazy_name] = self._esky_lazy_target
                except KeyError:
                    pass
    def __getattribute__(self,attr):
        # Our own _esky_lazy_* attributes resolve normally; anything
        # else forces the load and is delegated to the target.
        try:
            return object.__getattribute__(self,attr)
        except AttributeError:
            if self._esky_lazy_target is _LazyImport:
                self._esky_lazy_load()
            return getattr(self._esky_lazy_target,attr)
    def __bool__(self):
        if self._esky_lazy_target is _LazyImport:
            self._esky_lazy_load()
        return bool(self._esky_lazy_target)
def sftp_upload_window_size_set(srv, file, method_to_call='put'):
    '''
    Enlarge the SFTP channel's outgoing window before uploading ``file``
    with pysftp, so the whole file (plus ~10% slack for packet loss and
    protocol overhead) fits in the window.

    srv: a pysftp connection exposing ``sftp_client``.
    file: path of the local file about to be uploaded.
    method_to_call: unused here; kept for backward compatibility.
    '''
    channel = srv.sftp_client.get_channel()
    channel.lock.acquire()
    try:
        # bit more bytes in case of packet loss
        channel.out_window_size += os.stat(file).st_size * 1.1
        channel.out_buffer_cv.notifyAll()
    finally:
        # Always release the channel lock, even if stat()/notifyAll()
        # raises -- the original code leaked the lock on error.
        channel.lock.release()
|
METASPACE2020/sm-engine | refs/heads/master | sm/engine/mol_db.py | 2 | from collections import OrderedDict
import pandas as pd
import logging
import requests
from sm.engine.db import DB
from sm.engine.util import SMConfig
logger = logging.getLogger('engine')
SF_INS = 'INSERT INTO sum_formula (db_id, sf) values (%s, %s)'
SF_COUNT = 'SELECT count(*) FROM sum_formula WHERE db_id = %s'
SF_SELECT = 'SELECT sf FROM sum_formula WHERE db_id = %s'
class MolDBServiceWrapper(object):
    """Small HTTP client for the molecular database REST service."""

    def __init__(self, service_url):
        self._service_url = service_url
        self._session = requests.Session()

    def _fetch(self, url):
        """GET ``url`` and return the ``data`` field of the JSON body."""
        resp = self._session.get(url)
        resp.raise_for_status()
        return resp.json()['data']

    def fetch_all_dbs(self):
        """Return metadata for every available database."""
        return self._fetch('{}/databases'.format(self._service_url))

    def find_db_by_id(self, id):
        """Return metadata for the database with the given id."""
        return self._fetch('{}/databases/{}'.format(self._service_url, id))

    def find_db_by_name_version(self, name, version=None):
        """Return matching databases; all versions unless one is given."""
        url = '{}/databases?name={}'.format(self._service_url, name)
        if version:
            url += '&version={}'.format(version)
        return self._fetch(url)

    def fetch_db_sfs(self, db_id):
        """Return the list of sum formulas for a database."""
        return self._fetch('{}/databases/{}/sfs'.format(self._service_url, db_id))

    def fetch_molecules(self, db_id, sf=None):
        """Return molecules of a database, optionally for one formula."""
        if sf:
            url = '{}/databases/{}/molecules?sf={}&fields=mol_id,mol_name'
            return self._fetch(url.format(self._service_url, db_id, sf))
        else:
            # TODO: replace one large request with several smaller ones
            url = '{}/databases/{}/molecules?fields=sf,mol_id,mol_name&limit=10000000'
            return self._fetch(url.format(self._service_url, db_id))
class MolecularDB(object):
    """ A class representing a molecule database to search through.
    Provides several data structures used in the engine to speed up computation
    Args
    ----------
    name: str
    version: str
        If None the latest version will be used
    iso_gen_config : dict
        Isotope generator configuration
    mol_db_service : sm.engine.MolDBServiceWrapper
        Molecular database ID/name resolver
    db : DB
        Database connector
    """
    def __init__(self, id=None, name=None, version=None, iso_gen_config=None,
                 mol_db_service=None, db=None):
        self._iso_gen_config = iso_gen_config
        sm_config = SMConfig.get_conf()
        # Fall back to the service URL from the global SM config when no
        # wrapper instance is injected (e.g. in tests).
        self._mol_db_service = mol_db_service or MolDBServiceWrapper(sm_config['services']['mol_db'])
        self._db = db
        # Resolve the database record either by id or by (name, version).
        if id is not None:
            data = self._mol_db_service.find_db_by_id(id)
        elif name is not None:
            data = self._mol_db_service.find_db_by_name_version(name, version)[0]
        else:
            raise Exception('MolDB id or name should be provided')
        self._id, self._name, self._version = data['id'], data['name'], data['version']
        self._sf_df = None
        self._job_id = None
        # Lazily-populated formula cache; see the `sfs` property.
        self._sfs = None
        self._ion_centroids = None
    def __str__(self):
        return '{} {}'.format(self.name, self.version)
    @property
    def id(self):
        return self._id
    @property
    def name(self):
        return self._name
    @property
    def version(self):
        return self._version
    @property
    def ion_centroids(self):
        return self._ion_centroids
    def set_ion_centroids(self, ion_centroids):
        self._ion_centroids = ion_centroids
    def set_job_id(self, job_id):
        self._job_id = job_id
    def get_molecules(self, sf=None):
        """ Returns a dataframe with (mol_id, mol_name) or (sf, mol_id, mol_name) rows
        Args
        ----------
        sf: str
        Returns
        ----------
        pd.DataFrame
        """
        return pd.DataFrame(self._mol_db_service.fetch_molecules(self.id, sf=sf))
    @property
    def sfs(self):
        """ Total list of formulas """
        if not self._sfs:
            # First access: if the local DB has no formulas for this
            # database yet, import them from the mol_db service, then
            # read them back from the local DB and cache in memory.
            if self._db.select_one(SF_COUNT, params=(self._id,))[0] == 0:
                sfs = self._mol_db_service.fetch_db_sfs(self.id)
                rows = [(self._id, sf) for sf in sfs]
                self._db.insert(SF_INS, rows)
            self._sfs = [row[0] for row in self._db.select(SF_SELECT, params=(self._id,))]
        return self._sfs
|
megraf/asuswrt-merlin | refs/heads/master | release/src/router/nfs-utils/tools/nfs-iostat/nfs-iostat.py | 16 | #!/usr/bin/env python
# -*- python-mode -*-
"""Emulate iostat for NFS mount points using /proc/self/mountstats
"""
__copyright__ = """
Copyright (C) 2005, Chuck Lever <cel@netapp.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
import sys, os, time
Iostats_version = '0.2'
def difference(x, y):
    """Return ``x - y``; usable as a two-argument map() callback."""
    return x - y
# Names of the per-mount NFS event counters, in the exact order the
# values appear on the "events:" line of /proc/self/mountstats.
NfsEventCounters = [
    'inoderevalidates',
    'dentryrevalidates',
    'datainvalidates',
    'attrinvalidates',
    'vfsopen',
    'vfslookup',
    'vfspermission',
    'vfsupdatepage',
    'vfsreadpage',
    'vfsreadpages',
    'vfswritepage',
    'vfswritepages',
    'vfsreaddir',
    'vfssetattr',
    'vfsflush',
    'vfsfsync',
    'vfslock',
    'vfsrelease',
    'congestionwait',
    'setattrtrunc',
    'extendwrite',
    'sillyrenames',
    'shortreads',
    'shortwrites',
    'delay'
]
# Names of the per-mount NFS byte counters, in the exact order the
# values appear on the "bytes:" line of /proc/self/mountstats.
NfsByteCounters = [
    'normalreadbytes',
    'normalwritebytes',
    'directreadbytes',
    'directwritebytes',
    'serverreadbytes',
    'serverwritebytes',
    'readpages',
    'writepages'
]
class DeviceData:
    """DeviceData objects provide methods for parsing and displaying
    data for a single mount grabbed from /proc/self/mountstats
    """
    def __init__(self):
        # Raw counters keyed by name; populated by parse_stats().
        self.__nfs_data = dict()
        self.__rpc_data = dict()
        self.__rpc_data['ops'] = []
    def __parse_nfs_line(self, words):
        # Parse one line of the NFS (pre-RPC) section of a mountstats entry.
        if words[0] == 'device':
            self.__nfs_data['export'] = words[1]
            self.__nfs_data['mountpoint'] = words[4]
            self.__nfs_data['fstype'] = words[7]
            if words[7] == 'nfs':
                self.__nfs_data['statvers'] = words[8]
        elif words[0] == 'age:':
            self.__nfs_data['age'] = long(words[1])
        elif words[0] == 'opts:':
            self.__nfs_data['mountoptions'] = ''.join(words[1:]).split(',')
        elif words[0] == 'caps:':
            self.__nfs_data['servercapabilities'] = ''.join(words[1:]).split(',')
        elif words[0] == 'nfsv4:':
            self.__nfs_data['nfsv4flags'] = ''.join(words[1:]).split(',')
        elif words[0] == 'sec:':
            keys = ''.join(words[1:]).split(',')
            self.__nfs_data['flavor'] = int(keys[0].split('=')[1])
            self.__nfs_data['pseudoflavor'] = 0
            # Flavor 6 carries an extra pseudoflavor key on the line.
            if self.__nfs_data['flavor'] == 6:
                self.__nfs_data['pseudoflavor'] = int(keys[1].split('=')[1])
        elif words[0] == 'events:':
            # Values follow the fixed order declared in NfsEventCounters.
            i = 1
            for key in NfsEventCounters:
                self.__nfs_data[key] = int(words[i])
                i += 1
        elif words[0] == 'bytes:':
            # Values follow the fixed order declared in NfsByteCounters.
            i = 1
            for key in NfsByteCounters:
                self.__nfs_data[key] = long(words[i])
                i += 1
    def __parse_rpc_line(self, words):
        # Parse one line of the RPC section; the xprt: field layout
        # depends on the transport (udp/tcp/rdma).
        if words[0] == 'RPC':
            self.__rpc_data['statsvers'] = float(words[3])
            self.__rpc_data['programversion'] = words[5]
        elif words[0] == 'xprt:':
            self.__rpc_data['protocol'] = words[1]
            if words[1] == 'udp':
                self.__rpc_data['port'] = int(words[2])
                self.__rpc_data['bind_count'] = int(words[3])
                self.__rpc_data['rpcsends'] = int(words[4])
                self.__rpc_data['rpcreceives'] = int(words[5])
                self.__rpc_data['badxids'] = int(words[6])
                self.__rpc_data['inflightsends'] = long(words[7])
                self.__rpc_data['backlogutil'] = long(words[8])
            elif words[1] == 'tcp':
                self.__rpc_data['port'] = words[2]
                self.__rpc_data['bind_count'] = int(words[3])
                self.__rpc_data['connect_count'] = int(words[4])
                self.__rpc_data['connect_time'] = int(words[5])
                self.__rpc_data['idle_time'] = int(words[6])
                self.__rpc_data['rpcsends'] = int(words[7])
                self.__rpc_data['rpcreceives'] = int(words[8])
                self.__rpc_data['badxids'] = int(words[9])
                self.__rpc_data['inflightsends'] = long(words[10])
                self.__rpc_data['backlogutil'] = long(words[11])
            elif words[1] == 'rdma':
                self.__rpc_data['port'] = words[2]
                self.__rpc_data['bind_count'] = int(words[3])
                self.__rpc_data['connect_count'] = int(words[4])
                self.__rpc_data['connect_time'] = int(words[5])
                self.__rpc_data['idle_time'] = int(words[6])
                self.__rpc_data['rpcsends'] = int(words[7])
                self.__rpc_data['rpcreceives'] = int(words[8])
                self.__rpc_data['badxids'] = int(words[9])
                self.__rpc_data['backlogutil'] = int(words[10])
                self.__rpc_data['read_chunks'] = int(words[11])
                self.__rpc_data['write_chunks'] = int(words[12])
                self.__rpc_data['reply_chunks'] = int(words[13])
                self.__rpc_data['total_rdma_req'] = int(words[14])
                self.__rpc_data['total_rdma_rep'] = int(words[15])
                self.__rpc_data['pullup'] = int(words[16])
                self.__rpc_data['fixup'] = int(words[17])
                self.__rpc_data['hardway'] = int(words[18])
                self.__rpc_data['failed_marshal'] = int(words[19])
                self.__rpc_data['bad_reply'] = int(words[20])
        elif words[0] == 'per-op':
            self.__rpc_data['per-op'] = words
        else:
            # Any other line is a per-op stats row: "OPNAME: n n n ...".
            op = words[0][:-1]
            self.__rpc_data['ops'] += [op]
            self.__rpc_data[op] = [long(word) for word in words[1:]]
    def parse_stats(self, lines):
        """Turn a list of lines from a mount stat file into a
        dictionary full of stats, keyed by name
        """
        # Lines before the 'RPC' marker belong to the NFS section;
        # everything from that marker onward is RPC data.
        found = False
        for line in lines:
            words = line.split()
            if len(words) == 0:
                continue
            if (not found and words[0] != 'RPC'):
                self.__parse_nfs_line(words)
                continue
            found = True
            self.__parse_rpc_line(words)
    def is_nfs_mountpoint(self):
        """Return True if this is an NFS or NFSv4 mountpoint,
        otherwise return False
        """
        if self.__nfs_data['fstype'] == 'nfs':
            return True
        elif self.__nfs_data['fstype'] == 'nfs4':
            return True
        return False
    def compare_iostats(self, old_stats):
        """Return the difference between two sets of stats
        """
        result = DeviceData()
        # copy self into result
        for key, value in self.__nfs_data.iteritems():
            result.__nfs_data[key] = value
        for key, value in self.__rpc_data.iteritems():
            result.__rpc_data[key] = value
        # compute the difference of each item in the list
        # note the copy loop above does not copy the lists, just
        # the reference to them.  so we build new lists here
        # for the result object.
        for op in result.__rpc_data['ops']:
            result.__rpc_data[op] = map(difference, self.__rpc_data[op], old_stats.__rpc_data[op])
        # update the remaining keys we care about
        result.__rpc_data['rpcsends'] -= old_stats.__rpc_data['rpcsends']
        result.__rpc_data['backlogutil'] -= old_stats.__rpc_data['backlogutil']
        for key in NfsEventCounters:
            result.__nfs_data[key] -= old_stats.__nfs_data[key]
        for key in NfsByteCounters:
            result.__nfs_data[key] -= old_stats.__nfs_data[key]
        return result
    def __print_data_cache_stats(self):
        """Print the data cache hit rate
        """
        nfs_stats = self.__nfs_data
        app_bytes_read = float(nfs_stats['normalreadbytes'])
        if app_bytes_read != 0:
            # Bytes satisfied from cache = bytes the app asked for minus
            # bytes that actually went over the wire (excluding O_DIRECT).
            client_bytes_read = float(nfs_stats['serverreadbytes'] - nfs_stats['directreadbytes'])
            ratio = ((app_bytes_read - client_bytes_read) * 100) / app_bytes_read
            print
            print 'app bytes: %f  client bytes %f' % (app_bytes_read, client_bytes_read)
            print 'Data cache hit ratio: %4.2f%%' % ratio
    def __print_attr_cache_stats(self, sample_time):
        """Print attribute cache efficiency stats
        """
        nfs_stats = self.__nfs_data
        getattr_stats = self.__rpc_data['GETATTR']
        if nfs_stats['inoderevalidates'] != 0:
            getattr_ops = float(getattr_stats[1])
            opens = float(nfs_stats['vfsopen'])
            # Opens always force a GETATTR, so exclude them from the
            # revalidation count.
            revalidates = float(nfs_stats['inoderevalidates']) - opens
            if revalidates != 0:
                ratio = ((revalidates - getattr_ops) * 100) / revalidates
            else:
                ratio = 0.0
            data_invalidates = float(nfs_stats['datainvalidates'])
            attr_invalidates = float(nfs_stats['attrinvalidates'])
            print
            print '%d inode revalidations, hitting in cache %4.2f%% of the time' % \
                (revalidates, ratio)
            print '%d open operations (mandatory GETATTR requests)' % opens
            if getattr_ops != 0:
                print '%4.2f%% of GETATTRs resulted in data cache invalidations' % \
                    ((data_invalidates * 100) / getattr_ops)
    def __print_dir_cache_stats(self, sample_time):
        """Print directory stats
        """
        nfs_stats = self.__nfs_data
        lookup_ops = self.__rpc_data['LOOKUP'][0]
        readdir_ops = self.__rpc_data['READDIR'][0]
        # READDIRPLUS exists only on NFSv3 mounts.
        if self.__rpc_data.has_key('READDIRPLUS'):
            readdir_ops += self.__rpc_data['READDIRPLUS'][0]
        dentry_revals = nfs_stats['dentryrevalidates']
        opens = nfs_stats['vfsopen']
        lookups = nfs_stats['vfslookup']
        getdents = nfs_stats['vfsreaddir']
        print
        print '%d open operations (pathname lookups)' % opens
        print '%d dentry revalidates and %d vfs lookup requests' % \
            (dentry_revals, lookups),
        print 'resulted in %d LOOKUPs on the wire' % lookup_ops
        print '%d vfs getdents calls resulted in %d READDIRs on the wire' % \
            (getdents, readdir_ops)
    def __print_page_stats(self, sample_time):
        """Print page cache stats
        """
        nfs_stats = self.__nfs_data
        vfsreadpage = nfs_stats['vfsreadpage']
        vfsreadpages = nfs_stats['vfsreadpages']
        pages_read = nfs_stats['readpages']
        vfswritepage = nfs_stats['vfswritepage']
        vfswritepages = nfs_stats['vfswritepages']
        pages_written = nfs_stats['writepages']
        print
        print '%d nfs_readpage() calls read %d pages' % \
            (vfsreadpage, vfsreadpage)
        print '%d nfs_readpages() calls read %d pages' % \
            (vfsreadpages, pages_read - vfsreadpage),
        if vfsreadpages != 0:
            print '(%.1f pages per call)' % \
                (float(pages_read - vfsreadpage) / vfsreadpages)
        else:
            print
        print
        print '%d nfs_updatepage() calls' % nfs_stats['vfsupdatepage']
        print '%d nfs_writepage() calls wrote %d pages' % \
            (vfswritepage, vfswritepage)
        print '%d nfs_writepages() calls wrote %d pages' % \
            (vfswritepages, pages_written - vfswritepage),
        if (vfswritepages) != 0:
            print '(%.1f pages per call)' % \
                (float(pages_written - vfswritepage) / vfswritepages)
        else:
            print
        congestionwaits = nfs_stats['congestionwait']
        if congestionwaits != 0:
            print
            print '%d congestion waits' % congestionwaits
    def __print_rpc_op_stats(self, op, sample_time):
        """Print generic stats for one RPC op
        """
        if not self.__rpc_data.has_key(op):
            return
        rpc_stats = self.__rpc_data[op]
        ops = float(rpc_stats[0])
        # rpc_stats[1] counts transmissions, so anything above the op
        # count is a retransmission.
        retrans = float(rpc_stats[1] - rpc_stats[0])
        kilobytes = float(rpc_stats[3] + rpc_stats[4]) / 1024
        rtt = float(rpc_stats[6])
        exe = float(rpc_stats[7])
        # prevent floating point exceptions
        if ops != 0:
            kb_per_op = kilobytes / ops
            retrans_percent = (retrans * 100) / ops
            rtt_per_op = rtt / ops
            exe_per_op = exe / ops
        else:
            kb_per_op = 0.0
            retrans_percent = 0.0
            rtt_per_op = 0.0
            exe_per_op = 0.0
        op += ':'
        print '%s' % op.lower().ljust(15),
        print '  ops/s\t\t   kB/s\t\t  kB/op\t\tretrans\t\tavg RTT (ms)\tavg exe (ms)'
        print '\t\t%7.3f' % (ops / sample_time),
        print '\t%7.3f' % (kilobytes / sample_time),
        print '\t%7.3f' % kb_per_op,
        print ' %7d (%3.1f%%)' % (retrans, retrans_percent),
        print '\t%7.3f' % rtt_per_op,
        print '\t%7.3f' % exe_per_op
    def display_iostats(self, sample_time, which):
        """Display NFS and RPC stats in an iostat-like way
        """
        sends = float(self.__rpc_data['rpcsends'])
        # sample_time 0 means "since mount": use the mount's age instead.
        if sample_time == 0:
            sample_time = float(self.__nfs_data['age'])
        if sends != 0:
            backlog = (float(self.__rpc_data['backlogutil']) / sends) / sample_time
        else:
            backlog = 0.0
        print
        print '%s mounted on %s:' % \
            (self.__nfs_data['export'], self.__nfs_data['mountpoint'])
        print
        print '   op/s\t\trpc bklog'
        print '%7.2f' % (sends / sample_time),
        print '\t%7.2f' % backlog
        # `which` selects the report flavor: 0=I/O, 1=attr cache,
        # 2=directories, 3=page cache (matches the CLI options).
        if which == 0:
            self.__print_rpc_op_stats('READ', sample_time)
            self.__print_rpc_op_stats('WRITE', sample_time)
        elif which == 1:
            self.__print_rpc_op_stats('GETATTR', sample_time)
            self.__print_rpc_op_stats('ACCESS', sample_time)
            self.__print_attr_cache_stats(sample_time)
        elif which == 2:
            self.__print_rpc_op_stats('LOOKUP', sample_time)
            self.__print_rpc_op_stats('READDIR', sample_time)
            if self.__rpc_data.has_key('READDIRPLUS'):
                self.__print_rpc_op_stats('READDIRPLUS', sample_time)
            self.__print_dir_cache_stats(sample_time)
        elif which == 3:
            self.__print_rpc_op_stats('READ', sample_time)
            self.__print_rpc_op_stats('WRITE', sample_time)
            self.__print_page_stats(sample_time)
#
# Functions
#
def print_iostat_help(name):
    # Print the usage/help text; `name` is the program name (argv[0]).
    print 'usage: %s [ <interval> [ <count> ] ] [ <options> ] [ <mount point> ] ' % name
    print
    print ' Version %s' % Iostats_version
    print
    print ' Sample iostat-like program to display NFS client per-mount statistics.'
    print
    print ' The <interval> parameter specifies the amount of time in seconds between'
    print ' each report.  The first report contains statistics for the time since each'
    print ' file system was mounted.  Each subsequent report contains statistics'
    print ' collected during the interval since the previous report.'
    print
    print ' If the <count> parameter is specified, the value of <count> determines the'
    print ' number of reports generated at <interval> seconds apart.  If the interval'
    print ' parameter is specified without the <count> parameter, the command generates'
    print ' reports continuously.'
    print
    print ' Options include "--attr", which displays statistics related to the attribute'
    print ' cache, "--dir", which displays statistics related to directory operations,'
    print ' and "--page", which displays statistics related to the page cache.'
    print ' By default, if no option is specified, statistics related to file I/O are'
    print ' displayed.'
    print
    print ' If one or more <mount point> names are specified, statistics for only these'
    print ' mount points will be displayed.  Otherwise, all NFS mount points on the'
    print ' client are listed.'
def parse_stats_file(filename):
    """Put the contents of a mountstats file into a dictionary,
    keyed by mount point.  Each value object is a list of the
    lines in the mountstats file corresponding to the mount
    point named in the key.
    """
    ms_dict = dict()
    key = ''
    # Use open() instead of the Python-2-only file() builtin, and make
    # sure the handle is closed even on a parse error -- the original
    # code's `f.close` (no parentheses) never actually closed the file.
    f = open(filename)
    try:
        for line in f:
            words = line.split()
            if len(words) == 0:
                continue
            if words[0] == 'device':
                # A 'device' line starts a new mount entry; the mount
                # point is the 5th field.
                key = words[4]
                new = [ line.strip() ]
            else:
                new += [ line.strip() ]
            ms_dict[key] = new
    finally:
        f.close()
    return ms_dict
def print_iostat_summary(old, new, devices, time, ac):
    """For each device, parse its current stats and display them --
    diffed against the previous sample when one is available."""
    for device in devices:
        current = DeviceData()
        current.parse_stats(new[device])
        if old:
            # Show only the delta since the previous sample.
            previous = DeviceData()
            previous.parse_stats(old[device])
            current.compare_iostats(previous).display_iostats(time, ac)
        else:
            # First sample: show totals since mount.
            current.display_iostats(time, ac)
def iostat_command(name):
    """iostat-like command for NFS mount points
    """
    mountstats = parse_stats_file('/proc/self/mountstats')
    devices = []
    which = 0
    interval_seen = False
    count_seen = False
    # Ad-hoc argument scan: flags set the report flavor, known mount
    # points select devices, and the first two bare numbers are taken
    # as <interval> then <count>.
    for arg in sys.argv:
        if arg in ['-h', '--help', 'help', 'usage']:
            print_iostat_help(name)
            return
        if arg in ['-v', '--version', 'version']:
            print '%s version %s' % (name, Iostats_version)
            return
        if arg in ['-a', '--attr']:
            which = 1
            continue
        if arg in ['-d', '--dir']:
            which = 2
            continue
        if arg in ['-p', '--page']:
            which = 3
            continue
        if arg == sys.argv[0]:
            continue
        if arg in mountstats:
            devices += [arg]
        elif not interval_seen:
            interval = int(arg)
            if interval > 0:
                interval_seen = True
            else:
                print 'Illegal <interval> value'
                return
        elif not count_seen:
            count = int(arg)
            if count > 0:
                count_seen = True
            else:
                print 'Illegal <count> value'
                return
    # make certain devices contains only NFS mount points
    if len(devices) > 0:
        check = []
        for device in devices:
            stats = DeviceData()
            stats.parse_stats(mountstats[device])
            if stats.is_nfs_mountpoint():
                check += [device]
        devices = check
    else:
        # No devices named on the command line: report on every NFS
        # mount found on the client.
        for device, descr in mountstats.iteritems():
            stats = DeviceData()
            stats.parse_stats(descr)
            if stats.is_nfs_mountpoint():
                devices += [device]
    if len(devices) == 0:
        print 'No NFS mount points were found'
        return
    old_mountstats = None
    sample_time = 0.0
    if not interval_seen:
        # Single report covering the time since each mount.
        print_iostat_summary(old_mountstats, mountstats, devices, sample_time, which)
        return
    if count_seen:
        while count != 0:
            print_iostat_summary(old_mountstats, mountstats, devices, sample_time, which)
            old_mountstats = mountstats
            time.sleep(interval)
            sample_time = interval
            mountstats = parse_stats_file('/proc/self/mountstats')
            count -= 1
    else:
        # No count given: report forever at the requested interval.
        while True:
            print_iostat_summary(old_mountstats, mountstats, devices, sample_time, which)
            old_mountstats = mountstats
            time.sleep(interval)
            sample_time = interval
            mountstats = parse_stats_file('/proc/self/mountstats')
#
# Main
#
# Script entry point: run the command under the program's basename and
# translate Ctrl-C into a clean non-zero exit.
prog = os.path.basename(sys.argv[0])
try:
    iostat_command(prog)
except KeyboardInterrupt:
    print 'Caught ^C... exiting'
    sys.exit(1)
sys.exit(0)
|
jgeskens/django | refs/heads/master | tests/admin_scripts/custom_templates/project_template/additional_dir/extra.py | 701 | # this file uses the {{ extra }} variable
|
HyperloopTeam/FullOpenMDAO | refs/heads/master | lib/python2.7/site-packages/matplotlib/projections/geo.py | 11 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import math
import numpy as np
import numpy.ma as ma
import matplotlib
rcParams = matplotlib.rcParams
from matplotlib.axes import Axes
from matplotlib import cbook
from matplotlib.patches import Circle
from matplotlib.path import Path
import matplotlib.spines as mspines
import matplotlib.axis as maxis
from matplotlib.ticker import Formatter, Locator, NullLocator, FixedLocator, NullFormatter
from matplotlib.transforms import Affine2D, Affine2DBase, Bbox, \
BboxTransformTo, IdentityTransform, Transform, TransformWrapper
class GeoAxes(Axes):
"""
An abstract base class for geographic projections
"""
    class ThetaFormatter(Formatter):
        """
        Used to format the theta tick labels.  Converts the native
        unit of radians into degrees and adds a degree symbol.
        """
        def __init__(self, round_to=1.0):
            # Granularity (in degrees) to which tick values are rounded.
            self._round_to = round_to
        def __call__(self, x, pos=None):
            degrees = (x / np.pi) * 180.0
            degrees = round(degrees / self._round_to) * self._round_to
            # With usetex (and non-unicode latex) emit a TeX degree
            # symbol; otherwise use the literal U+00B0 character.
            if rcParams['text.usetex'] and not rcParams['text.latex.unicode']:
                return r"$%0.0f^\circ$" % degrees
            else:
                return "%0.0f\u00b0" % degrees
RESOLUTION = 75
    def _init_axis(self):
        # Create the axis artists for this projection.
        self.xaxis = maxis.XAxis(self)
        self.yaxis = maxis.YAxis(self)
        # Do not register xaxis or yaxis with spines -- as done in
        # Axes._init_axis() -- until GeoAxes.xaxis.cla() works.
        # self.spines['geo'].register_axis(self.yaxis)
        self._update_transScale()
    def cla(self):
        """Clear the axes and restore this projection's defaults:
        30/15 degree grids, no minor ticks, and data limits fixed to
        the full longitude/latitude range (in radians)."""
        Axes.cla(self)
        self.set_longitude_grid(30)
        self.set_latitude_grid(15)
        self.set_longitude_grid_ends(75)
        self.xaxis.set_minor_locator(NullLocator())
        self.yaxis.set_minor_locator(NullLocator())
        self.xaxis.set_ticks_position('none')
        self.yaxis.set_ticks_position('none')
        self.yaxis.set_tick_params(label1On=True)
        # Why do we need to turn on yaxis tick labels, but
        # xaxis tick labels are already on?
        self.grid(rcParams['axes.grid'])
        # Native data coordinates are radians over the whole sphere.
        Axes.set_xlim(self, -np.pi, np.pi)
        Axes.set_ylim(self, -np.pi / 2.0, np.pi / 2.0)
    def _set_lim_and_transforms(self):
        # Build the transform pipeline: projection (non-linear), then an
        # affine normalization into [0, 1] axes space, then axes->display.
        # A (possibly non-linear) projection on the (already scaled) data
        self.transProjection = self._get_core_transform(self.RESOLUTION)
        self.transAffine = self._get_affine_transform()
        self.transAxes = BboxTransformTo(self.bbox)
        # The complete data transformation stack -- from data all the
        # way to display coordinates
        self.transData = \
            self.transProjection + \
            self.transAffine + \
            self.transAxes
        # This is the transform for longitude ticks.
        self._xaxis_pretransform = \
            Affine2D() \
            .scale(1.0, self._longitude_cap * 2.0) \
            .translate(0.0, -self._longitude_cap)
        self._xaxis_transform = \
            self._xaxis_pretransform + \
            self.transData
        # Longitude tick labels are pinned to the equator (y scaled to 0)
        # and nudged 4 points above/below.
        self._xaxis_text1_transform = \
            Affine2D().scale(1.0, 0.0) + \
            self.transData + \
            Affine2D().translate(0.0, 4.0)
        self._xaxis_text2_transform = \
            Affine2D().scale(1.0, 0.0) + \
            self.transData + \
            Affine2D().translate(0.0, -4.0)
        # This is the transform for latitude ticks.
        yaxis_stretch = Affine2D().scale(np.pi * 2.0, 1.0).translate(-np.pi, 0.0)
        yaxis_space = Affine2D().scale(1.0, 1.1)
        self._yaxis_transform = \
            yaxis_stretch + \
            self.transData
        yaxis_text_base = \
            yaxis_stretch + \
            self.transProjection + \
            (yaxis_space + \
             self.transAffine + \
             self.transAxes)
        # Latitude tick labels sit 8 points outside either edge.
        self._yaxis_text1_transform = \
            yaxis_text_base + \
            Affine2D().translate(-8.0, 0.0)
        self._yaxis_text2_transform = \
            yaxis_text_base + \
            Affine2D().translate(8.0, 0.0)
    def _get_affine_transform(self):
        # Normalize projected coordinates into the unit square centered
        # at (0.5, 0.5), using the projection's extreme points to find
        # the scale along each axis.
        transform = self._get_core_transform(1)
        xscale, _ = transform.transform_point((np.pi, 0))
        _, yscale = transform.transform_point((0, np.pi / 2.0))
        return Affine2D() \
            .scale(0.5 / xscale, 0.5 / yscale) \
            .translate(0.5, 0.5)
def get_xaxis_transform(self, which='grid'):
    """
    Return the transform used to draw x-axis (longitude) elements.

    Parameters
    ----------
    which : {'tick1', 'tick2', 'grid'}
        Which element the transform is for; all three share one transform
        on geographic axes.

    Raises
    ------
    ValueError
        If *which* is not one of the accepted values.  (Was a bare
        ``assert``, which is silently stripped under ``python -O``.)
    """
    if which not in ('tick1', 'tick2', 'grid'):
        raise ValueError("'which' must be one of 'tick1', 'tick2' or 'grid'")
    return self._xaxis_transform
def get_xaxis_text1_transform(self, pad):
    """Return (transform, va, ha) used to place primary x tick labels."""
    alignment = ('bottom', 'center')
    return (self._xaxis_text1_transform,) + alignment
def get_xaxis_text2_transform(self, pad):
    """Return (transform, va, ha) used to place secondary x tick labels."""
    alignment = ('top', 'center')
    return (self._xaxis_text2_transform,) + alignment
def get_yaxis_transform(self, which='grid'):
    """
    Return the transform used to draw y-axis (latitude) elements.

    Parameters
    ----------
    which : {'tick1', 'tick2', 'grid'}
        Which element the transform is for; all three share one transform
        on geographic axes.

    Raises
    ------
    ValueError
        If *which* is not one of the accepted values.  (Was a bare
        ``assert``, which is silently stripped under ``python -O``.)
    """
    if which not in ('tick1', 'tick2', 'grid'):
        raise ValueError("'which' must be one of 'tick1', 'tick2' or 'grid'")
    return self._yaxis_transform
def get_yaxis_text1_transform(self, pad):
    """Return (transform, va, ha) used to place primary y tick labels."""
    alignment = ('center', 'right')
    return (self._yaxis_text1_transform,) + alignment
def get_yaxis_text2_transform(self, pad):
    """Return (transform, va, ha) used to place secondary y tick labels."""
    alignment = ('center', 'left')
    return (self._yaxis_text2_transform,) + alignment
def _gen_axes_patch(self):
    """Return the circular patch bounding the axes region."""
    center, radius = (0.5, 0.5), 0.5
    return Circle(center, radius)
def _gen_axes_spines(self):
    """Return the single circular 'geo' spine for this projection."""
    spine = mspines.Spine.circular_spine(self, (0.5, 0.5), 0.5)
    return {'geo': spine}
def set_yscale(self, *args, **kwargs):
    """
    Set the axis scale; only ``'linear'`` is supported here.

    Raises
    ------
    NotImplementedError
        If any scale other than ``'linear'`` is requested.  (The bare
        ``raise NotImplementedError`` gave callers no explanation.)
    """
    if args[0] != 'linear':
        raise NotImplementedError(
            "Geographic projections only support the 'linear' scale")

# Geographic axes treat both directions identically.
set_xscale = set_yscale
def set_xlim(self, *args, **kwargs):
    """Disabled: geographic projections have fixed axes limits."""
    msg = ("It is not possible to change axes limits "
           "for geographic projections. Please consider "
           "using Basemap or Cartopy.")
    raise TypeError(msg)

# Both directions are equally immutable.
set_ylim = set_xlim
def format_coord(self, lon, lat):
    """Format a (lon, lat) position in radians as a degree string."""
    deg_lon = lon * (180.0 / np.pi)
    deg_lat = lat * (180.0 / np.pi)
    ns = 'N' if deg_lat >= 0.0 else 'S'
    ew = 'E' if deg_lon >= 0.0 else 'W'
    return '%f\u00b0%s, %f\u00b0%s' % (abs(deg_lat), ns, abs(deg_lon), ew)
def set_longitude_grid(self, degrees):
    """
    Set the number of degrees between each longitude grid line.

    Parameters
    ----------
    degrees : float
        Spacing between grid lines, in degrees; should evenly divide 360.
    """
    # linspace expects an integer sample count; the original passed a float.
    number = int(360.0 / degrees) + 1
    self.xaxis.set_major_locator(
        FixedLocator(
            np.linspace(-np.pi, np.pi, number, True)[1:-1]))
    # Fixed typo: was stored as ``self._logitude_degrees`` (missing 'n'),
    # which nothing ever read back.
    self._longitude_degrees = degrees
    self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_latitude_grid(self, degrees):
    """
    Set the number of degrees between each latitude grid line.

    (Docstring previously said "longitude" — a copy-paste error.)

    Parameters
    ----------
    degrees : float
        Spacing between grid lines, in degrees; should evenly divide 180.
    """
    # linspace expects an integer sample count; the original passed a float.
    number = int(180.0 / degrees) + 1
    self.yaxis.set_major_locator(
        FixedLocator(
            np.linspace(-np.pi / 2.0, np.pi / 2.0, number, True)[1:-1]))
    self._latitude_degrees = degrees
    self.yaxis.set_major_formatter(self.ThetaFormatter(degrees))
def set_longitude_grid_ends(self, degrees):
    """
    Set the latitude(s) at which to stop drawing the longitude grids.
    """
    cap = degrees * (np.pi / 180.0)
    self._longitude_cap = cap
    # Rebuild the pretransform in place so transforms that reference it
    # (built in _set_lim_and_transforms) pick up the new cap.
    pretransform = self._xaxis_pretransform
    pretransform.clear()
    pretransform.scale(1.0, cap * 2.0)
    pretransform.translate(0.0, -cap)
def get_data_ratio(self):
    """Return the aspect ratio of the data itself (always 1 for this axes)."""
    return 1.0
### Interactive panning
def can_zoom(self):
    """Interactive zoom-box is unsupported on this axes; always *False*."""
    return False
def can_pan(self):
    """Interactive pan/zoom is unsupported on this axes; always *False*."""
    return False
def start_pan(self, x, y, button):
    """Called when a pan begins; panning is unsupported, so do nothing."""
    pass
def end_pan(self):
    """Called when a pan ends; panning is unsupported, so do nothing."""
    pass
def drag_pan(self, button, key, x, y):
    """Called during a pan drag; panning is unsupported, so do nothing."""
    pass
class AitoffAxes(GeoAxes):
    """GeoAxes subclass implementing the Aitoff projection."""
    name = 'aitoff'

    class AitoffTransform(Transform):
        """
        The base Aitoff transform.
        """
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, resolution):
            """
            Create a new Aitoff transform.  Resolution is the number of steps
            to interpolate between each input line segment to approximate its
            path in curved Aitoff space.
            """
            Transform.__init__(self)
            self._resolution = resolution

        def transform_non_affine(self, ll):
            longitude = ll[:, 0:1]
            latitude = ll[:, 1:2]

            # Pre-compute some values
            half_long = longitude / 2.0
            cos_latitude = np.cos(latitude)

            alpha = np.arccos(cos_latitude * np.cos(half_long))
            # Mask this array or we'll get divide-by-zero errors
            alpha = ma.masked_where(alpha == 0.0, alpha)
            # The numerators also need to be masked so that masked
            # division will be invoked.
            # We want unnormalized sinc.  numpy.sinc gives us normalized
            sinc_alpha = ma.sin(alpha) / alpha

            x = (cos_latitude * ma.sin(half_long)) / sinc_alpha
            y = (ma.sin(latitude) / sinc_alpha)
            return np.concatenate((x.filled(0), y.filled(0)), 1)
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        def transform_path_non_affine(self, path):
            # Interpolate so straight segments follow the projection's
            # curvature.  (Removed an unused local aliasing path.vertices.)
            ipath = path.interpolated(self._resolution)
            return Path(self.transform(ipath.vertices), ipath.codes)
        transform_path_non_affine.__doc__ = \
            Transform.transform_path_non_affine.__doc__

        def inverted(self):
            return AitoffAxes.InvertedAitoffTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    class InvertedAitoffTransform(Transform):
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, resolution):
            Transform.__init__(self)
            self._resolution = resolution

        def transform_non_affine(self, xy):
            # MGDTODO: Math is hard ;(
            return xy
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        def inverted(self):
            return AitoffAxes.AitoffTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    def __init__(self, *args, **kwargs):
        self._longitude_cap = np.pi / 2.0
        GeoAxes.__init__(self, *args, **kwargs)
        # Aitoff maps are twice as wide as tall.
        self.set_aspect(0.5, adjustable='box', anchor='C')
        self.cla()

    def _get_core_transform(self, resolution):
        """Return the non-affine Aitoff transform at the given resolution."""
        return self.AitoffTransform(resolution)
class HammerAxes(GeoAxes):
    """GeoAxes subclass implementing the Hammer projection."""
    name = 'hammer'

    class HammerTransform(Transform):
        """
        The base Hammer transform.
        """
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, resolution):
            """
            Create a new Hammer transform.  Resolution is the number of steps
            to interpolate between each input line segment to approximate its
            path in curved Hammer space.
            """
            Transform.__init__(self)
            self._resolution = resolution

        def transform_non_affine(self, ll):
            longitude = ll[:, 0:1]
            latitude = ll[:, 1:2]

            # Pre-compute some values
            half_long = longitude / 2.0
            cos_latitude = np.cos(latitude)
            sqrt2 = np.sqrt(2.0)

            alpha = np.sqrt(1.0 + cos_latitude * np.cos(half_long))
            x = (2.0 * sqrt2) * (cos_latitude * np.sin(half_long)) / alpha
            y = (sqrt2 * np.sin(latitude)) / alpha
            return np.concatenate((x, y), 1)
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        def transform_path_non_affine(self, path):
            # Interpolate so straight segments follow the projection's
            # curvature.  (Removed an unused local aliasing path.vertices.)
            ipath = path.interpolated(self._resolution)
            return Path(self.transform(ipath.vertices), ipath.codes)
        transform_path_non_affine.__doc__ = \
            Transform.transform_path_non_affine.__doc__

        def inverted(self):
            return HammerAxes.InvertedHammerTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    class InvertedHammerTransform(Transform):
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, resolution):
            Transform.__init__(self)
            self._resolution = resolution

        def transform_non_affine(self, xy):
            x = xy[:, 0:1]
            y = xy[:, 1:2]

            quarter_x = 0.25 * x
            half_y = 0.5 * y
            z = np.sqrt(1.0 - quarter_x*quarter_x - half_y*half_y)
            longitude = 2 * np.arctan((z*x) / (2.0 * (2.0*z*z - 1.0)))
            latitude = np.arcsin(y*z)
            return np.concatenate((longitude, latitude), 1)
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        def inverted(self):
            return HammerAxes.HammerTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    def __init__(self, *args, **kwargs):
        self._longitude_cap = np.pi / 2.0
        GeoAxes.__init__(self, *args, **kwargs)
        # Hammer maps are twice as wide as tall.
        self.set_aspect(0.5, adjustable='box', anchor='C')
        self.cla()

    def _get_core_transform(self, resolution):
        """Return the non-affine Hammer transform at the given resolution."""
        return self.HammerTransform(resolution)
class MollweideAxes(GeoAxes):
    """GeoAxes subclass implementing the Mollweide projection."""
    name = 'mollweide'

    class MollweideTransform(Transform):
        """
        The base Mollweide transform.
        """
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, resolution):
            """
            Create a new Mollweide transform.  Resolution is the number of
            steps to interpolate between each input line segment to
            approximate its path in curved Mollweide space.
            """
            Transform.__init__(self)
            self._resolution = resolution

        def transform_non_affine(self, ll):
            def d(theta):
                # One Newton-Raphson step for theta + sin(theta) = pi*sin(lat).
                delta = -(theta + np.sin(theta) - pi_sin_l) \
                    / (1 + np.cos(theta))
                return delta, np.abs(delta) > 0.001

            longitude = ll[:, 0]
            latitude = ll[:, 1]

            clat = np.pi/2 - np.abs(latitude)
            ihigh = clat < 0.087  # within 5 degrees of the poles
            ilow = ~ihigh
            # ``np.float`` was removed in NumPy 1.20; use the builtin.
            aux = np.empty(latitude.shape, dtype=float)

            if ilow.any():  # Newton-Raphson iteration
                pi_sin_l = np.pi * np.sin(latitude[ilow])
                theta = 2.0 * latitude[ilow]
                delta, large_delta = d(theta)
                while np.any(large_delta):
                    theta[large_delta] += delta[large_delta]
                    delta, large_delta = d(theta)
                aux[ilow] = theta / 2

            if ihigh.any():  # Taylor series-based approx. solution
                e = clat[ihigh]
                d = 0.5 * (3 * np.pi * e**2) ** (1.0/3)
                aux[ihigh] = (np.pi/2 - d) * np.sign(latitude[ihigh])

            xy = np.empty(ll.shape, dtype=float)
            xy[:,0] = (2.0 * np.sqrt(2.0) / np.pi) * longitude * np.cos(aux)
            xy[:,1] = np.sqrt(2.0) * np.sin(aux)

            return xy
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        def transform_path_non_affine(self, path):
            # Interpolate so straight segments follow the projection's
            # curvature.  (Removed an unused local aliasing path.vertices.)
            ipath = path.interpolated(self._resolution)
            return Path(self.transform(ipath.vertices), ipath.codes)
        transform_path_non_affine.__doc__ = \
            Transform.transform_path_non_affine.__doc__

        def inverted(self):
            return MollweideAxes.InvertedMollweideTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    class InvertedMollweideTransform(Transform):
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, resolution):
            Transform.__init__(self)
            self._resolution = resolution

        def transform_non_affine(self, xy):
            x = xy[:, 0:1]
            y = xy[:, 1:2]

            # from Equations (7, 8) of
            # http://mathworld.wolfram.com/MollweideProjection.html
            theta = np.arcsin(y / np.sqrt(2))
            lon = (np.pi / (2 * np.sqrt(2))) * x / np.cos(theta)
            lat = np.arcsin((2 * theta + np.sin(2 * theta)) / np.pi)
            return np.concatenate((lon, lat), 1)
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        def inverted(self):
            return MollweideAxes.MollweideTransform(self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    def __init__(self, *args, **kwargs):
        self._longitude_cap = np.pi / 2.0
        GeoAxes.__init__(self, *args, **kwargs)
        # Mollweide maps are twice as wide as tall.
        self.set_aspect(0.5, adjustable='box', anchor='C')
        self.cla()

    def _get_core_transform(self, resolution):
        """Return the non-affine Mollweide transform at the given resolution."""
        return self.MollweideTransform(resolution)
class LambertAxes(GeoAxes):
    """GeoAxes subclass implementing the Lambert azimuthal equal-area
    projection, optionally re-centered via ``center_longitude`` /
    ``center_latitude`` keyword arguments."""
    name = 'lambert'

    class LambertTransform(Transform):
        """
        The base Lambert transform.
        """
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, center_longitude, center_latitude, resolution):
            """
            Create a new Lambert transform.  Resolution is the number of steps
            to interpolate between each input line segment to approximate its
            path in curved Lambert space.
            """
            Transform.__init__(self)
            self._resolution = resolution
            self._center_longitude = center_longitude
            self._center_latitude = center_latitude

        def transform_non_affine(self, ll):
            longitude = ll[:, 0:1]
            latitude = ll[:, 1:2]
            clong = self._center_longitude
            clat = self._center_latitude
            cos_lat = np.cos(latitude)
            sin_lat = np.sin(latitude)
            diff_long = longitude - clong
            cos_diff_long = np.cos(diff_long)

            inner_k = (1.0 +
                       np.sin(clat)*sin_lat +
                       np.cos(clat)*cos_lat*cos_diff_long)
            # Prevent divide-by-zero problems
            inner_k = np.where(inner_k == 0.0, 1e-15, inner_k)
            k = np.sqrt(2.0 / inner_k)
            x = k*cos_lat*np.sin(diff_long)
            y = k*(np.cos(clat)*sin_lat -
                   np.sin(clat)*cos_lat*cos_diff_long)
            return np.concatenate((x, y), 1)
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        def transform_path_non_affine(self, path):
            # Interpolate so straight segments follow the projection's
            # curvature.  (Removed an unused local aliasing path.vertices.)
            ipath = path.interpolated(self._resolution)
            return Path(self.transform(ipath.vertices), ipath.codes)
        transform_path_non_affine.__doc__ = \
            Transform.transform_path_non_affine.__doc__

        def inverted(self):
            return LambertAxes.InvertedLambertTransform(
                self._center_longitude,
                self._center_latitude,
                self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    class InvertedLambertTransform(Transform):
        input_dims = 2
        output_dims = 2
        is_separable = False

        def __init__(self, center_longitude, center_latitude, resolution):
            Transform.__init__(self)
            self._resolution = resolution
            self._center_longitude = center_longitude
            self._center_latitude = center_latitude

        def transform_non_affine(self, xy):
            x = xy[:, 0:1]
            y = xy[:, 1:2]
            clong = self._center_longitude
            clat = self._center_latitude
            p = np.sqrt(x*x + y*y)
            # Guard against division by zero at the projection center.
            p = np.where(p == 0.0, 1e-9, p)
            c = 2.0 * np.arcsin(0.5 * p)
            sin_c = np.sin(c)
            cos_c = np.cos(c)

            lat = np.arcsin(cos_c*np.sin(clat) +
                            ((y*sin_c*np.cos(clat)) / p))
            lon = clong + np.arctan(
                (x*sin_c) / (p*np.cos(clat)*cos_c - y*np.sin(clat)*sin_c))
            return np.concatenate((lon, lat), 1)
        transform_non_affine.__doc__ = Transform.transform_non_affine.__doc__

        def inverted(self):
            return LambertAxes.LambertTransform(
                self._center_longitude,
                self._center_latitude,
                self._resolution)
        inverted.__doc__ = Transform.inverted.__doc__

    def __init__(self, *args, **kwargs):
        self._longitude_cap = np.pi / 2.0
        self._center_longitude = kwargs.pop("center_longitude", 0.0)
        self._center_latitude = kwargs.pop("center_latitude", 0.0)
        GeoAxes.__init__(self, *args, **kwargs)
        self.set_aspect('equal', adjustable='box', anchor='C')
        self.cla()

    def cla(self):
        GeoAxes.cla(self)
        # Latitude tick labels are not drawn for this projection.
        self.yaxis.set_major_formatter(NullFormatter())

    def _get_core_transform(self, resolution):
        """Return the non-affine Lambert transform at the given resolution."""
        return self.LambertTransform(
            self._center_longitude,
            self._center_latitude,
            resolution)
|
android-ia/platform_external_chromium_org | refs/heads/master | tools/telemetry/telemetry/web_perf/metrics/mainthread_jank_stats_unittest.py | 55 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.timeline import model as model_module
from telemetry.timeline import async_slice
from telemetry.web_perf import timeline_interaction_record as tir_module
from telemetry.web_perf.metrics import mainthread_jank_stats
class MainthreadJankTests(unittest.TestCase):
  """Tests for main-thread jank statistics derived from timeline records."""

  def CreateTestRecord(self, name, start, end, thread_start, thread_end,
                       parent_thread):
    """Build a TimelineInteractionRecord backed by a synthetic async slice."""
    s = async_slice.AsyncSlice(
        'cat', 'Interaction.%s/is_responsive' % name,
        timestamp=start, duration=end - start, start_thread=parent_thread,
        end_thread=parent_thread, thread_start=thread_start,
        thread_duration=thread_end - thread_start)
    return tir_module.TimelineInteractionRecord.FromAsyncEvent(s)

  def testComputeMainthreadJankStatsForRecord(self):
    # The slice hierarchy should look something like this:
    # [ MessageLoop::RunTask ] [MessageLoop::RunTask][ MessagLoop::RunTask ]
    #      [ foo ]                                  [ bar ]
    #   |                                                 |
    # 200ms                                             800ms
    # (thread_start)                                (thread_end)
    #
    # Note: all timings mentioned here and in comments below are thread time.
    model = model_module.TimelineModel()
    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    renderer_main.name = 'CrRendererMain'

    # [ MessageLoop::RunTask ]
    # 100ms                300ms
    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 112, 100)
    renderer_main.EndSlice(240, 300)

    # [  MessageLoop::RunTask  ]
    # 450ms  [ foo ]        475ms
    #     460ms   470ms
    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 462, 450)
    renderer_main.BeginSlice('otherlevel', 'foo', 468, 460)
    renderer_main.EndSlice(475, 470)
    renderer_main.EndSlice(620, 475)

    # [   MessageLoop::RunTask   ]
    # 620ms   [ bar ]          900ms
    #      750ms   850ms
    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 652, 620)
    renderer_main.BeginSlice('otherlevel', 'bar', 785, 750)
    renderer_main.EndSlice(875, 850)
    renderer_main.EndSlice(1040, 900)

    model.FinalizeImport(shift_world_to_zero=False)

    # Make a record that starts at 200ms and ends at 800ms in thread time
    record = self.CreateTestRecord('test', 100, 700, 200, 800, renderer_main)
    # pylint: disable=W0212
    stat = mainthread_jank_stats._ComputeMainthreadJankStatsForRecord(
        renderer_main, record)

    # The overlapped between thread time range(200ms -> 800ms)
    # with the first top slice (100ms -> 300ms) is 300 - 200 = 100ms,
    # with the second slice (450ms -> 475ms) is 475 - 450 = 25 ms,
    # with the third slice (620ms -> 900ms) is 800 - 620 = 180 ms.
    #
    # Hence we have 2 big top slices which overlapped duration > 50ms,
    # the biggest top slice is 180ms, and the total big top slice's thread
    # time is 100 + 180 = 280ms.
    # Note: assertEquals is a deprecated alias of assertEqual.
    self.assertEqual(180, stat.biggest_top_slice_thread_time)
    self.assertEqual(280, stat.sum_big_top_slices_thread_time)

  def testMainthreadJankStats(self):
    # [ MessageLoop::RunTask]  [MessageLoop::RunTask]  [MessagLoop::RunTask]
    # 10                  100  120                400  450               750
    #     [  record_1  ]       [  record_2 ]        [  record_3  ]
    #    40            70     120          200     220           900
    model = model_module.TimelineModel()
    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    renderer_main.name = 'CrRendererMain'

    # [ MessageLoop::RunTask ]
    # 10ms                100ms
    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 12, 10)
    renderer_main.EndSlice(120, 100)

    # [ MessageLoop::RunTask ]
    # 120ms               200ms
    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 115, 120)
    renderer_main.EndSlice(410, 400)

    # [ MessageLoop::RunTask ]
    # 220ms               900ms
    renderer_main.BeginSlice('toplevel', 'MessageLoop::RunTask', 477, 450)
    renderer_main.EndSlice(772, 750)

    model.FinalizeImport(shift_world_to_zero=False)

    test_records = [
        self.CreateTestRecord('record_1', 10, 80, 40, 70, renderer_main),
        self.CreateTestRecord('record_2', 100, 210, 120, 200, renderer_main),
        self.CreateTestRecord('record_3', 215, 920, 220, 900, renderer_main)
    ]
    stats = mainthread_jank_stats.MainthreadJankStats(
        renderer_main, test_records)

    # Main thread janks covered by records' ranges are:
    # Record 1: (40ms -> 70ms)
    # Record 2: (120ms -> 200ms)
    # Record 3: (220ms -> 400ms), (450ms -> 750ms)
    self.assertEqual(560, stats.total_big_jank_thread_time)
    self.assertEqual(300, stats.biggest_jank_thread_time)
|
fpy171/django | refs/heads/master | django/contrib/redirects/apps.py | 590 | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class RedirectsConfig(AppConfig):
    """AppConfig for the django.contrib.redirects app."""
    name = 'django.contrib.redirects'
    verbose_name = _("Redirects")
|
RENCI/xDCIShare | refs/heads/xdci-develop | hs_tracking/__init__.py | 4 | default_app_config = 'hs_tracking.apps.HSTrackingAppConfig'
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.