repo_name | ref | path | copies | content
|---|---|---|---|---|
cnsuperx/Cocos2d-x-2.2.5
|
refs/heads/master
|
tools/cocos2d-console/console/cocos2d_jscompile.py
|
5
|
#!/usr/bin/python
# ----------------------------------------------------------------------------
# cocos2d "jscompile" plugin
#
# Copyright 2013 (C) Intel
#
# License: MIT
# ----------------------------------------------------------------------------
'''
"jscompile" plugin for cocos2d command line tool
'''
__docformat__ = 'restructuredtext'
import sys
import subprocess
import os
import json
import inspect
import cocos2d
class CCPluginJSCompile(cocos2d.CCPlugin):
"""
compiles (encodes) and minifies JS files
"""
@staticmethod
def brief_description():
# returns a short description of this module
return "jscompile\tminifies and/or compiles js files"
# This is not the constructor, just an initializer
def init(self, options, workingdir):
"""
Arguments:
- `options`:
"""
self._current_src_dir = None
self._src_dir_arr = self.normalize_path_in_list(options.src_dir_arr)
self._dst_dir = options.dst_dir
self._use_closure_compiler = options.use_closure_compiler
self._config = None
self._workingdir = workingdir
if options.compiler_config != None:
f = open(options.compiler_config)
self._config = json.load(f)
f.close()
self.normalize_path_in_list(self._config["pre_order"])
self.normalize_path_in_list(self._config["post_order"])
self.normalize_path_in_list(self._config["skip"])
self._success = []
self._failure = []
self._js_files = {}
self._compressed_js_path = os.path.join(self._dst_dir, options.compressed_filename)
self._compressed_jsc_path = os.path.join(self._dst_dir, options.compressed_filename+"c")
def normalize_path_in_list(self, list):
for i in list:
tmp = os.path.normpath(i)
list[list.index(i)] = tmp
return list
def get_relative_path(self, jsfile):
try:
# print "current src dir: "+self._current_src_dir
pos = jsfile.index(self._current_src_dir)
if pos != 0:
raise Exception("cannot find src directory in file path.")
# print "origin js path: "+ jsfile
# print "relative path: "+jsfile[len(self._current_src_dir)+1:]
return jsfile[len(self._current_src_dir)+1:]
except ValueError:
raise Exception("cannot find src directory in file path.")
def get_output_file_path(self, jsfile):
"""
Gets output file path by source js file
"""
# create folder for generated file
jsc_filepath = ""
relative_path = self.get_relative_path(jsfile)+"c"
jsc_filepath = os.path.join(self._dst_dir, relative_path)
dst_rootpath = os.path.split(jsc_filepath)[0]
try:
# print "creating dir (%s)" % (dst_rootpath)
os.makedirs(dst_rootpath)
except OSError:
if os.path.exists(dst_rootpath) == False:
# There was an error on creation, so make sure we know about it
raise Exception("Error: cannot create folder in "+dst_rootpath)
# print "return jsc path: "+jsc_filepath
return jsc_filepath
def compile_js(self, jsfile, output_file):
"""
Compiles js file
"""
print "compiling js (%s) to bytecode..." % (jsfile)
jsbcc_exe_path = os.path.join(self._workingdir, "bin", "jsbcc");
ret = subprocess.call(jsbcc_exe_path + " " + jsfile+" "+output_file, shell=True)
if ret == 0:
self._success.append(jsfile)
else:
self._failure.append(jsfile)
print "----------------------------------------"
def compress_js(self):
"""
Compress all js files into one big file.
"""
jsfiles = ""
for src_dir in self._src_dir_arr:
# print "\n----------src:"+src_dir
jsfiles = jsfiles + " --js ".join(self._js_files[src_dir]) + " "
compiler_jar_path = os.path.join(self._workingdir, "bin", "compiler.jar")
command = "java -jar %s --js %s --js_output_file %s" % (compiler_jar_path, jsfiles, self._compressed_js_path)
print "\ncommand:"+command+"\n"
ret = subprocess.call(command, shell=True)
if ret == 0:
print "js files were compressed successfully..."
else:
print "js files were compressed unsuccessfully..."
def deep_iterate_dir(self, rootDir):
for lists in os.listdir(rootDir):
path = os.path.join(rootDir, lists)
if os.path.isdir(path):
self.deep_iterate_dir(path)
elif os.path.isfile(path):
if os.path.splitext(path)[1] == ".js":
self._js_files[self._current_src_dir].append(path)
def index_in_list(self, jsfile, l):
"""
Arguments:
- `self`:
- `jsfile`:
- `l`:
"""
index = -1
for el in l:
if jsfile.rfind(el) != -1:
# print "index:"+str(index+1)+", el:"+el
return index+1
index = index + 1
return -1
def js_filename_pre_order_compare(self, a, b):
"""
"""
pre_order = self._config["pre_order"]
index_a = self.index_in_list(a, pre_order)
index_b = self.index_in_list(b, pre_order)
is_a_in_list = index_a != -1
is_b_in_list = index_b != -1
if is_a_in_list and not is_b_in_list:
return -1
elif not is_a_in_list and is_b_in_list:
return 1
elif is_a_in_list and is_b_in_list:
if index_a > index_b:
return 1
elif index_a < index_b:
return -1
else:
return 0
else:
return 0
def js_filename_post_order_compare(self, a, b):
"""
"""
post_order = self._config["post_order"]
index_a = self.index_in_list(a, post_order)
index_b = self.index_in_list(b, post_order)
is_a_in_list = index_a != -1
is_b_in_list = index_b != -1
if is_a_in_list and not is_b_in_list:
return 1
elif not is_a_in_list and is_b_in_list:
return -1
elif is_a_in_list and is_b_in_list:
if index_a > index_b:
return 1
elif index_a < index_b:
return -1
else:
return 0
else:
return 0
def reorder_js_files(self):
if self._config == None:
return
# print "before:"+str(self._js_files)
for src_dir in self._js_files:
# Remove file in exclude list
need_remove_arr = []
for jsfile in self._js_files[src_dir]:
for exclude_file in self._config["skip"]:
if jsfile.rfind(exclude_file) != -1:
# print "remove:" + jsfile
need_remove_arr.append(jsfile)
for need_remove in need_remove_arr:
self._js_files[src_dir].remove(need_remove)
if (self._config != None):
pre_order = self._config["pre_order"]
self._js_files[src_dir].sort(cmp=self.js_filename_pre_order_compare)
self._js_files[src_dir].sort(cmp=self.js_filename_post_order_compare)
# print '-------------------'
# print "after:" + str(self._js_files)
def handle_all_js_files(self):
"""
Arguments:
- `self`:
"""
if self._use_closure_compiler == True:
self.compress_js()
self.compile_js(self._compressed_js_path, self._compressed_jsc_path)
# remove tmp compressed file
os.remove(self._compressed_js_path)
else:
for src_dir in self._src_dir_arr:
for jsfile in self._js_files[src_dir]:
self._current_src_dir = src_dir
self.compile_js(jsfile, self.get_output_file_path(jsfile))
# will be called from the cocos2d.py script
def run(self, argv):
"""
"""
self.parse_args(argv)
# create output directory
try:
os.makedirs(self._dst_dir)
except OSError:
if os.path.exists(self._dst_dir) == False:
raise Exception("Error: cannot create folder in "+self._dst_dir)
# download the bin folder
jsbcc_exe_path = os.path.join(self._workingdir, "bin", "jsbcc");
if not os.path.exists(jsbcc_exe_path):
download_cmd_path = os.path.join(self._workingdir, os.pardir)
subprocess.call("python %s -f" % (os.path.join(download_cmd_path, "download-bin.py")), shell=True, cwd=download_cmd_path)
# deep iterate the src directory
for src_dir in self._src_dir_arr:
self._current_src_dir = src_dir
self._js_files[self._current_src_dir] = []
self.deep_iterate_dir(src_dir)
self.reorder_js_files()
self.handle_all_js_files()
print "\nCompilation finished, (%d) files succeed, (%d) files fail." % (len(self._success), len(self._failure))
if len(self._failure) > 0:
print "Failure files are:"
print self._failure
print "------------------------------"
def parse_args(self, argv):
"""
"""
from optparse import OptionParser
parser = OptionParser("usage: %prog jscompile -s src_dir -d dst_dir [-c -o COMPRESSED_FILENAME -j COMPILER_CONFIG]")
parser.add_option("-s", "--src",
action="append", type="string", dest="src_dir_arr",
help="source directory of js files needed to be compiled, supports mutiple source directory")
parser.add_option("-d", "--dst",
action="store", type="string", dest="dst_dir",
help="destination directory of js bytecode files to be stored")
parser.add_option("-c", "--use_closure_compiler",
action="store_true", dest="use_closure_compiler", default=False,
help="Whether to use closure compiler to compress all js files into just a big file")
parser.add_option("-o", "--output_compressed_filename",
action="store", dest="compressed_filename", default="game.min.js",
help="Only available when '-c' option was True")
parser.add_option("-j", "--compiler_config",
action="store", dest="compiler_config",
help="The configuration for closure compiler by using JSON, please refer to compiler_config_sample.json")
(options, args) = parser.parse_args(argv)
# print options
if options.src_dir_arr == None:
raise Exception("Please set source folder by \"-s\" or \"-src\", run ./jscompile.py -h for the usage ")
elif options.dst_dir == None:
raise Exception("Please set destination folder by \"-d\" or \"-dst\", run ./jscompile.py -h for the usage ")
else:
for src_dir in options.src_dir_arr:
if os.path.exists(src_dir) == False:
raise Exception("Error: dir (%s) doesn't exist..." % (src_dir))
# script directory
workingdir = os.path.dirname(inspect.getfile(inspect.currentframe()))
self.init(options, workingdir)
|
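For orientation, a minimal sketch of how the plugin above might be driven; the directory names and the compressed filename are placeholders, and it assumes the cocos2d console modules (and bin/jsbcc) are importable from the working directory:

# Hypothetical invocation of the jscompile plugin defined above.
# Roughly equivalent to: cocos2d.py jscompile -s src -d out -c -o game.min.js
import cocos2d_jscompile

plugin = cocos2d_jscompile.CCPluginJSCompile()
plugin.run([
    "-s", "src",          # source directory containing .js files (placeholder)
    "-d", "out",          # destination directory for .jsc bytecode (placeholder)
    "-c",                 # compress everything with the closure compiler first
    "-o", "game.min.js",  # name of the intermediate compressed file
])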
ManjiriBirajdar/coala
|
refs/heads/master
|
tests/settings/DocstringMetadataTest.py
|
11
|
import unittest
from coalib.settings.DocstringMetadata import DocstringMetadata
class DocstringMetadataTest(unittest.TestCase):
def test_from_docstring(self):
self.check_from_docstring_dataset("")
self.check_from_docstring_dataset(" description only ",
desc="description only")
self.check_from_docstring_dataset(" :param test: test description ",
param_dict={
"test": "test description"})
self.check_from_docstring_dataset(" @param test: test description ",
param_dict={
"test": "test description"})
self.check_from_docstring_dataset(" :return: something ",
retval_desc="something")
self.check_from_docstring_dataset(" @return: something ",
retval_desc="something")
self.check_from_docstring_dataset("""
Main description
@param p1: this is
a multiline desc for p1
:param p2: p2 description
@return: retval description
:return: retval description
override
""", desc="Main description", param_dict={
"p1": "this is\na multiline desc for p1\n",
"p2": "p2 description\n"
}, retval_desc="retval description override")
def test_str(self):
uut = DocstringMetadata.from_docstring(
'''
Description of something. No params.
''')
self.assertEqual(str(uut), "Description of something. No params.")
uut = DocstringMetadata.from_docstring(
'''
Description of something with params.
:param x: Imagine something.
:param y: x^2
''')
self.assertEqual(str(uut), "Description of something with params.")
def check_from_docstring_dataset(self,
docstring,
desc="",
param_dict=None,
retval_desc=""):
param_dict = param_dict or {}
self.assertIsInstance(docstring,
str,
"docstring needs to be a string for this test.")
doc_comment = DocstringMetadata.from_docstring(docstring)
self.assertEqual(doc_comment.desc, desc)
self.assertEqual(doc_comment.param_dict, param_dict)
self.assertEqual(doc_comment.retval_desc, retval_desc)
|
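As a quick illustration of the API exercised by the tests above, a hedged sketch of calling DocstringMetadata.from_docstring directly; the docstring content is made up and the printed values are approximate:

from coalib.settings.DocstringMetadata import DocstringMetadata

meta = DocstringMetadata.from_docstring("""
Frobnicate the input.
:param data: the data to process
:return: the processed data
""")
print(meta.desc)         # roughly "Frobnicate the input."
print(meta.param_dict)   # roughly {"data": "the data to process"}
print(meta.retval_desc)  # roughly "the processed data"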
akaihola/django
|
refs/heads/master
|
tests/regressiontests/views/tests/static.py
|
4
|
from __future__ import absolute_import
import mimetypes
from os import path
from django.conf import settings
from django.conf.urls.static import static
from django.test import TestCase
from django.http import HttpResponseNotModified
from .. import urls
from ..urls import media_dir
class StaticTests(TestCase):
"""Tests django views in django/views/static.py"""
def setUp(self):
self.prefix = 'site_media'
self.old_debug = settings.DEBUG
settings.DEBUG = True
def tearDown(self):
settings.DEBUG = self.old_debug
def test_serve(self):
"The static view can serve static media"
media_files = ['file.txt', 'file.txt.gz']
for filename in media_files:
response = self.client.get('/views/%s/%s' % (self.prefix, filename))
file_path = path.join(media_dir, filename)
with open(file_path) as fp:
self.assertEqual(fp.read(), response.content)
self.assertEqual(len(response.content), int(response['Content-Length']))
self.assertEqual(mimetypes.guess_type(file_path)[1], response.get('Content-Encoding', None))
def test_unknown_mime_type(self):
response = self.client.get('/views/%s/file.unknown' % self.prefix)
self.assertEqual('application/octet-stream', response['Content-Type'])
def test_copes_with_empty_path_component(self):
file_name = 'file.txt'
response = self.client.get('/views/%s//%s' % (self.prefix, file_name))
with open(path.join(media_dir, file_name)) as fp:
self.assertEqual(fp.read(), response.content)
def test_is_modified_since(self):
file_name = 'file.txt'
response = self.client.get('/views/%s/%s' % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE='Thu, 1 Jan 1970 00:00:00 GMT')
with open(path.join(media_dir, file_name)) as fp:
self.assertEqual(fp.read(), response.content)
def test_not_modified_since(self):
file_name = 'file.txt'
response = self.client.get(
'/views/%s/%s' % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE='Mon, 18 Jan 2038 05:14:07 GMT'
# This is 24h before max Unix time. Remember to fix Django and
# update this test well before 2038 :)
)
self.assertTrue(isinstance(response, HttpResponseNotModified))
def test_invalid_if_modified_since(self):
"""Handle bogus If-Modified-Since values gracefully
Assume that a file is modified since an invalid timestamp as per RFC
2616, section 14.25.
"""
file_name = 'file.txt'
invalid_date = 'Mon, 28 May 999999999999 28:25:26 GMT'
response = self.client.get('/views/%s/%s' % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE=invalid_date)
with open(path.join(media_dir, file_name)) as fp:
self.assertEqual(fp.read(), response.content)
self.assertEqual(len(response.content),
int(response['Content-Length']))
def test_invalid_if_modified_since2(self):
"""Handle even more bogus If-Modified-Since values gracefully
Assume that a file is modified since an invalid timestamp as per RFC
2616, section 14.25.
"""
file_name = 'file.txt'
invalid_date = ': 1291108438, Wed, 20 Oct 2010 14:05:00 GMT'
response = self.client.get('/views/%s/%s' % (self.prefix, file_name),
HTTP_IF_MODIFIED_SINCE=invalid_date)
with open(path.join(media_dir, file_name)) as fp:
self.assertEqual(fp.read(), response.content)
self.assertEqual(len(response.content),
int(response['Content-Length']))
class StaticHelperTest(StaticTests):
"""
Test case to make sure the static URL pattern helper works as expected
"""
def setUp(self):
super(StaticHelperTest, self).setUp()
self.prefix = 'media'
self._old_views_urlpatterns = urls.urlpatterns[:]
urls.urlpatterns += static('/media/', document_root=media_dir)
def tearDown(self):
super(StaticHelperTest, self).tearDown()
urls.urlpatterns = self._old_views_urlpatterns
|
EmanueleCannizzaro/scons
|
refs/heads/master
|
test/CC/CCCOMSTR.py
|
1
|
#!/usr/bin/env python
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/CC/CCCOMSTR.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
"""
Test that the $CCCOMSTR construction variable allows you to configure
the C compilation output.
"""
import os
import TestSCons
_python_ = TestSCons._python_
_exe = TestSCons._exe
test = TestSCons.TestSCons()
test.write('mycc.py', r"""
import sys
outfile = open(sys.argv[1], 'wb')
infile = open(sys.argv[2], 'rb')
for l in [l for l in infile.readlines() if l[:6] != '/*cc*/']:
outfile.write(l)
sys.exit(0)
""")
if os.path.normcase('.c') == os.path.normcase('.C'):
alt_c_suffix = '.C'
else:
alt_c_suffix = '.c'
test.write('SConstruct', """
env = Environment(CCCOM = r'%(_python_)s mycc.py $TARGET $SOURCE',
CCCOMSTR = 'Building $TARGET from $SOURCE',
OBJSUFFIX='.obj')
env.Object(target = 'test1', source = 'test1.c')
env.Object(target = 'test2', source = 'test2%(alt_c_suffix)s')
""" % locals())
test.write('test1.c', """\
test1.c
/*cc*/
""")
test.write('test2'+alt_c_suffix, """\
test2.C
/*cc*/
""")
test.run(stdout = test.wrap_stdout("""\
Building test1.obj from test1.c
Building test2.obj from test2%(alt_c_suffix)s
""" % locals()))
test.must_match('test1.obj', "test1.c\n")
test.must_match('test2.obj', "test2.C\n")
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
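Outside of this test's fake compiler, a hedged SConstruct fragment showing the typical use of $CCCOMSTR with a real toolchain; the file names are placeholders:

# Hypothetical SConstruct: print a short message instead of the full compile
# command line. CCCOMSTR only changes what is displayed; CCCOM still defines
# the command that actually runs.
env = Environment(CCCOMSTR='Compiling $TARGET from $SOURCE')
env.Object(target='hello', source='hello.c')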
himadriganguly/featurerequest
|
refs/heads/master
|
ticketing/tests/test_functional.py
|
1
|
from django.test import LiveServerTestCase
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
class LoginTestCase(StaticLiveServerTestCase):
fixtures = ['auth_user.json']
def setUp(self):
# This line is used when selenium is used as a standalone
self.selenium = webdriver.Firefox()
# This line will be used when selenium is used in Jenkins with Selenium Grid
# self.selenium = webdriver.Remote(
# command_executor='http://127.0.0.1:4444/wd/hub',
# desired_capabilities={
# "browserName": "firefox",
# "platform": "LINUX",
# }
# )
self.selenium.maximize_window()
super(LoginTestCase, self).setUp()
def tearDown(self):
self.selenium.quit()
super(LoginTestCase, self).tearDown()
def test_login_page(self):
self.selenium.get(self.live_server_url)
# Fill login information of admin
username = self.selenium.find_element_by_id("username")
username.send_keys("admin")
password = self.selenium.find_element_by_id("password")
password.send_keys("admin12345678")
# Locate Login button and click it
self.selenium.find_element_by_id('login-btn').click()
self.assertIn('New Feature | Dashboard', self.selenium.title)
|
benfinke/ns_python
|
refs/heads/master
|
nssrc/com/citrix/netscaler/nitro/resource/config/tunnel/tunneltrafficpolicy_binding.py
|
3
|
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class tunneltrafficpolicy_binding(base_resource):
""" Binding class showing the resources that can be bound to tunneltrafficpolicy_binding.
"""
def __init__(self) :
self._name = ""
self.tunneltrafficpolicy_tunnelglobal_binding = []
@property
def name(self) :
ur"""Name of the tunnel traffic policy for which to show detailed information.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
ur"""Name of the tunnel traffic policy for which to show detailed information.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def tunneltrafficpolicy_tunnelglobal_bindings(self) :
ur"""tunnelglobal that can be bound to tunneltrafficpolicy.
"""
try :
return self._tunneltrafficpolicy_tunnelglobal_binding
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(tunneltrafficpolicy_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.tunneltrafficpolicy_binding
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(self, service, name) :
ur""" Use this API to fetch tunneltrafficpolicy_binding resource.
"""
try :
if type(name) is not list :
obj = tunneltrafficpolicy_binding()
obj.name = name
response = obj.get_resource(service)
else :
if name and len(name) > 0 :
response = [None for _ in range(len(name))]
obj = [tunneltrafficpolicy_binding() for _ in range(len(name))]
for i in range(len(name)) :
obj[i].name = name[i];
response[i] = obj[i].get_resource(service)
return response
except Exception as e:
raise e
class tunneltrafficpolicy_binding_response(base_response) :
def __init__(self, length=1) :
self.tunneltrafficpolicy_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.tunneltrafficpolicy_binding = [tunneltrafficpolicy_binding() for _ in range(length)]
|
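A minimal usage sketch for the binding class above, assuming a reachable NetScaler and valid credentials; the address, credentials, and policy name are placeholders:

from nssrc.com.citrix.netscaler.nitro.service.nitro_service import nitro_service
from nssrc.com.citrix.netscaler.nitro.resource.config.tunnel.tunneltrafficpolicy_binding import tunneltrafficpolicy_binding

# Hypothetical session against a NetScaler NSIP (placeholder address/credentials).
client = nitro_service("192.0.2.10", "http")
client.login("nsroot", "password")
# Fetch the bindings of a single tunnel traffic policy by name (placeholder name).
binding = tunneltrafficpolicy_binding.get(client, "my_tunnel_policy")
print(binding.name)
client.logout()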
vivekananda/fbeats
|
refs/heads/master
|
django/contrib/localflavor/ch/ch_states.py
|
544
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
STATE_CHOICES = (
('AG', _('Aargau')),
('AI', _('Appenzell Innerrhoden')),
('AR', _('Appenzell Ausserrhoden')),
('BS', _('Basel-Stadt')),
('BL', _('Basel-Land')),
('BE', _('Berne')),
('FR', _('Fribourg')),
('GE', _('Geneva')),
('GL', _('Glarus')),
('GR', _('Graubuenden')),
('JU', _('Jura')),
('LU', _('Lucerne')),
('NE', _('Neuchatel')),
('NW', _('Nidwalden')),
('OW', _('Obwalden')),
('SH', _('Schaffhausen')),
('SZ', _('Schwyz')),
('SO', _('Solothurn')),
('SG', _('St. Gallen')),
('TG', _('Thurgau')),
('TI', _('Ticino')),
('UR', _('Uri')),
('VS', _('Valais')),
('VD', _('Vaud')),
('ZG', _('Zug')),
('ZH', _('Zurich'))
)
|
fxfitz/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/ovirt/ovirt_host_networks.py
|
19
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016, 2018 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ovirt_host_networks
short_description: Module to manage host networks in oVirt/RHV
version_added: "2.3"
author: "Ondra Machacek (@machacekondra)"
description:
- "Module to manage host networks in oVirt/RHV."
options:
name:
description:
- "Name of the host to manage networks for."
required: true
state:
description:
- "Should the host be present/absent."
choices: ['present', 'absent']
default: present
bond:
description:
- "Dictionary describing network bond:"
- "C(name) - Bond name."
- "C(mode) - Bonding mode."
- "C(options) - Bonding options."
- "C(interfaces) - List of interfaces to create a bond."
interface:
description:
- "Name of the network interface where logical network should be attached."
networks:
description:
- "List of dictionary describing networks to be attached to interface or bond:"
- "C(name) - Name of the logical network to be assigned to bond or interface."
- "C(boot_protocol) - Boot protocol one of the I(none), I(static) or I(dhcp)."
- "C(address) - IP address in case of I(static) boot protocol is used."
- "C(netmask) - Subnet mask in case of I(static) boot protocol is used."
- "C(gateway) - Gateway in case of I(static) boot protocol is used."
- "C(version) - IP version. Either v4 or v6. Default is v4."
labels:
description:
- "List of names of the network label to be assigned to bond or interface."
check:
description:
- "If I(true) verify connectivity between host and engine."
- "Network configuration changes will be rolled back if connectivity between
engine and the host is lost after changing network configuration."
save:
description:
- "If I(true) network configuration will be persistent, by default they are temporary."
extends_documentation_fragment: ovirt
'''
EXAMPLES = '''
# Examples don't contain auth parameter for simplicity,
# look at ovirt_auth module to see how to reuse authentication:
# Create bond on eth0 and eth1 interface, and put 'myvlan' network on top of it:
- name: Bonds
ovirt_host_networks:
name: myhost
bond:
name: bond0
mode: 2
interfaces:
- eth1
- eth2
networks:
- name: myvlan
boot_protocol: static
address: 1.2.3.4
netmask: 255.255.255.0
gateway: 1.2.3.4
version: v4
# Create bond on eth1 and eth2 interfaces, specifying both mode and miimon:
- name: Bonds
ovirt_host_networks:
name: myhost
bond:
name: bond0
mode: 1
options:
miimon: 200
interfaces:
- eth1
- eth2
# Remove bond0 bond from host interfaces:
- ovirt_host_networks:
state: absent
name: myhost
bond:
name: bond0
# Assign myvlan1 and myvlan2 vlans to host eth0 interface:
- ovirt_host_networks:
name: myhost
interface: eth0
networks:
- name: myvlan1
- name: myvlan2
# Remove myvlan2 vlan from host eth0 interface:
- ovirt_host_networks:
state: absent
name: myhost
interface: eth0
networks:
- name: myvlan2
# Remove all networks/vlans from host eth0 interface:
- ovirt_host_networks:
state: absent
name: myhost
interface: eth0
'''
RETURN = '''
id:
description: ID of the host NIC which is managed
returned: On success if host NIC is found.
type: str
sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c
host_nic:
description: "Dictionary of all the host NIC attributes. Host NIC attributes can be found on your oVirt/RHV instance
at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/host_nic."
returned: On success if host NIC is found.
type: dict
'''
import traceback
try:
import ovirtsdk4.types as otypes
except ImportError:
pass
from ansible.module_utils import six
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ovirt import (
BaseModule,
check_sdk,
create_connection,
equal,
get_dict_of_struct,
get_entity,
get_link_name,
ovirt_full_argument_spec,
search_by_name,
)
def get_bond_options(mode, usr_opts):
MIIMON_100 = dict(miimon='100')
DEFAULT_MODE_OPTS = {
'1': MIIMON_100,
'2': MIIMON_100,
'3': MIIMON_100,
'4': dict(xmit_hash_policy='2', **MIIMON_100)
}
options = []
if mode is None:
return options
def get_type_name(mode_number):
"""
We need to maintain these type strings for the __compare_options method,
for easier comparison.
"""
modes = [
'Active-Backup',
'Load balance (balance-xor)',
None,
'Dynamic link aggregation (802.3ad)',
]
if (not 0 < mode_number <= len(modes) - 1):
return None
return modes[mode_number - 1]
try:
mode_number = int(mode)
except ValueError:
raise Exception('Bond mode must be a number.')
options.append(
otypes.Option(
name='mode',
type=get_type_name(mode_number),
value=str(mode_number)
)
)
opts_dict = DEFAULT_MODE_OPTS.get(mode, {})
opts_dict.update(**usr_opts)
options.extend(
[otypes.Option(name=opt, value=value)
for opt, value in six.iteritems(opts_dict)]
)
return options
class HostNetworksModule(BaseModule):
def __compare_options(self, new_options, old_options):
return sorted(get_dict_of_struct(opt) for opt in new_options) != sorted(get_dict_of_struct(opt) for opt in old_options)
def build_entity(self):
return otypes.Host()
def update_address(self, attachments_service, attachment, network):
# Check if there is any change in address assignments and
# update it if needed:
for ip in attachment.ip_address_assignments:
if str(ip.ip.version) == network.get('version', 'v4'):
changed = False
if not equal(network.get('boot_protocol'), str(ip.assignment_method)):
ip.assignment_method = otypes.BootProtocol(network.get('boot_protocol'))
changed = True
if not equal(network.get('address'), ip.ip.address):
ip.ip.address = network.get('address')
changed = True
if not equal(network.get('gateway'), ip.ip.gateway):
ip.ip.gateway = network.get('gateway')
changed = True
if not equal(network.get('netmask'), ip.ip.netmask):
ip.ip.netmask = network.get('netmask')
changed = True
if changed:
if not self._module.check_mode:
attachments_service.service(attachment.id).update(attachment)
self.changed = True
break
def has_update(self, nic_service):
update = False
bond = self._module.params['bond']
networks = self._module.params['networks']
labels = self._module.params['labels']
nic = get_entity(nic_service)
if nic is None:
return update
# Check if bond configuration should be updated:
if bond:
update = self.__compare_options(get_bond_options(bond.get('mode'), bond.get('options')), getattr(nic.bonding, 'options', []))
update = update or not equal(
sorted(bond.get('interfaces')) if bond.get('interfaces') else None,
sorted(get_link_name(self._connection, s) for s in nic.bonding.slaves)
)
# Check if labels need to be updated on interface/bond:
if labels:
net_labels = nic_service.network_labels_service().list()
# If any labels the user passed aren't assigned, relabel the interface:
if sorted(labels) != sorted([lbl.id for lbl in net_labels]):
return True
if not networks:
return update
# Check if networks attachments configuration should be updated:
attachments_service = nic_service.network_attachments_service()
network_names = [network.get('name') for network in networks]
attachments = {}
for attachment in attachments_service.list():
name = get_link_name(self._connection, attachment.network)
if name in network_names:
attachments[name] = attachment
for network in networks:
attachment = attachments.get(network.get('name'))
# If the attachment doesn't exist, we need to create it:
if attachment is None:
return True
self.update_address(attachments_service, attachment, network)
return update
def _action_save_configuration(self, entity):
if self._module.params['save']:
if not self._module.check_mode:
self._service.service(entity.id).commit_net_config()
self.changed = True
def main():
argument_spec = ovirt_full_argument_spec(
state=dict(
choices=['present', 'absent'],
default='present',
),
name=dict(default=None, aliases=['host'], required=True),
bond=dict(default=None, type='dict'),
interface=dict(default=None),
networks=dict(default=None, type='list'),
labels=dict(default=None, type='list'),
check=dict(default=None, type='bool'),
save=dict(default=None, type='bool'),
)
module = AnsibleModule(argument_spec=argument_spec)
check_sdk(module)
try:
auth = module.params.pop('auth')
connection = create_connection(auth)
hosts_service = connection.system_service().hosts_service()
host_networks_module = HostNetworksModule(
connection=connection,
module=module,
service=hosts_service,
)
host = host_networks_module.search_entity()
if host is None:
raise Exception("Host '%s' was not found." % module.params['name'])
bond = module.params['bond']
interface = module.params['interface']
networks = module.params['networks']
labels = module.params['labels']
nic_name = bond.get('name') if bond else module.params['interface']
host_service = hosts_service.host_service(host.id)
nics_service = host_service.nics_service()
nic = search_by_name(nics_service, nic_name)
network_names = [network['name'] for network in networks or []]
state = module.params['state']
if (
state == 'present' and
(nic is None or host_networks_module.has_update(nics_service.service(nic.id)))
):
# Remove networks which are attached to a different interface than the user wants:
attachments_service = host_service.network_attachments_service()
# Append attachment ID to network if needs update:
for a in attachments_service.list():
current_network_name = get_link_name(connection, a.network)
if current_network_name in network_names:
for n in networks:
if n['name'] == current_network_name:
n['id'] = a.id
# Check if we have to break some bonds:
removed_bonds = []
if nic is not None:
for host_nic in nics_service.list():
if host_nic.bonding and nic.id in [slave.id for slave in host_nic.bonding.slaves]:
removed_bonds.append(otypes.HostNic(id=host_nic.id))
# Assign the networks:
host_networks_module.action(
entity=host,
action='setup_networks',
post_action=host_networks_module._action_save_configuration,
check_connectivity=module.params['check'],
removed_bonds=removed_bonds if removed_bonds else None,
modified_bonds=[
otypes.HostNic(
name=bond.get('name'),
bonding=otypes.Bonding(
options=get_bond_options(bond.get('mode'), bond.get('options')),
slaves=[
otypes.HostNic(name=i) for i in bond.get('interfaces', [])
],
),
),
] if bond else None,
modified_labels=[
otypes.NetworkLabel(
id=str(name),
host_nic=otypes.HostNic(
name=bond.get('name') if bond else interface
),
) for name in labels
] if labels else None,
modified_network_attachments=[
otypes.NetworkAttachment(
id=network.get('id'),
network=otypes.Network(
name=network['name']
) if network['name'] else None,
host_nic=otypes.HostNic(
name=bond.get('name') if bond else interface
),
ip_address_assignments=[
otypes.IpAddressAssignment(
assignment_method=otypes.BootProtocol(
network.get('boot_protocol', 'none')
),
ip=otypes.Ip(
address=network.get('address'),
gateway=network.get('gateway'),
netmask=network.get('netmask'),
version=otypes.IpVersion(
network.get('version')
) if network.get('version') else None,
),
),
],
) for network in networks
] if networks else None,
)
elif state == 'absent' and nic:
attachments = []
nic_service = nics_service.nic_service(nic.id)
attached_labels = set([str(lbl.id) for lbl in nic_service.network_labels_service().list()])
if networks:
attachments_service = nic_service.network_attachments_service()
attachments = attachments_service.list()
attachments = [
attachment for attachment in attachments
if get_link_name(connection, attachment.network) in network_names
]
# Remove unmanaged networks:
unmanaged_networks_service = host_service.unmanaged_networks_service()
unmanaged_networks = [(u.id, u.name) for u in unmanaged_networks_service.list()]
for net_id, net_name in unmanaged_networks:
if net_name in network_names:
if not module.check_mode:
unmanaged_networks_service.unmanaged_network_service(net_id).remove()
host_networks_module.changed = True
# Need to check if there are any labels to be removed, as the backend fails
# if we try to remove a non-existing label; for bonds and attachments it's OK:
if (labels and set(labels).intersection(attached_labels)) or bond or attachments:
host_networks_module.action(
entity=host,
action='setup_networks',
post_action=host_networks_module._action_save_configuration,
check_connectivity=module.params['check'],
removed_bonds=[
otypes.HostNic(
name=bond.get('name'),
),
] if bond else None,
removed_labels=[
otypes.NetworkLabel(id=str(name)) for name in labels
] if labels else None,
removed_network_attachments=attachments if attachments else None,
)
nic = search_by_name(nics_service, nic_name)
module.exit_json(**{
'changed': host_networks_module.changed,
'id': nic.id if nic else None,
'host_nic': get_dict_of_struct(nic),
})
except Exception as e:
module.fail_json(msg=str(e), exception=traceback.format_exc())
finally:
connection.close(logout=auth.get('token') is None)
if __name__ == "__main__":
main()
|
RydrDojo/Ridr_app
|
refs/heads/master
|
pylotVenv/lib/python2.7/site-packages/jinja2/loaders.py
|
333
|
# -*- coding: utf-8 -*-
"""
jinja2.loaders
~~~~~~~~~~~~~~
Jinja loader classes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
from types import ModuleType
from os import path
from hashlib import sha1
from jinja2.exceptions import TemplateNotFound
from jinja2.utils import open_if_exists, internalcode
from jinja2._compat import string_types, iteritems
def split_template_path(template):
"""Split a path into segments and perform a sanity check. If it detects
'..' in the path it will raise a `TemplateNotFound` error.
"""
pieces = []
for piece in template.split('/'):
if path.sep in piece \
or (path.altsep and path.altsep in piece) or \
piece == path.pardir:
raise TemplateNotFound(template)
elif piece and piece != '.':
pieces.append(piece)
return pieces
class BaseLoader(object):
"""Baseclass for all loaders. Subclass this and override `get_source` to
implement a custom loading mechanism. The environment provides a
`get_template` method that calls the loader's `load` method to get the
:class:`Template` object.
A very basic example for a loader that looks up templates on the file
system could look like this::
from jinja2 import BaseLoader, TemplateNotFound
from os.path import join, exists, getmtime
class MyLoader(BaseLoader):
def __init__(self, path):
self.path = path
def get_source(self, environment, template):
path = join(self.path, template)
if not exists(path):
raise TemplateNotFound(template)
mtime = getmtime(path)
with file(path) as f:
source = f.read().decode('utf-8')
return source, path, lambda: mtime == getmtime(path)
"""
#: if set to `False` it indicates that the loader cannot provide access
#: to the source of templates.
#:
#: .. versionadded:: 2.4
has_source_access = True
def get_source(self, environment, template):
"""Get the template source, filename and reload helper for a template.
It's passed the environment and template name and has to return a
tuple in the form ``(source, filename, uptodate)`` or raise a
`TemplateNotFound` error if it can't locate the template.
The source part of the returned tuple must be the source of the
template as a unicode string or an ASCII bytestring. The filename should
be the name of the file on the filesystem if it was loaded from there,
otherwise `None`. The filename is used by python for the tracebacks
if no loader extension is used.
The last item in the tuple is the `uptodate` function. If auto
reloading is enabled it's always called to check if the template
changed. No arguments are passed so the function must store the
old state somewhere (for example in a closure). If it returns `False`
the template will be reloaded.
"""
if not self.has_source_access:
raise RuntimeError('%s cannot provide access to the source' %
self.__class__.__name__)
raise TemplateNotFound(template)
def list_templates(self):
"""Iterates over all templates. If the loader does not support that
it should raise a :exc:`TypeError` which is the default behavior.
"""
raise TypeError('this loader cannot iterate over all templates')
@internalcode
def load(self, environment, name, globals=None):
"""Loads a template. This method looks up the template in the cache
or loads one by calling :meth:`get_source`. Subclasses should not
override this method as loaders working on collections of other
loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
will not call this method but `get_source` directly.
"""
code = None
if globals is None:
globals = {}
# first we try to get the source for this template together
# with the filename and the uptodate function.
source, filename, uptodate = self.get_source(environment, name)
# try to load the code from the bytecode cache if there is a
# bytecode cache configured.
bcc = environment.bytecode_cache
if bcc is not None:
bucket = bcc.get_bucket(environment, name, filename, source)
code = bucket.code
# if we don't have code so far (not cached, no longer up to
# date) etc. we compile the template
if code is None:
code = environment.compile(source, name, filename)
# if the bytecode cache is available and the bucket doesn't
# have a code so far, we give the bucket the new code and put
# it back to the bytecode cache.
if bcc is not None and bucket.code is None:
bucket.code = code
bcc.set_bucket(bucket)
return environment.template_class.from_code(environment, code,
globals, uptodate)
class FileSystemLoader(BaseLoader):
"""Loads templates from the file system. This loader can find templates
in folders on the file system and is the preferred way to load them.
The loader takes the path to the templates as string, or if multiple
locations are wanted a list of them which is then looked up in the
given order::
>>> loader = FileSystemLoader('/path/to/templates')
>>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
Per default the template encoding is ``'utf-8'`` which can be changed
by setting the `encoding` parameter to something else.
To follow symbolic links, set the *followlinks* parameter to ``True``::
>>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
.. versionchanged:: 2.8+
The *followlinks* parameter was added.
"""
def __init__(self, searchpath, encoding='utf-8', followlinks=False):
if isinstance(searchpath, string_types):
searchpath = [searchpath]
self.searchpath = list(searchpath)
self.encoding = encoding
self.followlinks = followlinks
def get_source(self, environment, template):
pieces = split_template_path(template)
for searchpath in self.searchpath:
filename = path.join(searchpath, *pieces)
f = open_if_exists(filename)
if f is None:
continue
try:
contents = f.read().decode(self.encoding)
finally:
f.close()
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
return contents, filename, uptodate
raise TemplateNotFound(template)
def list_templates(self):
found = set()
for searchpath in self.searchpath:
walk_dir = os.walk(searchpath, followlinks=self.followlinks)
for dirpath, dirnames, filenames in walk_dir:
for filename in filenames:
template = os.path.join(dirpath, filename) \
[len(searchpath):].strip(os.path.sep) \
.replace(os.path.sep, '/')
if template[:2] == './':
template = template[2:]
if template not in found:
found.add(template)
return sorted(found)
class PackageLoader(BaseLoader):
"""Load templates from python eggs or packages. It is constructed with
the name of the python package and the path to the templates in that
package::
loader = PackageLoader('mypackage', 'views')
If the package path is not given, ``'templates'`` is assumed.
Per default the template encoding is ``'utf-8'`` which can be changed
by setting the `encoding` parameter to something else. Due to the nature
of eggs it's only possible to reload templates if the package was loaded
from the file system and not a zip file.
"""
def __init__(self, package_name, package_path='templates',
encoding='utf-8'):
from pkg_resources import DefaultProvider, ResourceManager, \
get_provider
provider = get_provider(package_name)
self.encoding = encoding
self.manager = ResourceManager()
self.filesystem_bound = isinstance(provider, DefaultProvider)
self.provider = provider
self.package_path = package_path
def get_source(self, environment, template):
pieces = split_template_path(template)
p = '/'.join((self.package_path,) + tuple(pieces))
if not self.provider.has_resource(p):
raise TemplateNotFound(template)
filename = uptodate = None
if self.filesystem_bound:
filename = self.provider.get_resource_filename(self.manager, p)
mtime = path.getmtime(filename)
def uptodate():
try:
return path.getmtime(filename) == mtime
except OSError:
return False
source = self.provider.get_resource_string(self.manager, p)
return source.decode(self.encoding), filename, uptodate
def list_templates(self):
path = self.package_path
if path[:2] == './':
path = path[2:]
elif path == '.':
path = ''
offset = len(path)
results = []
def _walk(path):
for filename in self.provider.resource_listdir(path):
fullname = path + '/' + filename
if self.provider.resource_isdir(fullname):
_walk(fullname)
else:
results.append(fullname[offset:].lstrip('/'))
_walk(path)
results.sort()
return results
class DictLoader(BaseLoader):
"""Loads a template from a python dict. It's passed a dict of unicode
strings bound to template names. This loader is useful for unittesting:
>>> loader = DictLoader({'index.html': 'source here'})
Because auto reloading is rarely useful this is disabled per default.
"""
def __init__(self, mapping):
self.mapping = mapping
def get_source(self, environment, template):
if template in self.mapping:
source = self.mapping[template]
return source, None, lambda: source == self.mapping.get(template)
raise TemplateNotFound(template)
def list_templates(self):
return sorted(self.mapping)
class FunctionLoader(BaseLoader):
"""A loader that is passed a function which does the loading. The
function receives the name of the template and has to return either
a unicode string with the template source, a tuple in the form ``(source,
filename, uptodatefunc)`` or `None` if the template does not exist.
>>> def load_template(name):
... if name == 'index.html':
... return '...'
...
>>> loader = FunctionLoader(load_template)
The `uptodatefunc` is a function that is called if autoreload is enabled
and has to return `True` if the template is still up to date. For more
details have a look at :meth:`BaseLoader.get_source` which has the same
return value.
"""
def __init__(self, load_func):
self.load_func = load_func
def get_source(self, environment, template):
rv = self.load_func(template)
if rv is None:
raise TemplateNotFound(template)
elif isinstance(rv, string_types):
return rv, None, None
return rv
class PrefixLoader(BaseLoader):
"""A loader that is passed a dict of loaders where each loader is bound
to a prefix. The prefix is delimited from the template by a slash per
default, which can be changed by setting the `delimiter` argument to
something else::
loader = PrefixLoader({
'app1': PackageLoader('mypackage.app1'),
'app2': PackageLoader('mypackage.app2')
})
By loading ``'app1/index.html'`` the file from the app1 package is loaded,
by loading ``'app2/index.html'`` the file from the second.
"""
def __init__(self, mapping, delimiter='/'):
self.mapping = mapping
self.delimiter = delimiter
def get_loader(self, template):
try:
prefix, name = template.split(self.delimiter, 1)
loader = self.mapping[prefix]
except (ValueError, KeyError):
raise TemplateNotFound(template)
return loader, name
def get_source(self, environment, template):
loader, name = self.get_loader(template)
try:
return loader.get_source(environment, name)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
loader, local_name = self.get_loader(name)
try:
return loader.load(environment, local_name, globals)
except TemplateNotFound:
# re-raise the exception with the correct filename here.
# (the one that includes the prefix)
raise TemplateNotFound(name)
def list_templates(self):
result = []
for prefix, loader in iteritems(self.mapping):
for template in loader.list_templates():
result.append(prefix + self.delimiter + template)
return result
class ChoiceLoader(BaseLoader):
"""This loader works like the `PrefixLoader` just that no prefix is
specified. If a template could not be found by one loader the next one
is tried.
>>> loader = ChoiceLoader([
... FileSystemLoader('/path/to/user/templates'),
... FileSystemLoader('/path/to/system/templates')
... ])
This is useful if you want to allow users to override builtin templates
from a different location.
"""
def __init__(self, loaders):
self.loaders = loaders
def get_source(self, environment, template):
for loader in self.loaders:
try:
return loader.get_source(environment, template)
except TemplateNotFound:
pass
raise TemplateNotFound(template)
@internalcode
def load(self, environment, name, globals=None):
for loader in self.loaders:
try:
return loader.load(environment, name, globals)
except TemplateNotFound:
pass
raise TemplateNotFound(name)
def list_templates(self):
found = set()
for loader in self.loaders:
found.update(loader.list_templates())
return sorted(found)
class _TemplateModule(ModuleType):
"""Like a normal module but with support for weak references"""
class ModuleLoader(BaseLoader):
"""This loader loads templates from precompiled templates.
Example usage:
>>> loader = ChoiceLoader([
... ModuleLoader('/path/to/compiled/templates'),
... FileSystemLoader('/path/to/templates')
... ])
Templates can be precompiled with :meth:`Environment.compile_templates`.
"""
has_source_access = False
def __init__(self, path):
package_name = '_jinja2_module_templates_%x' % id(self)
# create a fake module that looks for the templates in the
# path given.
mod = _TemplateModule(package_name)
if isinstance(path, string_types):
path = [path]
else:
path = list(path)
mod.__path__ = path
sys.modules[package_name] = weakref.proxy(mod,
lambda x: sys.modules.pop(package_name, None))
# the only strong reference, the sys.modules entry is weak
# so that the garbage collector can remove it once the
# loader that created it goes out of business.
self.module = mod
self.package_name = package_name
@staticmethod
def get_template_key(name):
return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()
@staticmethod
def get_module_filename(name):
return ModuleLoader.get_template_key(name) + '.py'
@internalcode
def load(self, environment, name, globals=None):
key = self.get_template_key(name)
module = '%s.%s' % (self.package_name, key)
mod = getattr(self.module, module, None)
if mod is None:
try:
mod = __import__(module, None, None, ['root'])
except ImportError:
raise TemplateNotFound(name)
# remove the entry from sys.modules, we only want the attribute
# on the module object we have stored on the loader.
sys.modules.pop(module, None)
return environment.template_class.from_module_dict(
environment, mod.__dict__, globals)
|
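To tie the loaders above together, a short usage sketch; the template name and the 'templates' directory are placeholders:

from jinja2 import Environment, ChoiceLoader, DictLoader, FileSystemLoader

# Try an in-memory override first, then fall back to templates on disk.
loader = ChoiceLoader([
    DictLoader({'index.html': 'Hello {{ name }}!'}),
    FileSystemLoader('templates'),  # directory is a placeholder
])
env = Environment(loader=loader)
print(env.get_template('index.html').render(name='world'))  # -> Hello world!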
devops2014/djangosite
|
refs/heads/master
|
tests/template_backends/test_django.py
|
32
|
from template_tests.test_response import test_processor_name
from django.template import RequestContext
from django.template.backends.django import DjangoTemplates
from django.test import RequestFactory, ignore_warnings
from django.utils.deprecation import RemovedInDjango20Warning
from .test_dummy import TemplateStringsTests
class DjangoTemplatesTests(TemplateStringsTests):
engine_class = DjangoTemplates
backend_name = 'django'
def test_context_has_priority_over_template_context_processors(self):
# See ticket #23789.
engine = DjangoTemplates({
'DIRS': [],
'APP_DIRS': False,
'NAME': 'django',
'OPTIONS': {
'context_processors': [test_processor_name],
},
})
template = engine.from_string('{{ processors }}')
request = RequestFactory().get('/')
# Check that context processors run
content = template.render({}, request)
self.assertEqual(content, 'yes')
# Check that context overrides context processors
content = template.render({'processors': 'no'}, request)
self.assertEqual(content, 'no')
@ignore_warnings(category=RemovedInDjango20Warning)
def test_request_context_conflicts_with_request(self):
template = self.engine.from_string('hello')
request = RequestFactory().get('/')
request_context = RequestContext(request)
# This doesn't raise an exception.
template.render(request_context, request)
other_request = RequestFactory().get('/')
msg = ("render() was called with a RequestContext and a request "
"argument which refer to different requests. Make sure "
"that the context argument is a dict or at least that "
"the two arguments refer to the same request.")
with self.assertRaisesMessage(ValueError, msg):
template.render(request_context, other_request)
|
Micronaet/micronaet-sql7
|
refs/heads/master
|
sql_accounting_product_status_bs_set_date/wizard/set_date_wizard.py
|
2
|
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright (C) 2001-2014 Micronaet SRL (<http://www.micronaet.it>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import os
import sys
import netsvc
import logging
from openerp.osv import osv, orm, fields
from datetime import datetime, timedelta
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP, float_compare
import openerp.addons.decimal_precision as dp
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class res_company_inventory_date_wizard(osv.osv_memory):
''' Wizard to set the inventory date in the company parameters
'''
_name = 'res.company.inventory.date.wizard'
# Button event:
def set_date(self, cr, uid, ids, context=None):
''' Change date in company parameters '''
wizard_proxy = self.browse(cr, uid, ids, context=context)[0]
if wizard_proxy.inventory_date:
self.pool.get("res.company").set_inventory_date(
cr, uid, wizard_proxy.inventory_date,
context=context)
return True
def set_today_date(self, cr, uid, ids, context=None):
''' Change date in company parameters '''
self.pool.get("res.company").set_inventory_date(
cr, uid, datetime.now().strftime(
DEFAULT_SERVER_DATE_FORMAT),
context=context)
return True
# Fields function:
def _get_inventory_date(self, cr, uid, context=None):
''' Read company date else today:
'''
return self.pool.get("res.company").get_inventory_date(
cr, uid, context=context) or datetime.now().strftime(
DEFAULT_SERVER_DATE_FORMAT)
return
_columns = {
'inventory_date': fields.date('Date'),
}
_defaults = {
'inventory_date': lambda s, cr, uid, ctx: s._get_inventory_date(
cr, uid, ctx),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
robobrobro/ballin-octo-shame
|
refs/heads/master
|
lib/Python-3.4.3/Lib/concurrent/futures/_base.py
|
88
|
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
import collections
import logging
import threading
import time
FIRST_COMPLETED = 'FIRST_COMPLETED'
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
ALL_COMPLETED = 'ALL_COMPLETED'
_AS_COMPLETED = '_AS_COMPLETED'
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_FUTURE_STATES = [
PENDING,
RUNNING,
CANCELLED,
CANCELLED_AND_NOTIFIED,
FINISHED
]
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
# Logger for internal use by the futures package.
LOGGER = logging.getLogger("concurrent.futures")
class Error(Exception):
"""Base class for all future-related exceptions."""
pass
class CancelledError(Error):
"""The Future was cancelled."""
pass
class TimeoutError(Error):
"""The operation exceeded the given deadline."""
pass
class _Waiter(object):
"""Provides the event that wait() and as_completed() block on."""
def __init__(self):
self.event = threading.Event()
self.finished_futures = []
def add_result(self, future):
self.finished_futures.append(future)
def add_exception(self, future):
self.finished_futures.append(future)
def add_cancelled(self, future):
self.finished_futures.append(future)
class _AsCompletedWaiter(_Waiter):
"""Used by as_completed()."""
def __init__(self):
super(_AsCompletedWaiter, self).__init__()
self.lock = threading.Lock()
def add_result(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_result(future)
self.event.set()
def add_exception(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_exception(future)
self.event.set()
def add_cancelled(self, future):
with self.lock:
super(_AsCompletedWaiter, self).add_cancelled(future)
self.event.set()
class _FirstCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_COMPLETED)."""
def add_result(self, future):
super().add_result(future)
self.event.set()
def add_exception(self, future):
super().add_exception(future)
self.event.set()
def add_cancelled(self, future):
super().add_cancelled(future)
self.event.set()
class _AllCompletedWaiter(_Waiter):
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
def __init__(self, num_pending_calls, stop_on_exception):
self.num_pending_calls = num_pending_calls
self.stop_on_exception = stop_on_exception
self.lock = threading.Lock()
super().__init__()
def _decrement_pending_calls(self):
with self.lock:
self.num_pending_calls -= 1
if not self.num_pending_calls:
self.event.set()
def add_result(self, future):
super().add_result(future)
self._decrement_pending_calls()
def add_exception(self, future):
super().add_exception(future)
if self.stop_on_exception:
self.event.set()
else:
self._decrement_pending_calls()
def add_cancelled(self, future):
super().add_cancelled(future)
self._decrement_pending_calls()
class _AcquireFutures(object):
"""A context manager that does an ordered acquire of Future conditions."""
def __init__(self, futures):
self.futures = sorted(futures, key=id)
def __enter__(self):
for future in self.futures:
future._condition.acquire()
def __exit__(self, *args):
for future in self.futures:
future._condition.release()
def _create_and_install_waiters(fs, return_when):
if return_when == _AS_COMPLETED:
waiter = _AsCompletedWaiter()
elif return_when == FIRST_COMPLETED:
waiter = _FirstCompletedWaiter()
else:
pending_count = sum(
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
if return_when == FIRST_EXCEPTION:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
elif return_when == ALL_COMPLETED:
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
else:
raise ValueError("Invalid return condition: %r" % return_when)
for f in fs:
f._waiters.append(waiter)
return waiter
def as_completed(fs, timeout=None):
"""An iterator over the given futures that yields each as it completes.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
iterate over.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator that yields the given Futures as they complete (finished or
cancelled). If any given Futures are duplicated, they will be returned
once.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
"""
if timeout is not None:
end_time = timeout + time.time()
fs = set(fs)
with _AcquireFutures(fs):
finished = set(
f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
pending = fs - finished
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
try:
yield from finished
while pending:
if timeout is None:
wait_timeout = None
else:
wait_timeout = end_time - time.time()
if wait_timeout < 0:
raise TimeoutError(
'%d (of %d) futures unfinished' % (
len(pending), len(fs)))
waiter.event.wait(wait_timeout)
with waiter.lock:
finished = waiter.finished_futures
waiter.finished_futures = []
waiter.event.clear()
for future in finished:
yield future
pending.remove(future)
finally:
for f in fs:
with f._condition:
f._waiters.remove(waiter)
DoneAndNotDoneFutures = collections.namedtuple(
'DoneAndNotDoneFutures', 'done not_done')
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
"""Wait for the futures in the given sequence to complete.
Args:
fs: The sequence of Futures (possibly created by different Executors) to
wait upon.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
return_when: Indicates when this function should return. The options
are:
FIRST_COMPLETED - Return when any future finishes or is
cancelled.
FIRST_EXCEPTION - Return when any future finishes by raising an
exception. If no future raises an exception
then it is equivalent to ALL_COMPLETED.
ALL_COMPLETED - Return when all futures finish or are cancelled.
Returns:
A named 2-tuple of sets. The first set, named 'done', contains the
futures that completed (is finished or cancelled) before the wait
completed. The second set, named 'not_done', contains uncompleted
futures.
"""
with _AcquireFutures(fs):
done = set(f for f in fs
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
not_done = set(fs) - done
if (return_when == FIRST_COMPLETED) and done:
return DoneAndNotDoneFutures(done, not_done)
elif (return_when == FIRST_EXCEPTION) and done:
if any(f for f in done
if not f.cancelled() and f.exception() is not None):
return DoneAndNotDoneFutures(done, not_done)
if len(done) == len(fs):
return DoneAndNotDoneFutures(done, not_done)
waiter = _create_and_install_waiters(fs, return_when)
waiter.event.wait(timeout)
for f in fs:
with f._condition:
f._waiters.remove(waiter)
done.update(waiter.finished_futures)
return DoneAndNotDoneFutures(done, set(fs) - done)
class Future(object):
"""Represents the result of an asynchronous computation."""
def __init__(self):
"""Initializes the future. Should not be called by clients."""
self._condition = threading.Condition()
self._state = PENDING
self._result = None
self._exception = None
self._waiters = []
self._done_callbacks = []
def _invoke_callbacks(self):
for callback in self._done_callbacks:
try:
callback(self)
except Exception:
LOGGER.exception('exception calling callback for %r', self)
def __repr__(self):
with self._condition:
if self._state == FINISHED:
if self._exception:
return '<Future at %s state=%s raised %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._exception.__class__.__name__)
else:
return '<Future at %s state=%s returned %s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state],
self._result.__class__.__name__)
return '<Future at %s state=%s>' % (
hex(id(self)),
_STATE_TO_DESCRIPTION_MAP[self._state])
def cancel(self):
"""Cancel the future if possible.
Returns True if the future was cancelled, False otherwise. A future
cannot be cancelled if it is running or has already completed.
"""
with self._condition:
if self._state in [RUNNING, FINISHED]:
return False
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
return True
self._state = CANCELLED
self._condition.notify_all()
self._invoke_callbacks()
return True
def cancelled(self):
"""Return True if the future was cancelled."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
def running(self):
"""Return True if the future is currently executing."""
with self._condition:
return self._state == RUNNING
def done(self):
"""Return True of the future was cancelled or finished executing."""
with self._condition:
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
def __get_result(self):
if self._exception:
raise self._exception
else:
return self._result
def add_done_callback(self, fn):
"""Attaches a callable that will be called when the future finishes.
Args:
fn: A callable that will be called with this future as its only
argument when the future completes or is cancelled. The callable
will always be called by a thread in the same process in which
it was added. If the future has already completed or been
cancelled then the callable will be called immediately. These
callables are called in the order that they were added.
"""
with self._condition:
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
self._done_callbacks.append(fn)
return
fn(self)
def result(self, timeout=None):
"""Return the result of the call that the future represents.
Args:
timeout: The number of seconds to wait for the result if the future
isn't done. If None, then there is no limit on the wait time.
Returns:
The result of the call that the future represents.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
Exception: If the call raised then that exception will be raised.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self.__get_result()
else:
raise TimeoutError()
def exception(self, timeout=None):
"""Return the exception raised by the call that the future represents.
Args:
timeout: The number of seconds to wait for the exception if the
future isn't done. If None, then there is no limit on the wait
time.
Returns:
The exception raised by the call that the future represents or None
if the call completed without raising.
Raises:
CancelledError: If the future was cancelled.
TimeoutError: If the future didn't finish executing before the given
timeout.
"""
with self._condition:
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
self._condition.wait(timeout)
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
raise CancelledError()
elif self._state == FINISHED:
return self._exception
else:
raise TimeoutError()
# The following methods should only be used by Executors and in tests.
def set_running_or_notify_cancel(self):
"""Mark the future as running or process any cancel notifications.
Should only be used by Executor implementations and unit tests.
If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (through calls
to as_completed() or wait()) are notified and False is returned.
If the future was not cancelled then it is put in the running state
(future calls to running() will return True) and True is returned.
This method should be called by Executor implementations before
executing the work associated with this future. If this method returns
False then the work should not be executed.
Returns:
False if the Future was cancelled, True otherwise.
Raises:
RuntimeError: if this method was already called or if set_result()
or set_exception() was called.
"""
with self._condition:
if self._state == CANCELLED:
self._state = CANCELLED_AND_NOTIFIED
for waiter in self._waiters:
waiter.add_cancelled(self)
# self._condition.notify_all() is not necessary because
# self.cancel() triggers a notification.
return False
elif self._state == PENDING:
self._state = RUNNING
return True
else:
LOGGER.critical('Future %s in unexpected state: %s',
id(self),
self._state)
raise RuntimeError('Future in unexpected state')
def set_result(self, result):
"""Sets the return value of work associated with the future.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._result = result
self._state = FINISHED
for waiter in self._waiters:
waiter.add_result(self)
self._condition.notify_all()
self._invoke_callbacks()
def set_exception(self, exception):
"""Sets the result of the future as being the given exception.
Should only be used by Executor implementations and unit tests.
"""
with self._condition:
self._exception = exception
self._state = FINISHED
for waiter in self._waiters:
waiter.add_exception(self)
self._condition.notify_all()
self._invoke_callbacks()
class Executor(object):
"""This is an abstract base class for concrete asynchronous executors."""
def submit(self, fn, *args, **kwargs):
"""Submits a callable to be executed with the given arguments.
Schedules the callable to be executed as fn(*args, **kwargs) and returns
a Future instance representing the execution of the callable.
Returns:
A Future representing the given call.
"""
raise NotImplementedError()
def map(self, fn, *iterables, timeout=None):
"""Returns a iterator equivalent to map(fn, iter).
Args:
fn: A callable that will take as many arguments as there are
passed iterables.
timeout: The maximum number of seconds to wait. If None, then there
is no limit on the wait time.
Returns:
An iterator equivalent to: map(func, *iterables) but the calls may
be evaluated out-of-order.
Raises:
TimeoutError: If the entire result iterator could not be generated
before the given timeout.
Exception: If fn(*args) raises for any values.
"""
if timeout is not None:
end_time = timeout + time.time()
fs = [self.submit(fn, *args) for args in zip(*iterables)]
# Yield must be hidden in closure so that the futures are submitted
# before the first iterator value is required.
def result_iterator():
try:
for future in fs:
if timeout is None:
yield future.result()
else:
yield future.result(end_time - time.time())
finally:
for future in fs:
future.cancel()
return result_iterator()
def shutdown(self, wait=True):
"""Clean-up the resources associated with the Executor.
        It is safe to call this method several times. However, no other
        methods may be called after this one.
Args:
wait: If True then shutdown will not return until all running
futures have finished executing and the resources used by the
executor have been reclaimed.
"""
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown(wait=True)
return False
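# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative addition, not part of the original
# module): it walks a Future through the state transitions an Executor is
# expected to drive, and shows how wait() and as_completed() observe them.
# The worker thread below is an assumption standing in for a real executor
# worker.
if __name__ == '__main__':
    def _demo_worker(future):
        # An executor moves the future from PENDING to RUNNING first...
        if future.set_running_or_notify_cancel():
            # ...then publishes the outcome, which wakes installed waiters.
            future.set_result(42)

    demo_future = Future()
    threading.Thread(target=_demo_worker, args=(demo_future,)).start()

    done, not_done = wait([demo_future], timeout=5, return_when=ALL_COMPLETED)
    assert demo_future in done and demo_future.result() == 42

    for completed in as_completed([demo_future], timeout=5):
        print('demo future:', completed)  # <Future ... state=finished returned int>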
|
klahnakoski/cloc
|
refs/heads/master
|
cloc/util/vendor/dateutil/__init__.py
|
147
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard Python
datetime module.
"""
__author__ = "Tomi Pieviläinen <tomi.pievilainen@iki.fi>"
__license__ = "Simplified BSD"
__version__ = "2.1"
|
trondhindenes/ansible
|
refs/heads/devel
|
lib/ansible/plugins/lookup/csvfile.py
|
80
|
# (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: csvfile
author: Jan-Piet Mens (@jpmens) <jpmens(at)gmail.com>
version_added: "1.5"
short_description: read data from a TSV or CSV file
description:
- The csvfile lookup reads the contents of a file in CSV (comma-separated value) format.
The lookup looks for the row where the first column matches keyname, and returns the value in the second column, unless a different column is specified.
options:
col:
description: column to return (0 index).
default: "1"
default:
description: what to return if the value is not found in the file.
default: ''
delimiter:
description: field separator in the file, for a tab you can specify "TAB" or "t".
default: TAB
file:
description: name of the CSV/TSV file to open.
default: ansible.csv
encoding:
description: Encoding (character set) of the used CSV file.
default: utf-8
version_added: "2.1"
notes:
- The default is for TSV files (tab delimited) not CSV (comma delimited) ... yes the name is misleading.
"""
EXAMPLES = """
- name: Match 'Li' on the first column, return the second column (0 based index)
debug: msg="The atomic number of Lithium is {{ lookup('csvfile', 'Li file=elements.csv delimiter=,') }}"
- name: msg="Match 'Li' on the first column, but return the 3rd column (columns start counting after the match)"
debug: msg="The atomic mass of Lithium is {{ lookup('csvfile', 'Li file=elements.csv delimiter=, col=2') }}"
- name: Define Values From CSV File
set_fact:
loop_ip: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=1') }}"
int_ip: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=2') }}"
int_mask: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=3') }}"
int_name: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=4') }}"
local_as: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=5') }}"
neighbor_as: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=6') }}"
neigh_int_ip: "{{ lookup('csvfile', bgp_neighbor_ip +' file=bgp_neighbors.csv delimiter=, col=7') }}"
delegate_to: localhost
"""
RETURN = """
_raw:
description:
- value(s) stored in file column
"""
import codecs
import csv
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.six import PY2
from ansible.module_utils._text import to_bytes, to_native, to_text
from ansible.module_utils.common._collections_compat import MutableSequence
class CSVRecoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding='utf-8'):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def __next__(self):
return next(self.reader).encode("utf-8")
next = __next__ # For Python 2
class CSVReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwds):
if PY2:
f = CSVRecoder(f, encoding)
else:
f = codecs.getreader(encoding)(f)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def __next__(self):
row = next(self.reader)
return [to_text(s) for s in row]
next = __next__ # For Python 2
def __iter__(self):
return self
class LookupModule(LookupBase):
def read_csv(self, filename, key, delimiter, encoding='utf-8', dflt=None, col=1):
try:
f = open(filename, 'rb')
creader = CSVReader(f, delimiter=to_native(delimiter), encoding=encoding)
for row in creader:
if len(row) and row[0] == key:
return row[int(col)]
except Exception as e:
raise AnsibleError("csvfile: %s" % to_native(e))
return dflt
def run(self, terms, variables=None, **kwargs):
ret = []
for term in terms:
params = term.split()
key = params[0]
paramvals = {
'col': "1", # column to return
'default': None,
'delimiter': "TAB",
'file': 'ansible.csv',
'encoding': 'utf-8',
}
# parameters specified?
try:
for param in params[1:]:
name, value = param.split('=')
if name not in paramvals:
raise AnsibleAssertionError('%s not in paramvals' % name)
paramvals[name] = value
except (ValueError, AssertionError) as e:
raise AnsibleError(e)
if paramvals['delimiter'] == 'TAB':
paramvals['delimiter'] = "\t"
lookupfile = self.find_file_in_search_path(variables, 'files', paramvals['file'])
var = self.read_csv(lookupfile, key, paramvals['delimiter'], paramvals['encoding'], paramvals['default'], paramvals['col'])
if var is not None:
if isinstance(var, MutableSequence):
for v in var:
ret.append(v)
else:
ret.append(var)
return ret
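# Hedged usage sketch (illustrative addition, not part of the plugin,
# assuming the Ansible imports above resolve): it runs the CSVReader wrapper
# defined above over an in-memory UTF-8 stream so the decoding behaviour is
# visible. The sample rows are hypothetical.
if __name__ == '__main__':
    import io
    demo_stream = io.BytesIO(u'Li,3\nNa,11\n'.encode('utf-8'))
    for demo_row in CSVReader(demo_stream, delimiter=','):
        print(demo_row)  # [u'Li', u'3'] then [u'Na', u'11'] (text rows)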
|
ecosoft-odoo/odoo
|
refs/heads/8.0
|
addons/procurement/__init__.py
|
374
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import procurement
import wizard
|
gentledevil/ansible
|
refs/heads/devel
|
lib/ansible/plugins/connections/funcd.py
|
140
|
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Based on chroot.py (c) 2013, Maykel Moya <mmoya@speedyrails.com>
# (c) 2013, Michael Scherer <misc@zarb.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# ---
# The func transport permits using Ansible over func. For people who have
# already set up func and wish to experiment with Ansible, this allows a
# gradual migration to Ansible without having to completely redo the
# network setup.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
HAVE_FUNC=False
try:
import func.overlord.client as fc
HAVE_FUNC=True
except ImportError:
pass
import os
from ansible.callbacks import vvv
from ansible import errors
import tempfile
import shutil
class Connection(object):
''' Func-based connections '''
def __init__(self, runner, host, port, *args, **kwargs):
self.runner = runner
self.host = host
self.has_pipelining = False
        # port is unused, this goes over func
self.port = port
def connect(self, port=None):
if not HAVE_FUNC:
raise errors.AnsibleError("func is not installed")
self.client = fc.Client(self.host)
return self
def exec_command(self, cmd, tmp_path, become_user=None, sudoable=False,
executable='/bin/sh', in_data=None):
''' run a command on the remote minion '''
if in_data:
raise errors.AnsibleError("Internal Error: this module does not support optimized module pipelining")
        # totally ignores privilege escalation
vvv("EXEC %s" % (cmd), host=self.host)
p = self.client.command.run(cmd)[self.host]
return (p[0], '', p[1], p[2])
def _normalize_path(self, path, prefix):
if not path.startswith(os.path.sep):
path = os.path.join(os.path.sep, path)
normpath = os.path.normpath(path)
return os.path.join(prefix, normpath[1:])
def put_file(self, in_path, out_path):
''' transfer a file from local to remote '''
out_path = self._normalize_path(out_path, '/')
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
self.client.local.copyfile.send(in_path, out_path)
def fetch_file(self, in_path, out_path):
''' fetch a file from remote to local '''
in_path = self._normalize_path(in_path, '/')
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
        # need to use a tmp dir due to the semantic difference between
        # getfile (which takes a directory as destination) and fetch_file,
        # which takes a file directly
tmpdir = tempfile.mkdtemp(prefix="func_ansible")
self.client.local.getfile.get(in_path, tmpdir)
shutil.move(os.path.join(tmpdir, self.host, os.path.basename(in_path)),
out_path)
shutil.rmtree(tmpdir)
def close(self):
''' terminate the connection; nothing to do here '''
pass
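# Hedged usage sketch (illustrative addition, not part of the plugin,
# assuming the Ansible imports above resolve): it shows the intent of
# _normalize_path(), which anchors a remote path under a prefix so that
# relative or ".."-laden paths cannot escape it. The sample paths are
# hypothetical.
if __name__ == '__main__':
    demo_conn = Connection.__new__(Connection)  # skip runner/host wiring
    print(demo_conn._normalize_path('../../etc/passwd', '/srv/jail'))  # /srv/jail/etc/passwd
    print(demo_conn._normalize_path('var/log/syslog', '/'))            # /var/log/syslog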
|
michalliu/OpenWrt-Firefly-Libraries
|
refs/heads/master
|
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/json/tests/test_default.py
|
145
|
from json.tests import PyTest, CTest
class TestDefault(object):
def test_default(self):
self.assertEqual(
self.dumps(type, default=repr),
self.dumps(repr(type)))
class TestPyDefault(TestDefault, PyTest): pass
class TestCDefault(TestDefault, CTest): pass
|
SerpentCS/odoo
|
refs/heads/8.0
|
openerp/report/render/simple.py
|
324
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import render
from cStringIO import StringIO
import xml.dom.minidom
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table
from reportlab.lib.units import mm
from reportlab.lib.pagesizes import A4
import reportlab.lib
import copy
class simple(render.render):
def _render(self):
self.result = StringIO()
parser = xml.dom.minidom.parseString(self.xml)
title = parser.documentElement.tagName
doc = SimpleDocTemplate(self.result, pagesize=A4, title=title,
author='Odoo, Fabien Pinckaers', leftmargin=10*mm, rightmargin=10*mm)
styles = reportlab.lib.styles.getSampleStyleSheet()
title_style = copy.deepcopy(styles["Heading1"])
title_style.alignment = reportlab.lib.enums.TA_CENTER
story = [ Paragraph(title, title_style) ]
style_level = {}
nodes = [ (parser.documentElement,0) ]
while len(nodes):
node = nodes.pop(0)
value = ''
n=len(node[0].childNodes)-1
while n>=0:
if node[0].childNodes[n].nodeType==3:
value += node[0].childNodes[n].nodeValue
else:
nodes.insert( 0, (node[0].childNodes[n], node[1]+1) )
n-=1
if not node[1] in style_level:
style = copy.deepcopy(styles["Normal"])
style.leftIndent=node[1]*6*mm
style.firstLineIndent=-3*mm
style_level[node[1]] = style
story.append( Paragraph('<b>%s</b>: %s' % (node[0].tagName, value), style_level[node[1]]))
doc.build(story)
return self.result.getvalue()
if __name__=='__main__':
s = simple()
s.xml = '''<test>
<author-list>
<author>
<name>Fabien Pinckaers</name>
<age>23</age>
</author>
<author>
<name>Michel Pinckaers</name>
<age>53</age>
</author>
No other
</author-list>
</test>'''
if s.render():
print s.get()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Jenselme/unlog
|
refs/heads/master
|
unlog/unlog.py
|
1
|
import sys
import os
import copy
try:
from config import Config
from filter import Filter
except ImportError:
from unlog.config import Config
from unlog.filter import Filter
class Unlog:
"""Filter the output of a command or a log file according to pattern passed
in the *args* argument or according to a config file.
"""
def __init__(self, args):
""" **PARAMETERS**
        * *args* - an ArgumentParser object containing all the options. Look at
          :py:mod:`unlog.main` for the list of options.
"""
self._args = args
self._check_args()
if args.start_pattern:
self._filter_from_args()
else:
self._filter_from_config()
def _check_args(self):
"""Verify that the arguments are coherent. Exit with error code 2 if
incoherences are fonud.
"""
if not self._args.files and not self._args.start_pattern \
and not self._args.use_config_section:
sys.stderr.write('You must give a file or a start pattern.\n')
sys.exit(2)
if (self._args.start_group_pattern and not self._args.end_group_pattern)\
or (not self._args.start_group_pattern and self._args.end_group_pattern):
            sys.stderr.write('You must specify both --start-group and --end-group.\n')
sys.exit(2)
def _filter_from_args(self):
"""Filter the files or stdin according to the patterns give by the
arguments provided on the command line.
"""
config = copy.copy(self._args.__dict__)
        # Must not be passed to the filter (not needed)
del config['files']
        # The following keys are only used when processing from a config file
del config['config_file']
del config['use_config_section']
        # The filter manipulates strings in the proper encoding. No need to pass it.
del config['log_encoding']
self._output_filter = Filter(**config)
# If no files are provided, read from stdin
if self._args.files:
self._files = self._args.files
self.process_files()
else:
self.process_stdin()
def process_files(self):
"""Loop on each file given on the command line and process them.
"""
for file in self._files:
self.process_file(file, log_encoding=self._args.log_encoding)
def process_file(self, file_name, log_encoding='utf-8'):
"""Open file_name and process it with :py:meth:`unlog.filter.Filter.process_file`
"""
try:
with open(file_name, 'r', encoding=log_encoding) as file:
self._output_filter.process_file(file)
except IOError as e:
sys.stderr.write(str(e))
sys.stderr.write("\n")
def process_stdin(self):
"""Process each line on the stdin with
:py:meth:`unlog.filter.Filter.process_line`
"""
for line in iter(sys.stdin.readline, ''):
self._output_filter.process_line(line)
# We must print the stack when we reach the last line of stdin so that the
# errors located at the end are displayed.
self._output_filter.print_stack()
self._output_filter.send_mail()
def _filter_from_config(self):
"""Filter the files according to the patterns defined in the
configuration file.
"""
self._config = Config(self._args)
if self._args.files:
self.process_files_from_config()
else:
self._output_filter = self._config.get_filter()
self.process_stdin()
def process_files_from_config(self):
"""Loop over each file given on the command line and process them
according to the actions defined in the associated config file. The file
is then passed to :py:meth:`process_file_filter_from_config`.
"""
for file_name in self._args.files:
file_name = self._correct_path_input_file(file_name)
self.process_file_filter_from_config(file_name)
def _correct_path_input_file(self, file_name):
"""Expand the ~ variable and transform a relative path into an absolute
one.
"""
file_name = os.path.expanduser(file_name)
file_name = os.path.abspath(file_name)
return file_name
def process_file_filter_from_config(self, file_name):
"""Process the file_name with the filters defined in config with
:py:meth:`process_file`.
"""
self._output_filter = self._config.get_filter(file_name)
if self._output_filter:
if 'encoding' in self._config:
self.process_file(file_name, log_encoding=self._config['encoding'])
else:
self.process_file(file_name)
|
TheWardoctor/Wardoctors-repo
|
refs/heads/master
|
script.module.urlresolver/lib/urlresolver/plugins/vshareeu.py
|
2
|
"""
vshare.eu urlresolver plugin
Copyright (C) 2017 jsergio
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib2
import json
from lib import helpers
from urlresolver import common
from urlresolver.common import i18n
from urlresolver.resolver import UrlResolver, ResolverError
class VshareEuResolver(UrlResolver):
name = "vshare.eu"
domains = ['vshare.eu']
pattern = '(?://|\.)(vshare\.eu)/(?:embed-|)?([0-9a-zA-Z/]+)'
def __init__(self):
self.net = common.Net()
self.headers = {'User-Agent': common.SMU_USER_AGENT}
def get_media_url(self, host, media_id):
result = self.__check_auth(media_id)
if not result:
result = self.__auth_ip(media_id)
if result:
return helpers.pick_source(result.items()) + helpers.append_headers(self.headers)
raise ResolverError(i18n('no_ip_authorization'))
def __auth_ip(self, media_id):
header = i18n('vshareeu_auth_header')
line1 = i18n('auth_required')
line2 = i18n('visit_link')
line3 = i18n('click_pair') % ('http://vshare.eu/pair')
with common.kodi.CountdownDialog(header, line1, line2, line3) as cd:
return cd.start(self.__check_auth, [media_id])
def __check_auth(self, media_id):
common.logger.log('Checking Auth: %s' % (media_id))
url = 'http://vshare.eu/cgi-bin/index_dl.fcgi?op=pair&file_code=%s&check' % (media_id)
try:
js_result = json.loads(self.net.http_GET(url, headers=self.headers).content)
except ValueError:
raise ResolverError('Unusable Authorization Response')
except urllib2.HTTPError as e:
if e.code == 401:
js_result = json.loads(str(e.read()))
else:
raise
common.logger.log('Auth Result: %s' % (js_result))
if js_result.get('status') == 'true':
return js_result.get('response', {})
else:
return False
def get_url(self, host, media_id):
return self._default_get_url(host, media_id)
|
sonaht/ansible
|
refs/heads/devel
|
hacking/report.py
|
46
|
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""A tool to aggregate data about Ansible source and testing into a sqlite DB for reporting."""
from __future__ import (absolute_import, print_function)
import argparse
import os
import requests
import sqlite3
import sys
DATABASE_PATH = os.path.expanduser('~/.ansible/report.db')
BASE_PATH = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')) + '/'
ANSIBLE_PATH = os.path.join(BASE_PATH, 'lib')
ANSIBLE_TEST_PATH = os.path.join(BASE_PATH, 'test/runner')
if ANSIBLE_PATH not in sys.path:
sys.path.insert(0, ANSIBLE_PATH)
if ANSIBLE_TEST_PATH not in sys.path:
sys.path.insert(0, ANSIBLE_TEST_PATH)
from ansible.parsing.metadata import extract_metadata
from lib.target import walk_integration_targets
def main():
os.chdir(BASE_PATH)
args = parse_args()
args.func()
def parse_args():
try:
import argcomplete
except ImportError:
argcomplete = None
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(metavar='COMMAND')
subparsers.required = True # work-around for python 3 bug which makes subparsers optional
populate = subparsers.add_parser('populate',
help='populate report database')
populate.set_defaults(func=populate_database)
query = subparsers.add_parser('query',
help='query report database')
query.set_defaults(func=query_database)
if argcomplete:
argcomplete.autocomplete(parser)
args = parser.parse_args()
return args
def query_database():
if not os.path.exists(DATABASE_PATH):
exit('error: Database not found. Did you run `report.py populate` first?')
os.execvp('sqlite3', ('sqlite3', DATABASE_PATH))
def populate_database():
populate_modules()
populate_coverage()
populate_integration_targets()
def populate_modules():
module_dir = os.path.join(BASE_PATH, 'lib/ansible/modules/')
modules_rows = []
module_statuses_rows = []
for root, dir_names, file_names in os.walk(module_dir):
for file_name in file_names:
module, extension = os.path.splitext(file_name)
if module == '__init__' or extension != '.py':
continue
if module.startswith('_'):
module = module[1:]
namespace = os.path.join(root.replace(module_dir, '')).replace('/', '.')
path = os.path.join(root, file_name)
with open(path, 'rb') as module_fd:
module_data = module_fd.read()
result = extract_metadata(module_data=module_data)
metadata = result[0]
if not metadata:
if module == 'async_wrapper':
continue
raise Exception('no metadata for: %s' % path)
modules_rows.append(dict(
module=module,
namespace=namespace,
path=path.replace(BASE_PATH, ''),
supported_by=metadata['supported_by'],
))
for status in metadata['status']:
module_statuses_rows.append(dict(
module=module,
status=status,
))
populate_data(dict(
modules=dict(
rows=modules_rows,
schema=(
('module', 'TEXT'),
('namespace', 'TEXT'),
('path', 'TEXT'),
('supported_by', 'TEXT'),
)),
module_statuses=dict(
rows=module_statuses_rows,
schema=(
('module', 'TEXT'),
('status', 'TEXT'),
)),
))
def populate_coverage():
response = requests.get('https://codecov.io/api/gh/ansible/ansible/tree/devel/?src=extension')
data = response.json()
files = data['commit']['report']['files']
coverage_rows = []
for path, data in files.items():
report = data['t']
coverage_rows.append(dict(
path=path,
coverage=float(report['c']),
lines=report['n'],
hit=report['h'],
partial=report['p'],
missed=report['m'],
))
populate_data(dict(
coverage=dict(
rows=coverage_rows,
schema=(
('path', 'TEXT'),
('coverage', 'REAL'),
('lines', 'INTEGER'),
('hit', 'INTEGER'),
('partial', 'INTEGER'),
('missed', 'INTEGER'),
)),
))
def populate_integration_targets():
targets = list(walk_integration_targets())
integration_targets_rows = [dict(
target=target.name,
type=target.type,
path=target.path,
script_path=target.script_path,
) for target in targets]
integration_target_aliases_rows = [dict(
target=target.name,
alias=alias,
) for target in targets for alias in target.aliases]
integration_target_modules_rows = [dict(
target=target.name,
module=module,
) for target in targets for module in target.modules]
populate_data(dict(
integration_targets=dict(
rows=integration_targets_rows,
schema=(
('target', 'TEXT'),
('type', 'TEXT'),
('path', 'TEXT'),
('script_path', 'TEXT'),
)),
integration_target_aliases=dict(
rows=integration_target_aliases_rows,
schema=(
('target', 'TEXT'),
('alias', 'TEXT'),
)),
integration_target_modules=dict(
rows=integration_target_modules_rows,
schema=(
('target', 'TEXT'),
('module', 'TEXT'),
)),
))
def create_table(cursor, name, columns):
schema = ', '.join('%s %s' % column for column in columns)
cursor.execute('DROP TABLE IF EXISTS %s' % name)
cursor.execute('CREATE TABLE %s (%s)' % (name, schema))
def populate_table(cursor, rows, name, columns):
create_table(cursor, name, columns)
values = ', '.join([':%s' % column[0] for column in columns])
for row in rows:
cursor.execute('INSERT INTO %s VALUES (%s)' % (name, values), row)
def populate_data(data):
connection = sqlite3.connect(DATABASE_PATH)
cursor = connection.cursor()
for table in data:
populate_table(cursor, data[table]['rows'], table, data[table]['schema'])
connection.commit()
connection.close()
if __name__ == '__main__':
main()
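# Hedged usage sketch (illustrative addition, not part of the tool): calling
# _demo_populate() exercises create_table()/populate_table() above against an
# in-memory SQLite database so the schema/row conventions are visible. The
# table name and rows are hypothetical.
def _demo_populate():
    connection = sqlite3.connect(':memory:')
    cursor = connection.cursor()
    demo_schema = (
        ('module', 'TEXT'),
        ('supported_by', 'TEXT'),
    )
    demo_rows = [
        dict(module='ping', supported_by='core'),
        dict(module='file', supported_by='core'),
    ]
    populate_table(cursor, demo_rows, 'demo_modules', demo_schema)
    cursor.execute('SELECT module, supported_by FROM demo_modules ORDER BY module')
    print(cursor.fetchall())  # [('file', 'core'), ('ping', 'core')]
    connection.close()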
|
qingpingguo/git-repo
|
refs/heads/master
|
subcmds/sync.py
|
48
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import netrc
from optparse import SUPPRESS_HELP
import os
import pickle
import re
import shutil
import socket
import subprocess
import sys
import time
import urlparse
import xmlrpclib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
from git_command import GIT
from git_refs import R_HEADS, HEAD
from main import WrapperModule
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand
from error import RepoChangedException, GitError
from project import SyncBuffer
from progress import Progress
_ONE_DAY_S = 24 * 60 * 60
class _FetchError(Exception):
"""Internal error thrown in _FetchHelper() when we don't want stack trace."""
pass
class Sync(Command, MirrorSafeCommand):
jobs = 1
common = True
helpSummary = "Update working tree to the latest revision"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.
'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.
The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.
The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.
The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.
If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.
'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.
The -f/--force-broken option can be used to proceed with syncing
other projects if a project sync fails.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
SSH Connections
---------------
If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.
To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:
export GIT_SSH=ssh
%prog
Compatibility
~~~~~~~~~~~~~
This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.
This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.
If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
def _Options(self, p, show_smart=True):
self.jobs = self.manifest.default.sync_j
p.add_option('-f', '--force-broken',
dest='force_broken', action='store_true',
help="continue sync even if a project fails to sync")
p.add_option('-l','--local-only',
dest='local_only', action='store_true',
help="only update working tree, don't fetch")
p.add_option('-n','--network-only',
dest='network_only', action='store_true',
help="fetch only, don't update working tree")
p.add_option('-d','--detach',
dest='detach_head', action='store_true',
help='detach projects back to manifest revision')
p.add_option('-c','--current-branch',
dest='current_branch_only', action='store_true',
help='fetch only current branch from server')
p.add_option('-q','--quiet',
dest='quiet', action='store_true',
help='be more quiet')
p.add_option('-j','--jobs',
dest='jobs', action='store', type='int',
help="projects to fetch simultaneously (default %d)" % self.jobs)
p.add_option('-m', '--manifest-name',
dest='manifest_name',
help='temporary manifest to use for this sync', metavar='NAME.xml')
p.add_option('--no-clone-bundle',
dest='no_clone_bundle', action='store_true',
help='disable use of /clone.bundle on HTTP/HTTPS')
if show_smart:
p.add_option('-s', '--smart-sync',
dest='smart_sync', action='store_true',
help='smart sync using manifest from a known good build')
p.add_option('-t', '--smart-tag',
dest='smart_tag', action='store',
help='smart sync using manifest from a known tag')
p.add_option('-u', '--manifest-server-username', action='store',
dest='manifest_server_username',
help='username to authenticate with the manifest server')
p.add_option('-p', '--manifest-server-password', action='store',
dest='manifest_server_password',
help='password to authenticate with the manifest server')
g = p.add_option_group('repo Version options')
g.add_option('--no-repo-verify',
dest='no_repo_verify', action='store_true',
help='do not verify repo source code')
g.add_option('--repo-upgraded',
dest='repo_upgraded', action='store_true',
help=SUPPRESS_HELP)
def _FetchHelper(self, opt, project, lock, fetched, pm, sem, err_event):
"""Main function of the fetch threads when jobs are > 1.
Args:
opt: Program options returned from optparse. See _Options().
project: Project object for the project to fetch.
lock: Lock for accessing objects that are shared amongst multiple
_FetchHelper() threads.
fetched: set object that we will add project.gitdir to when we're done
(with our lock held).
      pm: Instance of a Progress object. We will call pm.update() (with our
lock held).
sem: We'll release() this semaphore when we exit so that another thread
can be started up.
err_event: We'll set this event in the case of an error (after printing
out info about the error).
"""
# We'll set to true once we've locked the lock.
did_lock = False
# Encapsulate everything in a try/except/finally so that:
# - We always set err_event in the case of an exception.
# - We always make sure we call sem.release().
# - We always make sure we unlock the lock if we locked it.
try:
try:
start = time.time()
success = project.Sync_NetworkHalf(
quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
clone_bundle=not opt.no_clone_bundle)
self._fetch_times.Set(project, time.time() - start)
# Lock around all the rest of the code, since printing, updating a set
# and Progress.update() are not thread safe.
lock.acquire()
did_lock = True
if not success:
print >>sys.stderr, 'error: Cannot fetch %s' % project.name
if opt.force_broken:
print >>sys.stderr, 'warn: --force-broken, continuing to sync'
else:
raise _FetchError()
fetched.add(project.gitdir)
pm.update()
except _FetchError:
err_event.set()
except:
err_event.set()
raise
finally:
if did_lock:
lock.release()
sem.release()
def _Fetch(self, projects, opt):
fetched = set()
pm = Progress('Fetching projects', len(projects))
if self.jobs == 1:
for project in projects:
pm.update()
if project.Sync_NetworkHalf(
quiet=opt.quiet,
current_branch_only=opt.current_branch_only,
clone_bundle=not opt.no_clone_bundle):
fetched.add(project.gitdir)
else:
print >>sys.stderr, 'error: Cannot fetch %s' % project.name
if opt.force_broken:
print >>sys.stderr, 'warn: --force-broken, continuing to sync'
else:
sys.exit(1)
else:
threads = set()
lock = _threading.Lock()
sem = _threading.Semaphore(self.jobs)
err_event = _threading.Event()
for project in projects:
# Check for any errors before starting any new threads.
# ...we'll let existing threads finish, though.
if err_event.isSet():
break
sem.acquire()
t = _threading.Thread(target = self._FetchHelper,
args = (opt,
project,
lock,
fetched,
pm,
sem,
err_event))
# Ensure that Ctrl-C will not freeze the repo process.
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
# If we saw an error, exit with code 1 so that other scripts can check.
if err_event.isSet():
print >>sys.stderr, '\nerror: Exited sync due to fetch errors'
sys.exit(1)
pm.end()
self._fetch_times.Save()
self._GCProjects(projects)
return fetched
def _GCProjects(self, projects):
if multiprocessing:
cpu_count = multiprocessing.cpu_count()
else:
cpu_count = 1
jobs = min(self.jobs, cpu_count)
if jobs < 2:
for project in projects:
project.bare_git.gc('--auto')
return
config = {'pack.threads': cpu_count / jobs if cpu_count > jobs else 1}
threads = set()
sem = _threading.Semaphore(jobs)
err_event = _threading.Event()
def GC(project):
try:
try:
project.bare_git.gc('--auto', config=config)
except GitError:
err_event.set()
except:
err_event.set()
raise
finally:
sem.release()
for project in projects:
if err_event.isSet():
break
sem.acquire()
t = _threading.Thread(target=GC, args=(project,))
t.daemon = True
threads.add(t)
t.start()
for t in threads:
t.join()
if err_event.isSet():
print >>sys.stderr, '\nerror: Exited sync due to gc errors'
sys.exit(1)
def UpdateProjectList(self):
new_project_paths = []
for project in self.GetProjects(None, missing_ok=True):
if project.relpath:
new_project_paths.append(project.relpath)
file_name = 'project.list'
file_path = os.path.join(self.manifest.repodir, file_name)
old_project_paths = []
if os.path.exists(file_path):
fd = open(file_path, 'r')
try:
old_project_paths = fd.read().split('\n')
finally:
fd.close()
for path in old_project_paths:
if not path:
continue
if path not in new_project_paths:
# If the path has already been deleted, we don't need to do it
if os.path.exists(self.manifest.topdir + '/' + path):
project = Project(
manifest = self.manifest,
name = path,
remote = RemoteSpec('origin'),
gitdir = os.path.join(self.manifest.topdir,
path, '.git'),
worktree = os.path.join(self.manifest.topdir, path),
relpath = path,
revisionExpr = 'HEAD',
revisionId = None,
groups = None)
if project.IsDirty():
print >>sys.stderr, 'error: Cannot remove project "%s": \
uncommitted changes are present' % project.relpath
print >>sys.stderr, ' commit changes, then run sync again'
return -1
else:
print >>sys.stderr, 'Deleting obsolete path %s' % project.worktree
shutil.rmtree(project.worktree)
# Try deleting parent subdirs if they are empty
project_dir = os.path.dirname(project.worktree)
while project_dir != self.manifest.topdir:
try:
os.rmdir(project_dir)
except OSError:
break
project_dir = os.path.dirname(project_dir)
new_project_paths.sort()
fd = open(file_path, 'w')
try:
fd.write('\n'.join(new_project_paths))
fd.write('\n')
finally:
fd.close()
return 0
def Execute(self, opt, args):
if opt.jobs:
self.jobs = opt.jobs
if self.jobs > 1:
soft_limit, _ = _rlimit_nofile()
self.jobs = min(self.jobs, (soft_limit - 5) / 3)
if opt.network_only and opt.detach_head:
print >>sys.stderr, 'error: cannot combine -n and -d'
sys.exit(1)
if opt.network_only and opt.local_only:
print >>sys.stderr, 'error: cannot combine -n and -l'
sys.exit(1)
if opt.manifest_name and opt.smart_sync:
print >>sys.stderr, 'error: cannot combine -m and -s'
sys.exit(1)
if opt.manifest_name and opt.smart_tag:
print >>sys.stderr, 'error: cannot combine -m and -t'
sys.exit(1)
if opt.manifest_server_username or opt.manifest_server_password:
if not (opt.smart_sync or opt.smart_tag):
print >>sys.stderr, 'error: -u and -p may only be combined with ' \
'-s or -t'
sys.exit(1)
if None in [opt.manifest_server_username, opt.manifest_server_password]:
print >>sys.stderr, 'error: both -u and -p must be given'
sys.exit(1)
if opt.manifest_name:
self.manifest.Override(opt.manifest_name)
if opt.smart_sync or opt.smart_tag:
if not self.manifest.manifest_server:
print >>sys.stderr, \
'error: cannot smart sync: no manifest server defined in manifest'
sys.exit(1)
manifest_server = self.manifest.manifest_server
if not '@' in manifest_server:
username = None
password = None
if opt.manifest_server_username and opt.manifest_server_password:
username = opt.manifest_server_username
password = opt.manifest_server_password
else:
try:
info = netrc.netrc()
except IOError:
print >>sys.stderr, '.netrc file does not exist or could not be opened'
else:
try:
parse_result = urlparse.urlparse(manifest_server)
if parse_result.hostname:
username, _account, password = \
info.authenticators(parse_result.hostname)
except TypeError:
# TypeError is raised when the given hostname is not present
# in the .netrc file.
print >>sys.stderr, 'No credentials found for %s in .netrc' % \
parse_result.hostname
except netrc.NetrcParseError as e:
print >>sys.stderr, 'Error parsing .netrc file: %s' % e
if (username and password):
manifest_server = manifest_server.replace('://', '://%s:%s@' %
(username, password),
1)
try:
server = xmlrpclib.Server(manifest_server)
if opt.smart_sync:
p = self.manifest.manifestProject
b = p.GetBranch(p.CurrentBranch)
branch = b.merge
if branch.startswith(R_HEADS):
branch = branch[len(R_HEADS):]
env = os.environ.copy()
if (env.has_key('TARGET_PRODUCT') and
env.has_key('TARGET_BUILD_VARIANT')):
target = '%s-%s' % (env['TARGET_PRODUCT'],
env['TARGET_BUILD_VARIANT'])
[success, manifest_str] = server.GetApprovedManifest(branch, target)
else:
[success, manifest_str] = server.GetApprovedManifest(branch)
else:
assert(opt.smart_tag)
[success, manifest_str] = server.GetManifest(opt.smart_tag)
if success:
manifest_name = "smart_sync_override.xml"
manifest_path = os.path.join(self.manifest.manifestProject.worktree,
manifest_name)
try:
f = open(manifest_path, 'w')
try:
f.write(manifest_str)
finally:
f.close()
except IOError:
print >>sys.stderr, 'error: cannot write manifest to %s' % \
manifest_path
sys.exit(1)
self.manifest.Override(manifest_name)
else:
print >>sys.stderr, 'error: %s' % manifest_str
sys.exit(1)
except (socket.error, IOError, xmlrpclib.Fault) as e:
print >>sys.stderr, 'error: cannot connect to manifest server %s:\n%s' % (
self.manifest.manifest_server, e)
sys.exit(1)
except xmlrpclib.ProtocolError as e:
print >>sys.stderr, 'error: cannot connect to manifest server %s:\n%d %s' % (
self.manifest.manifest_server, e.errcode, e.errmsg)
sys.exit(1)
rp = self.manifest.repoProject
rp.PreSync()
mp = self.manifest.manifestProject
mp.PreSync()
if opt.repo_upgraded:
_PostRepoUpgrade(self.manifest, quiet=opt.quiet)
if not opt.local_only:
mp.Sync_NetworkHalf(quiet=opt.quiet,
current_branch_only=opt.current_branch_only)
if mp.HasChanges:
syncbuf = SyncBuffer(mp.config)
mp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
self.manifest._Unload()
if opt.jobs is None:
self.jobs = self.manifest.default.sync_j
all_projects = self.GetProjects(args, missing_ok=True)
self._fetch_times = _FetchTimes(self.manifest)
if not opt.local_only:
to_fetch = []
now = time.time()
if _ONE_DAY_S <= (now - rp.LastFetch):
to_fetch.append(rp)
to_fetch.extend(all_projects)
to_fetch.sort(key=self._fetch_times.Get, reverse=True)
self._Fetch(to_fetch, opt)
_PostRepoFetch(rp, opt.no_repo_verify)
if opt.network_only:
# bail out now; the rest touches the working tree
return
if self.manifest.IsMirror:
# bail out now, we have no working tree
return
if self.UpdateProjectList():
sys.exit(1)
syncbuf = SyncBuffer(mp.config,
detach_head = opt.detach_head)
pm = Progress('Syncing work tree', len(all_projects))
for project in all_projects:
pm.update()
if project.worktree:
project.Sync_LocalHalf(syncbuf)
pm.end()
print >>sys.stderr
if not syncbuf.Finish():
sys.exit(1)
# If there's a notice that's supposed to print at the end of the sync, print
# it now...
if self.manifest.notice:
print self.manifest.notice
def _PostRepoUpgrade(manifest, quiet=False):
wrapper = WrapperModule()
if wrapper.NeedSetupGnuPG():
wrapper.SetupGnuPG(quiet)
for project in manifest.projects.values():
if project.Exists:
project.PostRepoUpgrade()
def _PostRepoFetch(rp, no_repo_verify=False, verbose=False):
if rp.HasChanges:
print >>sys.stderr, 'info: A new version of repo is available'
print >>sys.stderr, ''
if no_repo_verify or _VerifyTag(rp):
syncbuf = SyncBuffer(rp.config)
rp.Sync_LocalHalf(syncbuf)
if not syncbuf.Finish():
sys.exit(1)
print >>sys.stderr, 'info: Restarting repo with latest version'
raise RepoChangedException(['--repo-upgraded'])
else:
print >>sys.stderr, 'warning: Skipped upgrade to unverified version'
else:
if verbose:
print >>sys.stderr, 'repo version %s is current' % rp.work_git.describe(HEAD)
def _VerifyTag(project):
gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
if not os.path.exists(gpg_dir):
print >>sys.stderr,\
"""warning: GnuPG was not available during last "repo init"
warning: Cannot automatically authenticate repo."""
return True
try:
cur = project.bare_git.describe(project.GetRevisionId())
except GitError:
cur = None
if not cur \
or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
rev = project.revisionExpr
if rev.startswith(R_HEADS):
rev = rev[len(R_HEADS):]
print >>sys.stderr
print >>sys.stderr,\
"warning: project '%s' branch '%s' is not signed" \
% (project.name, rev)
return False
env = os.environ.copy()
env['GIT_DIR'] = project.gitdir.encode()
env['GNUPGHOME'] = gpg_dir.encode()
cmd = [GIT, 'tag', '-v', cur]
proc = subprocess.Popen(cmd,
stdout = subprocess.PIPE,
stderr = subprocess.PIPE,
env = env)
out = proc.stdout.read()
proc.stdout.close()
err = proc.stderr.read()
proc.stderr.close()
if proc.wait() != 0:
print >>sys.stderr
print >>sys.stderr, out
print >>sys.stderr, err
print >>sys.stderr
return False
return True
class _FetchTimes(object):
_ALPHA = 0.5
def __init__(self, manifest):
self._path = os.path.join(manifest.repodir, '.repopickle_fetchtimes')
self._times = None
self._seen = set()
def Get(self, project):
self._Load()
return self._times.get(project.name, _ONE_DAY_S)
def Set(self, project, t):
self._Load()
name = project.name
old = self._times.get(name, t)
self._seen.add(name)
a = self._ALPHA
self._times[name] = (a*t) + ((1-a) * old)
def _Load(self):
if self._times is None:
try:
f = open(self._path)
except IOError:
self._times = {}
return self._times
try:
try:
self._times = pickle.load(f)
except:
try:
os.remove(self._path)
except OSError:
pass
self._times = {}
finally:
f.close()
return self._times
def Save(self):
if self._times is None:
return
to_delete = []
for name in self._times:
if name not in self._seen:
to_delete.append(name)
for name in to_delete:
del self._times[name]
try:
f = open(self._path, 'wb')
try:
pickle.dump(self._times, f)
except (IOError, OSError, pickle.PickleError):
try:
os.remove(self._path)
except OSError:
pass
finally:
f.close()
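# Illustrative sketch (not part of the original sync flow): how the exponential
# moving average in _FetchTimes (_ALPHA = 0.5) smooths a project's fetch
# duration.  The timings below are hypothetical.
def _example_fetch_time_smoothing():
  old, alpha = 30.0, 0.5            # previous estimate, in seconds
  for measured in (10.0, 10.0, 10.0):
    old = (alpha * measured) + ((1 - alpha) * old)
  return old                        # 30 -> 20 -> 15 -> 12.5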
|
laurivosandi/certidude
|
refs/heads/master
|
certidude/errors.py
|
2
|
class RequestExists(Exception):
pass
class RequestDoesNotExist(Exception):
pass
class FatalError(Exception):
"""
Exception to be raised when user intervention is required
"""
pass
class DuplicateCommonNameError(FatalError):
pass
|
sadikovi/octohaven
|
refs/heads/master
|
src/job.py
|
1
|
#!/usr/bin/env python
#
# Copyright 2015 sadikovi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import utils, shlex
from flask import json
from sqlalchemy import asc, desc, and_, or_
from types import LongType, DictType, ListType
from octohaven import db, api
from sparkmodule import SPARK_OCTOHAVEN_JOB_ID
class Job(db.Model):
uid = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(255), nullable=False)
status = db.Column(db.String(30), nullable=False)
createtime = db.Column(db.BigInteger, nullable=False)
submittime = db.Column(db.BigInteger, nullable=False)
starttime = db.Column(db.BigInteger)
finishtime = db.Column(db.BigInteger)
priority = db.Column(db.BigInteger, nullable=False)
# Spark job options
sparkappid = db.Column(db.String(255))
entrypoint = db.Column(db.String(1024), nullable=False)
jar = db.Column(db.String(1024), nullable=False)
options = db.Column(db.String(2000), nullable=False)
jobconf = db.Column(db.String(2000), nullable=False)
# List of statuses available
READY = "READY"
DELAYED = "DELAYED"
RUNNING = "RUNNING"
FINISHED = "FINISHED"
CLOSED = "CLOSED"
STATUSES = [READY, DELAYED, RUNNING, FINISHED, CLOSED]
def __init__(self, name, status, priority, createtime, submittime, entrypoint, jar, dmemory,
ememory, options, jobconf):
# Canonicalize name
self.name = utils.getCanonicalName(name)
# Make sure that timestamps are longs
utils.assertInstance(createtime, LongType)
if not createtime > 0:
raise StandardError("Create time must be > 0, got %s" % createtime)
self.createtime = createtime
utils.assertInstance(submittime, LongType)
if not submittime > 0:
raise StandardError("Create time must be > 0, got %s" % submittime)
self.submittime = submittime
# Check status
if status not in self.STATUSES:
raise StandardError("Unrecognized status '%s'" % status)
self.status = status
# Validate priority for the job
self.priority = utils.validatePriority(priority)
# Parse Spark options into key-value pairs
parsedOptions = options if isinstance(options, DictType) else {}
if not parsedOptions:
cli = filter(lambda x: len(x) == 2, [x.split("=", 1) for x in shlex.split(str(options))])
for pre in cli:
parsedOptions[pre[0]] = pre[1]
# Manually set driver or executor memory takes precedence over Spark options
parsedOptions["spark.driver.memory"] = utils.validateMemory(dmemory)
parsedOptions["spark.executor.memory"] = utils.validateMemory(ememory)
# For storing in database options must be a string
self.options = json.dumps(parsedOptions)
# Parse job configuration/options into list of values
parsedJobConf = jobconf if isinstance(jobconf, ListType) else shlex.split(str(jobconf))
self.jobconf = json.dumps(parsedJobConf)
# Entrypoint for the Spark job
self.entrypoint = utils.validateEntrypoint(entrypoint)
# Jar file path
self.jar = utils.validateJarPath(jar)
# Properties with None default values (methods provided to set them)
self.sparkappid = None
self.starttime = None
self.finishtime = None
    # Return a deep copy of the job; note that the returned instance is not persisted in the database
def jobCopy(self, name, status, priority, createtime, submittime):
        # Create a dummy job; we overwrite some parameters later. We also have to specify dummy
        # memory for the driver and executors to pass validation, and eventually just reassign
        # the same options from the current job
deepCopy = Job(name=name, status=status, priority=priority, createtime=createtime,
submittime=submittime, entrypoint=self.entrypoint, jar=self.jar, dmemory="1g",
ememory="1g", options={}, jobconf=[])
# options below are completely overwritten
deepCopy.options = self.options
deepCopy.jobconf = self.jobconf
# Options such as sparkappid, starttime, and finishtime will be set to None automatically
return deepCopy
def setAppId(self, appId):
self.sparkappid = appId
# Return Spark options as dictionary
def getSparkOptions(self):
return json.loads(self.options)
# Return Job configuration/options as dictionary
def getJobConf(self):
return json.loads(self.jobconf)
def canClose(self):
return self.status == self.READY or self.status == self.DELAYED
@classmethod
@utils.sql
def create(cls, session, **opts):
# Resolve primary options
createtime = utils.currentTimeMillis()
# Resolve delay in seconds, if delay is negative it is reset to 0
delay = utils.intOrElse(opts["delay"] if "delay" in opts else 0, 0)
resolvedDelay = 0 if delay < 0 else delay
# Resolve status based on delay
status = cls.READY if resolvedDelay == 0 else cls.DELAYED
# Resolve submit time (when job will be added to the queue)
submittime = createtime + resolvedDelay * 1000
        # Resolve status based on submittime (if submit time is more than 1 second greater
        # than create time, we mark it as delayed, otherwise it is ready)
status = cls.DELAYED if submittime > createtime + 1000 else cls.READY
# Resolve priority, if status is READY then priority is submittime else truncated
# submittime, so delayed job can be scheduled as soon as possible once it is queued
priority = submittime if status == cls.READY else submittime / 1000L
# Create (including validation and options parsing) Job instance
job = cls(name=opts["name"], status=status, priority=priority,
createtime=createtime, submittime=submittime,
entrypoint=opts["entrypoint"], jar=opts["jar"],
dmemory=opts["dmemory"], ememory=opts["ememory"],
options=opts["options"], jobconf=opts["jobconf"])
session.add(job)
session.commit()
return job
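    # Worked example (hypothetical values): with createtime = 1000000 ms and delay = 90
    # seconds, submittime = 1000000 + 90 * 1000 = 1090000 ms. Since submittime exceeds
    # createtime + 1000, the status is DELAYED and priority = 1090000 / 1000 = 1090
    # (truncated), so the job can be scheduled as soon as possible once it becomes runnable.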
@classmethod
@utils.sql
def get(cls, session, uid):
return session.query(cls).get(uid)
@classmethod
@utils.sql
def list(cls, session, status, limit=100):
query = session.query(cls)
if status in cls.STATUSES:
query = query.filter_by(status = status)
ordered = query.order_by(desc(cls.createtime))
# If limit is negative, return all records
return ordered.limit(limit).all() if limit > 0 else ordered.all()
    # This method is used to fetch jobs for the job scheduler. We look for any jobs that are
    # ready to run, also fetching delayed jobs whose submit time is before the time specified.
    # `limit` caps the result at the size of the queue, and `delayedTime` (which most of the
    # time is now()) is an upper bound on submit time. We also sort by priority in ascending
    # order, since higher priority has a lower value.
    # If `limit` is negative, no limit is applied.
@classmethod
@utils.sql
    def listRunnable(cls, session, limit, delayedTime=None):
        # Resolve the default at call time; a default of utils.currentTimeMillis() in the
        # signature would be evaluated only once, at import time.
        if delayedTime is None:
            delayedTime = utils.currentTimeMillis()
        query = session.query(cls)
filtered = query.filter(or_(cls.status == cls.READY,
and_(cls.status == cls.DELAYED, cls.submittime <= delayedTime)))
ordered = filtered.order_by(asc(cls.priority))
if limit == 0:
return []
return ordered.limit(limit).all() if limit > 0 else ordered.all()
@classmethod
@utils.sql
def listRunning(cls, session):
# For running jobs ordering does not matter, and we return all records in database
return cls.list(session, cls.RUNNING, limit=-1)
@classmethod
@utils.sql
def close(cls, session, job):
if not job.canClose():
raise StandardError("Cannot close job")
job.status = cls.CLOSED
session.commit()
# Method to register job as finished by updating status and finish time
@classmethod
@utils.sql
def finish(cls, session, job):
if job.status != cls.RUNNING:
raise StandardError("Cannot finish not running job")
job.status = cls.FINISHED
job.finishtime = utils.currentTimeMillis()
session.commit()
# Method to register job as running by updating status and start time
@classmethod
@utils.sql
def run(cls, session, job):
if job.status != cls.READY and job.status != cls.DELAYED:
raise StandardError("Job must be READY or DELAYED to run")
job.status = cls.RUNNING
job.starttime = utils.currentTimeMillis()
session.commit()
def json(self):
return {
"uid": self.uid,
"name": self.name,
"status": self.status,
"createtime": self.createtime,
"submittime": self.submittime,
"starttime": self.starttime,
"finishtime": self.finishtime,
"priority": self.priority,
"sparkappid": self.sparkappid,
"entrypoint": self.entrypoint,
"jar": self.jar,
"options": self.getSparkOptions(),
"jobconf": self.getJobConf(),
"html_url": "/job/%s" % self.uid,
"create_timetable_html_url": "/create/timetable/job/%s" % self.uid,
"view_stdout_html_url": "/job/%s/stdout" % self.uid,
"view_stderr_html_url": "/job/%s/stderr" % self.uid,
"stdout_url": api("/job/log/%s/stdout/page/1" % self.uid),
"stderr_url": api("/job/log/%s/stderr/page/1" % self.uid),
"url": api("/job/get/%s" % self.uid),
"close_url": api("/job/close/%s" % self.uid) if self.canClose() else None
}
    # Return shell command to execute as a list of arguments.
    # The method allows passing extra Spark options and additional job arguments on the
    # command line. These options are transient and therefore not saved with the job.
def execCommand(self, sparkContext, extraArguments=[], extraSparkOptions={}):
# `spark-submit --master sparkurl --conf "" --conf "" --class entrypoint jar`
sparkSubmit = [sparkContext.getSparkSubmit()]
        # Note that the name can be overwritten in the Spark job itself, so the name shown in
        # the Octohaven UI might differ from the one displayed in the Spark UI
name = ["--name", "%s" % self.name]
master = ["--master", "%s" % sparkContext.getMasterAddress()]
# Update options with `additionalOptions` argument
confOptions = self.getSparkOptions().copy()
confOptions.update(extraSparkOptions)
# We also append octohaven job id to the spark-submit, so we can later assign Spark app id,
# and find our job in processes
confOptions.update({SPARK_OCTOHAVEN_JOB_ID: self.uid})
# Create list of conf options, ready to be used in cmd, flatten conf
conf = [["--conf", "%s=%s" % (key, value)] for key, value in confOptions.items()]
conf = [num for elem in conf for num in elem]
entrypoint = ["--class", "%s" % self.entrypoint]
jar = ["%s" % self.jar]
# Create list of job arguments, also append passed extra arguments
jobConf = self.getJobConf() + extraArguments
jobconf = ["%s" % elem for elem in jobConf]
# Construct exec command for shell
cmd = sparkSubmit + name + master + conf + entrypoint + jar + jobconf
return cmd
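    # For illustration only (paths, addresses and keys below are hypothetical), the returned
    # list has the shape:
    #   ['/opt/spark/bin/spark-submit', '--name', 'my-job',
    #    '--master', 'spark://master:7077',
    #    '--conf', 'spark.driver.memory=1g', '--conf', '<SPARK_OCTOHAVEN_JOB_ID>=42',
    #    '--class', 'com.example.Main', '/path/to/app.jar', 'arg1', 'arg2']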
|
Peddle/hue
|
refs/heads/master
|
desktop/core/ext-py/boto-2.38.0/boto/ec2/elb/policies.py
|
152
|
# Copyright (c) 2010 Reza Lotun http://reza.lotun.name
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.resultset import ResultSet
class AppCookieStickinessPolicy(object):
def __init__(self, connection=None):
self.cookie_name = None
self.policy_name = None
def __repr__(self):
return 'AppCookieStickiness(%s, %s)' % (self.policy_name,
self.cookie_name)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'CookieName':
self.cookie_name = value
elif name == 'PolicyName':
self.policy_name = value
class LBCookieStickinessPolicy(object):
def __init__(self, connection=None):
self.policy_name = None
self.cookie_expiration_period = None
def __repr__(self):
return 'LBCookieStickiness(%s, %s)' % (self.policy_name,
self.cookie_expiration_period)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'CookieExpirationPeriod':
self.cookie_expiration_period = value
elif name == 'PolicyName':
self.policy_name = value
class OtherPolicy(object):
def __init__(self, connection=None):
self.policy_name = None
def __repr__(self):
return 'OtherPolicy(%s)' % (self.policy_name)
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
self.policy_name = value
class Policies(object):
"""
ELB Policies
"""
def __init__(self, connection=None):
self.connection = connection
self.app_cookie_stickiness_policies = None
self.lb_cookie_stickiness_policies = None
self.other_policies = None
def __repr__(self):
app = 'AppCookieStickiness%s' % self.app_cookie_stickiness_policies
lb = 'LBCookieStickiness%s' % self.lb_cookie_stickiness_policies
other = 'Other%s' % self.other_policies
return 'Policies(%s,%s,%s)' % (app, lb, other)
def startElement(self, name, attrs, connection):
if name == 'AppCookieStickinessPolicies':
rs = ResultSet([('member', AppCookieStickinessPolicy)])
self.app_cookie_stickiness_policies = rs
return rs
elif name == 'LBCookieStickinessPolicies':
rs = ResultSet([('member', LBCookieStickinessPolicy)])
self.lb_cookie_stickiness_policies = rs
return rs
elif name == 'OtherPolicies':
rs = ResultSet([('member', OtherPolicy)])
self.other_policies = rs
return rs
def endElement(self, name, value, connection):
return
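# Minimal sketch (hypothetical values; in practice boto's response parser drives these
# SAX-style callbacks): building a policy object by hand.
def _example_manual_parse():
    policy = AppCookieStickinessPolicy()
    policy.endElement('PolicyName', 'my-app-policy', None)
    policy.endElement('CookieName', 'JSESSIONID', None)
    return repr(policy)  # 'AppCookieStickiness(my-app-policy, JSESSIONID)'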
|
festeh/BuildingMachineLearningSystemsWithPython
|
refs/heads/master
|
ch12/chapter.py
|
20
|
from jug import TaskGenerator
from glob import glob
import mahotas as mh
@TaskGenerator
def compute_texture(im):
from features import texture
imc = mh.imread(im)
return texture(mh.colors.rgb2gray(imc))
@TaskGenerator
def chist_file(fname):
from features import chist
im = mh.imread(fname)
return chist(im)
import numpy as np
to_array = TaskGenerator(np.array)
hstack = TaskGenerator(np.hstack)
haralicks = []
chists = []
labels = []
# Change this variable to point to
# the location of the dataset on disk
basedir = '../SimpleImageDataset/'
# Use glob to get all the images
images = glob('{}/*.jpg'.format(basedir))
for fname in sorted(images):
haralicks.append(compute_texture(fname))
chists.append(chist_file(fname))
# The class is encoded in the filename as xxxx00.jpg
labels.append(fname[:-len('00.jpg')])
haralicks = to_array(haralicks)
chists = to_array(chists)
labels = to_array(labels)
@TaskGenerator
def accuracy(features, labels):
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn import cross_validation
clf = Pipeline([('preproc', StandardScaler()),
('classifier', LogisticRegression())])
cv = cross_validation.LeaveOneOut(len(features))
scores = cross_validation.cross_val_score(
clf, features, labels, cv=cv)
return scores.mean()
scores_base = accuracy(haralicks, labels)
scores_chist = accuracy(chists, labels)
combined = hstack([chists, haralicks])
scores_combined = accuracy(combined, labels)
@TaskGenerator
def print_results(scores):
with open('results.image.txt', 'w') as output:
for k,v in scores:
output.write('Accuracy [{}]: {:.1%}\n'.format(
k, v.mean()))
print_results([
('base', scores_base),
('chists', scores_chist),
('combined' , scores_combined),
])
@TaskGenerator
def compute_lbp(fname):
from mahotas.features import lbp
imc = mh.imread(fname)
im = mh.colors.rgb2grey(imc)
return lbp(im, radius=8, points=6)
lbps = []
for fname in sorted(images):
# the rest of the loop as before
lbps.append(compute_lbp(fname))
lbps = to_array(lbps)
scores_lbps = accuracy(lbps, labels)
combined_all = hstack([chists, haralicks, lbps])
scores_combined_all = accuracy(combined_all, labels)
print_results([
('base', scores_base),
('chists', scores_chist),
('lbps', scores_lbps),
('combined' , scores_combined),
('combined_all' , scores_combined_all),
])
|
levkar/odoo-addons
|
refs/heads/8.0
|
stock_remit/sale.py
|
1
|
# -*- coding: utf-8 -*-
from openerp import models
class sale_order(models.Model):
_inherit = "sale.order"
# def _prepare_order_picking(self, cr, uid, order, context=None):
# result = super(sale_order, self)._prepare_order_picking(
# cr, uid, order, context=context)
# if order.shop_id.warehouse_id and order.shop_id.warehouse_id.stock_journal_id:
# result.update(
# stock_journal_id=order.shop_id.warehouse_id.stock_journal_id.id)
# return result
|
saturn597/stem
|
refs/heads/master
|
test/unit/exit_policy/rule.py
|
7
|
"""
Unit tests for the stem.exit_policy.ExitPolicyRule class.
"""
import unittest
from stem.exit_policy import AddressType, ExitPolicyRule
class TestExitPolicyRule(unittest.TestCase):
def test_accept_or_reject(self):
self.assertTrue(ExitPolicyRule("accept *:*").is_accept)
self.assertFalse(ExitPolicyRule("reject *:*").is_accept)
invalid_inputs = (
"accept",
"reject",
"accept *:*",
"accept\t*:*",
"accept\n*:*",
"acceptt *:*",
"rejectt *:*",
"blarg *:*",
" *:*",
"*:*",
"",
)
for rule_arg in invalid_inputs:
self.assertRaises(ValueError, ExitPolicyRule, rule_arg)
def test_str_unchanged(self):
# provides a series of test inputs where the str() representation should
# match the input rule
test_inputs = (
"accept *:*",
"reject *:*",
"accept *:80",
"accept *:80-443",
"accept 127.0.0.1:80",
"accept 87.0.0.1/24:80",
"accept 156.5.38.3/255.255.0.255:80",
"accept [FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF]:80",
"accept [FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF]/32:80",
)
for rule_arg in test_inputs:
rule = ExitPolicyRule(rule_arg)
self.assertEquals(rule_arg, str(rule))
def test_str_changed(self):
# some instances where our rule is valid but won't match our str() representation
test_inputs = {
"accept 10.0.0.1/32:80": "accept 10.0.0.1:80",
"accept 192.168.0.1/255.255.255.0:80": "accept 192.168.0.1/24:80",
"accept [::]/32:*": "accept [0000:0000:0000:0000:0000:0000:0000:0000]/32:*",
"accept [::]/128:*": "accept [0000:0000:0000:0000:0000:0000:0000:0000]:*",
}
for rule_arg, expected_str in test_inputs.items():
rule = ExitPolicyRule(rule_arg)
self.assertEquals(expected_str, str(rule))
def test_valid_wildcard(self):
test_inputs = {
"reject *:*": (True, True),
"reject *:80": (True, False),
"accept 192.168.0.1:*": (False, True),
"accept 192.168.0.1:80": (False, False),
"reject 127.0.0.1/0:*": (True, True),
"reject 127.0.0.1/0.0.0.0:*": (True, True),
"reject 127.0.0.1/16:*": (False, True),
"reject 127.0.0.1/32:*": (False, True),
"reject [0000:0000:0000:0000:0000:0000:0000:0000]/0:80": (True, False),
"reject [0000:0000:0000:0000:0000:0000:0000:0000]/64:80": (False, False),
"reject [0000:0000:0000:0000:0000:0000:0000:0000]/128:80": (False, False),
"accept 192.168.0.1:0-65535": (False, True),
"accept 192.168.0.1:1-65535": (False, True),
"accept 192.168.0.1:2-65535": (False, False),
"accept 192.168.0.1:1-65534": (False, False),
}
for rule_arg, attr in test_inputs.items():
is_address_wildcard, is_port_wildcard = attr
rule = ExitPolicyRule(rule_arg)
self.assertEquals(is_address_wildcard, rule.is_address_wildcard())
self.assertEquals(is_port_wildcard, rule.is_port_wildcard())
# check that when appropriate a /0 is reported as *not* being a wildcard
rule = ExitPolicyRule("reject 127.0.0.1/0:*")
rule._submask_wildcard = False
self.assertEquals(False, rule.is_address_wildcard())
rule = ExitPolicyRule("reject [0000:0000:0000:0000:0000:0000:0000:0000]/0:80")
rule._submask_wildcard = False
self.assertEquals(False, rule.is_address_wildcard())
def test_invalid_wildcard(self):
test_inputs = (
"reject */16:*",
"reject 127.0.0.1/*:*",
"reject *:0-*",
"reject *:*-15",
)
for rule_arg in test_inputs:
self.assertRaises(ValueError, ExitPolicyRule, rule_arg)
def test_wildcard_attributes(self):
rule = ExitPolicyRule("reject *:*")
self.assertEquals(AddressType.WILDCARD, rule.get_address_type())
self.assertEquals(None, rule.address)
self.assertEquals(None, rule.get_mask())
self.assertEquals(None, rule.get_masked_bits())
self.assertEquals(1, rule.min_port)
self.assertEquals(65535, rule.max_port)
def test_valid_ipv4_addresses(self):
test_inputs = {
"0.0.0.0": ("0.0.0.0", "255.255.255.255", 32),
"127.0.0.1/32": ("127.0.0.1", "255.255.255.255", 32),
"192.168.0.50/24": ("192.168.0.50", "255.255.255.0", 24),
"255.255.255.255/0": ("255.255.255.255", "0.0.0.0", 0),
}
for rule_addr, attr in test_inputs.items():
address, mask, masked_bits = attr
rule = ExitPolicyRule("accept %s:*" % rule_addr)
self.assertEquals(AddressType.IPv4, rule.get_address_type())
self.assertEquals(address, rule.address)
self.assertEquals(mask, rule.get_mask())
self.assertEquals(masked_bits, rule.get_masked_bits())
def test_invalid_ipv4_addresses(self):
test_inputs = (
"256.0.0.0",
"-1.0.0.0",
"0.0.0",
"0.0.0.",
"0.0.0.a",
"127.0.0.1/-1",
"127.0.0.1/33",
)
for rule_addr in test_inputs:
self.assertRaises(ValueError, ExitPolicyRule, "accept %s:*" % rule_addr)
def test_valid_ipv6_addresses(self):
test_inputs = {
"[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]":
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329",
"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF", 128),
"[FE80::0202:b3ff:fe1e:8329]":
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329",
"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF", 128),
"[0000:0000:0000:0000:0000:0000:0000:0000]/0":
("0000:0000:0000:0000:0000:0000:0000:0000",
"0000:0000:0000:0000:0000:0000:0000:0000", 0),
"[::]":
("0000:0000:0000:0000:0000:0000:0000:0000",
"FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF:FFFF", 128),
}
for rule_addr, attr in test_inputs.items():
address, mask, masked_bits = attr
rule = ExitPolicyRule("accept %s:*" % rule_addr)
self.assertEquals(AddressType.IPv6, rule.get_address_type())
self.assertEquals(address, rule.address)
self.assertEquals(mask, rule.get_mask())
self.assertEquals(masked_bits, rule.get_masked_bits())
def test_invalid_ipv6_addresses(self):
test_inputs = (
"fe80::0202:b3ff:fe1e:8329",
"[fe80::0202:b3ff:fe1e:8329",
"fe80::0202:b3ff:fe1e:8329]",
"[fe80::0202:b3ff:fe1e:832g]",
"[fe80:::b3ff:fe1e:8329]",
"[fe80::b3ff::fe1e:8329]",
"[fe80::0202:b3ff:fe1e:8329]/-1",
"[fe80::0202:b3ff:fe1e:8329]/129",
)
for rule_addr in test_inputs:
self.assertRaises(ValueError, ExitPolicyRule, "accept %s:*" % rule_addr)
def test_valid_ports(self):
test_inputs = {
"0": (0, 0),
"1": (1, 1),
"80": (80, 80),
"80-443": (80, 443),
}
for rule_port, attr in test_inputs.items():
min_port, max_port = attr
rule = ExitPolicyRule("accept 127.0.0.1:%s" % rule_port)
self.assertEquals(min_port, rule.min_port)
self.assertEquals(max_port, rule.max_port)
def test_invalid_ports(self):
test_inputs = (
"65536",
"a",
"5-3",
"5-",
"-3",
)
for rule_port in test_inputs:
self.assertRaises(ValueError, ExitPolicyRule, "accept 127.0.0.1:%s" % rule_port)
def test_is_match_wildcard(self):
test_inputs = {
"reject *:*": {
("192.168.0.1", 80): True,
("0.0.0.0", 80): True,
("255.255.255.255", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): True,
("[FE80:0000:0000:0000:0202:B3FF:FE1E:8329]", 80): True,
("192.168.0.1", None): True,
(None, 80, False): True,
(None, 80, True): True,
(None, None, False): True,
(None, None, True): True,
},
"reject 255.255.255.255/0:*": {
("192.168.0.1", 80): True,
("0.0.0.0", 80): True,
("255.255.255.255", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): False,
("[FE80:0000:0000:0000:0202:B3FF:FE1E:8329]", 80): False,
("192.168.0.1", None): True,
(None, 80, False): True,
(None, 80, True): False,
(None, None, False): True,
(None, None, True): False,
},
}
for rule_arg, matches in test_inputs.items():
rule = ExitPolicyRule(rule_arg)
rule._submask_wildcard = False
for match_args, expected_result in matches.items():
self.assertEquals(expected_result, rule.is_match(*match_args))
# port zero is special in that exit policies can include it, but it's not
# something that we can match against
rule = ExitPolicyRule("reject *:*")
self.assertRaises(ValueError, rule.is_match, "127.0.0.1", 0)
def test_is_match_ipv4(self):
test_inputs = {
"reject 192.168.0.50:*": {
("192.168.0.50", 80): True,
("192.168.0.51", 80): False,
("192.168.0.49", 80): False,
(None, 80, False): True,
(None, 80, True): False,
("192.168.0.50", None): True,
},
"reject 0.0.0.0/24:*": {
("0.0.0.0", 80): True,
("0.0.0.1", 80): True,
("0.0.0.255", 80): True,
("0.0.1.0", 80): False,
("0.1.0.0", 80): False,
("1.0.0.0", 80): False,
(None, 80, False): True,
(None, 80, True): False,
("0.0.0.0", None): True,
},
}
for rule_arg, matches in test_inputs.items():
rule = ExitPolicyRule(rule_arg)
for match_args, expected_result in matches.items():
self.assertEquals(expected_result, rule.is_match(*match_args))
def test_is_match_ipv6(self):
test_inputs = {
"reject [FE80:0000:0000:0000:0202:B3FF:FE1E:8329]:*": {
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): True,
("fe80:0000:0000:0000:0202:b3ff:fe1e:8329", 80): True,
("[FE80:0000:0000:0000:0202:B3FF:FE1E:8329]", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8330", 80): False,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8328", 80): False,
(None, 80, False): True,
(None, 80, True): False,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", None): True,
},
"reject [FE80:0000:0000:0000:0202:B3FF:FE1E:8329]/112:*": {
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:0000", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:FFFF", 80): True,
("FE80:0000:0000:0000:0202:B3FF:FE1F:8329", 80): False,
("FE81:0000:0000:0000:0202:B3FF:FE1E:8329", 80): False,
(None, 80, False): True,
(None, 80, True): False,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", None, False): True,
("FE80:0000:0000:0000:0202:B3FF:FE1E:8329", None, True): True,
},
}
for rule_arg, matches in test_inputs.items():
rule = ExitPolicyRule(rule_arg)
for match_args, expected_result in matches.items():
self.assertEquals(expected_result, rule.is_match(*match_args))
def test_is_match_port(self):
test_inputs = {
"reject *:80": {
("192.168.0.50", 80): True,
("192.168.0.50", 81): False,
("192.168.0.50", 79): False,
(None, 80): True,
("192.168.0.50", None, False): True,
("192.168.0.50", None, True): False,
},
"reject *:80-85": {
("192.168.0.50", 79): False,
("192.168.0.50", 80): True,
("192.168.0.50", 83): True,
("192.168.0.50", 85): True,
("192.168.0.50", 86): False,
(None, 83): True,
("192.168.0.50", None, False): True,
("192.168.0.50", None, True): False,
},
}
for rule_arg, matches in test_inputs.items():
rule = ExitPolicyRule(rule_arg)
for match_args, expected_result in matches.items():
self.assertEquals(expected_result, rule.is_match(*match_args))
|
TigorC/zulip
|
refs/heads/master
|
zerver/views/webhooks/travis.py
|
28
|
# Webhooks for external integrations.
from __future__ import absolute_import
from django.http import HttpRequest, HttpResponse
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success
from zerver.lib.validator import check_dict, check_string
from zerver.models import UserProfile, Client
import ujson
@api_key_only_webhook_view('Travis')
@has_request_variables
def api_travis_webhook(request, user_profile, client,
stream=REQ(default='travis'),
topic=REQ(default=None),
message=REQ('payload', validator=check_dict([
('author_name', check_string),
('status_message', check_string),
                           ('compare_url', check_string),
                           ('build_url', check_string),
]))):
# type: (HttpRequest, UserProfile, Client, str, str, Dict[str, str]) -> HttpResponse
author = message['author_name']
message_type = message['status_message']
changes = message['compare_url']
good_status = ['Passed', 'Fixed']
bad_status = ['Failed', 'Broken', 'Still Failing']
emoji = ''
if message_type in good_status:
emoji = ':thumbsup:'
elif message_type in bad_status:
emoji = ':thumbsdown:'
else:
emoji = "(No emoji specified for status '%s'.)" % (message_type,)
build_url = message['build_url']
template = (
u'Author: %s\n'
u'Build status: %s %s\n'
u'Details: [changes](%s), [build log](%s)')
body = template % (author, message_type, emoji, changes, build_url)
check_send_message(user_profile, client, 'stream', [stream], topic, body)
return json_success()
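# For illustration (hypothetical payload values), a passing build produces a body like:
#   Author: Some Author
#   Build status: Passed :thumbsup:
#   Details: [changes](<compare_url>), [build log](<build_url>)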
|
fhaoquan/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/test/test_genexps.py
|
97
|
doctests = """
Test simple loop with conditional
>>> sum(i*i for i in range(100) if i&1 == 1)
166650
Test simple nesting
>>> list((i,j) for i in range(3) for j in range(4) )
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
Test nesting with the inner expression dependent on the outer
>>> list((i,j) for i in range(4) for j in range(i) )
[(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2)]
Make sure the induction variable is not exposed
>>> i = 20
>>> sum(i*i for i in range(100))
328350
>>> i
20
Test first class
>>> g = (i*i for i in range(4))
>>> type(g)
<class 'generator'>
>>> list(g)
[0, 1, 4, 9]
Test direct calls to next()
>>> g = (i*i for i in range(3))
>>> next(g)
0
>>> next(g)
1
>>> next(g)
4
>>> next(g)
Traceback (most recent call last):
File "<pyshell#21>", line 1, in -toplevel-
next(g)
StopIteration
Does it stay stopped?
>>> next(g)
Traceback (most recent call last):
File "<pyshell#21>", line 1, in -toplevel-
next(g)
StopIteration
>>> list(g)
[]
Test running gen when defining function is out of scope
>>> def f(n):
... return (i*i for i in range(n))
>>> list(f(10))
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> def f(n):
... return ((i,j) for i in range(3) for j in range(n))
>>> list(f(4))
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
>>> def f(n):
... return ((i,j) for i in range(3) for j in range(4) if j in range(n))
>>> list(f(4))
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
>>> list(f(2))
[(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
Verify that parentheses are required in a statement
>>> def f(n):
... return i*i for i in range(n)
Traceback (most recent call last):
...
SyntaxError: invalid syntax
Verify that parentheses are required when used as a keyword argument value
>>> dict(a = i for i in range(10))
Traceback (most recent call last):
...
SyntaxError: invalid syntax
Verify that parentheses are allowed when used as a keyword argument value
>>> dict(a = (i for i in range(10))) #doctest: +ELLIPSIS
{'a': <generator object <genexpr> at ...>}
Verify early binding for the outermost for-expression
>>> x=10
>>> g = (i*i for i in range(x))
>>> x = 5
>>> list(g)
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
Verify that the outermost for-expression makes an immediate check
for iterability
>>> (i for i in 6)
Traceback (most recent call last):
File "<pyshell#4>", line 1, in -toplevel-
(i for i in 6)
TypeError: 'int' object is not iterable
Verify late binding for the outermost if-expression
>>> include = (2,4,6,8)
>>> g = (i*i for i in range(10) if i in include)
>>> include = (1,3,5,7,9)
>>> list(g)
[1, 9, 25, 49, 81]
Verify late binding for the innermost for-expression
>>> g = ((i,j) for i in range(3) for j in range(x))
>>> x = 4
>>> list(g)
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
Verify re-use of tuples (a side benefit of using genexps over listcomps)
>>> tupleids = list(map(id, ((i,i) for i in range(10))))
>>> int(max(tupleids) - min(tupleids))
0
Verify that syntax errors are raised for genexps used as lvalues
>>> (y for y in (1,2)) = 10
Traceback (most recent call last):
...
SyntaxError: can't assign to generator expression
>>> (y for y in (1,2)) += 10
Traceback (most recent call last):
...
SyntaxError: can't assign to generator expression
########### Tests borrowed from or inspired by test_generators.py ############
Make a generator that acts like range()
>>> yrange = lambda n: (i for i in range(n))
>>> list(yrange(10))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print("creator", next(r))
... return r
>>> def caller():
... r = creator()
... for i in r:
... print("caller", i)
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
>>> list(zrange(5))
[0, 1, 2, 3, 4]
Verify that a gen exp cannot be resumed while it is actively running:
>>> g = (next(me) for i in range(10))
>>> me = g
>>> next(me)
Traceback (most recent call last):
File "<pyshell#30>", line 1, in -toplevel-
next(me)
File "<pyshell#28>", line 1, in <generator expression>
g = (next(me) for i in range(10))
ValueError: generator already executing
Verify exception propagation
>>> g = (10 // i for i in (5, 0, 2))
>>> next(g)
2
>>> next(g)
Traceback (most recent call last):
File "<pyshell#37>", line 1, in -toplevel-
next(g)
File "<pyshell#35>", line 1, in <generator expression>
g = (10 // i for i in (5, 0, 2))
ZeroDivisionError: integer division or modulo by zero
>>> next(g)
Traceback (most recent call last):
File "<pyshell#38>", line 1, in -toplevel-
next(g)
StopIteration
Make sure that None is a valid return value
>>> list(None for i in range(10))
[None, None, None, None, None, None, None, None, None, None]
Check that generator attributes are present
>>> g = (i*i for i in range(3))
>>> expected = set(['gi_frame', 'gi_running'])
>>> set(attr for attr in dir(g) if not attr.startswith('__')) >= expected
True
>>> from test.support import HAVE_DOCSTRINGS
>>> print(g.__next__.__doc__ if HAVE_DOCSTRINGS else 'Implement next(self).')
Implement next(self).
>>> import types
>>> isinstance(g, types.GeneratorType)
True
Check the __iter__ slot is defined to return self
>>> iter(g) is g
True
Verify that the running flag is set properly
>>> g = (me.gi_running for i in (0,1))
>>> me = g
>>> me.gi_running
0
>>> next(me)
1
>>> me.gi_running
0
Verify that genexps are weakly referencable
>>> import weakref
>>> g = (i*i for i in range(4))
>>> wr = weakref.ref(g)
>>> wr() is g
True
>>> p = weakref.proxy(g)
>>> list(p)
[0, 1, 4, 9]
"""
import sys
# Trace function can throw off the tuple reuse test.
if hasattr(sys, 'gettrace') and sys.gettrace():
__test__ = {}
else:
__test__ = {'doctests' : doctests}
def test_main(verbose=None):
from test import support
from test import test_genexps
support.run_doctest(test_genexps, verbose)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_doctest(test_genexps, verbose)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
|
aiguofer/bokeh
|
refs/heads/master
|
bokeh/models/axes.py
|
2
|
''' Guide renderers for various kinds of axes that can be added to
Bokeh plots
'''
from __future__ import absolute_import
from ..core.has_props import abstract
from ..core.properties import Auto, Datetime, Either, Enum, Float, Include, Instance, Int, Override, String, Tuple
from ..core.property_mixins import LineProps, TextProps
from .formatters import BasicTickFormatter, CategoricalTickFormatter, DatetimeTickFormatter, LogTickFormatter, TickFormatter
from .renderers import GuideRenderer
from .tickers import Ticker, BasicTicker, LogTicker, CategoricalTicker, DatetimeTicker
@abstract
class Axis(GuideRenderer):
''' A base class that defines common properties for all axis types.
'''
bounds = Either(Auto, Tuple(Float, Float), Tuple(Datetime, Datetime), help="""
Bounds for the rendered axis. If unset, the axis will span the
entire plot in the given dimension.
""")
x_range_name = String('default', help="""
A particular (named) x-range to use for computing screen
locations when rendering an axis on the plot. If unset, use the
default x-range.
""")
y_range_name = String('default', help="""
A particular (named) y-range to use for computing screen
locations when rendering an axis on the plot. If unset, use the
default y-range.
""")
ticker = Instance(Ticker, help="""
A Ticker to use for computing locations of axis components.
""")
formatter = Instance(TickFormatter, help="""
A TickFormatter to use for formatting the visual appearance
of ticks.
""")
axis_label = String(default='', help="""
A text label for the axis, displayed parallel to the axis rule.
.. note::
LaTeX notation is not currently supported; please see
:bokeh-issue:`647` to track progress or contribute.
""")
axis_label_standoff = Int(default=5, help="""
The distance in pixels that the axis labels should be offset
from the tick labels.
""")
axis_label_props = Include(TextProps, help="""
The %s of the axis label.
""")
axis_label_text_font_size = Override(default={'value': "10pt"})
axis_label_text_font_style = Override(default="italic")
major_label_standoff = Int(default=5, help="""
The distance in pixels that the major tick labels should be
offset from the associated ticks.
""")
major_label_orientation = Either(Enum("horizontal", "vertical"), Float, help="""
What direction the major label text should be oriented. If a
number is supplied, the angle of the text is measured from horizontal.
""")
major_label_props = Include(TextProps, help="""
The %s of the major tick labels.
""")
major_label_text_align = Override(default="center")
major_label_text_baseline = Override(default="alphabetic")
major_label_text_font_size = Override(default={'value': "8pt"})
axis_props = Include(LineProps, help="""
The %s of the axis line.
""")
major_tick_props = Include(LineProps, help="""
The %s of the major ticks.
""")
major_tick_in = Int(default=2, help="""
The distance in pixels that major ticks should extend into the
main plot area.
""")
major_tick_out = Int(default=6, help="""
The distance in pixels that major ticks should extend out of the
main plot area.
""")
minor_tick_props = Include(LineProps, help="""
The %s of the minor ticks.
""")
minor_tick_in = Int(default=0, help="""
The distance in pixels that minor ticks should extend into the
main plot area.
""")
minor_tick_out = Int(default=4, help="""
    The distance in pixels that minor ticks should extend out of the
main plot area.
""")
@abstract
class ContinuousAxis(Axis):
''' A base class for all numeric, non-categorical axes types.
'''
pass
class LinearAxis(ContinuousAxis):
''' An axis that picks nice numbers for tick locations on a
linear scale. Configured with a ``BasicTickFormatter`` by default.
'''
ticker = Override(default=lambda: BasicTicker())
formatter = Override(default=lambda: BasicTickFormatter())
class LogAxis(ContinuousAxis):
''' An axis that picks nice numbers for tick locations on a
log scale. Configured with a ``LogTickFormatter`` by default.
'''
ticker = Override(default=lambda: LogTicker())
formatter = Override(default=lambda: LogTickFormatter())
class CategoricalAxis(Axis):
''' An axis that picks evenly spaced tick locations for a
collection of categories/factors.
'''
ticker = Override(default=lambda: CategoricalTicker())
formatter = Override(default=lambda: CategoricalTickFormatter())
class DatetimeAxis(LinearAxis):
    ''' A LinearAxis that picks nice numbers for tick locations on
a datetime scale. Configured with a ``DatetimeTickFormatter`` by
default.
'''
ticker = Override(default=lambda: DatetimeTicker())
formatter = Override(default=lambda: DatetimeTickFormatter())
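# Minimal usage sketch (assumes a standard Bokeh figure; not part of this module):
#
#     from bokeh.plotting import figure, show
#     from bokeh.models import LinearAxis
#
#     p = figure()
#     p.line([1, 2, 3], [4, 6, 5])
#     p.add_layout(LinearAxis(axis_label="secondary"), 'right')
#     show(p)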
|
PW-Sat2/PWSat2OBC
|
refs/heads/master
|
integration_tests/emulator/beacon_parser/experiment_telemetry_parser.py
|
1
|
from emulator.beacon_parser.units import enum
from experiment_type import ExperimentType, StartResult, IterationResult
from parser import CategoryParser
class ExperimentTelemetryParser(CategoryParser):
def __init__(self, reader, store):
CategoryParser.__init__(self, '09: Experiments', reader, store)
def get_bit_count(self):
return 4+8+8
def parse(self):
self.append('Current experiment code', 4, value_type=enum(ExperimentType))
self.append('Experiment Startup Result', 8, value_type=enum(StartResult))
self.append('Last Experiment Iteration Status', 8, value_type=enum(IterationResult))
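        # The three fields above add up to the 4 + 8 + 8 = 20 bits reported by
        # get_bit_count(), so parse() consumes this category completely.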
|
snnn/tensorflow
|
refs/heads/master
|
tensorflow/contrib/boosted_trees/python/training/functions/gbdt_batch.py
|
4
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Training functions for Gradient boosted decision trees."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
from tensorflow.contrib import learn
from tensorflow.contrib import stateless
from tensorflow.contrib.boosted_trees.lib.learner.batch import categorical_split_handler
from tensorflow.contrib.boosted_trees.lib.learner.batch import ordinal_split_handler
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.python.ops import batch_ops_utils
from tensorflow.contrib.boosted_trees.python.ops import gen_model_ops
from tensorflow.contrib.boosted_trees.python.ops import model_ops
from tensorflow.contrib.boosted_trees.python.ops import prediction_ops
from tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops
from tensorflow.contrib.boosted_trees.python.ops import training_ops
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import summary
from tensorflow.python.training import device_setter
# Key names for prediction dict.
ENSEMBLE_STAMP = "ensemble_stamp"
PREDICTIONS = "predictions"
PARTITION_IDS = "partition_ids"
NUM_LAYERS_ATTEMPTED = "num_layers"
NUM_TREES_ATTEMPTED = "num_trees"
NUM_USED_HANDLERS = "num_used_handlers"
USED_HANDLERS_MASK = "used_handlers_mask"
LEAF_INDEX = "leaf_index"
_FEATURE_NAME_TEMPLATE = "%s_%d"
# Keys in Training state.
GBDTTrainingState = collections.namedtuple("GBDTTrainingState", [
"num_layer_examples", "num_layer_steps", "num_layers", "active_tree",
"active_layer", "continue_centering", "bias_stats_accumulator",
"steps_accumulator", "handlers"
])
def _get_column_by_index(tensor, indices):
"""Returns columns from a 2-D tensor by index."""
shape = array_ops.shape(tensor)
p_flat = array_ops.reshape(tensor, [-1])
i_flat = array_ops.reshape(
array_ops.reshape(math_ops.range(0, shape[0]) * shape[1], [-1, 1]) +
indices, [-1])
return array_ops.reshape(array_ops.gather(p_flat, i_flat), [shape[0], -1])
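# Worked example (hypothetical values): for a 2x3 tensor and indices = [1],
# range(0, 2) * 3 = [0, 3] is reshaped to [[0], [3]]; adding the indices gives
# [[1], [4]], so the gather on the flattened tensor picks flat elements 1 and 4,
# i.e. column 1 of each row, and the result is reshaped back to shape [2, 1].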
def _make_predictions_dict(stamp,
logits,
partition_ids,
ensemble_stats,
used_handlers,
leaf_index=None):
"""Returns predictions for the given logits and n_classes.
Args:
stamp: The ensemble stamp.
    logits: A rank 2 `Tensor` with shape [batch_size, n_classes - 1] that
contains predictions when no dropout was applied.
partition_ids: A rank 1 `Tensor` with shape [batch_size].
ensemble_stats: A TreeEnsembleStatsOp result tuple.
used_handlers: A TreeEnsembleUsedHandlerOp result tuple of an int and a
boolean mask.
    leaf_index: A rank 2 `Tensor` with shape [batch_size, number of trees] that
contains leaf id for each example prediction.
Returns:
A dict of predictions.
"""
result = {}
result[ENSEMBLE_STAMP] = stamp
result[PREDICTIONS] = logits
result[PARTITION_IDS] = partition_ids
result[NUM_LAYERS_ATTEMPTED] = ensemble_stats.attempted_layers
result[NUM_TREES_ATTEMPTED] = ensemble_stats.attempted_trees
result[NUM_USED_HANDLERS] = used_handlers.num_used_handlers
result[USED_HANDLERS_MASK] = used_handlers.used_handlers_mask
if leaf_index is not None:
result[LEAF_INDEX] = leaf_index
return result
class _OpRoundRobinStrategy(object):
"""Returns the next ps task index for placement via per-Op round-robin order.
This strategy works slightly better for the GBDT graph because of using
custom resources which vary significantly in compute cost.
"""
def __init__(self, ps_ops, num_tasks):
"""Create a new `_RoundRobinStrategy`.
Args:
ps_ops: List of Op types to place on PS.
num_tasks: Number of ps tasks to cycle among.
"""
next_task = 0
self._next_task_per_op = {}
for op in ps_ops:
self._next_task_per_op[op] = next_task
next_task = (next_task + 1) % num_tasks if num_tasks else 0
self._num_tasks = num_tasks
def __call__(self, op):
"""Choose a ps task index for the given `Operation`.
Args:
op: An `Operation` to be placed on ps.
Returns:
      The next ps task index to use for the `Operation`, in the range
      `[0, num_tasks)`.
Raises:
ValueError: If attempting to place non-PS Op.
"""
if op.type not in self._next_task_per_op:
raise ValueError("Unknown op type '%s' for placement:" % op.type)
task = self._next_task_per_op[op.type]
self._next_task_per_op[op.type] = ((task + 1) % self._num_tasks
if self._num_tasks else 0)
return task
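# Illustrative behaviour (hypothetical op types): with ps_ops = ['VarOp', 'AccOp'] and
# num_tasks = 2, 'VarOp' placements start on task 0 and 'AccOp' placements on task 1,
# and each op type then cycles through the tasks independently.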
def extract_features(features, feature_columns, use_core_columns):
"""Extracts columns from a dictionary of features.
Args:
features: `dict` of `Tensor` objects.
    feature_columns: A list of feature_columns.
    use_core_columns: A boolean specifying whether core feature columns are
      used.
  Returns:
    Eight values:
- A list of all feature column names.
- A list of dense floats.
- A list of sparse float feature indices.
- A list of sparse float feature values.
- A list of sparse float feature shapes.
- A list of sparse int feature indices.
- A list of sparse int feature values.
- A list of sparse int feature shapes.
Raises:
ValueError: if features is not valid.
"""
if not features:
raise ValueError("Features dictionary must be specified.")
# Make a shallow copy of features to ensure downstream usage
# is unaffected by modifications in the model function.
features = copy.copy(features)
if feature_columns:
scope = "gbdt"
with variable_scope.variable_scope(scope):
feature_columns = list(feature_columns)
transformed_features = collections.OrderedDict()
for fc in feature_columns:
# pylint: disable=protected-access
if use_core_columns:
# pylint: disable=protected-access
tensor = fc_core._transform_features(features, [fc])[fc]
transformed_features[fc.name] = tensor
elif isinstance(fc, feature_column_lib._EmbeddingColumn):
# pylint: enable=protected-access
transformed_features[fc.name] = fc_core.input_layer(
features, [fc], weight_collections=[scope])
else:
result = feature_column_ops.transform_features(features, [fc])
if len(result) > 1:
raise ValueError("Unexpected number of output features")
transformed_features[fc.name] = result[list(result.keys())[0]]
features = transformed_features
dense_float_names = []
dense_floats = []
sparse_float_names = []
sparse_float_indices = []
sparse_float_values = []
sparse_float_shapes = []
sparse_int_names = []
sparse_int_indices = []
sparse_int_values = []
sparse_int_shapes = []
for key in sorted(features.keys()):
tensor = features[key]
# TODO(nponomareva): consider iterating over feature columns instead.
if isinstance(tensor, tuple):
# Weighted categorical feature.
categorical_tensor = tensor[0]
weight_tensor = tensor[1]
shape = categorical_tensor.dense_shape
indices = array_ops.concat([
array_ops.slice(categorical_tensor.indices, [0, 0], [-1, 1]),
array_ops.expand_dims(
math_ops.to_int64(categorical_tensor.values), -1)
], 1)
tensor = sparse_tensor.SparseTensor(
indices=indices, values=weight_tensor.values, dense_shape=shape)
if isinstance(tensor, sparse_tensor.SparseTensor):
if tensor.values.dtype == dtypes.float32:
sparse_float_names.append(key)
sparse_float_indices.append(tensor.indices)
sparse_float_values.append(tensor.values)
sparse_float_shapes.append(tensor.dense_shape)
elif tensor.values.dtype == dtypes.int64:
sparse_int_names.append(key)
sparse_int_indices.append(tensor.indices)
sparse_int_values.append(tensor.values)
sparse_int_shapes.append(tensor.dense_shape)
else:
raise ValueError("Unsupported sparse feature %s with dtype %s." %
(tensor.indices.name, tensor.dtype))
else:
if tensor.dtype == dtypes.float32:
if len(tensor.shape) > 1 and tensor.shape[1] > 1:
unstacked = array_ops.unstack(tensor, axis=1)
for i in range(len(unstacked)):
dense_float_names.append(_FEATURE_NAME_TEMPLATE % (key, i))
dense_floats.append(array_ops.reshape(unstacked[i], [-1, 1]))
else:
dense_float_names.append(key)
dense_floats.append(tensor)
else:
raise ValueError("Unsupported dense feature %s with dtype %s." %
(tensor.name, tensor.dtype))
# Feature columns are logically organized into incrementing slots starting
# from dense floats, then sparse floats then sparse ints.
fc_names = (dense_float_names + sparse_float_names + sparse_int_names)
return (fc_names, dense_floats, sparse_float_indices, sparse_float_values,
sparse_float_shapes, sparse_int_indices, sparse_int_values,
sparse_int_shapes)
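# For illustration (hypothetical feature dict): a dense float32 feature "age" of shape
# [batch, 1] contributes a single column named "age", while a dense float32 feature
# "pixels" of shape [batch, 2] is unstacked into columns "pixels_0" and "pixels_1" via
# _FEATURE_NAME_TEMPLATE; sparse float and sparse int features keep their original keys.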
def _dropout_params(mode, ensemble_stats):
"""Returns parameters relevant for dropout.
Args:
mode: Train/Eval/Infer
ensemble_stats: A TreeEnsembleStatsOp result tuple.
Returns:
Whether to apply dropout and a dropout seed.
"""
if mode == learn.ModeKeys.TRAIN:
# Do dropout only during training.
apply_dropout = True
seed = ensemble_stats.attempted_trees
else:
seed = -1
apply_dropout = False
return apply_dropout, seed
class GradientBoostedDecisionTreeModel(object):
"""A GBDT model function."""
def __init__(self,
is_chief,
num_ps_replicas,
ensemble_handle,
center_bias,
examples_per_layer,
learner_config,
features,
logits_dimension,
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS,
feature_columns=None,
use_core_columns=False,
output_leaf_index=False,
output_leaf_index_modes=None,
num_quantiles=100):
"""Construct a new GradientBoostedDecisionTreeModel function.
Args:
is_chief: Whether to build the chief graph.
num_ps_replicas: Number of parameter server replicas, can be 0.
ensemble_handle: A handle to the ensemble variable.
center_bias: Whether to center the bias before growing trees.
examples_per_layer: Number of examples to accumulate before growing a tree
layer. It can also be a function that computes the number of examples
based on the depth of the layer that's being built.
learner_config: A learner config.
features: `dict` of `Tensor` objects.
logits_dimension: An int, the dimension of logits.
loss_reduction: Either `SUM_OVER_NONZERO_WEIGHTS` (mean) or `SUM`.
feature_columns: A list of feature columns.
use_core_columns: A boolean specifying whether core feature columns are
used.
output_leaf_index: A boolean variable indicating whether to output leaf
index into predictions dictionary.
output_leaf_index_modes: A list of modes from (TRAIN, EVAL, INFER) which
dictates when leaf indices will be outputted. By default, leaf indices
are only outputted in INFER mode.
num_quantiles: Number of quantiles to build for numeric feature values.
Raises:
ValueError: if inputs are not valid.
"""
if ensemble_handle is None:
raise ValueError("ensemble_handle must be specified.")
if learner_config is None:
raise ValueError("learner_config must be specified.")
if learner_config.num_classes < 2:
raise ValueError("Number of classes must be >=2")
self._logits_dimension = logits_dimension
self._is_chief = is_chief
self._num_ps_replicas = num_ps_replicas
self._ensemble_handle = ensemble_handle
self._center_bias = center_bias
self._examples_per_layer = examples_per_layer
# Check loss reduction value.
if (loss_reduction != losses.Reduction.SUM and
loss_reduction != losses.Reduction.SUM_OVER_NONZERO_WEIGHTS):
raise ValueError(
"Invalid loss reduction is provided: %s." % loss_reduction)
self._loss_reduction = loss_reduction
# Fill in the defaults.
if (learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.MULTI_CLASS_STRATEGY_UNSPECIFIED):
if logits_dimension == 1:
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.TREE_PER_CLASS)
else:
learner_config.multi_class_strategy = (
learner_pb2.LearnerConfig.DIAGONAL_HESSIAN)
if logits_dimension == 1 or learner_config.multi_class_strategy == (
learner_pb2.LearnerConfig.TREE_PER_CLASS):
self._gradient_shape = tensor_shape.scalar()
self._hessian_shape = tensor_shape.scalar()
else:
if center_bias:
raise ValueError("Center bias should be False for multiclass.")
self._gradient_shape = tensor_shape.TensorShape([logits_dimension])
if (learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.FULL_HESSIAN):
self._hessian_shape = tensor_shape.TensorShape(
([logits_dimension, logits_dimension]))
else:
# Diagonal hessian strategy.
self._hessian_shape = tensor_shape.TensorShape(([logits_dimension]))
if (learner_config.growing_mode ==
learner_pb2.LearnerConfig.GROWING_MODE_UNSPECIFIED):
learner_config.growing_mode = learner_pb2.LearnerConfig.LAYER_BY_LAYER
if (learner_config.pruning_mode ==
learner_pb2.LearnerConfig.PRUNING_MODE_UNSPECIFIED):
learner_config.pruning_mode = learner_pb2.LearnerConfig.POST_PRUNE
if learner_config.constraints.max_tree_depth == 0:
# Use 6 as the default maximum depth.
learner_config.constraints.max_tree_depth = 6
tuner = learner_config.learning_rate_tuner.WhichOneof("tuner")
if not tuner:
learner_config.learning_rate_tuner.fixed.learning_rate = 0.1
self._learner_config = learner_config
self._feature_columns = feature_columns
self._learner_config_serialized = learner_config.SerializeToString()
self._num_quantiles = num_quantiles
self._max_tree_depth = variables.VariableV1(
initial_value=self._learner_config.constraints.max_tree_depth)
self._attempted_trees = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
trainable=False,
name="attempted_trees")
self._finalized_trees = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
trainable=False,
name="finalized_trees")
if not features:
raise ValueError("Features dictionary must be specified.")
(fc_names, dense_floats, sparse_float_indices, sparse_float_values,
sparse_float_shapes, sparse_int_indices,
sparse_int_values, sparse_int_shapes) = extract_features(
features, self._feature_columns, use_core_columns)
logging.info("Active Feature Columns: " + str(fc_names))
logging.info("Learner config: " + str(learner_config))
self._fc_names = fc_names
self._dense_floats = dense_floats
self._sparse_float_indices = sparse_float_indices
self._sparse_float_values = sparse_float_values
self._sparse_float_shapes = sparse_float_shapes
self._sparse_int_indices = sparse_int_indices
self._sparse_int_values = sparse_int_values
self._sparse_int_shapes = sparse_int_shapes
self._reduce_dim = (
self._learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.TREE_PER_CLASS and
learner_config.num_classes == 2)
if output_leaf_index_modes is None:
output_leaf_index_modes = [learn.ModeKeys.INFER]
elif not all(
mode in (learn.ModeKeys.TRAIN, learn.ModeKeys.EVAL,
learn.ModeKeys.INFER) for mode in output_leaf_index_modes):
raise ValueError("output_leaf_index_modes should only contain ModeKeys.")
self._output_leaf_index = output_leaf_index
self._output_leaf_index_modes = output_leaf_index_modes
def _predict_and_return_dict(self, ensemble_handle, ensemble_stamp, mode):
"""Runs prediction and returns a dictionary of the prediction results.
Args:
ensemble_handle: ensemble resource handle.
ensemble_stamp: stamp of ensemble resource.
mode: learn.ModeKeys.TRAIN or EVAL or INFER.
Returns:
a dictionary of prediction results -
ENSEMBLE_STAMP, PREDICTION, PARTITION_IDS,
NUM_LAYER_ATTEMPTED, NUM_TREES_ATTEMPTED.
"""
ensemble_stats = training_ops.tree_ensemble_stats(ensemble_handle,
ensemble_stamp)
num_handlers = (
len(self._dense_floats) + len(self._sparse_float_shapes) + len(
self._sparse_int_shapes))
# Used during feature selection.
used_handlers = model_ops.tree_ensemble_used_handlers(
ensemble_handle, ensemble_stamp, num_all_handlers=num_handlers)
# We don't need dropout info - we can always restore it based on the
# seed.
apply_dropout, seed = _dropout_params(mode, ensemble_stats)
# Make sure ensemble stats run. This will check that the ensemble has
# the right stamp.
with ops.control_dependencies(ensemble_stats):
leaf_index = None
if self._output_leaf_index and mode in self._output_leaf_index_modes:
predictions, _, leaf_index = (
prediction_ops).gradient_trees_prediction_verbose(
ensemble_handle,
seed,
self._dense_floats,
self._sparse_float_indices,
self._sparse_float_values,
self._sparse_float_shapes,
self._sparse_int_indices,
self._sparse_int_values,
self._sparse_int_shapes,
learner_config=self._learner_config_serialized,
apply_dropout=apply_dropout,
apply_averaging=mode != learn.ModeKeys.TRAIN,
use_locking=True,
center_bias=self._center_bias,
reduce_dim=self._reduce_dim)
else:
leaf_index = None
predictions, _ = prediction_ops.gradient_trees_prediction(
ensemble_handle,
seed,
self._dense_floats,
self._sparse_float_indices,
self._sparse_float_values,
self._sparse_float_shapes,
self._sparse_int_indices,
self._sparse_int_values,
self._sparse_int_shapes,
learner_config=self._learner_config_serialized,
apply_dropout=apply_dropout,
apply_averaging=mode != learn.ModeKeys.TRAIN,
use_locking=True,
center_bias=self._center_bias,
reduce_dim=self._reduce_dim)
partition_ids = prediction_ops.gradient_trees_partition_examples(
ensemble_handle,
self._dense_floats,
self._sparse_float_indices,
self._sparse_float_values,
self._sparse_float_shapes,
self._sparse_int_indices,
self._sparse_int_values,
self._sparse_int_shapes,
use_locking=True)
return _make_predictions_dict(ensemble_stamp, predictions, partition_ids,
ensemble_stats, used_handlers, leaf_index)
def predict(self, mode):
"""Returns predictions given the features and mode.
Args:
mode: Mode the graph is running in (train|predict|eval).
Returns:
A dict of predictions tensors.
Raises:
ValueError: if features is not valid.
"""
# Use the current ensemble to predict on the current batch of input.
# For faster prediction we check if the inputs are on the same device
# as the model. If not, we create a copy of the model on the worker.
input_deps = (
self._dense_floats + self._sparse_float_indices +
self._sparse_int_indices)
if not input_deps:
raise ValueError("No input tensors for prediction.")
# Get most current model stamp.
ensemble_stamp = model_ops.tree_ensemble_stamp_token(self._ensemble_handle)
# Determine if ensemble is colocated with the inputs.
if self._ensemble_handle.device != input_deps[0].device:
# Create a local ensemble and get its local stamp.
with ops.name_scope("local_ensemble", "TreeEnsembleVariable") as name:
local_ensemble_handle = (
gen_model_ops.decision_tree_ensemble_resource_handle_op(name=name))
create_op = gen_model_ops.create_tree_ensemble_variable(
local_ensemble_handle, stamp_token=-1, tree_ensemble_config="")
with ops.control_dependencies([create_op]):
local_stamp = model_ops.tree_ensemble_stamp_token(
local_ensemble_handle)
# Determine whether the local ensemble is stale and update it if needed.
def _refresh_local_ensemble_fn():
# Serialize the model from parameter server after reading the inputs.
with ops.control_dependencies([input_deps[0]]):
(ensemble_stamp, serialized_model) = (
model_ops.tree_ensemble_serialize(self._ensemble_handle))
# Update local ensemble with the serialized model from parameter server.
with ops.control_dependencies([create_op]):
return model_ops.tree_ensemble_deserialize(
local_ensemble_handle,
stamp_token=ensemble_stamp,
tree_ensemble_config=serialized_model), ensemble_stamp
refresh_local_ensemble, ensemble_stamp = control_flow_ops.cond(
math_ops.not_equal(ensemble_stamp,
local_stamp), _refresh_local_ensemble_fn,
lambda: (control_flow_ops.no_op(), ensemble_stamp))
# Once updated, use the local model for prediction.
with ops.control_dependencies([refresh_local_ensemble]):
return self._predict_and_return_dict(local_ensemble_handle,
ensemble_stamp, mode)
else:
# Use ensemble_handle directly, if colocated.
with ops.device(self._ensemble_handle.device):
return self._predict_and_return_dict(self._ensemble_handle,
ensemble_stamp, mode)
def _get_class_id(self, predictions_dict):
# Handle different multiclass strategies.
if (self._learner_config.multi_class_strategy ==
learner_pb2.LearnerConfig.TREE_PER_CLASS and
self._logits_dimension != 1):
# Choose the class for which the tree is built (one vs rest).
return math_ops.to_int32(
predictions_dict[NUM_TREES_ATTEMPTED] % self._logits_dimension)
return constant_op.constant(-1, dtype=dtypes.int32)
def update_stats(self, loss, predictions_dict):
"""Update the accumulators with stats from this batch.
Args:
loss: A scalar tensor representing average loss of examples.
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
Returns:
Three values:
- An op that adds a new tree to the ensemble, and
- An op that increments the stamp but removes all the trees and resets
the handlers. This can be used to reset the state of the ensemble.
- A dict containing the training state.
Raises:
ValueError: if inputs are not valid.
"""
# Get the worker device from input dependencies.
input_deps = (
self._dense_floats + self._sparse_float_indices +
self._sparse_int_indices)
worker_device = input_deps[0].device
# Get tensors relevant for training and form the loss.
predictions = predictions_dict[PREDICTIONS]
partition_ids = predictions_dict[PARTITION_IDS]
ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]
gradients = gradients_impl.gradients(
loss,
predictions,
name="Gradients",
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)[0]
strategy = self._learner_config.multi_class_strategy
class_id = self._get_class_id(predictions_dict)
# Handle different multiclass strategies.
if strategy == learner_pb2.LearnerConfig.TREE_PER_CLASS:
# We build one vs rest trees.
if self._logits_dimension == 1:
# We have only 1 score, gradients is of shape [batch, 1].
hessians = gradients_impl.gradients(
gradients,
predictions,
name="Hessian",
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)[0]
squeezed_gradients = array_ops.squeeze(gradients, axis=[1])
squeezed_hessians = array_ops.squeeze(hessians, axis=[1])
else:
hessian_list = self._diagonal_hessian(gradients, predictions)
# Assemble hessian list into a tensor.
hessians = array_ops.stack(hessian_list, axis=1)
# Use class id tensor to get the column with that index from gradients
# and hessians.
squeezed_gradients = array_ops.squeeze(
_get_column_by_index(gradients, class_id))
squeezed_hessians = array_ops.squeeze(
_get_column_by_index(hessians, class_id))
else:
# Other multiclass strategies.
if strategy == learner_pb2.LearnerConfig.FULL_HESSIAN:
hessian_list = self._full_hessian(gradients, predictions)
else:
# Diagonal hessian strategy.
hessian_list = self._diagonal_hessian(gradients, predictions)
squeezed_gradients = gradients
hessians = array_ops.stack(hessian_list, axis=1)
squeezed_hessians = hessians
    # Get the weights for each example for quantiles calculation.
weights = self._get_weights(self._hessian_shape, squeezed_hessians)
# Create all handlers ensuring resources are evenly allocated across PS.
fc_name_idx = 0
handlers = []
init_stamp_token = constant_op.constant(0, dtype=dtypes.int64)
l1_regularization = constant_op.constant(
self._learner_config.regularization.l1, dtypes.float32)
l2_regularization = constant_op.constant(
self._learner_config.regularization.l2, dtypes.float32)
tree_complexity_regularization = constant_op.constant(
self._learner_config.regularization.tree_complexity, dtypes.float32)
min_node_weight = constant_op.constant(
self._learner_config.constraints.min_node_weight, dtypes.float32)
loss_uses_sum_reduction = self._loss_reduction == losses.Reduction.SUM
loss_uses_sum_reduction = constant_op.constant(loss_uses_sum_reduction)
weak_learner_type = constant_op.constant(
self._learner_config.weak_learner_type)
num_quantiles = self._num_quantiles
epsilon = 1.0 / num_quantiles
strategy_tensor = constant_op.constant(strategy)
with ops.device(self._get_replica_device_setter(worker_device)):
# Create handlers for dense float columns
for dense_float_column_idx in range(len(self._dense_floats)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(
ordinal_split_handler.DenseSplitHandler(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=constant_op.constant(
dense_float_column_idx),
epsilon=epsilon,
num_quantiles=num_quantiles,
dense_float_column=self._dense_floats[dense_float_column_idx],
name=fc_name,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction,
weak_learner_type=weak_learner_type,
))
fc_name_idx += 1
# Create handlers for sparse float columns.
for sparse_float_column_idx in range(len(self._sparse_float_indices)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(
ordinal_split_handler.SparseSplitHandler(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=constant_op.constant(
sparse_float_column_idx),
epsilon=epsilon,
num_quantiles=num_quantiles,
sparse_float_column=sparse_tensor.SparseTensor(
self._sparse_float_indices[sparse_float_column_idx],
self._sparse_float_values[sparse_float_column_idx],
self._sparse_float_shapes[sparse_float_column_idx]),
name=fc_name,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction))
fc_name_idx += 1
# Create handlers for sparse int columns.
for sparse_int_column_idx in range(len(self._sparse_int_indices)):
fc_name = self._fc_names[fc_name_idx]
handlers.append(
categorical_split_handler.EqualitySplitHandler(
l1_regularization=l1_regularization,
l2_regularization=l2_regularization,
tree_complexity_regularization=tree_complexity_regularization,
min_node_weight=min_node_weight,
feature_column_group_id=constant_op.constant(
sparse_int_column_idx),
sparse_int_column=sparse_tensor.SparseTensor(
self._sparse_int_indices[sparse_int_column_idx],
self._sparse_int_values[sparse_int_column_idx],
self._sparse_int_shapes[sparse_int_column_idx]),
name=fc_name,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
multiclass_strategy=strategy_tensor,
init_stamp_token=init_stamp_token,
loss_uses_sum_reduction=loss_uses_sum_reduction,
weak_learner_type=weak_learner_type))
fc_name_idx += 1
# Create ensemble stats variables.
num_layer_examples = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="num_layer_examples",
trainable=False)
num_layer_steps = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="num_layer_steps",
trainable=False)
num_layers = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="num_layers",
trainable=False)
active_tree = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="active_tree",
trainable=False)
active_layer = variables.VariableV1(
initial_value=array_ops.zeros([], dtypes.int64),
name="active_layer",
trainable=False)
# Variable that becomes false once bias centering is done.
continue_centering = variables.VariableV1(
initial_value=self._center_bias,
name="continue_centering",
trainable=False)
# Create bias stats accumulator.
bias_stats_accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=self._gradient_shape,
hessian_shape=self._hessian_shape,
name="BiasAccumulator")
# Create steps accumulator.
steps_accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar(),
name="StepsAccumulator")
# Create ensemble stats summaries.
summary.scalar("layer_stats/num_examples", num_layer_examples)
summary.scalar("layer_stats/num_steps", num_layer_steps)
summary.scalar("ensemble_stats/active_tree", active_tree)
summary.scalar("ensemble_stats/active_layer", active_layer)
# Update bias stats.
stats_update_ops = []
stats_update_ops.append(
control_flow_ops.cond(
continue_centering,
self._make_update_bias_stats_fn(
ensemble_stamp, predictions, gradients,
bias_stats_accumulator), control_flow_ops.no_op))
# Update handler stats.
handler_reads = collections.OrderedDict()
for handler in handlers:
handler_reads[handler] = handler.scheduled_reads()
handler_results = batch_ops_utils.run_handler_scheduled_ops(
handler_reads, ensemble_stamp, worker_device)
per_handler_updates = collections.OrderedDict()
      # Two values per handler: the first indicates whether the handler is
      # active for the current layer, the second whether it will be active
      # for the next layer.
subsampling_type = self._learner_config.WhichOneof("feature_fraction")
if subsampling_type == "feature_fraction_per_level":
seed = predictions_dict[NUM_LAYERS_ATTEMPTED]
active_handlers_current_layer = stateless.stateless_random_uniform(
shape=[len(handlers)], seed=[seed, 1])
active_handlers_next_layer = stateless.stateless_random_uniform(
shape=[len(handlers)], seed=[seed + 1, 1])
active_handlers = array_ops.stack(
[active_handlers_current_layer, active_handlers_next_layer], axis=1)
active_handlers = (
active_handlers < self._learner_config.feature_fraction_per_level)
elif subsampling_type == "feature_fraction_per_tree":
seed = predictions_dict[NUM_TREES_ATTEMPTED]
active_handlers_current_layer = stateless.stateless_random_uniform(
shape=[len(handlers)], seed=[seed, 2])
active_handlers_current_layer = (
active_handlers_current_layer <
self._learner_config.feature_fraction_per_tree)
active_handlers = array_ops.stack(
[
active_handlers_current_layer,
array_ops.ones([len(handlers)], dtype=dtypes.bool)
],
axis=1)
else:
active_handlers = array_ops.ones([len(handlers), 2], dtype=dtypes.bool)
if self._learner_config.constraints.max_number_of_unique_feature_columns:
target = (
self._learner_config.constraints.max_number_of_unique_feature_columns)
def _feature_selection_active_handlers():
# The active list for current and the next iteration.
used_handlers = array_ops.reshape(predictions_dict[USED_HANDLERS_MASK],
[-1, 1])
used_handlers = array_ops.concat([used_handlers, used_handlers], axis=1)
return math_ops.logical_and(used_handlers, active_handlers)
active_handlers = (
control_flow_ops.cond(predictions_dict[NUM_USED_HANDLERS] >= target,
_feature_selection_active_handlers,
lambda: active_handlers))
# Prepare empty gradients and hessians when handlers are not ready.
empty_hess_shape = [1] + self._hessian_shape.as_list()
empty_grad_shape = [1] + self._gradient_shape.as_list()
empty_gradients = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_grad_shape)
empty_hessians = constant_op.constant(
[], dtype=dtypes.float32, shape=empty_hess_shape)
active_handlers = array_ops.unstack(active_handlers, axis=0)
for handler_idx in range(len(handlers)):
handler = handlers[handler_idx]
is_active = active_handlers[handler_idx]
updates, scheduled_updates = handler.update_stats(
ensemble_stamp, partition_ids, squeezed_gradients, squeezed_hessians,
empty_gradients, empty_hessians, weights, is_active,
handler_results[handler])
stats_update_ops.append(updates)
per_handler_updates[handler] = scheduled_updates
update_results = batch_ops_utils.run_handler_scheduled_ops(
per_handler_updates, ensemble_stamp, worker_device)
for update in update_results.values():
stats_update_ops += update
training_state = GBDTTrainingState(
num_layer_examples=num_layer_examples,
num_layer_steps=num_layer_steps,
num_layers=num_layers,
active_tree=active_tree,
active_layer=active_layer,
continue_centering=continue_centering,
bias_stats_accumulator=bias_stats_accumulator,
steps_accumulator=steps_accumulator,
handlers=handlers)
reset_op = control_flow_ops.no_op()
if self._is_chief:
# Advance the ensemble stamp to throw away staggered workers.
stamp_token, _ = model_ops.tree_ensemble_serialize(self._ensemble_handle)
next_stamp_token = stamp_token + 1
reset_ops = []
for handler in handlers:
reset_ops.append(handler.reset(stamp_token, next_stamp_token))
if self._center_bias:
reset_ops.append(
bias_stats_accumulator.flush(stamp_token, next_stamp_token))
reset_ops.append(steps_accumulator.flush(stamp_token, next_stamp_token))
reset_ops.append(self._finalized_trees.assign(0).op)
reset_ops.append(self._attempted_trees.assign(0).op)
reset_ops.append(
model_ops.tree_ensemble_deserialize(
self._ensemble_handle,
stamp_token=next_stamp_token,
tree_ensemble_config="",
name="reset_gbdt"))
reset_op = control_flow_ops.group([reset_ops])
return stats_update_ops, reset_op, training_state
def increment_step_counter_and_maybe_update_ensemble(self, predictions_dict,
training_state):
"""Increments number of visited examples and grows the ensemble.
If the number of visited examples reaches the target examples_per_layer,
    the ensemble is updated.
Args:
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
training_state: `dict` returned by update_stats.
Returns:
      An op that updates the counters and potentially grows the ensemble.
"""
batch_size = math_ops.cast(
array_ops.shape(predictions_dict[PREDICTIONS])[0], dtypes.float32)
ensemble_stamp = predictions_dict[ENSEMBLE_STAMP]
# Accumulate a step after updating stats.
steps_accumulator = training_state.steps_accumulator
num_layer_examples = training_state.num_layer_examples
num_layer_steps = training_state.num_layer_steps
active_layer = training_state.active_layer
add_step_op = steps_accumulator.add(
ensemble_stamp, [0], [[0, 0]], [batch_size], [1.0])
# After adding the step, decide if further processing is needed.
ensemble_update_ops = [add_step_op]
class_id = self._get_class_id(predictions_dict)
with ops.control_dependencies([add_step_op]):
if self._is_chief:
dropout_seed = predictions_dict[NUM_TREES_ATTEMPTED]
# Get accumulated steps and examples for the current layer.
_, _, _, _, acc_examples, acc_steps = (
steps_accumulator.serialize())
acc_examples = math_ops.cast(acc_examples[0], dtypes.int64)
acc_steps = math_ops.cast(acc_steps[0], dtypes.int64)
ensemble_update_ops.append(
num_layer_examples.assign(acc_examples))
ensemble_update_ops.append(num_layer_steps.assign(acc_steps))
# Determine whether we need to update tree ensemble.
examples_per_layer = self._examples_per_layer
if callable(examples_per_layer):
examples_per_layer = examples_per_layer(active_layer)
ensemble_update_ops.append(
control_flow_ops.cond(
acc_examples >= examples_per_layer,
self.make_update_ensemble_fn(ensemble_stamp, training_state,
dropout_seed, class_id),
control_flow_ops.no_op))
    # Note: the loss is calculated from predictions that include dropout, so
    # the value may fluctuate over steps when the dropout ratio is high;
    # eval_loss is a better indicator of convergence.
return control_flow_ops.group(*ensemble_update_ops)
def make_update_ensemble_fn(self, ensemble_stamp, training_state,
dropout_seed, class_id):
"""A method to create the function which updates the tree ensemble."""
# Determine learning rate.
learning_rate_tuner = self._learner_config.learning_rate_tuner.WhichOneof(
"tuner")
if learning_rate_tuner == "fixed" or learning_rate_tuner == "dropout":
tuner = getattr(self._learner_config.learning_rate_tuner,
learning_rate_tuner)
learning_rate = tuner.learning_rate
else:
# TODO(nponomareva, soroush) do the line search.
raise ValueError("Line search learning rate is not yet supported.")
def _update_ensemble():
"""A method to update the tree ensemble."""
# Get next stamp token.
next_ensemble_stamp = ensemble_stamp + 1
# Finalize bias stats.
_, _, _, bias_grads, bias_hess = (
training_state.bias_stats_accumulator.flush(ensemble_stamp,
next_ensemble_stamp))
# Finalize handler splits.
are_splits_ready_list = []
partition_ids_list = []
gains_list = []
split_info_list = []
for handler in training_state.handlers:
(are_splits_ready,
partition_ids, gains, split_info) = handler.make_splits(
ensemble_stamp, next_ensemble_stamp, class_id)
are_splits_ready_list.append(are_splits_ready)
partition_ids_list.append(partition_ids)
gains_list.append(gains)
split_info_list.append(split_info)
# Stack all the inputs to one tensor per type.
# This is a workaround for the slowness of graph building in tf.cond.
# See (b/36554864).
split_sizes = array_ops.reshape(
array_ops.shape_n(partition_ids_list), [len(partition_ids_list)])
partition_ids = array_ops.concat(partition_ids_list, axis=0)
gains = array_ops.concat(gains_list, axis=0)
split_infos = array_ops.concat(split_info_list, axis=0)
# Determine if all splits are ready.
are_all_splits_ready = math_ops.reduce_all(
array_ops.stack(
are_splits_ready_list, axis=0, name="stack_handler_readiness"))
# Define bias centering update operation.
def _center_bias_fn():
# Center tree ensemble bias.
delta_updates = array_ops.where(bias_hess > 0, -bias_grads / bias_hess,
array_ops.zeros_like(bias_grads))
center_bias = training_ops.center_tree_ensemble_bias(
tree_ensemble_handle=self._ensemble_handle,
stamp_token=ensemble_stamp,
next_stamp_token=next_ensemble_stamp,
delta_updates=delta_updates,
learner_config=self._learner_config_serialized)
return training_state.continue_centering.assign(center_bias)
# Define ensemble growing operations.
def _grow_ensemble_ready_fn():
# Grow the ensemble given the current candidates.
sizes = array_ops.unstack(split_sizes)
partition_ids_list = list(array_ops.split(partition_ids, sizes, axis=0))
# When using the oblivious decision tree as weak learner, it produces
# one gain and one split per handler and not number of partitions.
if self._learner_config.weak_learner_type == (
learner_pb2.LearnerConfig.OBLIVIOUS_DECISION_TREE):
sizes = len(training_state.handlers)
gains_list = list(array_ops.split(gains, sizes, axis=0))
split_info_list = list(array_ops.split(split_infos, sizes, axis=0))
return training_ops.grow_tree_ensemble(
tree_ensemble_handle=self._ensemble_handle,
stamp_token=ensemble_stamp,
next_stamp_token=next_ensemble_stamp,
learning_rate=learning_rate,
partition_ids=partition_ids_list,
gains=gains_list,
splits=split_info_list,
learner_config=self._learner_config_serialized,
dropout_seed=dropout_seed,
center_bias=self._center_bias,
max_tree_depth=self._max_tree_depth,
weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_not_ready_fn():
# Don't grow the ensemble, just update the stamp.
return training_ops.grow_tree_ensemble(
tree_ensemble_handle=self._ensemble_handle,
stamp_token=ensemble_stamp,
next_stamp_token=next_ensemble_stamp,
learning_rate=0,
partition_ids=[],
gains=[],
splits=[],
learner_config=self._learner_config_serialized,
dropout_seed=dropout_seed,
center_bias=self._center_bias,
max_tree_depth=self._max_tree_depth,
weak_learner_type=self._learner_config.weak_learner_type)
def _grow_ensemble_fn():
# Conditionally grow an ensemble depending on whether the splits
# from all the handlers are ready.
return control_flow_ops.cond(are_all_splits_ready,
_grow_ensemble_ready_fn,
_grow_ensemble_not_ready_fn)
# Update ensemble.
update_ops = [are_all_splits_ready]
if self._center_bias:
update_model = control_flow_ops.cond(training_state.continue_centering,
_center_bias_fn, _grow_ensemble_fn)
else:
update_model = _grow_ensemble_fn()
update_ops.append(update_model)
# Update ensemble stats.
with ops.control_dependencies([update_model]):
stats = training_ops.tree_ensemble_stats(
self._ensemble_handle, stamp_token=next_ensemble_stamp)
update_ops.append(self._finalized_trees.assign(stats.num_trees))
update_ops.append(self._attempted_trees.assign(stats.attempted_trees))
update_ops.append(training_state.num_layers.assign(stats.num_layers))
update_ops.append(training_state.active_tree.assign(stats.active_tree))
update_ops.append(
training_state.active_layer.assign(stats.active_layer))
# Flush step stats.
update_ops.extend(
training_state.steps_accumulator.flush(ensemble_stamp,
next_ensemble_stamp))
return control_flow_ops.group(*update_ops, name="update_ensemble")
return _update_ensemble
def get_number_of_trees_tensor(self):
return self._finalized_trees, self._attempted_trees
def get_max_tree_depth(self):
return self._max_tree_depth
def train(self, loss, predictions_dict, labels):
"""Updates the accumalator stats and grows the ensemble.
Args:
loss: A scalar tensor representing average loss of examples.
predictions_dict: Dictionary of Rank 2 `Tensor` representing information
about predictions per example.
labels: Rank 2 `Tensor` representing labels per example. Has no effect
on the training and is only kept for backward compatibility.
Returns:
An op that adds a new tree to the ensemble.
Raises:
ValueError: if inputs are not valid.
"""
del labels # unused; kept for backward compatibility.
update_op, _, training_state = self.update_stats(loss, predictions_dict)
with ops.control_dependencies(update_op):
return self.increment_step_counter_and_maybe_update_ensemble(
predictions_dict, training_state)
def _get_weights(self, hessian_shape, hessians):
"""Derives weights to be used based on hessians and multiclass strategy."""
if hessian_shape == tensor_shape.scalar():
# This is tree per class.
weights = hessians
elif len(hessian_shape.dims) == 1:
# This is diagonal hessian.
weights = math_ops.reduce_sum(hessians, axis=1)
else:
# This is full hessian.
weights = math_ops.trace(hessians)
return weights
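  # Added note: e.g. with the diagonal-hessian strategy the hessians tensor is
  # [batch_size, num_classes], so the per-example weight computed above is the
  # row sum, i.e. the trace of each example's (diagonal) hessian.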
def _full_hessian(self, grads, predictions):
"""Prepares hessians for full-hessian multiclass strategy."""
# Because of
# https://github.com/tensorflow/tensorflow/issues/675, we can't just
# compute the full hessian with a single call to gradients, but instead
# must compute it row-by-row.
gradients_list = array_ops.unstack(
grads, num=self._logits_dimension, axis=1)
hessian_rows = []
for row in range(self._logits_dimension):
      # If the current row is i and K is the number of classes, each row
      # returns a tensor of size batch_size x K representing, for each
      # example, dx_i dx_1, dx_i dx_2, ..., dx_i dx_K.
hessian_row = gradients_impl.gradients(
gradients_list[row],
predictions,
name="Hessian_%d" % row,
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)
# hessian_row is of dimension 1, batch_size, K, => trim first dimension
# to get batch_size x K
hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])
hessian_rows.append(hessian_row)
return hessian_rows
def _diagonal_hessian(self, grads, predictions):
"""Prepares hessians for diagonal-hessian multiclass mode."""
diag_hessian_list = []
gradients_list = array_ops.unstack(
grads, num=self._logits_dimension, axis=1)
for row, row_grads in enumerate(gradients_list):
      # If the current row is i and K is the number of classes, each row
      # returns a tensor of size batch_size x K representing, for each
      # example, dx_i dx_1, dx_i dx_2, ..., dx_i dx_K.
hessian_row = gradients_impl.gradients(
row_grads,
predictions,
name="Hessian_%d" % row,
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)
# hessian_row is of dimension 1, batch_size, K, => trim first dimension
# to get batch_size x K
hessian_row = array_ops.squeeze(array_ops.unstack(hessian_row), [0])
# Get dx_i^2 for the whole batch.
elem = array_ops.transpose(hessian_row)[row]
diag_hessian_list.append(elem)
return diag_hessian_list
def _get_replica_device_setter(self, worker_device):
"""Creates a replica device setter."""
ps_tasks = self._num_ps_replicas
ps_ops = [
"Variable",
"VariableV2",
"DecisionTreeEnsembleResourceHandleOp",
"StatsAccumulatorScalarResourceHandleOp",
"StatsAccumulatorTensorResourceHandleOp",
]
ps_strategy = _OpRoundRobinStrategy(ps_ops, ps_tasks)
return device_setter.replica_device_setter(
worker_device=worker_device,
ps_tasks=ps_tasks,
merge_devices=True,
ps_ops=ps_ops,
ps_strategy=ps_strategy)
def _make_update_bias_stats_fn(self, ensemble_stamp, predictions, gradients,
bias_stats_accumulator):
"""A method to create the function which updates the bias stats."""
def _update_bias_stats():
"""A method to update the bias stats."""
# Get reduced gradients and hessians.
grads_sum = math_ops.reduce_sum(gradients, 0)
hess = gradients_impl.gradients(
grads_sum,
predictions,
name="Hessians",
colocate_gradients_with_ops=False,
gate_gradients=0,
aggregation_method=None)[0]
hess_sum = math_ops.reduce_sum(hess, 0)
# Accumulate gradients and hessians.
partition_ids = math_ops.range(self._logits_dimension)
feature_ids = array_ops.zeros(
[self._logits_dimension, 2], dtype=dtypes.int64)
add_stats_op = bias_stats_accumulator.add(
ensemble_stamp, partition_ids, feature_ids, grads_sum, hess_sum)
return control_flow_ops.group(*[add_stats_op], name="update_bias_stats")
return _update_bias_stats
|
cpennington/edx-platform
|
refs/heads/master
|
common/lib/capa/capa/xqueue_interface.py
|
4
|
#
# LMS Interface to external queueing system (xqueue)
#
import hashlib
import json
import logging
import requests
import six
log = logging.getLogger(__name__)
dateformat = '%Y%m%d%H%M%S'
XQUEUE_METRIC_NAME = 'edxapp.xqueue'
# Wait time for response from Xqueue.
XQUEUE_TIMEOUT = 35 # seconds
CONNECT_TIMEOUT = 3.05 # seconds
READ_TIMEOUT = 10 # seconds
def make_hashkey(seed):
"""
Generate a string key by hashing
"""
h = hashlib.md5()
h.update(six.b(str(seed)))
return h.hexdigest()
def make_xheader(lms_callback_url, lms_key, queue_name):
"""
Generate header for delivery and reply of queue request.
Xqueue header is a JSON-serialized dict:
{ 'lms_callback_url': url to which xqueue will return the request (string),
'lms_key': secret key used by LMS to protect its state (string),
'queue_name': designate a specific queue within xqueue server, e.g. 'MITx-6.00x' (string)
}
"""
return json.dumps({
'lms_callback_url': lms_callback_url,
'lms_key': lms_key,
'queue_name': queue_name
})
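# Illustrative example (not part of the original module; values are
# placeholders):
#   make_xheader('https://lms.example.com/callback', 'secret-key', 'test-pull')
# returns the JSON-serialized dict described above, e.g.
#   '{"lms_callback_url": "https://lms.example.com/callback", '
#   '"lms_key": "secret-key", "queue_name": "test-pull"}'
# (key order in the serialized string is not guaranteed).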
def parse_xreply(xreply):
"""
Parse the reply from xqueue. Messages are JSON-serialized dict:
{ 'return_code': 0 (success), 1 (fail)
'content': Message from xqueue (string)
}
"""
try:
xreply = json.loads(xreply)
except ValueError as err:
log.error(err)
return (1, 'unexpected reply from server')
return_code = xreply['return_code']
content = xreply['content']
return (return_code, content)
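# Illustrative examples (not part of the original module):
#   parse_xreply('{"return_code": 0, "content": "submitted"}')  # -> (0, 'submitted')
#   parse_xreply('not json')  # -> (1, 'unexpected reply from server')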
class XQueueInterface(object):
"""
Interface to the external grading system
"""
def __init__(self, url, django_auth, requests_auth=None):
self.url = six.text_type(url)
self.auth = django_auth
self.session = requests.Session()
self.session.auth = requests_auth
def send_to_queue(self, header, body, files_to_upload=None):
"""
Submit a request to xqueue.
header: JSON-serialized dict in the format described in 'xqueue_interface.make_xheader'
        body: Serialized data for the recipient behind the queueing service. The operation of
xqueue is agnostic to the contents of 'body'
files_to_upload: List of file objects to be uploaded to xqueue along with queue request
Returns (error_code, msg) where error_code != 0 indicates an error
"""
# log the send to xqueue
header_info = json.loads(header)
queue_name = header_info.get('queue_name', u'')
# Attempt to send to queue
(error, msg) = self._send_to_queue(header, body, files_to_upload)
# Log in, then try again
if error and (msg == 'login_required'):
(error, content) = self._login()
if error != 0:
# when the login fails
log.debug("Failed to login to queue: %s", content)
return (error, content)
if files_to_upload is not None:
# Need to rewind file pointers
for f in files_to_upload:
f.seek(0)
(error, msg) = self._send_to_queue(header, body, files_to_upload)
return error, msg
def _login(self):
payload = {
'username': self.auth['username'],
'password': self.auth['password']
}
return self._http_post(self.url + '/xqueue/login/', payload)
def _send_to_queue(self, header, body, files_to_upload):
payload = {
'xqueue_header': header,
'xqueue_body': body
}
files = {}
if files_to_upload is not None:
for f in files_to_upload:
files.update({f.name: f})
return self._http_post(self.url + '/xqueue/submit/', payload, files=files)
def _http_post(self, url, data, files=None):
try:
response = self.session.post(
url, data=data, files=files, timeout=(CONNECT_TIMEOUT, READ_TIMEOUT)
)
except requests.exceptions.ConnectionError as err:
log.error(err)
return 1, 'cannot connect to server'
except requests.exceptions.ReadTimeout as err:
log.error(err)
return 1, 'failed to read from the server'
if response.status_code not in [200]:
return 1, 'unexpected HTTP status code [%d]' % response.status_code
return parse_xreply(response.text)
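# Illustrative usage sketch (not part of the original module; the URL,
# credentials and body are placeholders):
#
#   xqueue = XQueueInterface('https://xqueue.example.com',
#                            {'username': 'lms', 'password': 'secret'})
#   header = make_xheader('https://lms.example.com/callback', 'key', 'test-pull')
#   error, msg = xqueue.send_to_queue(header, json.dumps({'response': '42'}))
#   # error == 0 on success; msg carries the content returned by xqueue.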
|
davisein/jitsudone
|
refs/heads/master
|
django/django/conf/locale/nn/formats.py
|
170
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
'%Y-%m-%d', # '2006-10-25',
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
)
TIME_INPUT_FORMATS = (
'%H:%M:%S', # '14:30:59'
'%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = u'\xa0' # non-breaking space
NUMBER_GROUPING = 3
|
AnthonyTruchet/breathe
|
refs/heads/master
|
breathe/__init__.py
|
2
|
__version__ = '4.0.0'
def setup(app):
# We can't do the import at the module scope as setup.py has to be able to
# import this file to read __version__ without hitting any syntax errors
# from both Python 2 & Python 3.
# By the time this function is called, the directives code will have been
# converted with 2to3 if appropriate
from . import directives
directives.setup(app)
|
m3brown/pantheon
|
refs/heads/master
|
devdash/osw/pipeline.py
|
5
|
from social.pipeline.partial import partial
from django.shortcuts import redirect
from django.http import HttpResponse
from django.conf import settings
from github.client import GitHubAdmin, get_org_name, is_org_member, is_2fa_enabled
gha = GitHubAdmin()
@partial
def join_org(request, backend, user, details, **kwargs):
if backend.name != 'github':
return
if not user:
return HttpResponse('Unauthorized', status=401)
gh_details = request.session.get('gh_details', {})
if not gh_details.get('org_name'):
gh_details['org_name'] = get_org_name(gha, settings.GH_ORG_IDS[0])
request.session['gh_details'] = gh_details
if not gh_details.get('is_member'):
gh_details['is_member'] = is_org_member(gha, details['username'], gh_details['org_name'])
request.session['gh_details'] = gh_details
if gh_details['is_member']:
return None
else:
return redirect('osw:join_org')
@partial
def enable_2fa(request, backend, user, details, **kwargs):
if backend.name != 'github':
return
if not user:
return HttpResponse('Unauthorized', status=401)
gh_details = request.session['gh_details']
if not gh_details.get('is_2fa_enabled'):
gh_details['is_2fa_enabled'] = is_2fa_enabled(gha, details['username'], gh_details['org_name'])
request.session['gh_details'] = gh_details
if gh_details['is_2fa_enabled']:
gh_details['is_public_member'] = is_org_member(gha, details['username'], gh_details['org_name'], public=True)
request.session['gh_details'] = gh_details
return None
else:
return redirect('osw:enable_2fa')
@partial
def github_details(request, backend, user, details, **kwargs):
if backend.name != 'github':
return
if not user:
return HttpResponse('Unauthorized', status=401)
gh_details = request.session.get('gh_details', {})
if gh_details:
return redirect('osw:gh_details')
else:
return None
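# Illustrative settings sketch (an assumption, not part of this module): these
# @partial steps are wired into python-social-auth via SOCIAL_AUTH_PIPELINE,
# with dotted paths depending on how the package is importable, e.g.:
#
#   SOCIAL_AUTH_PIPELINE = (
#       # ... the standard social-auth steps ...
#       'osw.pipeline.join_org',
#       'osw.pipeline.enable_2fa',
#       'osw.pipeline.github_details',
#   )
#
# A @partial step that returns a redirect pauses the pipeline and resumes at
# the same step once the user completes the redirected flow.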
|
StevenBlack/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/common/lru_cache.py
|
134
|
# Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class Node():
def __init__(self, key, value):
self.key = key
self.value = value
self.prev = None
self.next = None
class LRUCache():
"""An implementation of Least Recently Used (LRU) Cache."""
def __init__(self, capacity):
"""Initializes a lru cache with the given capacity.
Args:
capacity: The capacity of the cache.
"""
assert capacity > 0, "capacity (%s) must be greater than zero." % capacity
self._first = None
self._last = None
self._dict = {}
self._capacity = capacity
def __setitem__(self, key, value):
if key in self._dict:
self.__delitem__(key)
if not self._first:
self._one_node(key, value)
return
if len(self._dict) >= self._capacity:
del self._dict[self._last.key]
if self._capacity == 1:
self._one_node(key, value)
return
self._last = self._last.next
self._last.prev = None
node = Node(key, value)
node.prev = self._first
self._first.next = node
self._first = node
self._dict[key] = node
def _one_node(self, key, value):
node = Node(key, value)
self._dict[key] = node
self._first = node
self._last = node
def __getitem__(self, key):
if not self._first:
raise KeyError(str(key))
if self._first.key == key:
return self._first.value
if self._last.key == key:
next_last = self._last.next
next_last.prev = None
next_first = self._last
next_first.prev = self._first
next_first.next = None
self._first.next = next_first
self._first = next_first
self._last = next_last
return self._first.value
node = self._dict[key]
node.next.prev = node.prev
node.prev.next = node.next
node.prev = self._first
node.next = None
self._first.next = node
self._first = node
return self._first.value
def __delitem__(self, key):
node = self._dict[key]
del self._dict[key]
if self._first is self._last:
self._last = None
self._first = None
return
if self._first is node:
self._first = node.prev
self._first.next = None
return
if self._last is node:
self._last = node.next
self._last.prev = None
return
node.next.prev = node.prev
node.prev.next = node.next
def __len__(self):
return len(self._dict)
def __contains__(self, key):
return key in self._dict
def __iter__(self):
return iter(self._dict)
def items(self):
return [(key, node.value) for key, node in self._dict.items()]
def values(self):
return [node.value for node in self._dict.values()]
def keys(self):
return self._dict.keys()
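# Illustrative usage (not part of the original module):
#
#   cache = LRUCache(capacity=2)
#   cache['a'] = 1
#   cache['b'] = 2
#   _ = cache['a']   # touching 'a' makes it the most recently used entry
#   cache['c'] = 3   # evicts 'b', the least recently used key
#   assert 'b' not in cache and sorted(cache.keys()) == ['a', 'c']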
|
rvalieris/bioconda-recipes
|
refs/heads/master
|
recipes/biopet-scatterregions/0.2/biopet-scatterregions.py
|
72
|
#!/usr/bin/env python
#
# Wrapper script for starting the biopet-scatterregions JAR package
#
# This script is written for use with the Conda package manager and is copied
# from the peptide-shaker wrapper. Only the parameters are changed.
# (https://github.com/bioconda/bioconda-recipes/blob/master/recipes/peptide-shaker/peptide-shaker.py)
#
# This file was automatically generated by the sbt-bioconda plugin.
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
jar_file = 'ScatterRegions-assembly-0.2.jar'
default_jvm_mem_opts = []
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
    The return value is a 4-tuple of the form:
    (memory_options, prop_options, passthrough_options, exec_dir)
    where the first three elements are lists of strings.
"""
mem_opts = []
prop_opts = []
pass_args = []
exec_dir = None
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
elif arg.startswith('--exec_dir='):
exec_dir = arg.split('=')[1].strip('"').strip("'")
if not os.path.exists(exec_dir):
shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args, exec_dir)
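# Illustrative example (not part of the original wrapper):
#   jvm_opts(['-Xmx4g', '-Dkey=value', 'in.bed'])
#   # -> (['-Xmx4g'], ['-Dkey=value'], ['in.bed'], None)
# With no -Xm* flag and _JAVA_OPTIONS unset, mem_opts falls back to
# default_jvm_mem_opts instead.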
def main():
"""
PeptideShaker updates files relative to the path of the jar file.
In a multiuser setting, the option --exec_dir="exec_dir"
can be used as the location for the peptide-shaker distribution.
    If the exec_dir does not exist,
we copy the jar file, lib, and resources to the exec_dir directory.
"""
java = java_executable()
(mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
|
gpoesia/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/pytest/doc/en/example/py2py3/conftest.py
|
233
|
import sys
import pytest
py3 = sys.version_info[0] >= 3
class DummyCollector(pytest.collect.File):
def collect(self):
return []
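# Added note: the hook below returns an empty DummyCollector for
# version-specific test modules, so files whose basename contains "py3" are
# skipped under Python 2 and files containing "py2" are skipped under
# Python 3, instead of failing to import.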
def pytest_pycollect_makemodule(path, parent):
bn = path.basename
if "py3" in bn and not py3 or ("py2" in bn and py3):
return DummyCollector(path, parent=parent)
|
dongsenfo/pymatgen
|
refs/heads/master
|
pymatgen/analysis/tests/test_molecule_structure_comparator.py
|
3
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import os
from unittest import TestCase
import unittest
from pymatgen.analysis.molecule_structure_comparator import \
MoleculeStructureComparator
from pymatgen.core.structure import Molecule
from pymatgen.io.qchem_deprecated import QcOutput
__author__ = 'xiaohuiqu'
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
"test_files", "molecules", "structural_change")
class TestMoleculeStructureComparator(TestCase):
def test_are_equal(self):
msc1 = MoleculeStructureComparator()
mol1 = Molecule.from_file(os.path.join(test_dir, "t1.xyz"))
mol2 = Molecule.from_file(os.path.join(test_dir, "t2.xyz"))
mol3 = Molecule.from_file(os.path.join(test_dir, "t3.xyz"))
self.assertFalse(msc1.are_equal(mol1, mol2))
self.assertTrue(msc1.are_equal(mol2, mol3))
thio1 = Molecule.from_file(os.path.join(test_dir, "thiophene1.xyz"))
thio2 = Molecule.from_file(os.path.join(test_dir, "thiophene2.xyz"))
# noinspection PyProtectedMember
msc2 = MoleculeStructureComparator(
priority_bonds=msc1._get_bonds(thio1))
self.assertTrue(msc2.are_equal(thio1, thio2))
hal1 = Molecule.from_file(os.path.join(test_dir, "molecule_with_halogen_bonds_1.xyz"))
hal2 = Molecule.from_file(os.path.join(test_dir, "molecule_with_halogen_bonds_2.xyz"))
msc3 = MoleculeStructureComparator(priority_bonds=msc1._get_bonds(hal1))
self.assertTrue(msc3.are_equal(hal1, hal2))
def test_get_bonds(self):
mol1 = Molecule.from_file(os.path.join(test_dir, "t1.xyz"))
msc = MoleculeStructureComparator()
# noinspection PyProtectedMember
bonds = msc._get_bonds(mol1)
bonds_ref = [(0, 1), (0, 2), (0, 3), (0, 23), (3, 4), (3, 5), (5, 6),
(5, 7), (7, 8), (7, 9), (7, 21), (9, 10), (9, 11),
(9, 12), (12, 13), (12, 14), (12, 15), (15, 16), (15, 17),
(15, 18), (18, 19), (18, 20), (18, 21), (21, 22),
(21, 23), (23, 24), (23, 25)]
self.assertEqual(bonds, bonds_ref)
mol2 = Molecule.from_file(os.path.join(test_dir, "MgBH42.xyz"))
bonds = msc._get_bonds(mol2)
self.assertEqual(bonds, [(1, 3), (2, 3), (3, 4), (3, 5), (6, 8), (7, 8),
(8, 9), (8, 10)])
msc = MoleculeStructureComparator(ignore_ionic_bond=False)
bonds = msc._get_bonds(mol2)
self.assertEqual(bonds, [(0, 1), (0, 2), (0, 3), (0, 5), (0, 6), (0, 7),
(0, 8), (0, 9), (1, 3), (2, 3), (3, 4), (3, 5),
(6, 8), (7, 8), (8, 9), (8, 10)])
mol1 = Molecule.from_file(os.path.join(test_dir, "molecule_with_halogen_bonds_1.xyz"))
msc = MoleculeStructureComparator()
# noinspection PyProtectedMember
bonds = msc._get_bonds(mol1)
self.assertEqual(bonds, [(0, 12), (0, 13), (0, 14), (0, 15), (1, 12), (1, 16),
(1, 17), (1, 18), (2, 4), (2, 11), (2, 19), (3, 5),
(3, 10), (3, 20), (4, 6), (4, 10), (5, 11), (5, 12),
(6, 7), (6, 8), (6, 9)])
def test_to_and_from_dict(self):
msc1 = MoleculeStructureComparator()
d1 = msc1.as_dict()
d2 = MoleculeStructureComparator.from_dict(d1).as_dict()
self.assertEqual(d1, d2)
thio1 = Molecule.from_file(os.path.join(test_dir, "thiophene1.xyz"))
# noinspection PyProtectedMember
msc2 = MoleculeStructureComparator(
bond_length_cap=0.2,
priority_bonds=msc1._get_bonds(thio1),
priority_cap=0.5)
d1 = msc2.as_dict()
d2 = MoleculeStructureComparator.from_dict(d1).as_dict()
self.assertEqual(d1, d2)
def test_structural_change_in_geom_opt(self):
qcout_path = os.path.join(test_dir, "mol_1_3_bond.qcout")
qcout = QcOutput(qcout_path)
mol1 = qcout.data[0]["molecules"][0]
mol2 = qcout.data[0]["molecules"][-1]
priority_bonds = [[0, 1], [0, 2], [1, 3], [1, 4], [1, 7], [2, 5], [2, 6], [2, 8], [4, 6], [4, 10], [6, 9]]
msc = MoleculeStructureComparator(priority_bonds=priority_bonds)
self.assertTrue(msc.are_equal(mol1, mol2))
def test_get_13_bonds(self):
priority_bonds = [[0, 1], [0, 2], [1, 3], [1, 4], [1, 7], [2, 5], [2, 6], [2, 8], [4, 6], [4, 10], [6, 9]]
bonds_13 = MoleculeStructureComparator.get_13_bonds(priority_bonds)
ans = ((0, 3), (0, 4), (0, 5), (0, 6), (0, 7), (0, 8), (1, 2), (1, 6), (1, 10), (2, 4), (2, 9), (3, 4),
(3, 7), (4, 7), (4, 9), (5, 6), (5, 8), (6, 8), (6, 10))
self.assertEqual(bonds_13, tuple(ans))
if __name__ == '__main__':
unittest.main()
|
ArianaGashi/Techstitution
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/__main__.py
|
834
|
from __future__ import absolute_import
import os
import sys
# If we are running from a wheel, add the wheel to sys.path
# This allows the usage python pip-*.whl/pip install pip-*.whl
if __package__ == '':
# __file__ is pip-*.whl/pip/__main__.py
# first dirname call strips of '/__main__.py', second strips off '/pip'
# Resulting path is the name of the wheel itself
# Add that to sys.path so we can import pip
path = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(0, path)
import pip # noqa
if __name__ == '__main__':
sys.exit(pip.main())
|
cundi/pyramid_sacrud
|
refs/heads/master
|
pyramid_sacrud/includes/routes.py
|
1
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 uralbash <root@uralbash.ru>
#
# Distributed under terms of the MIT license.
"""
Routes for pyramid_sacrud
"""
from ..common import pkg_prefix
from ..security import (PYRAMID_SACRUD_CREATE, PYRAMID_SACRUD_DELETE,
PYRAMID_SACRUD_HOME, PYRAMID_SACRUD_LIST,
PYRAMID_SACRUD_MASS_ACTION, PYRAMID_SACRUD_UPDATE)
def includeme(config):
prefix = pkg_prefix(config)
config.add_route(PYRAMID_SACRUD_HOME, prefix)
config.add_route(PYRAMID_SACRUD_LIST, prefix + '{table}/')
config.add_route(PYRAMID_SACRUD_CREATE, prefix + '{table}/create/')
config.add_route(PYRAMID_SACRUD_UPDATE, prefix + '{table}/update/*pk')
config.add_route(PYRAMID_SACRUD_DELETE, prefix + '{table}/delete/*pk')
config.add_route(PYRAMID_SACRUD_MASS_ACTION, prefix + '{table}/action/')
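# Illustrative usage (an assumption, not part of this module): Pyramid calls
# this includeme() when the hosting application does
#   config.include('pyramid_sacrud.includes.routes')
# or includes a parent package that in turn includes this module.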
|
dmlc/tvm
|
refs/heads/main
|
python/tvm/topi/rocm/batch_matmul.py
|
3
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-variable, unused-argument
"""Schedule for batch_matmul operator"""
from tvm import autotvm
from tvm.contrib import rocblas
from .. import generic
from ..utils import get_const_tuple
@autotvm.register_topi_compute("batch_matmul_rocblas.rocm")
def batch_matmul_rocblas(cfg, x, y, out_shape=None):
"""Computes matrix multiplication of `x` and `y` via rocblas when
`x` and `y` are batched matrices.
Parameters
----------
cfg : ConfigSpace
Autotvm tuning space config file
x : tvm.te.Tensor
3-D with shape [batch, M, K]
y : tvm.te.Tensor
3-D with shape [batch, N, K]
Returns
-------
output : tvm.te.Tensor
3-D with shape [batch, M, N]
"""
batch, M, K = get_const_tuple(x.shape)
_, N, _ = get_const_tuple(y.shape)
if out_shape is not None:
assert out_shape[0] == batch, "Input and output batch sizes must match"
assert out_shape[1] == M and out_shape[2] == N, "Invalid output shape"
result = rocblas.batch_matmul(x, y, False, True)
cfg.add_flop(batch * M * N * K * 2)
return result
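# Illustrative shape note (not part of the original file): with x of shape
# (8, 32, 64) and y of shape (8, 16, 64), batch_matmul_rocblas returns an
# (8, 32, 16) tensor; rocblas.batch_matmul(x, y, False, True) treats y as
# [batch, N, K] and transposes it for the multiply.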
@autotvm.register_topi_schedule("batch_matmul_rocblas.rocm")
def schedule_batch_matmul_rocblas(_, outs):
"""Schedule for batch_matmul operator with rocm cblas"""
return generic.schedule_extern(outs)
|
fmacias64/Dato-Core
|
refs/heads/master
|
src/unity/python/graphlab/data_structures/__init__.py
|
13
|
"""
GraphLab Create offers several data structures for data analysis.
Concise descriptions of the data structures and their methods are contained in
the API documentation, along with a small number of simple examples. For more
detailed descriptions and examples, please see the `User Guide
<https://dato.com/learn/userguide>`_, `API Translator
<https://dato.com/learn/translator>`_, `How-Tos
<https://dato.com/learn/how-to>`_, and data science `Gallery
<https://dato.com/learn/gallery>`_.
"""
'''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
__all__ = ['sframe', 'sarray', 'sgraph', 'sketch', 'image']
import sframe
import sarray
import sgraph
import sketch
import image
|
kubaszostak/gdal-dragndrop
|
refs/heads/master
|
osgeo/apps/Python27/Lib/site-packages/numpy/matrixlib/tests/test_multiarray.py
|
15
|
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_, assert_equal, assert_array_equal
class TestView(object):
def test_type(self):
x = np.array([1, 2, 3])
assert_(isinstance(x.view(np.matrix), np.matrix))
def test_keywords(self):
x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
# We must be specific about the endianness here:
y = x.view(dtype='<i2', type=np.matrix)
assert_array_equal(y, [[513]])
assert_(isinstance(y, np.matrix))
assert_equal(y.dtype, np.dtype('<i2'))
|
securechain/securechain-wallet
|
refs/heads/master
|
share/qt/extract_strings_qt.py
|
2945
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
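# Illustrative example (not part of the original script): for the two lines
#   msgid "Hello"
#   msgstr ""
# parse_po() returns [(['"Hello"'], ['""'])] -- the raw quoted msgid/msgstr
# fragments, which are later joined with '\n'.join(msgid) when writing the
# output file.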
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
|
lipis/life-line
|
refs/heads/master
|
main/control/error.py
|
2
|
# coding: utf-8
import logging
from flask.ext.babel import lazy_gettext as _
import flask
from api import helpers
import config
from main import app
@app.errorhandler(400) # Bad Request
@app.errorhandler(401) # Unauthorized
@app.errorhandler(403) # Forbidden
@app.errorhandler(404) # Not Found
@app.errorhandler(405) # Method Not Allowed
@app.errorhandler(410) # Gone
@app.errorhandler(418) # I'm a Teapot
@app.errorhandler(500) # Internal Server Error
def error_handler(e):
logging.exception(e)
try:
e.code
except AttributeError:
e.code = 500
e.name = 'Internal Server Error'
if flask.request.path.startswith('/api/'):
return helpers.handle_error(e)
return flask.render_template(
'error.html',
title=_('Error %(code)d (%(name)s)!!1', code=e.code, name=e.name),
html_class='error-page',
error=e,
), e.code
if config.PRODUCTION:
@app.errorhandler(Exception)
def production_error_handler(e):
return error_handler(e)
|
tusimbe/APM
|
refs/heads/master
|
Tools/autotest/apm_unit_tests/mustpass/arducopter_takeoff.py
|
250
|
import arducopter
def unit_test(mavproxy, mav):
'''A scripted flight plan'''
if (
arducopter.calibrate_level(mavproxy, mav) and
arducopter.arm_motors(mavproxy, mav) and
arducopter.takeoff(mavproxy,mav, alt_min=30, takeoff_throttle=1510)):
return True
return False
|
usc-isi/horizon-old
|
refs/heads/hpc-horizon
|
horizon/horizon/api/glance.py
|
1
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import logging
import urlparse
from glance import client as glance_client
from horizon.api.base import *
LOG = logging.getLogger(__name__)
class Image(APIDictWrapper):
"""Simple wrapper around glance image dictionary"""
_attrs = ['checksum', 'container_format', 'created_at', 'deleted',
'deleted_at', 'disk_format', 'id', 'is_public', 'location',
'name', 'properties', 'size', 'status', 'updated_at', 'owner']
def __getattr__(self, attrname):
if attrname == "properties":
return ImageProperties(super(Image, self).__getattr__(attrname))
else:
return super(Image, self).__getattr__(attrname)
class ImageProperties(APIDictWrapper):
"""Simple wrapper around glance image properties dictionary"""
_attrs = ['architecture', 'image_location', 'image_state', 'kernel_id',
'project_id', 'ramdisk_id']
def glance_api(request):
o = urlparse.urlparse(url_for(request, 'image'))
LOG.debug('glance_api connection created for host "%s:%d"' %
(o.hostname, o.port))
return glance_client.Client(o.hostname,
o.port,
auth_tok=request.user.token)
def image_create(request, image_meta, image_file):
return Image(glance_api(request).add_image(image_meta, image_file))
def image_delete(request, image_id):
return glance_api(request).delete_image(image_id)
def image_get(request, image_id):
return Image(glance_api(request).get_image(image_id)[0])
def image_list_detailed(request):
return [Image(i) for i in glance_api(request).get_images_detailed()]
def image_update(request, image_id, image_meta=None):
image_meta = image_meta or {}
return Image(glance_api(request).update_image(image_id,
image_meta=image_meta))
def snapshot_list_detailed(request):
filters = {}
filters['property-image_type'] = 'snapshot'
filters['is_public'] = 'none'
return [Image(i) for i in glance_api(request)
.get_images_detailed(filters=filters)]
|
takeflight/wagtail
|
refs/heads/master
|
wagtail/tests/testapp/migrations/0050_customimagewithauthor_customrenditionwithauthor.py
|
9
|
# Generated by Django 2.2 on 2019-04-26 15:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import taggit.managers
import wagtail.core.models
import wagtail.images.models
import wagtail.search.index
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
('taggit', '0002_auto_20150616_2121'),
('tests', '0049_rawhtmlblock'),
]
operations = [
migrations.CreateModel(
name='CustomImageWithAuthor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='title')),
('file', models.ImageField(height_field='height', upload_to=wagtail.images.models.get_upload_to, verbose_name='file', width_field='width')),
('width', models.IntegerField(editable=False, verbose_name='width')),
('height', models.IntegerField(editable=False, verbose_name='height')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created at')),
('focal_point_x', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_y', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_width', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_height', models.PositiveIntegerField(blank=True, null=True)),
('file_size', models.PositiveIntegerField(editable=False, null=True)),
('file_hash', models.CharField(blank=True, editable=False, max_length=40)),
('author', models.CharField(max_length=255)),
('collection', models.ForeignKey(default=wagtail.core.models.get_root_collection_id, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Collection', verbose_name='collection')),
('tags', taggit.managers.TaggableManager(blank=True, help_text=None, through='taggit.TaggedItem', to='taggit.Tag', verbose_name='tags')),
('uploaded_by_user', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='uploaded by user')),
],
options={
'abstract': False,
},
bases=(wagtail.search.index.Indexed, models.Model),
),
migrations.CreateModel(
name='CustomRenditionWithAuthor',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('filter_spec', models.CharField(db_index=True, max_length=255)),
('file', models.ImageField(height_field='height', upload_to=wagtail.images.models.get_rendition_upload_to, width_field='width')),
('width', models.IntegerField(editable=False)),
('height', models.IntegerField(editable=False)),
('focal_point_key', models.CharField(blank=True, default='', editable=False, max_length=16)),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='renditions', to='tests.CustomImageWithAuthor')),
],
options={
'unique_together': {('image', 'filter_spec', 'focal_point_key')},
},
),
]
|
mbedmicro/mbed
|
refs/heads/master
|
storage/filesystem/littlefsv2/littlefs/scripts/test.py
|
13
|
#!/usr/bin/env python3
# This script manages littlefs tests, which are configured with
# .toml files stored in the tests directory.
#
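# A minimal illustrative test file, inferred from how this script parses its
# config (not copied from the repository):
#
#   [[case]]
#   define.LFS2_BLOCK_CYCLES = [8, 16]
#   code = '''
#       lfs2_format(&lfs2, &cfg) => 0;
#       lfs2_mount(&lfs2, &cfg) => 0;
#       lfs2_unmount(&lfs2) => 0;
#   '''
#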
import toml
import glob
import re
import os
import io
import itertools as it
import collections.abc as abc
import subprocess as sp
import base64
import sys
import copy
import shlex
import pty
import errno
import signal
TESTDIR = 'tests'
RULES = """
define FLATTEN
tests/%$(subst /,.,$(target)): $(target)
./scripts/explode_asserts.py $$< -o $$@
endef
$(foreach target,$(SRC),$(eval $(FLATTEN)))
-include tests/*.d
.SECONDARY:
%.test: %.test.o $(foreach f,$(subst /,.,$(SRC:.c=.o)),%.$f)
$(CC) $(CFLAGS) $^ $(LFLAGS) -o $@
"""
GLOBALS = """
//////////////// AUTOGENERATED TEST ////////////////
#include "lfs2.h"
#include "bd/lfs2_testbd.h"
#include <stdio.h>
extern const char *lfs2_testbd_path;
extern uint32_t lfs2_testbd_cycles;
"""
DEFINES = {
'LFS2_READ_SIZE': 16,
'LFS2_PROG_SIZE': 'LFS2_READ_SIZE',
'LFS2_BLOCK_SIZE': 512,
'LFS2_BLOCK_COUNT': 1024,
'LFS2_BLOCK_CYCLES': -1,
'LFS2_CACHE_SIZE': '(64 % LFS2_PROG_SIZE == 0 ? 64 : LFS2_PROG_SIZE)',
'LFS2_LOOKAHEAD_SIZE': 16,
'LFS2_ERASE_VALUE': 0xff,
'LFS2_ERASE_CYCLES': 0,
'LFS2_BADBLOCK_BEHAVIOR': 'LFS2_TESTBD_BADBLOCK_PROGERROR',
}
PROLOGUE = """
// prologue
__attribute__((unused)) lfs2_t lfs2;
__attribute__((unused)) lfs2_testbd_t bd;
__attribute__((unused)) lfs2_file_t file;
__attribute__((unused)) lfs2_dir_t dir;
__attribute__((unused)) struct lfs2_info info;
__attribute__((unused)) char path[1024];
__attribute__((unused)) uint8_t buffer[1024];
__attribute__((unused)) lfs2_size_t size;
__attribute__((unused)) int err;
__attribute__((unused)) const struct lfs2_config cfg = {
.context = &bd,
.read = lfs2_testbd_read,
.prog = lfs2_testbd_prog,
.erase = lfs2_testbd_erase,
.sync = lfs2_testbd_sync,
.read_size = LFS2_READ_SIZE,
.prog_size = LFS2_PROG_SIZE,
.block_size = LFS2_BLOCK_SIZE,
.block_count = LFS2_BLOCK_COUNT,
.block_cycles = LFS2_BLOCK_CYCLES,
.cache_size = LFS2_CACHE_SIZE,
.lookahead_size = LFS2_LOOKAHEAD_SIZE,
};
__attribute__((unused)) const struct lfs2_testbd_config bdcfg = {
.erase_value = LFS2_ERASE_VALUE,
.erase_cycles = LFS2_ERASE_CYCLES,
.badblock_behavior = LFS2_BADBLOCK_BEHAVIOR,
.power_cycles = lfs2_testbd_cycles,
};
lfs2_testbd_createcfg(&cfg, lfs2_testbd_path, &bdcfg) => 0;
"""
EPILOGUE = """
// epilogue
lfs2_testbd_destroy(&cfg) => 0;
"""
PASS = '\033[32m✓\033[0m'
FAIL = '\033[31m✗\033[0m'
class TestFailure(Exception):
def __init__(self, case, returncode=None, stdout=None, assert_=None):
self.case = case
self.returncode = returncode
self.stdout = stdout
self.assert_ = assert_
class TestCase:
def __init__(self, config, filter=filter,
suite=None, caseno=None, lineno=None, **_):
self.config = config
self.filter = filter
self.suite = suite
self.caseno = caseno
self.lineno = lineno
self.code = config['code']
self.code_lineno = config['code_lineno']
self.defines = config.get('define', {})
self.if_ = config.get('if', None)
self.in_ = config.get('in', None)
def __str__(self):
if hasattr(self, 'permno'):
if any(k not in self.case.defines for k in self.defines):
return '%s#%d#%d (%s)' % (
self.suite.name, self.caseno, self.permno, ', '.join(
'%s=%s' % (k, v) for k, v in self.defines.items()
if k not in self.case.defines))
else:
return '%s#%d#%d' % (
self.suite.name, self.caseno, self.permno)
else:
return '%s#%d' % (
self.suite.name, self.caseno)
def permute(self, class_=None, defines={}, permno=None, **_):
ncase = (class_ or type(self))(self.config)
for k, v in self.__dict__.items():
setattr(ncase, k, v)
ncase.case = self
ncase.perms = [ncase]
ncase.permno = permno
ncase.defines = defines
return ncase
def build(self, f, **_):
# prologue
for k, v in sorted(self.defines.items()):
if k not in self.suite.defines:
f.write('#define %s %s\n' % (k, v))
f.write('void test_case%d(%s) {' % (self.caseno, ','.join(
'\n'+8*' '+'__attribute__((unused)) intmax_t %s' % k
for k in sorted(self.perms[0].defines)
if k not in self.defines)))
f.write(PROLOGUE)
f.write('\n')
f.write(4*' '+'// test case %d\n' % self.caseno)
f.write(4*' '+'#line %d "%s"\n' % (self.code_lineno, self.suite.path))
# test case goes here
f.write(self.code)
# epilogue
f.write(EPILOGUE)
f.write('}\n')
for k, v in sorted(self.defines.items()):
if k not in self.suite.defines:
f.write('#undef %s\n' % k)
def shouldtest(self, **args):
if (self.filter is not None and
len(self.filter) >= 1 and
self.filter[0] != self.caseno):
return False
elif (self.filter is not None and
len(self.filter) >= 2 and
self.filter[1] != self.permno):
return False
elif args.get('no_internal', False) and self.in_ is not None:
return False
elif self.if_ is not None:
if_ = self.if_
while True:
for k, v in sorted(self.defines.items(),
key=lambda x: len(x[0]), reverse=True):
if k in if_:
if_ = if_.replace(k, '(%s)' % v)
break
else:
break
if_ = (
re.sub('(\&\&|\?)', ' and ',
re.sub('(\|\||:)', ' or ',
re.sub('!(?!=)', ' not ', if_))))
return eval(if_)
else:
return True
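# Illustrative note (added commentary, not from the original source): an 'if'
# condition written with C operators, e.g.
#   "LFS2_BLOCK_CYCLES == -1 && !LFS2_ERASE_CYCLES",
# first has its defines substituted and then '&&'/'||'/'!' rewritten to
# Python's 'and'/'or'/'not' above, so roughly "(-1) == -1 and not (0)" is
# what gets passed to eval().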
def test(self, exec=[], persist=False, cycles=None,
gdb=False, failure=None, disk=None, **args):
# build command
cmd = exec + ['./%s.test' % self.suite.path,
repr(self.caseno), repr(self.permno)]
# persist disk or keep in RAM for speed?
if persist:
if not disk:
disk = self.suite.path + '.disk'
if persist != 'noerase':
try:
with open(disk, 'w') as f:
f.truncate(0)
if args.get('verbose', False):
print('truncate --size=0', disk)
except FileNotFoundError:
pass
cmd.append(disk)
# simulate power-loss after n cycles?
if cycles:
cmd.append(str(cycles))
# failed? drop into debugger?
if gdb and failure:
ncmd = ['gdb']
if gdb == 'assert':
ncmd.extend(['-ex', 'r'])
if failure.assert_:
ncmd.extend(['-ex', 'up 2'])
elif gdb == 'main':
ncmd.extend([
'-ex', 'b %s:%d' % (self.suite.path, self.code_lineno),
'-ex', 'r'])
ncmd.extend(['--args'] + cmd)
if args.get('verbose', False):
print(' '.join(shlex.quote(c) for c in ncmd))
signal.signal(signal.SIGINT, signal.SIG_IGN)
sys.exit(sp.call(ncmd))
# run test case!
mpty, spty = pty.openpty()
if args.get('verbose', False):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd, stdout=spty, stderr=spty)
os.close(spty)
mpty = os.fdopen(mpty, 'r', 1)
stdout = []
assert_ = None
try:
while True:
try:
line = mpty.readline()
except OSError as e:
if e.errno == errno.EIO:
break
raise
stdout.append(line)
if args.get('verbose', False):
sys.stdout.write(line)
# intercept asserts
m = re.match(
'^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
.format('(?:\033\[[\d;]*.| )*', 'assert'),
line)
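# Illustrative match (assumed output format, added commentary): a line such
# as "lfs2.c:1234: assert: assert failed with 0, expected eq 1" yields
# groups (path, lineno, message), which are used below to quote the
# failing source line in the report.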
if m and assert_ is None:
try:
with open(m.group(1)) as f:
lineno = int(m.group(2))
line = (next(it.islice(f, lineno-1, None))
.strip('\n'))
assert_ = {
'path': m.group(1),
'line': line,
'lineno': lineno,
'message': m.group(3)}
except:
pass
except KeyboardInterrupt:
raise TestFailure(self, 1, stdout, None)
proc.wait()
# did we pass?
if proc.returncode != 0:
raise TestFailure(self, proc.returncode, stdout, assert_)
else:
return PASS
class ValgrindTestCase(TestCase):
def __init__(self, config, **args):
self.leaky = config.get('leaky', False)
super().__init__(config, **args)
def shouldtest(self, **args):
return not self.leaky and super().shouldtest(**args)
def test(self, exec=[], **args):
verbose = args.get('verbose', False)
uninit = (self.defines.get('LFS2_ERASE_VALUE', None) == -1)
exec = [
'valgrind',
'--leak-check=full',
] + (['--undef-value-errors=no'] if uninit else []) + [
] + (['--track-origins=yes'] if not uninit else []) + [
'--error-exitcode=4',
'--error-limit=no',
] + (['--num-callers=1'] if not verbose else []) + [
'-q'] + exec
return super().test(exec=exec, **args)
class ReentrantTestCase(TestCase):
def __init__(self, config, **args):
self.reentrant = config.get('reentrant', False)
super().__init__(config, **args)
def shouldtest(self, **args):
return self.reentrant and super().shouldtest(**args)
def test(self, persist=False, gdb=False, failure=None, **args):
for cycles in it.count(1):
# clear disk first?
if cycles == 1 and persist != 'noerase':
persist = 'erase'
else:
persist = 'noerase'
# exact cycle we should drop into debugger?
if gdb and failure and failure.cycleno == cycles:
return super().test(gdb=gdb, persist=persist, cycles=cycles,
failure=failure, **args)
# run tests, but kill the program after prog/erase has
# been hit n cycles. We exit with a special return code if the
# program has not finished, since this isn't a test failure.
try:
return super().test(persist=persist, cycles=cycles, **args)
except TestFailure as nfailure:
if nfailure.returncode == 33:
continue
else:
nfailure.cycleno = cycles
raise
class TestSuite:
def __init__(self, path, classes=[TestCase], defines={},
filter=None, **args):
self.name = os.path.basename(path)
if self.name.endswith('.toml'):
self.name = self.name[:-len('.toml')]
self.path = path
self.classes = classes
self.defines = defines.copy()
self.filter = filter
with open(path) as f:
# load tests
config = toml.load(f)
# find line numbers
f.seek(0)
linenos = []
code_linenos = []
for i, line in enumerate(f):
if re.match(r'\[\[\s*case\s*\]\]', line):
linenos.append(i+1)
if re.match(r'code\s*=\s*(\'\'\'|""")', line):
code_linenos.append(i+2)
code_linenos.reverse()
# grab global config
for k, v in config.get('define', {}).items():
if k not in self.defines:
self.defines[k] = v
self.code = config.get('code', None)
if self.code is not None:
self.code_lineno = code_linenos.pop()
# create initial test cases
self.cases = []
for i, (case, lineno) in enumerate(zip(config['case'], linenos)):
# code lineno?
if 'code' in case:
case['code_lineno'] = code_linenos.pop()
# merge conditions if necessary
if 'if' in config and 'if' in case:
case['if'] = '(%s) && (%s)' % (config['if'], case['if'])
elif 'if' in config:
case['if'] = config['if']
# initialize test case
self.cases.append(TestCase(case, filter=filter,
suite=self, caseno=i+1, lineno=lineno, **args))
def __str__(self):
return self.name
def __lt__(self, other):
return self.name < other.name
def permute(self, **args):
for case in self.cases:
# lets find all parameterized definitions, in one of [args.D,
# suite.defines, case.defines, DEFINES]. Note that each of these
# can be either a dict of defines, or a list of dicts, expressing
# an initial set of permutations.
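# Illustrative example (added commentary): if a case sets
# define.LFS2_BLOCK_CYCLES = [8, 16], the code below expands it into two
# permutations, each merged with the suite defines and the global DEFINES.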
pending = [{}]
for inits in [self.defines, case.defines, DEFINES]:
if not isinstance(inits, list):
inits = [inits]
npending = []
for init, pinit in it.product(inits, pending):
ninit = pinit.copy()
for k, v in init.items():
if k not in ninit:
try:
ninit[k] = eval(v)
except:
ninit[k] = v
npending.append(ninit)
pending = npending
# expand permutations
pending = list(reversed(pending))
expanded = []
while pending:
perm = pending.pop()
for k, v in sorted(perm.items()):
if not isinstance(v, str) and isinstance(v, abc.Iterable):
for nv in reversed(v):
nperm = perm.copy()
nperm[k] = nv
pending.append(nperm)
break
else:
expanded.append(perm)
# generate permutations
case.perms = []
for i, (class_, defines) in enumerate(
it.product(self.classes, expanded)):
case.perms.append(case.permute(
class_, defines, permno=i+1, **args))
# also track non-unique defines
case.defines = {}
for k, v in case.perms[0].defines.items():
if all(perm.defines[k] == v for perm in case.perms):
case.defines[k] = v
# track all perms and non-unique defines
self.perms = []
for case in self.cases:
self.perms.extend(case.perms)
self.defines = {}
for k, v in self.perms[0].defines.items():
if all(perm.defines.get(k, None) == v for perm in self.perms):
self.defines[k] = v
return self.perms
def build(self, **args):
# build test files
tf = open(self.path + '.test.c.t', 'w')
tf.write(GLOBALS)
if self.code is not None:
tf.write('#line %d "%s"\n' % (self.code_lineno, self.path))
tf.write(self.code)
tfs = {None: tf}
for case in self.cases:
if case.in_ not in tfs:
tfs[case.in_] = open(self.path+'.'+
case.in_.replace('/', '.')+'.t', 'w')
tfs[case.in_].write('#line 1 "%s"\n' % case.in_)
with open(case.in_) as f:
for line in f:
tfs[case.in_].write(line)
tfs[case.in_].write('\n')
tfs[case.in_].write(GLOBALS)
tfs[case.in_].write('\n')
case.build(tfs[case.in_], **args)
tf.write('\n')
tf.write('const char *lfs2_testbd_path;\n')
tf.write('uint32_t lfs2_testbd_cycles;\n')
tf.write('int main(int argc, char **argv) {\n')
tf.write(4*' '+'int case_ = (argc > 1) ? atoi(argv[1]) : 0;\n')
tf.write(4*' '+'int perm = (argc > 2) ? atoi(argv[2]) : 0;\n')
tf.write(4*' '+'lfs2_testbd_path = (argc > 3) ? argv[3] : NULL;\n')
tf.write(4*' '+'lfs2_testbd_cycles = (argc > 4) ? atoi(argv[4]) : 0;\n')
for perm in self.perms:
# test declaration
tf.write(4*' '+'extern void test_case%d(%s);\n' % (
perm.caseno, ', '.join(
'intmax_t %s' % k for k in sorted(perm.defines)
if k not in perm.case.defines)))
# test call
tf.write(4*' '+
'if (argc < 3 || (case_ == %d && perm == %d)) {'
' test_case%d(%s); '
'}\n' % (perm.caseno, perm.permno, perm.caseno, ', '.join(
str(v) for k, v in sorted(perm.defines.items())
if k not in perm.case.defines)))
tf.write('}\n')
for tf in tfs.values():
tf.close()
# write makefiles
with open(self.path + '.mk', 'w') as mk:
mk.write(RULES.replace(4*' ', '\t'))
mk.write('\n')
# add truly global defines globally
for k, v in sorted(self.defines.items()):
mk.write('%s: override CFLAGS += -D%s=%r\n' % (
self.path+'.test', k, v))
for path in tfs:
if path is None:
mk.write('%s: %s | %s\n' % (
self.path+'.test.c',
self.path,
self.path+'.test.c.t'))
else:
mk.write('%s: %s %s | %s\n' % (
self.path+'.'+path.replace('/', '.'),
self.path, path,
self.path+'.'+path.replace('/', '.')+'.t'))
mk.write('\t./scripts/explode_asserts.py $| -o $@\n')
self.makefile = self.path + '.mk'
self.target = self.path + '.test'
return self.makefile, self.target
def test(self, **args):
# run test suite!
if not args.get('verbose', True):
sys.stdout.write(self.name + ' ')
sys.stdout.flush()
for perm in self.perms:
if not perm.shouldtest(**args):
continue
try:
result = perm.test(**args)
except TestFailure as failure:
perm.result = failure
if not args.get('verbose', True):
sys.stdout.write(FAIL)
sys.stdout.flush()
if not args.get('keep_going', False):
if not args.get('verbose', True):
sys.stdout.write('\n')
raise
else:
perm.result = PASS
if not args.get('verbose', True):
sys.stdout.write(PASS)
sys.stdout.flush()
if not args.get('verbose', True):
sys.stdout.write('\n')
def main(**args):
# figure out explicit defines
defines = {}
for define in args['D']:
k, v, *_ = define.split('=', 2) + ['']
defines[k] = v
# and what class of TestCase to run
classes = []
if args.get('normal', False):
classes.append(TestCase)
if args.get('reentrant', False):
classes.append(ReentrantTestCase)
if args.get('valgrind', False):
classes.append(ValgrindTestCase)
if not classes:
classes = [TestCase]
suites = []
for testpath in args['testpaths']:
# optionally specified test case/perm
testpath, *filter = testpath.split('#')
filter = [int(f) for f in filter]
# figure out the suite's toml file
if os.path.isdir(testpath):
testpath = testpath + '/test_*.toml'
elif os.path.isfile(testpath):
testpath = testpath
elif testpath.endswith('.toml'):
testpath = TESTDIR + '/' + testpath
else:
testpath = TESTDIR + '/' + testpath + '.toml'
# find tests
for path in glob.glob(testpath):
suites.append(TestSuite(path, classes, defines, filter, **args))
# sort for reproducibility
suites = sorted(suites)
# generate permutations
for suite in suites:
suite.permute(**args)
# build tests in parallel
print('====== building ======')
makefiles = []
targets = []
for suite in suites:
makefile, target = suite.build(**args)
makefiles.append(makefile)
targets.append(target)
cmd = (['make', '-f', 'Makefile'] +
list(it.chain.from_iterable(['-f', m] for m in makefiles)) +
[target for target in targets])
mpty, spty = pty.openpty()
if args.get('verbose', False):
print(' '.join(shlex.quote(c) for c in cmd))
proc = sp.Popen(cmd, stdout=spty, stderr=spty)
os.close(spty)
mpty = os.fdopen(mpty, 'r', 1)
stdout = []
while True:
try:
line = mpty.readline()
except OSError as e:
if e.errno == errno.EIO:
break
raise
stdout.append(line)
if args.get('verbose', False):
sys.stdout.write(line)
# intercept warnings
m = re.match(
'^{0}([^:]+):(\d+):(?:\d+:)?{0}{1}:{0}(.*)$'
.format('(?:\033\[[\d;]*.| )*', 'warning'),
line)
if m and not args.get('verbose', False):
try:
with open(m.group(1)) as f:
lineno = int(m.group(2))
line = next(it.islice(f, lineno-1, None)).strip('\n')
sys.stdout.write(
"\033[01m{path}:{lineno}:\033[01;35mwarning:\033[m "
"{message}\n{line}\n\n".format(
path=m.group(1), line=line, lineno=lineno,
message=m.group(3)))
except:
pass
proc.wait()
if proc.returncode != 0:
if not args.get('verbose', False):
for line in stdout:
sys.stdout.write(line)
sys.exit(-3)
print('built %d test suites, %d test cases, %d permutations' % (
len(suites),
sum(len(suite.cases) for suite in suites),
sum(len(suite.perms) for suite in suites)))
filtered = 0
for suite in suites:
for perm in suite.perms:
filtered += perm.shouldtest(**args)
if filtered != sum(len(suite.perms) for suite in suites):
print('filtered down to %d permutations' % filtered)
# only requested to build?
if args.get('build', False):
return 0
print('====== testing ======')
try:
for suite in suites:
suite.test(**args)
except TestFailure:
pass
print('====== results ======')
passed = 0
failed = 0
for suite in suites:
for perm in suite.perms:
if not hasattr(perm, 'result'):
continue
if perm.result == PASS:
passed += 1
else:
sys.stdout.write(
"\033[01m{path}:{lineno}:\033[01;31mfailure:\033[m "
"{perm} failed with {returncode}\n".format(
perm=perm, path=perm.suite.path, lineno=perm.lineno,
returncode=perm.result.returncode or 0))
if perm.result.stdout:
if perm.result.assert_:
stdout = perm.result.stdout[:-1]
else:
stdout = perm.result.stdout
for line in stdout[-5:]:
sys.stdout.write(line)
if perm.result.assert_:
sys.stdout.write(
"\033[01m{path}:{lineno}:\033[01;31massert:\033[m "
"{message}\n{line}\n".format(
**perm.result.assert_))
sys.stdout.write('\n')
failed += 1
if args.get('gdb', False):
failure = None
for suite in suites:
for perm in suite.perms:
if getattr(perm, 'result', PASS) != PASS:
failure = perm.result
if failure is not None:
print('====== gdb ======')
# drop into gdb
failure.case.test(failure=failure, **args)
sys.exit(0)
print('tests passed: %d' % passed)
print('tests failed: %d' % failed)
return 1 if failed > 0 else 0
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(
description="Run parameterized tests in various configurations.")
parser.add_argument('testpaths', nargs='*', default=[TESTDIR],
help="Description of test(s) to run. By default, this is all tests \
found in the \"{0}\" directory. Here, you can specify a different \
directory of tests, a specific file, a suite by name, and even a \
specific test case or permutation by appending '#'. For example \
\"test_dirs#1\" or \"{0}/test_dirs.toml#1#2\".".format(TESTDIR))
parser.add_argument('-D', action='append', default=[],
help="Overriding parameter definitions.")
parser.add_argument('-v', '--verbose', action='store_true',
help="Output everything that is happening.")
parser.add_argument('-k', '--keep-going', action='store_true',
help="Run all tests instead of stopping on first error. Useful for CI.")
parser.add_argument('-p', '--persist', choices=['erase', 'noerase'],
nargs='?', const='erase',
help="Store disk image in a file.")
parser.add_argument('-b', '--build', action='store_true',
help="Only build the tests, do not execute.")
parser.add_argument('-g', '--gdb', choices=['init', 'main', 'assert'],
nargs='?', const='assert',
help="Drop into gdb on test failure.")
parser.add_argument('--no-internal', action='store_true',
help="Don't run tests that require internal knowledge.")
parser.add_argument('-n', '--normal', action='store_true',
help="Run tests normally.")
parser.add_argument('-r', '--reentrant', action='store_true',
help="Run reentrant tests with simulated power-loss.")
parser.add_argument('-V', '--valgrind', action='store_true',
help="Run non-leaky tests under valgrind to check for memory leaks.")
parser.add_argument('-e', '--exec', default=[], type=lambda e: e.split(' '),
help="Run tests with another executable prefixed on the command line.")
parser.add_argument('-d', '--disk',
help="Specify a file to use for persistent/reentrant tests.")
sys.exit(main(**vars(parser.parse_args())))
|
yanchen036/tensorflow
|
refs/heads/master
|
tensorflow/python/data/kernel_tests/list_files_dataset_op_test.py
|
12
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import shutil
import tempfile
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class ListFilesDatasetOpTest(test.TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir, ignore_errors=True)
def _touchTempFiles(self, filenames):
for filename in filenames:
open(path.join(self.tmp_dir, filename), 'a').close()
def testEmptyDirectory(self):
dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))
with self.test_session() as sess:
itr = dataset.make_one_shot_iterator()
next_element = itr.get_next()
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testSimpleDirectory(self):
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(path.join(self.tmp_dir, '*'))
with self.test_session() as sess:
itr = dataset.make_one_shot_iterator()
next_element = itr.get_next()
full_filenames = []
produced_filenames = []
for filename in filenames:
full_filenames.append(
compat.as_bytes(path.join(self.tmp_dir, filename)))
produced_filenames.append(compat.as_bytes(sess.run(next_element)))
self.assertItemsEqual(full_filenames, produced_filenames)
with self.assertRaises(errors.OutOfRangeError):
sess.run(itr.get_next())
def testSimpleDirectoryNotShuffled(self):
filenames = ['b', 'c', 'a']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(
path.join(self.tmp_dir, '*'), shuffle=False)
with self.test_session() as sess:
itr = dataset.make_one_shot_iterator()
next_element = itr.get_next()
for filename in sorted(filenames):
self.assertEqual(compat.as_bytes(path.join(self.tmp_dir, filename)),
sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(itr.get_next())
def testFixedSeedResultsInRepeatableOrder(self):
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
dataset = dataset_ops.Dataset.list_files(
path.join(self.tmp_dir, '*'), shuffle=True, seed=37)
with self.test_session() as sess:
itr = dataset.make_initializable_iterator()
next_element = itr.get_next()
full_filenames = [compat.as_bytes(path.join(self.tmp_dir, filename))
for filename in filenames]
all_produced_filenames = []
for _ in range(3):
produced_filenames = []
sess.run(itr.initializer)
try:
while True:
produced_filenames.append(sess.run(next_element))
except errors.OutOfRangeError:
pass
all_produced_filenames.append(produced_filenames)
# Each run should produce the same set of filenames, which may be
# different from the order of `full_filenames`.
self.assertItemsEqual(full_filenames, all_produced_filenames[0])
# However, the different runs should produce filenames in the same order
# as each other.
self.assertEqual(all_produced_filenames[0], all_produced_filenames[1])
self.assertEqual(all_produced_filenames[0], all_produced_filenames[2])
def testEmptyDirectoryInitializer(self):
filename_placeholder = array_ops.placeholder(dtypes.string, shape=[])
dataset = dataset_ops.Dataset.list_files(filename_placeholder)
with self.test_session() as sess:
itr = dataset.make_initializable_iterator()
next_element = itr.get_next()
sess.run(
itr.initializer,
feed_dict={filename_placeholder: path.join(self.tmp_dir, '*')})
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
def testSimpleDirectoryInitializer(self):
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
filename_placeholder = array_ops.placeholder(dtypes.string, shape=[])
dataset = dataset_ops.Dataset.list_files(filename_placeholder)
with self.test_session() as sess:
itr = dataset.make_initializable_iterator()
next_element = itr.get_next()
sess.run(
itr.initializer,
feed_dict={filename_placeholder: path.join(self.tmp_dir, '*')})
full_filenames = []
produced_filenames = []
for filename in filenames:
full_filenames.append(
compat.as_bytes(path.join(self.tmp_dir, filename)))
produced_filenames.append(compat.as_bytes(sess.run(next_element)))
self.assertItemsEqual(full_filenames, produced_filenames)
with self.assertRaises(errors.OutOfRangeError):
sess.run(itr.get_next())
def testFileSuffixes(self):
filenames = ['a.txt', 'b.py', 'c.py', 'd.pyc']
self._touchTempFiles(filenames)
filename_placeholder = array_ops.placeholder(dtypes.string, shape=[])
dataset = dataset_ops.Dataset.list_files(filename_placeholder)
with self.test_session() as sess:
itr = dataset.make_initializable_iterator()
next_element = itr.get_next()
sess.run(
itr.initializer,
feed_dict={filename_placeholder: path.join(self.tmp_dir, '*.py')})
full_filenames = []
produced_filenames = []
for filename in filenames[1:-1]:
full_filenames.append(
compat.as_bytes(path.join(self.tmp_dir, filename)))
produced_filenames.append(compat.as_bytes(sess.run(next_element)))
self.assertItemsEqual(full_filenames, produced_filenames)
with self.assertRaises(errors.OutOfRangeError):
sess.run(itr.get_next())
def testFileMiddles(self):
filenames = ['a.txt', 'b.py', 'c.pyc']
self._touchTempFiles(filenames)
filename_placeholder = array_ops.placeholder(dtypes.string, shape=[])
dataset = dataset_ops.Dataset.list_files(filename_placeholder)
with self.test_session() as sess:
itr = dataset.make_initializable_iterator()
next_element = itr.get_next()
sess.run(
itr.initializer,
feed_dict={filename_placeholder: path.join(self.tmp_dir, '*.py*')})
full_filenames = []
produced_filenames = []
for filename in filenames[1:]:
full_filenames.append(
compat.as_bytes(path.join(self.tmp_dir, filename)))
produced_filenames.append(compat.as_bytes(sess.run(next_element)))
self.assertItemsEqual(full_filenames, produced_filenames)
with self.assertRaises(errors.OutOfRangeError):
sess.run(itr.get_next())
def testNoShuffle(self):
filenames = ['a', 'b', 'c']
self._touchTempFiles(filenames)
# Repeat the list twice and ensure that the order is the same each time.
# NOTE(mrry): This depends on an implementation detail of `list_files()`,
# which is that the list of files is captured when the iterator is
# initialized. Otherwise, or if e.g. the iterator were initialized more than
# once, it's possible that the non-determinism of `tf.matching_files()`
# would cause this test to fail. However, it serves as a useful confirmation
# that the `shuffle=False` argument is working as intended.
# TODO(b/73959787): Provide some ordering guarantees so that this test is
# more meaningful.
dataset = dataset_ops.Dataset.list_files(
path.join(self.tmp_dir, '*'), shuffle=False).repeat(2)
with self.test_session() as sess:
itr = dataset.make_one_shot_iterator()
next_element = itr.get_next()
full_filenames = []
produced_filenames = []
for filename in filenames * 2:
full_filenames.append(
compat.as_bytes(path.join(self.tmp_dir, filename)))
produced_filenames.append(compat.as_bytes(sess.run(next_element)))
with self.assertRaises(errors.OutOfRangeError):
sess.run(itr.get_next())
self.assertItemsEqual(full_filenames, produced_filenames)
self.assertEqual(produced_filenames[:len(filenames)],
produced_filenames[len(filenames):])
if __name__ == '__main__':
test.main()
|
wangcy6/storm_app
|
refs/heads/master
|
frame/c++/grpc-master/src/python/grpcio_reflection/grpc_version.py
|
3
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# AUTO-GENERATED FROM `$REPO_ROOT/templates/src/python/grpcio_reflection/grpc_version.py.template`!!!
VERSION='1.8.0.dev0'
|
google-code/android-scripting
|
refs/heads/master
|
python/src/Lib/test/test_future1.py
|
432
|
"""This is a test"""
# Import the name nested_scopes twice to trigger SF bug #407394 (regression).
from __future__ import nested_scopes, nested_scopes
def f(x):
def g(y):
return x + y
return g
result = f(2)(4)
|
yashodhank/erpnext
|
refs/heads/develop
|
erpnext/patches/v6_3/convert_applicable_territory.py
|
42
|
import frappe
def execute():
frappe.reload_doc("stock", "doctype", "price_list_country")
frappe.reload_doc("accounts", "doctype", "shipping_rule_country")
frappe.reload_doctype("Price List")
frappe.reload_doctype("Shipping Rule")
frappe.reload_doctype("shopping_cart", "doctype", "shopping_cart_settings")
# for price list
countries = frappe.db.sql_list("select name from tabCountry")
for doctype in ("Price List", "Shipping Rule"):
for at in frappe.db.sql("""select name, parent, territory from `tabApplicable Territory` where
parenttype = %s """, doctype, as_dict=True):
if at.territory in countries:
parent = frappe.get_doc(doctype, at.parent)
if not parent.countries:
parent.append("countries", {"country": at.territory})
parent.save()
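# Illustrative effect (added commentary, field names as used above): a Price
# List that previously had an Applicable Territory row "India" gains a
# countries child row {"country": "India"}; territories that are not in
# tabCountry are skipped by the membership check above.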
frappe.delete_doc("DocType", "Applicable Territory")
|
wfxiang08/django190
|
refs/heads/master
|
tests/conditional_processing/urls.py
|
360
|
from django.conf.urls import url
from . import views
urlpatterns = [
url('^condition/$', views.index),
url('^condition/last_modified/$', views.last_modified_view1),
url('^condition/last_modified2/$', views.last_modified_view2),
url('^condition/etag/$', views.etag_view1),
url('^condition/etag2/$', views.etag_view2),
]
|
kemalakyol48/python-for-android
|
refs/heads/master
|
python3-alpha/extra_modules/gdata/apps/emailsettings/data.py
|
81
|
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data model classes for the Email Settings API."""
__author__ = 'Claudio Cherubino <ccherubino@google.com>'
import atom.data
import gdata.apps
import gdata.apps_property
import gdata.data
# This is required to work around a naming conflict between the Google
# Spreadsheets API and Python's built-in property function
pyproperty = property
# The apps:property label of the label property
LABEL_NAME = 'label'
# The apps:property from of the filter property
FILTER_FROM_NAME = 'from'
# The apps:property to of the filter property
FILTER_TO_NAME = 'to'
# The apps:property subject of the filter property
FILTER_SUBJECT_NAME = 'subject'
# The apps:property hasTheWord of the filter property
FILTER_HAS_THE_WORD_NAME = 'hasTheWord'
# The apps:property doesNotHaveTheWord of the filter property
FILTER_DOES_NOT_HAVE_THE_WORD_NAME = 'doesNotHaveTheWord'
# The apps:property hasAttachment of the filter property
FILTER_HAS_ATTACHMENTS_NAME = 'hasAttachment'
# The apps:property label of the filter action property
FILTER_LABEL = 'label'
# The apps:property shouldMarkAsRead of the filter action property
FILTER_MARK_AS_READ = 'shouldMarkAsRead'
# The apps:property shouldArchive of the filter action property
FILTER_ARCHIVE = 'shouldArchive'
# The apps:property name of the send-as alias property
SENDAS_ALIAS_NAME = 'name'
# The apps:property address of the send-as alias property
SENDAS_ALIAS_ADDRESS = 'address'
# The apps:property replyTo of the send-as alias property
SENDAS_ALIAS_REPLY_TO = 'replyTo'
# The apps:property makeDefault of the send-as alias property
SENDAS_ALIAS_MAKE_DEFAULT = 'makeDefault'
# The apps:property enable of the webclip property
WEBCLIP_ENABLE = 'enable'
# The apps:property enable of the forwarding property
FORWARDING_ENABLE = 'enable'
# The apps:property forwardTo of the forwarding property
FORWARDING_TO = 'forwardTo'
# The apps:property action of the forwarding property
FORWARDING_ACTION = 'action'
# The apps:property enable of the POP property
POP_ENABLE = 'enable'
# The apps:property enableFor of the POP property
POP_ENABLE_FOR = 'enableFor'
# The apps:property action of the POP property
POP_ACTION = 'action'
# The apps:property enable of the IMAP property
IMAP_ENABLE = 'enable'
# The apps:property enable of the vacation responder property
VACATION_RESPONDER_ENABLE = 'enable'
# The apps:property subject of the vacation responder property
VACATION_RESPONDER_SUBJECT = 'subject'
# The apps:property message of the vacation responder property
VACATION_RESPONDER_MESSAGE = 'message'
# The apps:property startDate of the vacation responder property
VACATION_RESPONDER_STARTDATE = 'startDate'
# The apps:property endDate of the vacation responder property
VACATION_RESPONDER_ENDDATE = 'endDate'
# The apps:property contactsOnly of the vacation responder property
VACATION_RESPONDER_CONTACTS_ONLY = 'contactsOnly'
# The apps:property domainOnly of the vacation responder property
VACATION_RESPONDER_DOMAIN_ONLY = 'domainOnly'
# The apps:property signature of the signature property
SIGNATURE_VALUE = 'signature'
# The apps:property language of the language property
LANGUAGE_TAG = 'language'
# The apps:property pageSize of the general settings property
GENERAL_PAGE_SIZE = 'pageSize'
# The apps:property shortcuts of the general settings property
GENERAL_SHORTCUTS = 'shortcuts'
# The apps:property arrows of the general settings property
GENERAL_ARROWS = 'arrows'
# The apps:property snippets of the general settings property
GENERAL_SNIPPETS = 'snippets'
# The apps:property unicode of the general settings property
GENERAL_UNICODE = 'unicode'
# The apps:property delegationId of the email delegation property
DELEGATION_ID = 'delegationId'
# The apps:property address of the email delegation property
DELEGATION_ADDRESS = 'address'
# The apps:property delegate of the email delegation property
DELEGATION_DELEGATE = 'delegate'
# The apps:property status of the email delegation property
DELEGATION_STATUS = 'status'
class EmailSettingsEntry(gdata.data.GDEntry):
"""Represents an Email Settings entry in object form."""
property = [gdata.apps_property.AppsProperty]
def _GetProperty(self, name):
"""Get the apps:property value with the given name.
Args:
name: string Name of the apps:property value to get.
Returns:
The apps:property value with the given name, or None if the name was
invalid.
"""
value = None
for p in self.property:
if p.name == name:
value = p.value
break
return value
def _SetProperty(self, name, value):
"""Set the apps:property value with the given name to the given value.
Args:
name: string Name of the apps:property value to set.
value: string Value to give the apps:property value with the given name.
"""
found = False
for i in range(len(self.property)):
if self.property[i].name == name:
self.property[i].value = value
found = True
break
if not found:
self.property.append(
gdata.apps_property.AppsProperty(name=name, value=value))
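# Illustrative usage (hypothetical snippet, not from the library docs):
#   entry._SetProperty(LABEL_NAME, 'Work')
#   entry._GetProperty(LABEL_NAME)   # -> 'Work'
# The subclasses below wrap these helpers in named pyproperty accessors.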
def find_edit_link(self):
return self.uri
class EmailSettingsLabel(EmailSettingsEntry):
"""Represents a Label in object form."""
def GetName(self):
"""Get the name of the Label object.
Returns:
The name of this Label object as a string or None.
"""
return self._GetProperty(LABEL_NAME)
def SetName(self, value):
"""Set the name of this Label object.
Args:
value: string The new label name to give this object.
"""
self._SetProperty(LABEL_NAME, value)
name = pyproperty(GetName, SetName)
def __init__(self, uri=None, name=None, *args, **kwargs):
"""Constructs a new EmailSettingsLabel object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
name: string (optional) The name to give this new object.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsLabel, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if name:
self.name = name
class EmailSettingsFilter(EmailSettingsEntry):
"""Represents an Email Settings Filter in object form."""
def GetFrom(self):
"""Get the From value of the Filter object.
Returns:
The From value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_FROM_NAME)
def SetFrom(self, value):
"""Set the From value of this Filter object.
Args:
value: string The new From value to give this object.
"""
self._SetProperty(FILTER_FROM_NAME, value)
from_address = pyproperty(GetFrom, SetFrom)
def GetTo(self):
"""Get the To value of the Filter object.
Returns:
The To value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_TO_NAME)
def SetTo(self, value):
"""Set the To value of this Filter object.
Args:
value: string The new To value to give this object.
"""
self._SetProperty(FILTER_TO_NAME, value)
to_address = pyproperty(GetTo, SetTo)
def GetSubject(self):
"""Get the Subject value of the Filter object.
Returns:
The Subject value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_SUBJECT_NAME)
def SetSubject(self, value):
"""Set the Subject value of this Filter object.
Args:
value: string The new Subject value to give this object.
"""
self._SetProperty(FILTER_SUBJECT_NAME, value)
subject = pyproperty(GetSubject, SetSubject)
def GetHasTheWord(self):
"""Get the HasTheWord value of the Filter object.
Returns:
The HasTheWord value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_HAS_THE_WORD_NAME)
def SetHasTheWord(self, value):
"""Set the HasTheWord value of this Filter object.
Args:
value: string The new HasTheWord value to give this object.
"""
self._SetProperty(FILTER_HAS_THE_WORD_NAME, value)
has_the_word = pyproperty(GetHasTheWord, SetHasTheWord)
def GetDoesNotHaveTheWord(self):
"""Get the DoesNotHaveTheWord value of the Filter object.
Returns:
The DoesNotHaveTheWord value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_DOES_NOT_HAVE_THE_WORD_NAME)
def SetDoesNotHaveTheWord(self, value):
"""Set the DoesNotHaveTheWord value of this Filter object.
Args:
value: string The new DoesNotHaveTheWord value to give this object.
"""
self._SetProperty(FILTER_DOES_NOT_HAVE_THE_WORD_NAME, value)
does_not_have_the_word = pyproperty(GetDoesNotHaveTheWord,
SetDoesNotHaveTheWord)
def GetHasAttachments(self):
"""Get the HasAttachments value of the Filter object.
Returns:
The HasAttachments value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_HAS_ATTACHMENTS_NAME)
def SetHasAttachments(self, value):
"""Set the HasAttachments value of this Filter object.
Args:
value: string The new HasAttachments value to give this object.
"""
self._SetProperty(FILTER_HAS_ATTACHMENTS_NAME, value)
has_attachments = pyproperty(GetHasAttachments,
SetHasAttachments)
def GetLabel(self):
"""Get the Label value of the Filter object.
Returns:
The Label value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_LABEL)
def SetLabel(self, value):
"""Set the Label value of this Filter object.
Args:
value: string The new Label value to give this object.
"""
self._SetProperty(FILTER_LABEL, value)
label = pyproperty(GetLabel, SetLabel)
def GetMarkAsRead(self):
"""Get the MarkAsRead value of the Filter object.
Returns:
The MarkAsRead value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_MARK_AS_READ)
def SetMarkAsRead(self, value):
"""Set the MarkAsRead value of this Filter object.
Args:
value: string The new MarkAsRead value to give this object.
"""
self._SetProperty(FILTER_MARK_AS_READ, value)
mark_as_read = pyproperty(GetMarkAsRead, SetMarkAsRead)
def GetArchive(self):
"""Get the Archive value of the Filter object.
Returns:
The Archive value of this Filter object as a string or None.
"""
return self._GetProperty(FILTER_ARCHIVE)
def SetArchive(self, value):
"""Set the Archive value of this Filter object.
Args:
value: string The new Archive value to give this object.
"""
self._SetProperty(FILTER_ARCHIVE, value)
archive = pyproperty(GetArchive, SetArchive)
def __init__(self, uri=None, from_address=None, to_address=None,
subject=None, has_the_word=None, does_not_have_the_word=None,
has_attachments=None, label=None, mark_as_read=None,
archive=None, *args, **kwargs):
"""Constructs a new EmailSettingsFilter object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
from_address: string (optional) The source email address for the filter.
to_address: string (optional) The destination email address for
the filter.
subject: string (optional) The value the email must have in its
subject to be filtered.
has_the_word: string (optional) The value the email must have in its
subject or body to be filtered.
does_not_have_the_word: string (optional) The value the email cannot
have in its subject or body to be filtered.
has_attachments: Boolean (optional) Whether or not the email must
have an attachment to be filtered.
label: string (optional) The name of the label to apply to
messages matching the filter criteria.
mark_as_read: Boolean (optional) Whether or not to mark messages
matching the filter criteria as read.
archive: Boolean (optional) Whether or not to move messages
matching to Archived state.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsFilter, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if from_address:
self.from_address = from_address
if to_address:
self.to_address = to_address
if subject:
self.subject = subject
if has_the_word:
self.has_the_word = has_the_word
if does_not_have_the_word:
self.does_not_have_the_word = does_not_have_the_word
if has_attachments is not None:
self.has_attachments = str(has_attachments)
if label:
self.label = label
if mark_as_read is not None:
self.mark_as_read = str(mark_as_read)
if archive is not None:
self.archive = str(archive)
class EmailSettingsSendAsAlias(EmailSettingsEntry):
"""Represents an Email Settings send-as Alias in object form."""
def GetName(self):
"""Get the Name of the send-as Alias object.
Returns:
The Name of this send-as Alias object as a string or None.
"""
return self._GetProperty(SENDAS_ALIAS_NAME)
def SetName(self, value):
"""Set the Name of this send-as Alias object.
Args:
value: string The new Name to give this object.
"""
self._SetProperty(SENDAS_ALIAS_NAME, value)
name = pyproperty(GetName, SetName)
def GetAddress(self):
"""Get the Address of the send-as Alias object.
Returns:
The Address of this send-as Alias object as a string or None.
"""
return self._GetProperty(SENDAS_ALIAS_ADDRESS)
def SetAddress(self, value):
"""Set the Address of this send-as Alias object.
Args:
value: string The new Address to give this object.
"""
self._SetProperty(SENDAS_ALIAS_ADDRESS, value)
address = pyproperty(GetAddress, SetAddress)
def GetReplyTo(self):
"""Get the ReplyTo address of the send-as Alias object.
Returns:
The ReplyTo address of this send-as Alias object as a string or None.
"""
return self._GetProperty(SENDAS_ALIAS_REPLY_TO)
def SetReplyTo(self, value):
"""Set the ReplyTo address of this send-as Alias object.
Args:
value: string The new ReplyTo address to give this object.
"""
self._SetProperty(SENDAS_ALIAS_REPLY_TO, value)
reply_to = pyproperty(GetReplyTo, SetReplyTo)
def GetMakeDefault(self):
"""Get the MakeDefault value of the send-as Alias object.
Returns:
The MakeDefault value of this send-as Alias object as a string or None.
"""
return self._GetProperty(SENDAS_ALIAS_MAKE_DEFAULT)
def SetMakeDefault(self, value):
"""Set the MakeDefault value of this send-as Alias object.
Args:
value: string The new MakeDefault value to give this object.
"""
self._SetProperty(SENDAS_ALIAS_MAKE_DEFAULT, value)
make_default = pyproperty(GetMakeDefault, SetMakeDefault)
def __init__(self, uri=None, name=None, address=None, reply_to=None,
make_default=None, *args, **kwargs):
"""Constructs a new EmailSettingsSendAsAlias object with the given
arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
name: string (optional) The name that will appear in the "From" field
for this user.
address: string (optional) The email address that appears as the
origination address for emails sent by this user.
reply_to: string (optional) The address to be used as the reply-to
address in email sent using the alias.
make_default: Boolean (optional) Whether or not this alias should
become the default alias for this user.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsSendAsAlias, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if name:
self.name = name
if address:
self.address = address
if reply_to:
self.reply_to = reply_to
if make_default is not None:
self.make_default = str(make_default)
class EmailSettingsWebClip(EmailSettingsEntry):
"""Represents a WebClip in object form."""
def GetEnable(self):
"""Get the Enable value of the WebClip object.
Returns:
The Enable value of this WebClip object as a string or None.
"""
return self._GetProperty(WEBCLIP_ENABLE)
def SetEnable(self, value):
"""Set the Enable value of this WebClip object.
Args:
value: string The new Enable value to give this object.
"""
self._SetProperty(WEBCLIP_ENABLE, value)
enable = pyproperty(GetEnable, SetEnable)
def __init__(self, uri=None, enable=None, *args, **kwargs):
"""Constructs a new EmailSettingsWebClip object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
enable: Boolean (optional) Whether to enable showing Web clips.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsWebClip, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if enable is not None:
self.enable = str(enable)
class EmailSettingsForwarding(EmailSettingsEntry):
"""Represents Forwarding settings in object form."""
def GetEnable(self):
"""Get the Enable value of the Forwarding object.
Returns:
The Enable value of this Forwarding object as a string or None.
"""
return self._GetProperty(FORWARDING_ENABLE)
def SetEnable(self, value):
"""Set the Enable value of this Forwarding object.
Args:
value: string The new Enable value to give this object.
"""
self._SetProperty(FORWARDING_ENABLE, value)
enable = pyproperty(GetEnable, SetEnable)
def GetForwardTo(self):
"""Get the ForwardTo value of the Forwarding object.
Returns:
The ForwardTo value of this Forwarding object as a string or None.
"""
return self._GetProperty(FORWARDING_TO)
def SetForwardTo(self, value):
"""Set the ForwardTo value of this Forwarding object.
Args:
value: string The new ForwardTo value to give this object.
"""
self._SetProperty(FORWARDING_TO, value)
forward_to = pyproperty(GetForwardTo, SetForwardTo)
def GetAction(self):
"""Get the Action value of the Forwarding object.
Returns:
The Action value of this Forwarding object as a string or None.
"""
return self._GetProperty(FORWARDING_ACTION)
def SetAction(self, value):
"""Set the Action value of this Forwarding object.
Args:
value: string The new Action value to give this object.
"""
self._SetProperty(FORWARDING_ACTION, value)
action = pyproperty(GetAction, SetAction)
def __init__(self, uri=None, enable=None, forward_to=None, action=None,
*args, **kwargs):
"""Constructs a new EmailSettingsForwarding object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
enable: Boolean (optional) Whether to enable incoming email forwarding.
forward_to: string (optional) The address email will be forwarded to.
action: string (optional) The action to perform after forwarding an
email ("KEEP", "ARCHIVE", "DELETE").
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsForwarding, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if enable is not None:
self.enable = str(enable)
if forward_to:
self.forward_to = forward_to
if action:
self.action = action
class EmailSettingsPop(EmailSettingsEntry):
"""Represents POP settings in object form."""
def GetEnable(self):
"""Get the Enable value of the POP object.
Returns:
The Enable value of this POP object as a string or None.
"""
return self._GetProperty(POP_ENABLE)
def SetEnable(self, value):
"""Set the Enable value of this POP object.
Args:
value: string The new Enable value to give this object.
"""
self._SetProperty(POP_ENABLE, value)
enable = pyproperty(GetEnable, SetEnable)
def GetEnableFor(self):
"""Get the EnableFor value of the POP object.
Returns:
The EnableFor value of this POP object as a string or None.
"""
return self._GetProperty(POP_ENABLE_FOR)
def SetEnableFor(self, value):
"""Set the EnableFor value of this POP object.
Args:
value: string The new EnableFor value to give this object.
"""
self._SetProperty(POP_ENABLE_FOR, value)
enable_for = pyproperty(GetEnableFor, SetEnableFor)
def GetPopAction(self):
"""Get the Action value of the POP object.
Returns:
The Action value of this POP object as a string or None.
"""
return self._GetProperty(POP_ACTION)
def SetPopAction(self, value):
"""Set the Action value of this POP object.
Args:
value: string The new Action value to give this object.
"""
self._SetProperty(POP_ACTION, value)
action = pyproperty(GetPopAction, SetPopAction)
def __init__(self, uri=None, enable=None, enable_for=None,
action=None, *args, **kwargs):
"""Constructs a new EmailSettingsPOP object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
enable: Boolean (optional) Whether to enable incoming POP3 access.
enable_for: string (optional) Whether to enable POP3 for all mail
("ALL_MAIL"), or mail from now on ("MAIL_FROM_NOW_ON").
action: string (optional) What Google Mail should do with its copy
of the email after it is retrieved using POP
("KEEP", "ARCHIVE", or "DELETE").
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsPop, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if enable is not None:
self.enable = str(enable)
if enable_for:
self.enable_for = enable_for
if action:
self.action = action
class EmailSettingsImap(EmailSettingsEntry):
"""Represents IMAP settings in object form."""
def GetEnable(self):
"""Get the Enable value of the IMAP object.
Returns:
The Enable value of this IMAP object as a string or None.
"""
return self._GetProperty(IMAP_ENABLE)
def SetEnable(self, value):
"""Set the Enable value of this IMAP object.
Args:
value: string The new Enable value to give this object.
"""
self._SetProperty(IMAP_ENABLE, value)
enable = pyproperty(GetEnable, SetEnable)
def __init__(self, uri=None, enable=None, *args, **kwargs):
"""Constructs a new EmailSettingsImap object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
enable: Boolean (optional) Whether to enable IMAP access.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsImap, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if enable is not None:
self.enable = str(enable)
class EmailSettingsVacationResponder(EmailSettingsEntry):
"""Represents Vacation Responder settings in object form."""
def GetEnable(self):
"""Get the Enable value of the Vacation Responder object.
Returns:
The Enable value of this Vacation Responder object as a string or None.
"""
return self._GetProperty(VACATION_RESPONDER_ENABLE)
def SetEnable(self, value):
"""Set the Enable value of this Vacation Responder object.
Args:
value: string The new Enable value to give this object.
"""
self._SetProperty(VACATION_RESPONDER_ENABLE, value)
enable = pyproperty(GetEnable, SetEnable)
def GetSubject(self):
"""Get the Subject value of the Vacation Responder object.
Returns:
The Subject value of this Vacation Responder object as a string or None.
"""
return self._GetProperty(VACATION_RESPONDER_SUBJECT)
def SetSubject(self, value):
"""Set the Subject value of this Vacation Responder object.
Args:
value: string The new Subject value to give this object.
"""
self._SetProperty(VACATION_RESPONDER_SUBJECT, value)
subject = pyproperty(GetSubject, SetSubject)
def GetMessage(self):
"""Get the Message value of the Vacation Responder object.
Returns:
The Message value of this Vacation Responder object as a string or None.
"""
return self._GetProperty(VACATION_RESPONDER_MESSAGE)
def SetMessage(self, value):
"""Set the Message value of this Vacation Responder object.
Args:
value: string The new Message value to give this object.
"""
self._SetProperty(VACATION_RESPONDER_MESSAGE, value)
message = pyproperty(GetMessage, SetMessage)
def GetStartDate(self):
"""Get the StartDate value of the Vacation Responder object.
Returns:
The StartDate value of this Vacation Responder object as a
string(YYYY-MM-DD) or None.
"""
return self._GetProperty(VACATION_RESPONDER_STARTDATE)
def SetStartDate(self, value):
"""Set the StartDate value of this Vacation Responder object.
Args:
value: string The new StartDate value to give this object.
"""
self._SetProperty(VACATION_RESPONDER_STARTDATE, value)
start_date = pyproperty(GetStartDate, SetStartDate)
def GetEndDate(self):
"""Get the EndDate value of the Vacation Responder object.
Returns:
The EndDate value of this Vacation Responder object as a
string(YYYY-MM-DD) or None.
"""
return self._GetProperty(VACATION_RESPONDER_ENDDATE)
def SetEndDate(self, value):
"""Set the EndDate value of this Vacation Responder object.
Args:
value: string The new EndDate value to give this object.
"""
self._SetProperty(VACATION_RESPONDER_ENDDATE, value)
end_date = pyproperty(GetEndDate, SetEndDate)
def GetContactsOnly(self):
"""Get the ContactsOnly value of the Vacation Responder object.
Returns:
The ContactsOnly value of this Vacation Responder object as a
string or None.
"""
return self._GetProperty(VACATION_RESPONDER_CONTACTS_ONLY)
def SetContactsOnly(self, value):
"""Set the ContactsOnly value of this Vacation Responder object.
Args:
value: string The new ContactsOnly value to give this object.
"""
self._SetProperty(VACATION_RESPONDER_CONTACTS_ONLY, value)
contacts_only = pyproperty(GetContactsOnly, SetContactsOnly)
def GetDomainOnly(self):
"""Get the DomainOnly value of the Vacation Responder object.
Returns:
The DomainOnly value of this Vacation Responder object as a
string or None.
"""
return self._GetProperty(VACATION_RESPONDER_DOMAIN_ONLY)
def SetDomainOnly(self, value):
"""Set the DomainOnly value of this Vacation Responder object.
Args:
value: string The new DomainOnly value to give this object.
"""
self._SetProperty(VACATION_RESPONDER_DOMAIN_ONLY, value)
domain_only = pyproperty(GetDomainOnly, SetDomainOnly)
def __init__(self, uri=None, enable=None, subject=None,
message=None, start_date=None, end_date=None, contacts_only=None,
domain_only=None, *args, **kwargs):
"""Constructs a new EmailSettingsVacationResponder object with the
given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
enable: Boolean (optional) Whether to enable the vacation responder.
subject: string (optional) The subject line of the vacation responder
autoresponse.
message: string (optional) The message body of the vacation responder
autoresponse.
start_date: string (optional) The start date of the vacation responder
autoresponse.
end_date: string (optional) The end date of the vacation responder
autoresponse.
contacts_only: Boolean (optional) Whether to only send autoresponses
to known contacts.
domain_only: Boolean (optional) Whether to only send autoresponses
to users in the same primary domain.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsVacationResponder, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if enable is not None:
self.enable = str(enable)
if subject:
self.subject = subject
if message:
self.message = message
if start_date:
self.start_date = start_date
if end_date:
self.end_date = end_date
if contacts_only is not None:
self.contacts_only = str(contacts_only)
if domain_only is not None:
self.domain_only = str(domain_only)
class EmailSettingsSignature(EmailSettingsEntry):
"""Represents a Signature in object form."""
def GetValue(self):
"""Get the value of the Signature object.
Returns:
The value of this Signature object as a string or None.
"""
value = self._GetProperty(SIGNATURE_VALUE)
if value == ' ': # hack to support empty signature
return ''
else:
return value
def SetValue(self, value):
"""Set the name of this Signature object.
Args:
value: string The new signature value to give this object.
"""
if value == '': # hack to support empty signature
value = ' '
self._SetProperty(SIGNATURE_VALUE, value)
signature_value = pyproperty(GetValue, SetValue)
def __init__(self, uri=None, signature=None, *args, **kwargs):
"""Constructs a new EmailSettingsSignature object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
signature: string (optional) The signature to be appended to outgoing
messages.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsSignature, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if signature is not None:
self.signature_value = signature
class EmailSettingsLanguage(EmailSettingsEntry):
"""Represents Language Settings in object form."""
def GetLanguage(self):
"""Get the tag of the Language object.
Returns:
The tag of this Language object as a string or None.
"""
return self._GetProperty(LANGUAGE_TAG)
def SetLanguage(self, value):
"""Set the tag of this Language object.
Args:
value: string The new tag value to give this object.
"""
self._SetProperty(LANGUAGE_TAG, value)
language_tag = pyproperty(GetLanguage, SetLanguage)
def __init__(self, uri=None, language=None, *args, **kwargs):
"""Constructs a new EmailSettingsLanguage object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
language: string (optional) The language tag for Google Mail's display
language.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsLanguage, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if language:
self.language_tag = language
class EmailSettingsGeneral(EmailSettingsEntry):
"""Represents General Settings in object form."""
def GetPageSize(self):
"""Get the Page Size value of the General Settings object.
Returns:
The Page Size value of this General Settings object as a string or None.
"""
return self._GetProperty(GENERAL_PAGE_SIZE)
def SetPageSize(self, value):
"""Set the Page Size value of this General Settings object.
Args:
value: string The new Page Size value to give this object.
"""
self._SetProperty(GENERAL_PAGE_SIZE, value)
page_size = pyproperty(GetPageSize, SetPageSize)
def GetShortcuts(self):
"""Get the Shortcuts value of the General Settings object.
Returns:
The Shortcuts value of this General Settings object as a string or None.
"""
return self._GetProperty(GENERAL_SHORTCUTS)
def SetShortcuts(self, value):
"""Set the Shortcuts value of this General Settings object.
Args:
value: string The new Shortcuts value to give this object.
"""
self._SetProperty(GENERAL_SHORTCUTS, value)
shortcuts = pyproperty(GetShortcuts, SetShortcuts)
def GetArrows(self):
"""Get the Arrows value of the General Settings object.
Returns:
The Arrows value of this General Settings object as a string or None.
"""
return self._GetProperty(GENERAL_ARROWS)
def SetArrows(self, value):
"""Set the Arrows value of this General Settings object.
Args:
value: string The new Arrows value to give this object.
"""
self._SetProperty(GENERAL_ARROWS, value)
arrows = pyproperty(GetArrows, SetArrows)
def GetSnippets(self):
"""Get the Snippets value of the General Settings object.
Returns:
The Snippets value of this General Settings object as a string or None.
"""
return self._GetProperty(GENERAL_SNIPPETS)
def SetSnippets(self, value):
"""Set the Snippets value of this General Settings object.
Args:
value: string The new Snippets value to give this object.
"""
self._SetProperty(GENERAL_SNIPPETS, value)
snippets = pyproperty(GetSnippets, SetSnippets)
def GetUnicode(self):
"""Get the Unicode value of the General Settings object.
Returns:
The Unicode value of this General Settings object as a string or None.
"""
return self._GetProperty(GENERAL_UNICODE)
def SetUnicode(self, value):
"""Set the Unicode value of this General Settings object.
Args:
value: string The new Unicode value to give this object.
"""
self._SetProperty(GENERAL_UNICODE, value)
use_unicode = pyproperty(GetUnicode, SetUnicode)
def __init__(self, uri=None, page_size=None, shortcuts=None,
arrows=None, snippets=None, use_unicode=None, *args, **kwargs):
"""Constructs a new EmailSettingsGeneral object with the given arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
page_size: int (optional) The number of conversations to be shown per page.
shortcuts: Boolean (optional) Whether to enable keyboard shortcuts.
arrows: Boolean (optional) Whether to display arrow-shaped personal
indicators next to email sent specifically to the user.
snippets: Boolean (optional) Whether to display snippets of the messages
in the inbox and when searching.
use_unicode: Boolean (optional) Whether to use UTF-8 (unicode) encoding
for all outgoing messages.
args: The other parameters to pass to gdata.entry.GDEntry constructor.
kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
"""
super(EmailSettingsGeneral, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if page_size is not None:
self.page_size = str(page_size)
if shortcuts is not None:
self.shortcuts = str(shortcuts)
if arrows is not None:
self.arrows = str(arrows)
if snippets is not None:
self.snippets = str(snippets)
if use_unicode is not None:
self.use_unicode = str(use_unicode)
class EmailSettingsDelegation(EmailSettingsEntry):
"""Represents an Email Settings delegation entry in object form."""
def GetAddress(self):
"""Get the email address of the delegated user.
Returns:
The email address of the delegated user as a string or None.
"""
return self._GetProperty(DELEGATION_ADDRESS)
def SetAddress(self, value):
"""Set the email address of the delegated user.
Args:
value: string The email address of another user on the same domain.
"""
self._SetProperty(DELEGATION_ADDRESS, value)
address = pyproperty(GetAddress, SetAddress)
def __init__(self, uri=None, address=None, *args, **kwargs):
"""Constructs a new EmailSettingsDelegation object with the given
arguments.
Args:
uri: string (optional) The uri of this object for HTTP requests.
address: string The email address of the delegated user.
"""
super(EmailSettingsDelegation, self).__init__(*args, **kwargs)
if uri:
self.uri = uri
if address:
self.address = address
class EmailSettingsLabelFeed(gdata.data.GDFeed):
"""Main feed containing a list of labels."""
entry = [EmailSettingsLabel]
class EmailSettingsSendAsAliasFeed(gdata.data.GDFeed):
"""Main feed containing a list of send-as aliases."""
entry = [EmailSettingsSendAsAlias]
class EmailSettingsDelegationFeed(gdata.data.GDFeed):
"""Main feed containing a list of email delegation entries."""
entry = [EmailSettingsDelegation]
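# Illustrative usage sketch: a minimal, hypothetical example of building request
# bodies with the entry classes defined above, assuming the gdata client
# libraries are installed. The addresses and settings below are made up.
if __name__ == '__main__':
    forwarding = EmailSettingsForwarding(
        enable=True, forward_to='archive@example.com', action='KEEP')
    pop = EmailSettingsPop(
        enable=True, enable_for='MAIL_FROM_NOW_ON', action='ARCHIVE')
    # Each entry serializes to the Atom XML body expected by the Email Settings
    # API; the properties read back as strings.
    print(forwarding.forward_to)
    print(pop.enable_for)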
|
emk/pyjamas
|
refs/heads/master
|
pygtkweb/demos/053-actions.py
|
13
|
#!/usr/bin/env python
import pygtk
pygtk.require('2.0')
import gtk
class ActionExample:
def __init__(self):
# Create the toplevel window
window = gtk.Window()
window.connect('destroy', lambda w: gtk.main_quit())
window.set_size_request(300, -1)
vbox = gtk.VBox()
window.add(vbox)
# Create an accelerator group
accelgroup = gtk.AccelGroup()
# Add the accelerator group to the toplevel window
window.add_accel_group(accelgroup)
# Create an ActionGroup named ActionExample
actiongroup = gtk.ActionGroup('ActionExample')
# Create an action for quitting the program using a stock item
quitaction = gtk.Action('Quit', '_Quit me!', 'Quit the Program',
gtk.STOCK_QUIT)
quitaction.set_property('short-label', '_Quit')
# Connect a callback to the action
quitaction.connect('activate', self.quit_cb)
# Add the action to the actiongroup with an accelerator
# None means use the stock item accelerator
actiongroup.add_action_with_accel(quitaction, None)
# Have the action use accelgroup
quitaction.set_accel_group(accelgroup)
# Connect the accelerator to the action
quitaction.connect_accelerator()
# Create a ToggleAction, etc.
muteaction = gtk.ToggleAction('Mute', '_Mute', 'Mute the volume', None)
actiongroup.add_action_with_accel(muteaction, '<Control>m')
muteaction.set_accel_group(accelgroup)
muteaction.connect_accelerator()
muteaction.connect('toggled', self.mute_cb)
# Create some RadioActions
amaction = gtk.RadioAction('AM', '_AM', 'AM Radio', None, 0)
actiongroup.add_action_with_accel(amaction, '<Control>a')
amaction.set_accel_group(accelgroup)
amaction.connect_accelerator()
amaction.set_active(True)
fmaction = gtk.RadioAction('FM', '_FM', 'FM Radio', None, 1)
actiongroup.add_action_with_accel(fmaction, '<Control>f')
fmaction.set_accel_group(accelgroup)
fmaction.connect_accelerator()
fmaction.set_group(amaction)
ssbaction = gtk.RadioAction('SSB', 'SS_B', 'Single Sideband Radio',
None, 2)
actiongroup.add_action_with_accel(ssbaction, '<Control>s')
ssbaction.set_accel_group(accelgroup)
ssbaction.connect_accelerator()
ssbaction.connect('changed', self.radioband_cb)
ssbaction.set_group(amaction)
# Create a MenuBar
menubar = gtk.MenuBar()
vbox.pack_start(menubar, False)
# Create the File Action and MenuItem
file_action = gtk.Action('File', '_File', None, None)
actiongroup.add_action(file_action)
file_menuitem = file_action.create_menu_item()
menubar.append(file_menuitem)
# Create the File Menu
file_menu = gtk.Menu()
file_menuitem.set_submenu(file_menu)
# Create a proxy MenuItem
quititem = quitaction.create_menu_item()
file_menu.append(quititem)
# Create and populate the Sound menu with a Mute menuitem
sound_action = gtk.Action('Sound', '_Sound', None, None)
actiongroup.add_action(sound_action)
sound_menuitem = sound_action.create_menu_item()
menubar.append(sound_menuitem)
sound_menu = gtk.Menu()
sound_menuitem.set_submenu(sound_menu)
muteitem = muteaction.create_menu_item()
sound_menu.append(muteitem)
# Create and populate the RadioBand menu
radioband_action = gtk.Action('RadioBand', '_Radio Band', None, None)
actiongroup.add_action(radioband_action)
radioband_menuitem = radioband_action.create_menu_item()
menubar.append(radioband_menuitem)
radioband_menu = gtk.Menu()
radioband_menuitem.set_submenu(radioband_menu)
amitem = amaction.create_menu_item()
radioband_menu.append(amitem)
fmitem = fmaction.create_menu_item()
radioband_menu.append(fmitem)
ssbitem = ssbaction.create_menu_item()
radioband_menu.append(ssbitem)
# Create a Toolbar
toolbar = gtk.Toolbar()
vbox.pack_start(toolbar, False)
# Create a proxy ToolItem
quittoolitem = quitaction.create_tool_item()
toolbar.insert(quittoolitem, 0)
# Create a separator
separator = gtk.SeparatorToolItem()
toolbar.insert(separator, -1)
# Create toggle and radio tool items and add to toolbar
mutetoolitem = muteaction.create_tool_item()
toolbar.insert(mutetoolitem, -1)
separator = gtk.SeparatorToolItem()
toolbar.insert(separator, -1)
amtoolitem = amaction.create_tool_item()
toolbar.insert(amtoolitem, -1)
fmtoolitem = fmaction.create_tool_item()
toolbar.insert(fmtoolitem, -1)
ssbtoolitem = ssbaction.create_tool_item()
toolbar.insert(ssbtoolitem, -1)
# Create and pack two Labels
label = gtk.Label('Sound is not muted')
vbox.pack_start(label)
self.mutelabel = label
label = gtk.Label('Radio band is AM')
vbox.pack_start(label)
self.bandlabel = label
# Create a button to use as another proxy widget
quitbutton = gtk.Button()
# add it to the window
vbox.pack_start(quitbutton, False)
# Connect the action to its proxy widget
quitaction.connect_proxy(quitbutton)
# Have to set tooltip after toolitems are added to toolbar
for action in actiongroup.list_actions():
action.set_property('tooltip', action.get_property('tooltip'))
tooltips = gtk.Tooltips()
tooltips.set_tip(quitbutton, quitaction.get_property('tooltip'))
window.show_all()
return
def mute_cb(self, action):
# action has not toggled yet
text = ('muted', 'not muted')[action.get_active()==False]
self.mutelabel.set_text('Sound is %s' % text)
return
def radioband_cb(self, action, current):
text = ('AM', 'FM', 'SSB')[action.get_current_value()]
self.bandlabel.set_text('Radio band is %s' % text)
return
def quit_cb(self, b):
print 'Quitting program'
gtk.main_quit()
if __name__ == '__main__':
ba = ActionExample()
gtk.main()
|
BT-fgarbely/odoo
|
refs/heads/8.0
|
addons/mass_mailing/models/mass_mailing_report.py
|
364
|
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
from openerp import tools
class MassMailingReport(osv.Model):
_name = 'mail.statistics.report'
_auto = False
_description = 'Mass Mailing Statistics'
_columns = {
'scheduled_date': fields.datetime('Scheduled Date', readonly=True),
'name': fields.char('Mass Mail', readonly=True),
'campaign': fields.char('Mass Mail Campaign', readonly=True),
'sent': fields.integer('Sent', readonly=True),
'delivered': fields.integer('Delivered', readonly=True),
'opened': fields.integer('Opened', readonly=True),
'bounced': fields.integer('Bounced', readonly=True),
'replied': fields.integer('Replied', readonly=True),
'state': fields.selection(
[('draft', 'Draft'), ('test', 'Tested'), ('done', 'Sent')],
string='Status', readonly=True,
),
'email_from': fields.char('From', readonly=True),
}
def init(self, cr):
"""Mass Mail Statistical Report: based on mail.mail.statistics that models the various
statistics collected for each mailing, and mail.mass_mailing model that models the
various mailing performed. """
tools.drop_view_if_exists(cr, 'mail_statistics_report')
cr.execute("""
CREATE OR REPLACE VIEW mail_statistics_report AS (
SELECT
min(ms.id) as id,
ms.scheduled as scheduled_date,
mm.name as name,
mc.name as campaign,
count(ms.bounced) as bounced,
count(ms.sent) as sent,
(count(ms.sent) - count(ms.bounced)) as delivered,
count(ms.opened) as opened,
count(ms.replied) as replied,
mm.state,
mm.email_from
FROM
mail_mail_statistics as ms
left join mail_mass_mailing as mm ON (ms.mass_mailing_id=mm.id)
left join mail_mass_mailing_campaign as mc ON (ms.mass_mailing_campaign_id=mc.id)
GROUP BY ms.scheduled, mm.name, mc.name, mm.state, mm.email_from
)""")
|
DavideCanton/Python3
|
refs/heads/master
|
provaqt/prova.py
|
1
|
__author__ = 'davide'
import sys
from PyQt4.QtGui import QMainWindow, QApplication, QMessageBox
from finestra import Ui_finestra
class ProvaFinestra(QMainWindow, Ui_finestra):
def __init__(self):
QMainWindow.__init__(self)
# Set up the user interface from Designer.
self.setupUi(self)
# Connect up the buttons.
self.saluta.clicked.connect(self.saluta_fun)
def saluta_fun(self):
name = self.name_edit.toPlainText()
QMessageBox.information(self, "Saluto", "Ciao, {}!".format(name))
if __name__ == "__main__":
app = QApplication(sys.argv)
window = ProvaFinestra()
window.show()
sys.exit(app.exec_())
|
st-tu-dresden/inloop
|
refs/heads/main
|
inloop/solutions/migrations/0003_add_scoped_id.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-02 14:35
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tasks', '0003_task_timestamps'),
('solutions', '0002_remove_filename'),
]
operations = [
migrations.AddField(
model_name='solution',
name='scoped_id',
field=models.PositiveIntegerField(null=True, editable=False, help_text='Solution id unique for task and author'),
preserve_default=False,
),
]
|
RicardoJohann/um
|
refs/heads/master
|
erpnext/setup/doctype/country/country.py
|
41
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Country(Document):
pass
|
3nids/QGIS
|
refs/heads/master
|
tests/src/python/test_qgslayoutpicture.py
|
30
|
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsLayoutPicture.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2017 by Nyall Dawson'
__date__ = '23/10/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
import os
import socketserver
import threading
import http.server
from qgis.PyQt.QtCore import QRectF, QDir
from qgis.PyQt.QtTest import QSignalSpy
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsLayoutItemPicture,
QgsLayout,
QgsLayoutItemMap,
QgsRectangle,
QgsCoordinateReferenceSystem,
QgsProject,
QgsReadWriteContext
)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
from qgslayoutchecker import QgsLayoutChecker
from test_qgslayoutitem import LayoutItemTestCase
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsLayoutPicture(unittest.TestCase, LayoutItemTestCase):
@classmethod
def setUpClass(cls):
cls.item_class = QgsLayoutItemPicture
# Bring up a simple HTTP server, for remote picture tests
os.chdir(unitTestDataPath() + '')
handler = http.server.SimpleHTTPRequestHandler
cls.httpd = socketserver.TCPServer(('localhost', 0), handler)
cls.port = cls.httpd.server_address[1]
cls.httpd_thread = threading.Thread(target=cls.httpd.serve_forever)
cls.httpd_thread.setDaemon(True)
cls.httpd_thread.start()
def __init__(self, methodName):
"""Run once on class initialization."""
unittest.TestCase.__init__(self, methodName)
TEST_DATA_DIR = unitTestDataPath()
self.pngImage = TEST_DATA_DIR + "/sample_image.png"
self.svgImage = TEST_DATA_DIR + "/sample_svg.svg"
# create composition
self.layout = QgsLayout(QgsProject.instance())
self.layout.initializeDefaults()
self.picture = QgsLayoutItemPicture(self.layout)
self.picture.setPicturePath(self.pngImage)
self.picture.attemptSetSceneRect(QRectF(70, 70, 100, 100))
self.picture.setFrameEnabled(True)
self.layout.addLayoutItem(self.picture)
def setUp(self):
self.report = "<h1>Python QgsLayoutItemPicture Tests</h1>\n"
def tearDown(self):
report_file_path = "%s/qgistest.html" % QDir.tempPath()
with open(report_file_path, 'a') as report_file:
report_file.write(self.report)
def testMode(self):
pic = QgsLayoutItemPicture(self.layout)
# should default to unknown
self.assertEquals(pic.mode(), QgsLayoutItemPicture.FormatUnknown)
spy = QSignalSpy(pic.changed)
pic.setMode(QgsLayoutItemPicture.FormatRaster)
self.assertEquals(pic.mode(), QgsLayoutItemPicture.FormatRaster)
self.assertEqual(len(spy), 1)
pic.setMode(QgsLayoutItemPicture.FormatRaster)
self.assertEqual(len(spy), 1)
pic.setMode(QgsLayoutItemPicture.FormatSVG)
self.assertEqual(len(spy), 3) # ideally only 2!
self.assertEquals(pic.mode(), QgsLayoutItemPicture.FormatSVG)
# set picture path without explicit format
pic.setPicturePath(self.pngImage)
self.assertEquals(pic.mode(), QgsLayoutItemPicture.FormatRaster)
pic.setPicturePath(self.svgImage)
self.assertEquals(pic.mode(), QgsLayoutItemPicture.FormatSVG)
# forced format
pic.setPicturePath(self.pngImage, QgsLayoutItemPicture.FormatSVG)
self.assertEquals(pic.mode(), QgsLayoutItemPicture.FormatSVG)
pic.setPicturePath(self.pngImage, QgsLayoutItemPicture.FormatRaster)
self.assertEquals(pic.mode(), QgsLayoutItemPicture.FormatRaster)
pic.setPicturePath(self.svgImage, QgsLayoutItemPicture.FormatSVG)
self.assertEquals(pic.mode(), QgsLayoutItemPicture.FormatSVG)
pic.setPicturePath(self.svgImage, QgsLayoutItemPicture.FormatRaster)
self.assertEquals(pic.mode(), QgsLayoutItemPicture.FormatRaster)
def testReadWriteXml(self):
pr = QgsProject()
l = QgsLayout(pr)
pic = QgsLayoutItemPicture(l)
# mode should be saved/restored
pic.setMode(QgsLayoutItemPicture.FormatRaster)
# save original item to xml
doc = QDomDocument("testdoc")
elem = doc.createElement("test")
self.assertTrue(pic.writeXml(elem, doc, QgsReadWriteContext()))
pic2 = QgsLayoutItemPicture(l)
self.assertTrue(pic2.readXml(elem.firstChildElement(), doc, QgsReadWriteContext()))
self.assertEqual(pic2.mode(), QgsLayoutItemPicture.FormatRaster)
pic.setMode(QgsLayoutItemPicture.FormatSVG)
elem = doc.createElement("test2")
self.assertTrue(pic.writeXml(elem, doc, QgsReadWriteContext()))
pic3 = QgsLayoutItemPicture(l)
self.assertTrue(pic3.readXml(elem.firstChildElement(), doc, QgsReadWriteContext()))
self.assertEqual(pic3.mode(), QgsLayoutItemPicture.FormatSVG)
def testResizeZoom(self):
"""Test picture resize zoom mode."""
self.picture.setResizeMode(QgsLayoutItemPicture.Zoom)
checker = QgsLayoutChecker('composerpicture_resize_zoom', self.layout)
checker.setControlPathPrefix("composer_picture")
testResult, message = checker.testLayout()
self.report += checker.report()
assert testResult, message
def testRemoteImage(self):
"""Test fetching remote picture."""
self.picture.setPicturePath(
'http://localhost:' + str(TestQgsLayoutPicture.port) + '/qgis_local_server/logo.png')
checker = QgsLayoutChecker('composerpicture_remote', self.layout)
checker.setControlPathPrefix("composer_picture")
testResult, message = checker.testLayout()
self.report += checker.report()
self.picture.setPicturePath(self.pngImage)
assert testResult, message
def testNorthArrowWithMapItemRotation(self):
"""Test picture rotation when map item is also rotated"""
layout = QgsLayout(QgsProject.instance())
map = QgsLayoutItemMap(layout)
map.setExtent(QgsRectangle(0, -256, 256, 0))
layout.addLayoutItem(map)
picture = QgsLayoutItemPicture(layout)
layout.addLayoutItem(picture)
picture.setLinkedMap(map)
self.assertEqual(picture.linkedMap(), map)
picture.setNorthMode(QgsLayoutItemPicture.GridNorth)
map.setItemRotation(45)
self.assertEqual(picture.pictureRotation(), 45)
map.setMapRotation(-34)
self.assertEqual(picture.pictureRotation(), 11)
# add an offset
picture.setNorthOffset(-10)
self.assertEqual(picture.pictureRotation(), 1)
map.setItemRotation(55)
self.assertEqual(picture.pictureRotation(), 11)
def testGridNorth(self):
"""Test syncing picture to grid north"""
layout = QgsLayout(QgsProject.instance())
map = QgsLayoutItemMap(layout)
map.setExtent(QgsRectangle(0, -256, 256, 0))
layout.addLayoutItem(map)
picture = QgsLayoutItemPicture(layout)
layout.addLayoutItem(picture)
picture.setLinkedMap(map)
self.assertEqual(picture.linkedMap(), map)
picture.setNorthMode(QgsLayoutItemPicture.GridNorth)
map.setMapRotation(45)
self.assertEqual(picture.pictureRotation(), 45)
# add an offset
picture.setNorthOffset(-10)
self.assertEqual(picture.pictureRotation(), 35)
def testTrueNorth(self):
"""Test syncing picture to true north"""
layout = QgsLayout(QgsProject.instance())
map = QgsLayoutItemMap(layout)
map.attemptSetSceneRect(QRectF(0, 0, 10, 10))
map.setCrs(QgsCoordinateReferenceSystem.fromEpsgId(3575))
map.setExtent(QgsRectangle(-2126029.962, -2200807.749, -119078.102, -757031.156))
layout.addLayoutItem(map)
picture = QgsLayoutItemPicture(layout)
layout.addLayoutItem(picture)
picture.setLinkedMap(map)
self.assertEqual(picture.linkedMap(), map)
picture.setNorthMode(QgsLayoutItemPicture.TrueNorth)
self.assertAlmostEqual(picture.pictureRotation(), 37.20, 1)
# shift map
map.setExtent(QgsRectangle(2120672.293, -3056394.691, 2481640.226, -2796718.780))
self.assertAlmostEqual(picture.pictureRotation(), -38.18, 1)
# rotate map
map.setMapRotation(45)
self.assertAlmostEqual(picture.pictureRotation(), -38.18 + 45, 1)
# add an offset
picture.setNorthOffset(-10)
self.assertAlmostEqual(picture.pictureRotation(), -38.18 + 35, 1)
if __name__ == '__main__':
unittest.main()
|
xuewei4d/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_mini_batch_kmeans.py
|
23
|
"""
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets import make_blobs
# #############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
# #############################################################################
# Compute clustering with Means
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
# #############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
# #############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want to have the same colors for the same cluster from the
# MiniBatchKMeans and the KMeans algorithm. Let's pair the cluster centers per
# closest one.
k_means_cluster_centers = k_means.cluster_centers_
order = pairwise_distances_argmin(k_means.cluster_centers_,
mbk.cluster_centers_)
mbk_means_cluster_centers = mbk.cluster_centers_[order]
k_means_labels = pairwise_distances_argmin(X, k_means_cluster_centers)
mbk_means_labels = pairwise_distances_argmin(X, mbk_means_cluster_centers)
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == k
cluster_center = mbk_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the different array to all False
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == k))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
|
msebire/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyArgumentListInspection/TimetupleOnAssertedDate/_datetime.py
|
30
|
class date:
def timetuple(self):
pass
|
Seagate/swift
|
refs/heads/master
|
test/probe/__init__.py
|
52
|
from test import get_config
from swift.common.utils import config_true_value
config = get_config('probe_test')
CHECK_SERVER_TIMEOUT = int(config.get('check_server_timeout', 30))
VALIDATE_RSYNC = config_true_value(config.get('validate_rsync', False))
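# Illustrative usage sketch (hypothetical): config_true_value maps common truthy
# strings ('1', 'yes', 'true', 'on', ...) to booleans, so a probe.conf line such
# as "validate_rsync = yes" becomes True here. Probe tests import the constants:
#
# from test.probe import CHECK_SERVER_TIMEOUT, VALIDATE_RSYNC
# if VALIDATE_RSYNC:
#     pass  # only run rsync-based consistency checks when the config enables them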
|
steromano87/Woodpecker
|
refs/heads/develop
|
setup.py
|
1
|
from setuptools import setup
import versioneer
def readme():
with open('README.rst') as f:
return f.read()
def requirements():
with open('requirements.txt') as f:
return f.readlines()
setup(
name='woodpecker',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Lightweight Load Test and Analysis Tool',
long_description=readme(),
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Information Technology',
' '.join(('License :: OSI Approved ::',
'GNU Lesser General Public License v3 (LGPLv3)')),
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing :: Traffic Generation',
'Topic :: System :: Networking'
],
keywords='load test http analysis loadrunner jmeter transaction',
url='https://github.com/steromano87/Woodpecker',
author='Stefano Romano\'',
author_email='rumix87@gmail.com',
license='LGPLv3',
packages=['woodpecker'],
package_data={'': ['*.sql']},
install_requires=requirements(),
include_package_data=True,
zip_safe=False
)
|
wrouesnel/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/cloudstack/cs_portforward.py
|
73
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_portforward
short_description: Manages port forwarding rules on Apache CloudStack based clouds.
description:
- Create, update and remove port forwarding rules.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
ip_address:
description:
- Public IP address the rule is assigned to.
required: true
vm:
description:
- Name of the virtual machine for which the port forwarding rule is created.
- Required if C(state=present).
state:
description:
- State of the port forwarding rule.
default: present
choices: [ present, absent ]
protocol:
description:
- Protocol of the port forwarding rule.
default: tcp
choices: [ tcp, udp ]
public_port:
description:
- Start public port for this rule.
required: true
public_end_port:
description:
- End public port for this rule.
- If not specified, defaults to C(public_port).
private_port:
description:
- Start private port for this rule.
required: true
private_end_port:
description:
- End private port for this rule.
- If not specified, defaults to C(private_port).
open_firewall:
description:
- Whether the firewall rule for the public port should be created while creating the new rule.
- Use M(cs_firewall) for managing firewall rules.
default: false
vm_guest_ip:
description:
- VM guest NIC secondary IP address for the port forwarding rule.
default: false
network:
description:
- Name of the network.
version_added: "2.3"
vpc:
description:
- Name of the VPC.
version_added: "2.3"
domain:
description:
- Domain the C(vm) is related to.
account:
description:
- Account the C(vm) is related to.
project:
description:
- Name of the project the C(vm) is located in.
zone:
description:
- Name of the zone in which the virtual machine is in.
- If not set, default zone is used.
poll_async:
description:
- Poll async jobs until job has finished.
default: true
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "To delete all tags, set a empty list e.g. C(tags: [])."
aliases: [ tag ]
version_added: "2.4"
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: 1.2.3.4:80 -> web01:8080
local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: web01
public_port: 80
private_port: 8080
- name: forward SSH and open firewall
local_action:
module: cs_portforward
ip_address: '{{ public_ip }}'
vm: '{{ inventory_hostname }}'
public_port: '{{ ansible_ssh_port }}'
private_port: 22
open_firewall: true
- name: forward DNS traffic, but do not open firewall
local_action:
module: cs_portforward
ip_address: 1.2.3.4
vm: '{{ inventory_hostname }}'
public_port: 53
private_port: 53
protocol: udp
- name: remove ssh port forwarding
local_action:
module: cs_portforward
ip_address: 1.2.3.4
public_port: 22
private_port: 22
state: absent
'''
RETURN = '''
---
id:
description: UUID of the public IP address.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
ip_address:
description: Public IP address.
returned: success
type: string
sample: 1.2.3.4
protocol:
description: Protocol.
returned: success
type: string
sample: tcp
private_port:
description: Start port on the virtual machine's IP address.
returned: success
type: int
sample: 80
private_end_port:
description: End port on the virtual machine's IP address.
returned: success
type: int
public_port:
description: Start port on the public IP address.
returned: success
type: int
sample: 80
public_end_port:
description: End port on the public IP address.
returned: success
type: int
sample: 80
tags:
description: Tags related to the port forwarding.
returned: success
type: list
sample: []
vm_name:
description: Name of the virtual machine.
returned: success
type: string
sample: web-01
vm_display_name:
description: Display name of the virtual machine.
returned: success
type: string
sample: web-01
vm_guest_ip:
description: IP of the virtual machine.
returned: success
type: string
sample: 10.101.65.152
vpc:
description: Name of the VPC.
returned: success
type: string
sample: my_vpc
network:
description: Name of the network.
returned: success
type: string
sample: dmz
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import AnsibleCloudStack, cs_argument_spec, cs_required_together
class AnsibleCloudStackPortforwarding(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackPortforwarding, self).__init__(module)
self.returns = {
'virtualmachinedisplayname': 'vm_display_name',
'virtualmachinename': 'vm_name',
'ipaddress': 'ip_address',
'vmguestip': 'vm_guest_ip',
'publicip': 'public_ip',
'protocol': 'protocol',
}
# these values will be casted to int
self.returns_to_int = {
'publicport': 'public_port',
'publicendport': 'public_end_port',
'privateport': 'private_port',
'privateendport': 'private_end_port',
}
self.portforwarding_rule = None
def get_portforwarding_rule(self):
if not self.portforwarding_rule:
protocol = self.module.params.get('protocol')
public_port = self.module.params.get('public_port')
args = {
'ipaddressid': self.get_ip_address(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'projectid': self.get_project(key='id'),
}
portforwarding_rules = self.query_api('listPortForwardingRules', **args)
if portforwarding_rules and 'portforwardingrule' in portforwarding_rules:
for rule in portforwarding_rules['portforwardingrule']:
if (protocol == rule['protocol'] and
public_port == int(rule['publicport'])):
self.portforwarding_rule = rule
break
return self.portforwarding_rule
def present_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.update_portforwarding_rule(portforwarding_rule)
else:
portforwarding_rule = self.create_portforwarding_rule()
if portforwarding_rule:
portforwarding_rule = self.ensure_tags(resource=portforwarding_rule, resource_type='PortForwardingRule')
self.portforwarding_rule = portforwarding_rule
return portforwarding_rule
def create_portforwarding_rule(self):
args = {
'protocol': self.module.params.get('protocol'),
'publicport': self.module.params.get('public_port'),
'publicendport': self.get_or_fallback('public_end_port', 'public_port'),
'privateport': self.module.params.get('private_port'),
'privateendport': self.get_or_fallback('private_end_port', 'private_port'),
'openfirewall': self.module.params.get('open_firewall'),
'vmguestip': self.get_vm_guest_ip(),
'ipaddressid': self.get_ip_address(key='id'),
'virtualmachineid': self.get_vm(key='id'),
'account': self.get_account(key='name'),
'domainid': self.get_domain(key='id'),
'networkid': self.get_network(key='id'),
}
portforwarding_rule = None
self.result['changed'] = True
if not self.module.check_mode:
portforwarding_rule = self.query_api('createPortForwardingRule', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def update_portforwarding_rule(self, portforwarding_rule):
args = {
'protocol': self.module.params.get('protocol'),
'publicport': self.module.params.get('public_port'),
'publicendport': self.get_or_fallback('public_end_port', 'public_port'),
'privateport': self.module.params.get('private_port'),
'privateendport': self.get_or_fallback('private_end_port', 'private_port'),
'vmguestip': self.get_vm_guest_ip(),
'ipaddressid': self.get_ip_address(key='id'),
'virtualmachineid': self.get_vm(key='id'),
'networkid': self.get_network(key='id'),
}
if self.has_changed(args, portforwarding_rule):
self.result['changed'] = True
if not self.module.check_mode:
# API broken in 4.2.1? Workaround: use remove/create instead of update
# portforwarding_rule = self.query_api('updatePortForwardingRule', **args)
self.absent_portforwarding_rule()
portforwarding_rule = self.query_api('createPortForwardingRule', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
portforwarding_rule = self.poll_job(portforwarding_rule, 'portforwardingrule')
return portforwarding_rule
def absent_portforwarding_rule(self):
portforwarding_rule = self.get_portforwarding_rule()
if portforwarding_rule:
self.result['changed'] = True
args = {
'id': portforwarding_rule['id'],
}
if not self.module.check_mode:
res = self.query_api('deletePortForwardingRule', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
self.poll_job(res, 'portforwardingrule')
return portforwarding_rule
def get_result(self, portforwarding_rule):
super(AnsibleCloudStackPortforwarding, self).get_result(portforwarding_rule)
if portforwarding_rule:
for search_key, return_key in self.returns_to_int.items():
if search_key in portforwarding_rule:
self.result[return_key] = int(portforwarding_rule[search_key])
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
ip_address=dict(required=True),
protocol=dict(choices=['tcp', 'udp'], default='tcp'),
public_port=dict(type='int', required=True),
public_end_port=dict(type='int'),
private_port=dict(type='int', required=True),
private_end_port=dict(type='int'),
state=dict(choices=['present', 'absent'], default='present'),
open_firewall=dict(type='bool', default=False),
vm_guest_ip=dict(),
vm=dict(),
vpc=dict(),
network=dict(),
zone=dict(),
domain=dict(),
account=dict(),
project=dict(),
poll_async=dict(type='bool', default=True),
tags=dict(type='list', aliases=['tag']),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_pf = AnsibleCloudStackPortforwarding(module)
state = module.params.get('state')
if state in ['absent']:
pf_rule = acs_pf.absent_portforwarding_rule()
else:
pf_rule = acs_pf.present_portforwarding_rule()
result = acs_pf.get_result(pf_rule)
module.exit_json(**result)
if __name__ == '__main__':
main()
|
neumerance/deploy
|
refs/heads/master
|
.venv/lib/python2.7/site-packages/django/contrib/localflavor/it/it_province.py
|
110
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
PROVINCE_CHOICES = (
('AG', 'Agrigento'),
('AL', 'Alessandria'),
('AN', 'Ancona'),
('AO', 'Aosta'),
('AR', 'Arezzo'),
('AP', 'Ascoli Piceno'),
('AT', 'Asti'),
('AV', 'Avellino'),
('BA', 'Bari'),
('BT', 'Barletta-Andria-Trani'), # active starting from 2009
('BL', 'Belluno'),
('BN', 'Benevento'),
('BG', 'Bergamo'),
('BI', 'Biella'),
('BO', 'Bologna'),
('BZ', 'Bolzano/Bozen'),
('BS', 'Brescia'),
('BR', 'Brindisi'),
('CA', 'Cagliari'),
('CL', 'Caltanissetta'),
('CB', 'Campobasso'),
('CI', 'Carbonia-Iglesias'),
('CE', 'Caserta'),
('CT', 'Catania'),
('CZ', 'Catanzaro'),
('CH', 'Chieti'),
('CO', 'Como'),
('CS', 'Cosenza'),
('CR', 'Cremona'),
('KR', 'Crotone'),
('CN', 'Cuneo'),
('EN', 'Enna'),
('FM', 'Fermo'), # active starting from 2009
('FE', 'Ferrara'),
('FI', 'Firenze'),
('FG', 'Foggia'),
('FC', 'Forlì-Cesena'),
('FR', 'Frosinone'),
('GE', 'Genova'),
('GO', 'Gorizia'),
('GR', 'Grosseto'),
('IM', 'Imperia'),
('IS', 'Isernia'),
('SP', 'La Spezia'),
('AQ', 'L’Aquila'),
('LT', 'Latina'),
('LE', 'Lecce'),
('LC', 'Lecco'),
('LI', 'Livorno'),
('LO', 'Lodi'),
('LU', 'Lucca'),
('MC', 'Macerata'),
('MN', 'Mantova'),
('MS', 'Massa-Carrara'),
('MT', 'Matera'),
('VS', 'Medio Campidano'),
('ME', 'Messina'),
('MI', 'Milano'),
('MO', 'Modena'),
('MB', 'Monza e Brianza'), # active starting from 2009
('NA', 'Napoli'),
('NO', 'Novara'),
('NU', 'Nuoro'),
('OG', 'Ogliastra'),
('OT', 'Olbia-Tempio'),
('OR', 'Oristano'),
('PD', 'Padova'),
('PA', 'Palermo'),
('PR', 'Parma'),
('PV', 'Pavia'),
('PG', 'Perugia'),
('PU', 'Pesaro e Urbino'),
('PE', 'Pescara'),
('PC', 'Piacenza'),
('PI', 'Pisa'),
('PT', 'Pistoia'),
('PN', 'Pordenone'),
('PZ', 'Potenza'),
('PO', 'Prato'),
('RG', 'Ragusa'),
('RA', 'Ravenna'),
('RC', 'Reggio Calabria'),
('RE', 'Reggio Emilia'),
('RI', 'Rieti'),
('RN', 'Rimini'),
('RM', 'Roma'),
('RO', 'Rovigo'),
('SA', 'Salerno'),
('SS', 'Sassari'),
('SV', 'Savona'),
('SI', 'Siena'),
('SR', 'Siracusa'),
('SO', 'Sondrio'),
('TA', 'Taranto'),
('TE', 'Teramo'),
('TR', 'Terni'),
('TO', 'Torino'),
('TP', 'Trapani'),
('TN', 'Trento'),
('TV', 'Treviso'),
('TS', 'Trieste'),
('UD', 'Udine'),
('VA', 'Varese'),
('VE', 'Venezia'),
('VB', 'Verbano Cusio Ossola'),
('VC', 'Vercelli'),
('VR', 'Verona'),
('VV', 'Vibo Valentia'),
('VI', 'Vicenza'),
('VT', 'Viterbo'),
)
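# Illustrative usage sketch (hypothetical form): PROVINCE_CHOICES is a plain
# sequence of (code, label) pairs, so it can be passed straight to a Django form
# or model field.
#
# from django import forms
#
# class ShippingForm(forms.Form):
#     province = forms.ChoiceField(choices=PROVINCE_CHOICES)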
|
dcroc16/skunk_works
|
refs/heads/master
|
google_appengine/lib/django-1.4/tests/modeltests/generic_relations/models.py
|
36
|
"""
34. Generic relations
Generic relations let an object have a foreign key to any object through a
content-type/object-id field. A ``GenericForeignKey`` field can point to any
object, be it animal, vegetable, or mineral.
The canonical example is tags (although this example implementation is *far*
from complete).
"""
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
class TaggedItem(models.Model):
"""A tag on an item."""
tag = models.SlugField()
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey()
class Meta:
ordering = ["tag", "content_type__name"]
def __unicode__(self):
return self.tag
class ValuableTaggedItem(TaggedItem):
value = models.PositiveIntegerField()
class Comparison(models.Model):
"""
A model that tests having multiple GenericForeignKeys
"""
comparative = models.CharField(max_length=50)
content_type1 = models.ForeignKey(ContentType, related_name="comparative1_set")
object_id1 = models.PositiveIntegerField()
content_type2 = models.ForeignKey(ContentType, related_name="comparative2_set")
object_id2 = models.PositiveIntegerField()
first_obj = generic.GenericForeignKey(ct_field="content_type1", fk_field="object_id1")
other_obj = generic.GenericForeignKey(ct_field="content_type2", fk_field="object_id2")
def __unicode__(self):
return u"%s is %s than %s" % (self.first_obj, self.comparative, self.other_obj)
class Animal(models.Model):
common_name = models.CharField(max_length=150)
latin_name = models.CharField(max_length=150)
tags = generic.GenericRelation(TaggedItem)
comparisons = generic.GenericRelation(Comparison,
object_id_field="object_id1",
content_type_field="content_type1")
def __unicode__(self):
return self.common_name
class Vegetable(models.Model):
name = models.CharField(max_length=150)
is_yucky = models.BooleanField(default=True)
tags = generic.GenericRelation(TaggedItem)
def __unicode__(self):
return self.name
class Mineral(models.Model):
name = models.CharField(max_length=150)
hardness = models.PositiveSmallIntegerField()
# note the lack of an explicit GenericRelation here...
def __unicode__(self):
return self.name
class GeckoManager(models.Manager):
def get_query_set(self):
return super(GeckoManager, self).get_query_set().filter(has_tail=True)
class Gecko(models.Model):
has_tail = models.BooleanField()
objects = GeckoManager()
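# Illustrative usage sketch (hypothetical data, needs a configured database): a
# TaggedItem can point at any model instance through its GenericForeignKey, and
# models declaring a GenericRelation get a reverse accessor.
#
# bacon = Vegetable.objects.create(name="Bacon", is_yucky=False)
# TaggedItem.objects.create(content_object=bacon, tag="fatty")
# bacon.tags.all()                                    # [<TaggedItem: fatty>]
# TaggedItem.objects.get(tag="fatty").content_object  # <Vegetable: Bacon>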
|
yamahata/neutron
|
refs/heads/master
|
neutron/services/metering/agents/__init__.py
|
252
|
# Copyright (C) 2013 eNovance SAS <licensing@enovance.com>
#
# Author: Sylvain Afchain <sylvain.afchain@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
|
vitan/hue
|
refs/heads/master
|
desktop/core/src/desktop/api2.py
|
4
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
import json
import time
from collections import defaultdict
from django.core.urlresolvers import reverse
from django.utils import html
from django.utils.translation import ugettext as _
from desktop.lib.django_util import JsonResponse
from desktop.lib.i18n import force_unicode
from desktop.models import Document2, DocumentTag
LOG = logging.getLogger(__name__)
def get_document(request):
if request.GET.get('id'):
doc = Document2.objects.get(id=request.GET['id'])
else:
doc = Document2.objects.get(uuid=request.GET['uuid'])
response = _massage_doc_for_json(doc, request.user, with_data=request.GET.get('with_data'))
return JsonResponse(response)
def _massage_doc_for_json(document, user, with_data=False):
massaged_doc = {
'id': document.id,
'uuid': document.uuid,
'owner': document.owner.username,
'type': html.conditional_escape(document.type),
'name': html.conditional_escape(document.name),
'description': html.conditional_escape(document.description),
'isMine': document.owner == user,
'lastModified': document.last_modified.strftime("%x %X"),
'lastModifiedInMillis': time.mktime(document.last_modified.timetuple()),
'version': document.version,
'is_history': document.is_history,
# tags
# dependencies
}
if with_data:
massaged_doc['data'] = document.data_dict
return massaged_doc
|
vitan/hue
|
refs/heads/master
|
apps/hbase/src/hbase/server/__init__.py
|
646
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
rmoorman/sqlalchemy-utils
|
refs/heads/master
|
sqlalchemy_utils/utils.py
|
6
|
import sys
from collections import Iterable
import six
def str_coercible(cls):
if sys.version_info[0] >= 3: # Python 3
def __str__(self):
return self.__unicode__()
else: # Python 2
def __str__(self):
return self.__unicode__().encode('utf8')
cls.__str__ = __str__
return cls
def is_sequence(value):
return (
isinstance(value, Iterable) and not isinstance(value, six.string_types)
)
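# Illustrative usage sketch (hypothetical class): str_coercible adds a __str__
# that defers to __unicode__ on both Python 2 and 3, and is_sequence treats
# strings as scalars rather than sequences.
if __name__ == '__main__':
    @str_coercible
    class Tag(object):
        def __init__(self, name):
            self.name = name
        def __unicode__(self):
            return self.name
    assert str(Tag(u'python')) == 'python'
    assert is_sequence([1, 2, 3])
    assert not is_sequence(u'not a sequence')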
|
6112/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/html5lib/html5lib/filters/lint.py
|
979
|
from __future__ import absolute_import, division, unicode_literals
from gettext import gettext
_ = gettext
from . import _base
from ..constants import cdataElements, rcdataElements, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class LintError(Exception):
pass
class Filter(_base.Filter):
def __iter__(self):
open_elements = []
contentModelFlag = "PCDATA"
for token in _base.Filter.__iter__(self):
type = token["type"]
if type in ("StartTag", "EmptyTag"):
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("StartTag not in PCDATA content model flag: %(tag)s") % {"tag": name})
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
if not name:
raise LintError(_("Empty tag name"))
if type == "StartTag" and name in voidElements:
raise LintError(_("Void element reported as StartTag token: %(tag)s") % {"tag": name})
elif type == "EmptyTag" and name not in voidElements:
raise LintError(_("Non-void element reported as EmptyTag token: %(tag)s") % {"tag": token["name"]})
if type == "StartTag":
open_elements.append(name)
for name, value in token["data"]:
if not isinstance(name, str):
raise LintError(_("Attribute name is not a string: %(name)r") % {"name": name})
if not name:
raise LintError(_("Empty attribute name"))
if not isinstance(value, str):
raise LintError(_("Attribute value is not a string: %(value)r") % {"value": value})
if name in cdataElements:
contentModelFlag = "CDATA"
elif name in rcdataElements:
contentModelFlag = "RCDATA"
elif name == "plaintext":
contentModelFlag = "PLAINTEXT"
elif type == "EndTag":
name = token["name"]
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
if not name:
raise LintError(_("Empty tag name"))
if name in voidElements:
raise LintError(_("Void element reported as EndTag token: %(tag)s") % {"tag": name})
start_name = open_elements.pop()
if start_name != name:
raise LintError(_("EndTag (%(end)s) does not match StartTag (%(start)s)") % {"end": name, "start": start_name})
contentModelFlag = "PCDATA"
elif type == "Comment":
if contentModelFlag != "PCDATA":
raise LintError(_("Comment not in PCDATA content model flag"))
elif type in ("Characters", "SpaceCharacters"):
data = token["data"]
if not isinstance(data, str):
raise LintError(_("Attribute name is not a string: %(name)r") % {"name": data})
if not data:
raise LintError(_("%(type)s token with empty data") % {"type": type})
if type == "SpaceCharacters":
data = data.strip(spaceCharacters)
if data:
raise LintError(_("Non-space character(s) found in SpaceCharacters token: %(token)r") % {"token": data})
elif type == "Doctype":
name = token["name"]
if contentModelFlag != "PCDATA":
raise LintError(_("Doctype not in PCDATA content model flag: %(name)s") % {"name": name})
if not isinstance(name, str):
raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
# XXX: what to do with token["data"] ?
elif type in ("ParseError", "SerializeError"):
pass
else:
raise LintError(_("Unknown token type: %(type)s") % {"type": type})
yield token
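# Illustrative sketch: running the lint Filter over a hand-built token stream.
# The token dicts below are hypothetical but follow the shape this filter
# expects (type/name/data keys); the str checks above assume text strings, so
# this is written with Python 3 semantics in mind.
def _lint_example():
    tokens = [
        {"type": "StartTag", "name": "div", "data": [("class", "example")]},
        {"type": "Characters", "data": "hello"},
        {"type": "EndTag", "name": "div", "data": []},
    ]
    # Iterating the Filter re-yields each token and raises LintError on problems.
    return [token["type"] for token in Filter(tokens)]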
|
hchen13/ethanbit
|
refs/heads/master
|
projects/admin.py
|
1
|
from django.contrib import admin
from .models import *
admin.site.register(Project)
|
dhp-denero/LibrERP
|
refs/heads/master
|
l10n_it_sale/__init__.py
|
2
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010 Associazione OpenERP Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import stock
from . import sale
from . import wizard
|
brettwooldridge/buck
|
refs/heads/master
|
third-party/py/unittest2/unittest2/loader.py
|
139
|
"""Loading unittests."""
import os
import re
import sys
import traceback
import types
import unittest
from fnmatch import fnmatch
from unittest2 import case, suite
try:
from os.path import relpath
except ImportError:
from unittest2.compatibility import relpath
__unittest = True
def _CmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) == -1
return K
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
message = 'Failed to import test module: %s' % name
if hasattr(traceback, 'format_exc'):
# Python 2.3 compatibility
# format_exc returns two frames of discover.py as well
message += '\n%s' % traceback.format_exc()
return _make_failed_test('ModuleImportFailure', name, ImportError(message),
suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
def testFailure(self):
raise exception
attrs = {methodname: testFailure}
TestClass = type(classname, (case.TestCase,), attrs)
return suiteClass((TestClass(methodname),))
class TestLoader(unittest.TestLoader):
"""
This class is responsible for loading tests according to various criteria
and returning them wrapped in a TestSuite
"""
testMethodPrefix = 'test'
sortTestMethodsUsing = cmp
suiteClass = suite.TestSuite
_top_level_dir = None
def loadTestsFromTestCase(self, testCaseClass):
"""Return a suite of all tests cases contained in testCaseClass"""
if issubclass(testCaseClass, suite.TestSuite):
raise TypeError("Test cases should not be derived from TestSuite."
" Maybe you meant to derive from TestCase?")
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
return loaded_suite
def loadTestsFromModule(self, module, use_load_tests=True):
"""Return a suite of all tests cases contained in the given module"""
tests = []
for name in dir(module):
obj = getattr(module, name)
if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
tests.append(self.loadTestsFromTestCase(obj))
load_tests = getattr(module, 'load_tests', None)
tests = self.suiteClass(tests)
if use_load_tests and load_tests is not None:
try:
return load_tests(self, tests, None)
except Exception, e:
return _make_failed_load_tests(module.__name__, e,
self.suiteClass)
return tests
def loadTestsFromName(self, name, module=None):
"""Return a suite of all tests cases given a string specifier.
The name may resolve either to a module, a test case class, a
test method within a test case class, or a callable object which
returns a TestCase or TestSuite instance.
The method optionally resolves the names relative to a given module.
"""
parts = name.split('.')
if module is None:
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy:
raise
parts = parts[1:]
obj = module
for part in parts:
parent, obj = obj, getattr(obj, part)
if isinstance(obj, types.ModuleType):
return self.loadTestsFromModule(obj)
elif isinstance(obj, type) and issubclass(obj, unittest.TestCase):
return self.loadTestsFromTestCase(obj)
elif (isinstance(obj, types.UnboundMethodType) and
isinstance(parent, type) and
issubclass(parent, case.TestCase)):
return self.suiteClass([parent(obj.__name__)])
elif isinstance(obj, unittest.TestSuite):
return obj
elif hasattr(obj, '__call__'):
test = obj()
if isinstance(test, unittest.TestSuite):
return test
elif isinstance(test, unittest.TestCase):
return self.suiteClass([test])
else:
raise TypeError("calling %s returned %s, not a test" %
(obj, test))
else:
raise TypeError("don't know how to make test from: %s" % obj)
def loadTestsFromNames(self, names, module=None):
"""Return a suite of all tests cases found using the given sequence
of string specifiers. See 'loadTestsFromName()'.
"""
suites = [self.loadTestsFromName(name, module) for name in names]
return self.suiteClass(suites)
def getTestCaseNames(self, testCaseClass):
"""Return a sorted sequence of method names found within testCaseClass
"""
def isTestMethod(attrname, testCaseClass=testCaseClass,
prefix=self.testMethodPrefix):
return attrname.startswith(prefix) and \
hasattr(getattr(testCaseClass, attrname), '__call__')
testFnNames = filter(isTestMethod, dir(testCaseClass))
if self.sortTestMethodsUsing:
testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
return testFnNames
def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
"""Find and return all test modules from the specified start
directory, recursing into subdirectories to find them. Only test files
that match the pattern will be loaded. (Using shell style pattern
matching.)
All test modules must be importable from the top level of the project.
If the start directory is not the top level directory then the top
level directory must be specified separately.
If a test package name (directory with '__init__.py') matches the
pattern then the package will be checked for a 'load_tests' function. If
this exists then it will be called with loader, tests, pattern.
If load_tests exists then discovery does *not* recurse into the package,
load_tests is responsible for loading all tests in the package.
The pattern is deliberately not stored as a loader attribute so that
packages can continue discovery themselves. top_level_dir is stored so
load_tests does not need to pass this argument in to loader.discover().
"""
set_implicit_top = False
if top_level_dir is None and self._top_level_dir is not None:
# make top_level_dir optional if called from load_tests in a package
top_level_dir = self._top_level_dir
elif top_level_dir is None:
set_implicit_top = True
top_level_dir = start_dir
top_level_dir = os.path.abspath(top_level_dir)
if not top_level_dir in sys.path:
# all test modules must be importable from the top level directory
# should we *unconditionally* put the start directory in first
# in sys.path to minimise likelihood of conflicts between installed
# modules and development versions?
sys.path.insert(0, top_level_dir)
self._top_level_dir = top_level_dir
is_not_importable = False
if os.path.isdir(os.path.abspath(start_dir)):
start_dir = os.path.abspath(start_dir)
if start_dir != top_level_dir:
is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
else:
# support for discovery from dotted module names
try:
__import__(start_dir)
except ImportError:
is_not_importable = True
else:
the_module = sys.modules[start_dir]
top_part = start_dir.split('.')[0]
start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
if set_implicit_top:
self._top_level_dir = os.path.abspath(os.path.dirname(os.path.dirname(sys.modules[top_part].__file__)))
sys.path.remove(top_level_dir)
if is_not_importable:
raise ImportError('Start directory is not importable: %r' % start_dir)
tests = list(self._find_tests(start_dir, pattern))
return self.suiteClass(tests)
def _get_name_from_path(self, path):
path = os.path.splitext(os.path.normpath(path))[0]
_relpath = relpath(path, self._top_level_dir)
assert not os.path.isabs(_relpath), "Path must be within the project"
assert not _relpath.startswith('..'), "Path must be within the project"
name = _relpath.replace(os.path.sep, '.')
return name
def _get_module_from_name(self, name):
__import__(name)
return sys.modules[name]
def _match_path(self, path, full_path, pattern):
# override this method to use alternative matching strategy
return fnmatch(path, pattern)
def _find_tests(self, start_dir, pattern):
"""Used by discovery. Yields test suites it loads."""
paths = os.listdir(start_dir)
for path in paths:
full_path = os.path.join(start_dir, path)
if os.path.isfile(full_path):
if not VALID_MODULE_NAME.match(path):
# valid Python identifiers only
continue
if not self._match_path(path, full_path, pattern):
continue
# if the test file matches, load it
name = self._get_name_from_path(full_path)
try:
module = self._get_module_from_name(name)
except:
yield _make_failed_import_test(name, self.suiteClass)
else:
mod_file = os.path.abspath(getattr(module, '__file__', full_path))
realpath = os.path.splitext(mod_file)[0]
fullpath_noext = os.path.splitext(full_path)[0]
if realpath.lower() != fullpath_noext.lower():
module_dir = os.path.dirname(realpath)
mod_name = os.path.splitext(os.path.basename(full_path))[0]
expected_dir = os.path.dirname(full_path)
msg = ("%r module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?")
raise ImportError(msg % (mod_name, module_dir, expected_dir))
yield self.loadTestsFromModule(module)
elif os.path.isdir(full_path):
if not os.path.isfile(os.path.join(full_path, '__init__.py')):
continue
load_tests = None
tests = None
if fnmatch(path, pattern):
# only check load_tests if the package directory itself matches the filter
name = self._get_name_from_path(full_path)
package = self._get_module_from_name(name)
load_tests = getattr(package, 'load_tests', None)
tests = self.loadTestsFromModule(package, use_load_tests=False)
if load_tests is None:
if tests is not None:
# tests loaded from package file
yield tests
# recurse into the package
for test in self._find_tests(full_path, pattern):
yield test
else:
try:
yield load_tests(self, tests, pattern)
except Exception, e:
yield _make_failed_load_tests(package.__name__, e,
self.suiteClass)
defaultTestLoader = TestLoader()
def _makeLoader(prefix, sortUsing, suiteClass=None):
loader = TestLoader()
loader.sortTestMethodsUsing = sortUsing
loader.testMethodPrefix = prefix
if suiteClass:
loader.suiteClass = suiteClass
return loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
def findTestCases(module, prefix='test', sortUsing=cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
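# Illustrative usage sketch: discovering and running tests with the TestLoader
# defined above. The 'tests' directory is hypothetical; any importable package
# of test modules next to the working directory would do.
if __name__ == '__main__':
    _loader = TestLoader()
    _suite = _loader.discover('tests', pattern='test*.py')
    unittest.TextTestRunner(verbosity=2).run(_suite)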
|
EraYaN/CouchPotatoServer
|
refs/heads/master
|
couchpotato/core/downloaders/synology.py
|
34
|
import json
import traceback
from couchpotato.core._base.downloader.main import DownloaderBase
from couchpotato.core.helpers.encoding import isInt
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
import requests
log = CPLog(__name__)
autoload = 'Synology'
class Synology(DownloaderBase):
protocol = ['nzb', 'torrent', 'torrent_magnet']
status_support = False
def download(self, data = None, media = None, filedata = None):
"""
Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
            The file gets downloaded in the searcher and sent to this function.
            This is done so failure checking happens before the downloader is used,
            and the downloader doesn't need to worry about it.
:return: boolean
            One failure returns False, but the downloader should log its own errors
"""
if not media: media = {}
if not data: data = {}
response = False
log.error('Sending "%s" (%s) to Synology.', (data['name'], data['protocol']))
# Load host from config and split out port.
host = cleanHost(self.conf('host'), protocol = False).split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
try:
# Send request to Synology
srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'), self.conf('destination'))
if data['protocol'] == 'torrent_magnet':
log.info('Adding torrent URL %s', data['url'])
response = srpc.create_task(url = data['url'])
elif data['protocol'] in ['nzb', 'torrent']:
log.info('Adding %s' % data['protocol'])
if not filedata:
log.error('No %s data found', data['protocol'])
else:
filename = data['name'] + '.' + data['protocol']
response = srpc.create_task(filename = filename, filedata = filedata)
except:
log.error('Exception while adding torrent: %s', traceback.format_exc())
finally:
return self.downloadReturnId('') if response else False
def test(self):
""" Check if connection works
:return: bool
"""
host = cleanHost(self.conf('host'), protocol = False).split(':')
try:
srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'))
test_result = srpc.test()
except:
return False
return test_result
def getEnabledProtocol(self):
if self.conf('use_for') == 'both':
return super(Synology, self).getEnabledProtocol()
elif self.conf('use_for') == 'torrent':
return ['torrent', 'torrent_magnet']
else:
return ['nzb']
def isEnabled(self, manual = False, data = None):
if not data: data = {}
for_protocol = ['both']
if data and 'torrent' in data.get('protocol'):
for_protocol.append('torrent')
elif data:
for_protocol.append(data.get('protocol'))
return super(Synology, self).isEnabled(manual, data) and\
((self.conf('use_for') in for_protocol))
class SynologyRPC(object):
"""SynologyRPC lite library"""
def __init__(self, host = 'localhost', port = 5000, username = None, password = None, destination = None):
super(SynologyRPC, self).__init__()
self.download_url = 'http://%s:%s/webapi/DownloadStation/task.cgi' % (host, port)
self.auth_url = 'http://%s:%s/webapi/auth.cgi' % (host, port)
self.sid = None
self.username = username
self.password = password
self.destination = destination
self.session_name = 'DownloadStation'
def _login(self):
if self.username and self.password:
args = {'api': 'SYNO.API.Auth', 'account': self.username, 'passwd': self.password, 'version': 2,
'method': 'login', 'session': self.session_name, 'format': 'sid'}
response = self._req(self.auth_url, args)
if response['success']:
self.sid = response['data']['sid']
log.debug('sid=%s', self.sid)
else:
log.error('Couldn\'t login to Synology, %s', response)
return response['success']
else:
log.error('User or password missing, not using authentication.')
return False
def _logout(self):
args = {'api':'SYNO.API.Auth', 'version':1, 'method':'logout', 'session':self.session_name, '_sid':self.sid}
return self._req(self.auth_url, args)
def _req(self, url, args, files = None):
response = {'success': False}
try:
req = requests.post(url, data = args, files = files, verify = False)
req.raise_for_status()
response = json.loads(req.text)
if response['success']:
                log.info('Synology action successful')
return response
except requests.ConnectionError as err:
log.error('Synology connection error, check your config %s', err)
except requests.HTTPError as err:
log.error('SynologyRPC HTTPError: %s', err)
except Exception as err:
log.error('Exception: %s', err)
finally:
return response
def create_task(self, url = None, filename = None, filedata = None):
""" Creates new download task in Synology DownloadStation. Either specify
url or pair (filename, filedata).
Returns True if task was created, False otherwise
"""
result = False
# login
if self._login():
args = {'api': 'SYNO.DownloadStation.Task',
'version': '1',
'method': 'create',
'_sid': self.sid}
if self.destination and len(self.destination) > 0:
args['destination'] = self.destination
if url:
log.info('Login success, adding torrent URI')
args['uri'] = url
response = self._req(self.download_url, args = args)
log.info('Response: %s', response)
result = response['success']
elif filename and filedata:
log.info('Login success, adding torrent')
files = {'file': (filename, filedata)}
response = self._req(self.download_url, args = args, files = files)
log.info('Response: %s', response)
result = response['success']
else:
log.error('Invalid use of SynologyRPC.create_task: either url or filename+filedata must be specified')
self._logout()
return result
def test(self):
return bool(self._login())
config = [{
'name': 'synology',
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'synology',
'label': 'Synology',
'description': 'Use <a href="http://www.synology.com/dsm/home_home_applications_download_station.php" target="_blank">Synology Download Station</a> to download.',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
'radio_group': 'nzb,torrent',
},
{
'name': 'host',
'default': 'localhost:5000',
'description': 'Hostname with port. Usually <strong>localhost:5000</strong>',
},
{
'name': 'username',
},
{
'name': 'password',
'type': 'password',
},
{
'name': 'destination',
'description': 'Specify <strong>existing</strong> destination share to where your files will be downloaded, usually <strong>Downloads</strong>',
'advanced': True,
},
{
'name': 'use_for',
'label': 'Use for',
'default': 'both',
'type': 'dropdown',
'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')],
},
{
'name': 'manual',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
],
}
],
}]
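# Illustrative sketch: using SynologyRPC directly against a Download Station
# instance. The host, credentials and magnet URI below are hypothetical
# placeholders; this module normally runs inside CouchPotato.
def _synology_rpc_example():
    rpc = SynologyRPC('192.168.1.10', 5000, 'admin', 'secret', destination='Downloads')
    if not rpc.test():  # logs in and out once to verify the credentials
        return False
    # create_task() accepts either a url or a (filename, filedata) pair
    return rpc.create_task(url='magnet:?xt=urn:btih:0000000000000000000000000000000000000000')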
|
piyush1911/git-cola
|
refs/heads/master
|
cola/version.py
|
11
|
# Copyright (c) David Aguilar
"""Provide git-cola's version number"""
from __future__ import division, absolute_import, unicode_literals
import os
import sys
if __name__ == '__main__':
srcdir = os.path.dirname(os.path.dirname(__file__))
sys.path.insert(1, srcdir)
from cola.git import git
from cola.git import STDOUT
from cola.decorators import memoize
from cola._version import VERSION
# minimum version requirements
_versions = {
# git-diff learned --patience in 1.6.2
# git-mergetool learned --no-prompt in 1.6.2
# git-difftool moved out of contrib in git 1.6.3
'git': '1.6.3',
'python': '2.6',
'pyqt': '4.4',
'pyqt_qrunnable': '4.4',
'diff-submodule': '1.6.6',
}
def get(key):
"""Returns an entry from the known versions table"""
return _versions.get(key)
def version():
"""Returns the current version"""
return VERSION
@memoize
def check_version(min_ver, ver):
"""Check whether ver is greater or equal to min_ver
"""
min_ver_list = version_to_list(min_ver)
ver_list = version_to_list(ver)
return min_ver_list <= ver_list
@memoize
def check(key, ver):
"""Checks if a version is greater than the known version for <what>"""
return check_version(get(key), ver)
def version_to_list(version):
"""Convert a version string to a list of numbers or strings
"""
ver_list = []
for p in version.split('.'):
try:
n = int(p)
except ValueError:
n = p
ver_list.append(n)
return ver_list
@memoize
def git_version_str():
"""Returns the current GIT version"""
return git.version()[STDOUT].strip()
@memoize
def git_version():
"""Returns the current GIT version"""
parts = git_version_str().split()
if parts and len(parts) >= 3:
return parts[2]
else:
# minimum supported version
return '1.6.3'
def print_version(brief=False):
if brief:
print('%s' % version())
else:
print('cola version %s' % version())
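# Illustrative sketch: how check_version() compares dotted version strings via
# version_to_list(). The version values below are arbitrary examples.
def _version_check_examples():
    assert check_version('1.6.3', '1.7.0')        # newer than the minimum
    assert not check_version('1.6.3', '1.6.2')    # older than the minimum
    assert version_to_list('1.7.10.rc1') == [1, 7, 10, 'rc1']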
if __name__ == '__main__':
print(version())
|
JorgeCoock/django
|
refs/heads/master
|
tests/model_validation/models.py
|
260
|
from django.db import models
class ThingItem(object):
def __init__(self, value, display):
self.value = value
self.display = display
def __iter__(self):
return (x for x in [self.value, self.display])
def __len__(self):
return 2
class Things(object):
def __iter__(self):
return (x for x in [ThingItem(1, 2), ThingItem(3, 4)])
class ThingWithIterableChoices(models.Model):
# Testing choices= Iterable of Iterables
# See: https://code.djangoproject.com/ticket/20430
thing = models.CharField(max_length=100, blank=True, choices=Things())
class Meta:
# Models created as unmanaged as these aren't ever queried
managed = False
class ManyToManyRel(models.Model):
thing1 = models.ManyToManyField(ThingWithIterableChoices, related_name='+')
thing2 = models.ManyToManyField(ThingWithIterableChoices, related_name='+')
class Meta:
# Models created as unmanaged as these aren't ever queried
managed = False
class FKRel(models.Model):
thing1 = models.ForeignKey(ThingWithIterableChoices, models.CASCADE, related_name='+')
thing2 = models.ForeignKey(ThingWithIterableChoices, models.CASCADE, related_name='+')
class Meta:
# Models created as unmanaged as these aren't ever queried
managed = False
|
ByStudent666/XsCrypto
|
refs/heads/master
|
XsCrypto/Des.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'ByStudent'
from Crypto.Cipher import DES
def de_DES(c,key,mode,iv=0):
"""
    :param c: ciphertext
    :param key: key (8 bytes)
    :param mode: cipher mode (ecb/cbc)
    :param iv: iv
    :return: plaintext
"""
if mode == 'ecb':
cipher = DES.new(key, DES.MODE_ECB)
m = cipher.decrypt(c)
return m
else:
cipher = DES.new(key, DES.MODE_CBC,iv)
m = cipher.decrypt(c)
return m
def de_DES_baopo(c,key1=0,key2_l=0):
"""
    Brute-force the DES key and decrypt when only part of the key (or none of it) is known
    :param c: ciphertext
    :param key1: known part of the key
    :param key2_l: length of the unknown part of the key
    :return: plaintext
"""
if key2_l == 1:
for x1 in range(256):
key = str(key1) + chr(x1)
cipher = DES.new(key, DES.MODE_ECB)
m = cipher.decrypt(c)
return "key="+str(key)+'\n'+"m="+m
elif key2_l == 2:
for x1 in range(256):
for x2 in range(256):
key = str(key1) + chr(x1) + chr(x2)
cipher = DES.new(key, DES.MODE_ECB)
m = cipher.decrypt(c)
return "key=" + str(key) + '\n' + "m=" + m
elif key2_l == 3:
for x1 in range(256):
for x2 in range(256):
for x3 in range(256):
key = str(key1) + chr(x1) + chr(x2) + chr(x3)
cipher = DES.new(key, DES.MODE_ECB)
m = cipher.decrypt(c)
return "key=" + str(key) + '\n' + "m=" + m
elif key2_l == 4:
for x1 in range(256):
for x2 in range(256):
for x3 in range(256):
for x4 in range(256):
key = str(key1) + chr(x1) + chr(x2) + chr(x3) + chr(x4)
cipher = DES.new(key, DES.MODE_ECB)
m = cipher.decrypt(c)
return "key=" + str(key) + '\n' + "m=" + m
elif key2_l == 5:
for x1 in range(256):
for x2 in range(256):
for x3 in range(256):
for x4 in range(256):
for x5 in range(256):
key = str(key1) + chr(x1) + chr(x2) + chr(x3) + chr(x4) + chr(x5)
cipher = DES.new(key, DES.MODE_ECB)
m = cipher.decrypt(c)
return "key=" + str(key) + '\n' + "m=" + m
elif key2_l == 6:
for x1 in range(256):
for x2 in range(256):
for x3 in range(256):
for x4 in range(256):
for x5 in range(256):
for x6 in range(256):
key = str(key1) + chr(x1) + chr(x2) + chr(x3) + chr(x4) + chr(x5) + chr(x6)
cipher = DES.new(key, DES.MODE_ECB)
m = cipher.decrypt(c)
return "key=" + str(key) + '\n' + "m=" + m
elif key2_l == 7:
for x1 in range(256):
for x2 in range(256):
for x3 in range(256):
for x4 in range(256):
for x5 in range(256):
for x6 in range(256):
for x7 in range(256):
key = str(key1) + chr(x1) + chr(x2) + chr(x3) + chr(x4) + chr(x5) + chr(x6) +chr(x7)
cipher = DES.new(key, DES.MODE_ECB)
m = cipher.decrypt(c)
return "key=" + str(key) + '\n' + "m=" + m
else:
for x1 in range(256):
for x2 in range(256):
for x3 in range(256):
for x4 in range(256):
for x5 in range(256):
for x6 in range(256):
for x7 in range(256):
for x8 in range(256):
key = str(key1) + chr(x1) + chr(x2) + chr(x3) + chr(x4) + chr(x5) + chr(x6) +chr(x7) + chr(x8)
cipher = DES.new(key, DES.MODE_ECB)
m = cipher.decrypt(c)
return "key=" + str(key) + '\n' + "m=" + m
def en_DES(m,key,mode,iv=0):
"""
    :param m: plaintext
    :param key: key (8 bytes)
    :param mode: cipher mode (ecb/cbc)
    :param iv: iv
    :return: ciphertext
"""
if mode == 'ecb':
message = DES.new(key, DES.MODE_ECB)
c = message.encrypt(m)
return c
else:
message = DES.new(key, DES.MODE_CBC,iv)
c = message.encrypt(m)
return c
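# Illustrative sketch: round-tripping one DES block with en_DES()/de_DES() in
# ECB mode. The key and message are hypothetical 8-byte values; DES requires
# an 8-byte key and block-aligned input.
if __name__ == '__main__':
    key = 'abcdefgh'
    msg = 'secret!!'                  # exactly one 8-byte block
    c = en_DES(msg, key, 'ecb')
    print(de_DES(c, key, 'ecb'))      # prints 'secret!!'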
|
moutai/scikit-learn
|
refs/heads/master
|
sklearn/datasets/setup.py
|
24
|
import numpy
import os
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('datasets', parent_package, top_path)
config.add_data_dir('data')
config.add_data_dir('descr')
config.add_data_dir('images')
config.add_data_dir(os.path.join('tests', 'data'))
config.add_extension('_svmlight_format',
sources=['_svmlight_format.c'],
include_dirs=[numpy.get_include()])
config.add_subpackage('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
chudaol/edx-platform
|
refs/heads/master
|
lms/djangoapps/courseware/tests/test_entrance_exam.py
|
10
|
"""
Tests use cases related to LMS Entrance Exam behavior, such as gated content access (TOC)
"""
from mock import patch, Mock
from django.core.urlresolvers import reverse
from nose.plugins.attrib import attr
from courseware.model_data import FieldDataCache
from courseware.module_render import toc_for_course, get_module
from courseware.tests.factories import UserFactory, InstructorFactory, StaffFactory
from courseware.tests.helpers import (
LoginEnrollmentTestCase,
get_request_for_user
)
from courseware.entrance_exams import (
course_has_entrance_exam,
get_entrance_exam_content,
get_entrance_exam_score,
user_can_skip_entrance_exam,
user_has_passed_entrance_exam,
)
from student.models import CourseEnrollment
from student.tests.factories import CourseEnrollmentFactory, AnonymousUserFactory
from util.milestones_helpers import (
add_milestone,
add_course_milestone,
get_namespace_choices,
generate_milestone_namespace,
add_course_content_milestone,
get_milestone_relationship_types,
seed_milestone_relationship_types,
)
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@attr('shard_1')
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True})
class EntranceExamTestCases(LoginEnrollmentTestCase, ModuleStoreTestCase):
"""
Check that content is properly gated.
Creates a test course from scratch. The tests below are designed to execute
workflows regardless of the feature flag settings.
"""
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': True, 'MILESTONES_APP': True})
def setUp(self):
"""
Test case scaffolding
"""
super(EntranceExamTestCases, self).setUp()
self.course = CourseFactory.create(
metadata={
'entrance_exam_enabled': True,
}
)
self.chapter = ItemFactory.create(
parent=self.course,
display_name='Overview'
)
ItemFactory.create(
parent=self.chapter,
display_name='Welcome'
)
ItemFactory.create(
parent=self.course,
category='chapter',
display_name="Week 1"
)
self.chapter_subsection = ItemFactory.create(
parent=self.chapter,
category='sequential',
display_name="Lesson 1"
)
chapter_vertical = ItemFactory.create(
parent=self.chapter_subsection,
category='vertical',
display_name='Lesson 1 Vertical - Unit 1'
)
ItemFactory.create(
parent=chapter_vertical,
category="problem",
display_name="Problem - Unit 1 Problem 1"
)
ItemFactory.create(
parent=chapter_vertical,
category="problem",
display_name="Problem - Unit 1 Problem 2"
)
ItemFactory.create(
category="instructor",
parent=self.course,
data="Instructor Tab",
display_name="Instructor"
)
self.entrance_exam = ItemFactory.create(
parent=self.course,
category="chapter",
display_name="Entrance Exam Section - Chapter 1",
is_entrance_exam=True,
in_entrance_exam=True
)
self.exam_1 = ItemFactory.create(
parent=self.entrance_exam,
category='sequential',
display_name="Exam Sequential - Subsection 1",
graded=True,
in_entrance_exam=True
)
subsection = ItemFactory.create(
parent=self.exam_1,
category='vertical',
display_name='Exam Vertical - Unit 1'
)
self.problem_1 = ItemFactory.create(
parent=subsection,
category="problem",
display_name="Exam Problem - Problem 1"
)
self.problem_2 = ItemFactory.create(
parent=subsection,
category="problem",
display_name="Exam Problem - Problem 2"
)
seed_milestone_relationship_types()
add_entrance_exam_milestone(self.course, self.entrance_exam)
self.course.entrance_exam_enabled = True
self.course.entrance_exam_minimum_score_pct = 0.50
self.course.entrance_exam_id = unicode(self.entrance_exam.scope_ids.usage_id)
self.anonymous_user = AnonymousUserFactory()
self.request = get_request_for_user(UserFactory())
modulestore().update_item(self.course, self.request.user.id) # pylint: disable=no-member
self.client.login(username=self.request.user.username, password="test")
CourseEnrollment.enroll(self.request.user, self.course.id)
self.expected_locked_toc = (
[
{
'active': True,
'sections': [
{
'url_name': u'Exam_Sequential_-_Subsection_1',
'display_name': u'Exam Sequential - Subsection 1',
'graded': True,
'format': '',
'due': None,
'active': True
}
],
'url_name': u'Entrance_Exam_Section_-_Chapter_1',
'display_name': u'Entrance Exam Section - Chapter 1'
}
]
)
self.expected_unlocked_toc = (
[
{
'active': False,
'sections': [
{
'url_name': u'Welcome',
'display_name': u'Welcome',
'graded': False,
'format': '',
'due': None,
'active': False
},
{
'url_name': u'Lesson_1',
'display_name': u'Lesson 1',
'graded': False,
'format': '',
'due': None,
'active': False
}
],
'url_name': u'Overview',
'display_name': u'Overview'
},
{
'active': False,
'sections': [],
'url_name': u'Week_1',
'display_name': u'Week 1'
},
{
'active': False,
'sections': [],
'url_name': u'Instructor',
'display_name': u'Instructor'
},
{
'active': True,
'sections': [
{
'url_name': u'Exam_Sequential_-_Subsection_1',
'display_name': u'Exam Sequential - Subsection 1',
'graded': True,
'format': '',
'due': None,
'active': True
}
],
'url_name': u'Entrance_Exam_Section_-_Chapter_1',
'display_name': u'Entrance Exam Section - Chapter 1'
}
]
)
def test_view_redirect_if_entrance_exam_required(self):
"""
        Unit Test: if an entrance exam is required, the courseware view should return a redirect to it.
"""
url = reverse('courseware', kwargs={'course_id': unicode(self.course.id)})
expected_url = reverse('courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.entrance_exam.location.name,
'section': self.exam_1.location.name
})
resp = self.client.get(url)
self.assertRedirects(resp, expected_url, status_code=302, target_status_code=200)
@patch.dict('django.conf.settings.FEATURES', {'ENTRANCE_EXAMS': False})
def test_entrance_exam_content_absence(self):
"""
        Unit Test: If the entrance exam is not enabled, the page should redirect to the regular chapter contents.
"""
url = reverse('courseware', kwargs={'course_id': unicode(self.course.id)})
expected_url = reverse('courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.chapter.location.name,
'section': self.chapter_subsection.location.name
})
resp = self.client.get(url)
self.assertRedirects(resp, expected_url, status_code=302, target_status_code=200)
resp = self.client.get(expected_url)
self.assertNotIn('Exam Problem - Problem 1', resp.content)
self.assertNotIn('Exam Problem - Problem 2', resp.content)
def test_entrance_exam_content_presence(self):
"""
        Unit Test: If the entrance exam is enabled, its content (e.g. problems) should be loaded and the
        request should redirect to the entrance exam contents.
"""
url = reverse('courseware', kwargs={'course_id': unicode(self.course.id)})
expected_url = reverse('courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.entrance_exam.location.name,
'section': self.exam_1.location.name
})
resp = self.client.get(url)
self.assertRedirects(resp, expected_url, status_code=302, target_status_code=200)
resp = self.client.get(expected_url)
self.assertIn('Exam Problem - Problem 1', resp.content)
self.assertIn('Exam Problem - Problem 2', resp.content)
def test_get_entrance_exam_content(self):
"""
test get entrance exam content method
"""
exam_chapter = get_entrance_exam_content(self.request, self.course)
self.assertEqual(exam_chapter.url_name, self.entrance_exam.url_name)
self.assertFalse(user_has_passed_entrance_exam(self.request, self.course))
answer_entrance_exam_problem(self.course, self.request, self.problem_1)
answer_entrance_exam_problem(self.course, self.request, self.problem_2)
exam_chapter = get_entrance_exam_content(self.request, self.course)
self.assertEqual(exam_chapter, None)
self.assertTrue(user_has_passed_entrance_exam(self.request, self.course))
def test_entrance_exam_score(self):
"""
        Test the entrance exam score by calling get_entrance_exam_score before and after answering the exam problems.
"""
exam_score = get_entrance_exam_score(self.request, self.course)
self.assertEqual(exam_score, 0)
answer_entrance_exam_problem(self.course, self.request, self.problem_1)
answer_entrance_exam_problem(self.course, self.request, self.problem_2)
exam_score = get_entrance_exam_score(self.request, self.course)
        # More than 50 percent should now be achieved (both problems answered correctly).
self.assertGreater(exam_score * 100, 50)
def test_entrance_exam_requirement_message(self):
"""
Unit Test: entrance exam requirement message should be present in response
"""
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.entrance_exam.location.name,
'section': self.exam_1.location.name
}
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn('To access course materials, you must score', resp.content)
def test_entrance_exam_requirement_message_hidden(self):
"""
Unit Test: entrance exam message should not be present outside the context of entrance exam subsection.
"""
# Login as staff to avoid redirect to entrance exam
self.client.logout()
staff_user = StaffFactory(course_key=self.course.id)
self.client.login(username=staff_user.username, password='test')
CourseEnrollment.enroll(staff_user, self.course.id)
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.chapter.location.name,
'section': self.chapter_subsection.location.name
}
)
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertNotIn('To access course materials, you must score', resp.content)
self.assertNotIn('You have passed the entrance exam.', resp.content)
def test_entrance_exam_passed_message_and_course_content(self):
"""
        Unit Test: the exam-passed message and the rest of the course sections should be present
        once the user achieves the entrance exam milestone (i.e. passes the exam).
"""
url = reverse(
'courseware_section',
kwargs={
'course_id': unicode(self.course.id),
'chapter': self.entrance_exam.location.name,
'section': self.exam_1.location.name
}
)
answer_entrance_exam_problem(self.course, self.request, self.problem_1)
answer_entrance_exam_problem(self.course, self.request, self.problem_2)
resp = self.client.get(url)
self.assertNotIn('To access course materials, you must score', resp.content)
self.assertIn('You have passed the entrance exam.', resp.content)
self.assertIn('Lesson 1', resp.content)
def test_entrance_exam_gating(self):
"""
Unit Test: test_entrance_exam_gating
"""
# This user helps to cover a discovered bug in the milestone fulfillment logic
chaos_user = UserFactory()
locked_toc = self._return_table_of_contents()
for toc_section in self.expected_locked_toc:
self.assertIn(toc_section, locked_toc)
# Set up the chaos user
answer_entrance_exam_problem(self.course, self.request, self.problem_1, chaos_user)
answer_entrance_exam_problem(self.course, self.request, self.problem_1)
answer_entrance_exam_problem(self.course, self.request, self.problem_2)
unlocked_toc = self._return_table_of_contents()
for toc_section in self.expected_unlocked_toc:
self.assertIn(toc_section, unlocked_toc)
def test_skip_entrance_exam_gating(self):
"""
Tests gating is disabled if skip entrance exam is set for a user.
"""
# make sure toc is locked before allowing user to skip entrance exam
locked_toc = self._return_table_of_contents()
for toc_section in self.expected_locked_toc:
self.assertIn(toc_section, locked_toc)
# hit skip entrance exam api in instructor app
instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=instructor.username, password='test')
url = reverse('mark_student_can_skip_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.request.user.email,
})
self.assertEqual(response.status_code, 200)
unlocked_toc = self._return_table_of_contents()
for toc_section in self.expected_unlocked_toc:
self.assertIn(toc_section, unlocked_toc)
def test_entrance_exam_gating_for_staff(self):
"""
Tests gating is disabled if user is member of staff.
"""
# Login as member of staff
self.client.logout()
staff_user = StaffFactory(course_key=self.course.id)
staff_user.is_staff = True
self.client.login(username=staff_user.username, password='test')
# assert staff has access to all toc
self.request.user = staff_user
unlocked_toc = self._return_table_of_contents()
for toc_section in self.expected_unlocked_toc:
self.assertIn(toc_section, unlocked_toc)
@patch('courseware.entrance_exams.user_has_passed_entrance_exam', Mock(return_value=False))
def test_courseware_page_access_without_passing_entrance_exam(self):
"""
Test courseware access page without passing entrance exam
"""
url = reverse(
'courseware_chapter',
kwargs={'course_id': unicode(self.course.id), 'chapter': self.chapter.url_name}
)
response = self.client.get(url)
redirect_url = reverse('courseware', args=[unicode(self.course.id)])
self.assertRedirects(response, redirect_url, status_code=302, target_status_code=302)
response = self.client.get(redirect_url)
exam_url = response.get('Location')
self.assertRedirects(response, exam_url)
@patch('courseware.entrance_exams.user_has_passed_entrance_exam', Mock(return_value=False))
def test_courseinfo_page_access_without_passing_entrance_exam(self):
"""
        Test course info page access without passing entrance exam
"""
url = reverse('info', args=[unicode(self.course.id)])
response = self.client.get(url)
redirect_url = reverse('courseware', args=[unicode(self.course.id)])
self.assertRedirects(response, redirect_url, status_code=302, target_status_code=302)
response = self.client.get(redirect_url)
exam_url = response.get('Location')
self.assertRedirects(response, exam_url)
@patch('courseware.entrance_exams.user_has_passed_entrance_exam', Mock(return_value=True))
def test_courseware_page_access_after_passing_entrance_exam(self):
"""
Test courseware access page after passing entrance exam
"""
        # user_has_passed_entrance_exam is patched to return True, so the chapter should load
self._assert_chapter_loaded(self.course, self.chapter)
@patch('util.milestones_helpers.get_required_content', Mock(return_value=['a value']))
def test_courseware_page_access_with_staff_user_without_passing_entrance_exam(self):
"""
Test courseware access page without passing entrance exam but with staff user
"""
self.logout()
staff_user = StaffFactory.create(course_key=self.course.id)
self.login(staff_user.email, 'test')
CourseEnrollmentFactory(user=staff_user, course_id=self.course.id)
self._assert_chapter_loaded(self.course, self.chapter)
def test_courseware_page_access_with_staff_user_after_passing_entrance_exam(self):
"""
Test courseware access page after passing entrance exam but with staff user
"""
self.logout()
staff_user = StaffFactory.create(course_key=self.course.id)
self.login(staff_user.email, 'test')
CourseEnrollmentFactory(user=staff_user, course_id=self.course.id)
self._assert_chapter_loaded(self.course, self.chapter)
@patch.dict("django.conf.settings.FEATURES", {'ENTRANCE_EXAMS': False})
def test_courseware_page_access_when_entrance_exams_disabled(self):
"""
Test courseware page access when ENTRANCE_EXAMS feature is disabled
"""
self._assert_chapter_loaded(self.course, self.chapter)
def test_can_skip_entrance_exam_with_anonymous_user(self):
"""
Test can_skip_entrance_exam method with anonymous user
"""
self.assertFalse(user_can_skip_entrance_exam(self.request, self.anonymous_user, self.course))
def test_has_passed_entrance_exam_with_anonymous_user(self):
"""
Test has_passed_entrance_exam method with anonymous user
"""
self.request.user = self.anonymous_user
self.assertFalse(user_has_passed_entrance_exam(self.request, self.course))
def test_course_has_entrance_exam_missing_exam_id(self):
course = CourseFactory.create(
metadata={
'entrance_exam_enabled': True,
}
)
self.assertFalse(course_has_entrance_exam(course))
def test_user_has_passed_entrance_exam_short_circuit_missing_exam(self):
course = CourseFactory.create(
)
self.assertTrue(user_has_passed_entrance_exam(self.request, course))
def _assert_chapter_loaded(self, course, chapter):
"""
        Asserts that the courseware chapter loads successfully.
"""
url = reverse(
'courseware_chapter',
kwargs={'course_id': unicode(course.id), 'chapter': chapter.url_name}
)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def _return_table_of_contents(self):
"""
        Returns the table of contents for the entrance exam specific to this test:
        the table of contents for course self.course, chapter self.entrance_exam,
        and section self.exam_1
"""
self.field_data_cache = FieldDataCache.cache_for_descriptor_descendents( # pylint: disable=attribute-defined-outside-init
self.course.id,
self.request.user,
self.entrance_exam
)
return toc_for_course(
self.request.user,
self.request,
self.course,
self.entrance_exam.url_name,
self.exam_1.url_name,
self.field_data_cache
)
def answer_entrance_exam_problem(course, request, problem, user=None):
"""
Takes a required milestone `problem` in a `course` and fulfills it.
Args:
course (Course): Course object, the course the required problem is in
request (Request): request Object
problem (xblock): xblock object, the problem to be fulfilled
user (User): User object in case it is different from request.user
"""
if not user:
user = request.user
# pylint: disable=maybe-no-member,no-member
grade_dict = {'value': 1, 'max_value': 1, 'user_id': user.id}
field_data_cache = FieldDataCache.cache_for_descriptor_descendents(
course.id,
user,
course,
depth=2
)
# pylint: disable=protected-access
module = get_module(
user,
request,
problem.scope_ids.usage_id,
field_data_cache,
)._xmodule
module.system.publish(problem, 'grade', grade_dict)
def add_entrance_exam_milestone(course, entrance_exam):
"""
Adds the milestone for given `entrance_exam` in `course`
Args:
        course (Course): Course object in which the entrance_exam is located
entrance_exam (xblock): the entrance exam to be added as a milestone
"""
namespace_choices = get_namespace_choices()
milestone_relationship_types = get_milestone_relationship_types()
milestone_namespace = generate_milestone_namespace(
namespace_choices.get('ENTRANCE_EXAM'),
course.id
)
milestone = add_milestone(
{
'name': 'Test Milestone',
'namespace': milestone_namespace,
'description': 'Testing Courseware Entrance Exam Chapter',
}
)
add_course_milestone(
unicode(course.id),
milestone_relationship_types['REQUIRES'],
milestone
)
add_course_content_milestone(
unicode(course.id),
unicode(entrance_exam.location),
milestone_relationship_types['FULFILLS'],
milestone
)
|
evamwangi/bc-7-Todo_List
|
refs/heads/master
|
venv/Lib/site-packages/flask/testsuite/blueprints.py
|
563
|
# -*- coding: utf-8 -*-
"""
flask.testsuite.blueprints
~~~~~~~~~~~~~~~~~~~~~~~~~~
Blueprints (and currently modules)
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
import warnings
from flask.testsuite import FlaskTestCase, emits_module_deprecation_warning
from flask._compat import text_type
from werkzeug.exceptions import NotFound
from werkzeug.http import parse_cache_control_header
from jinja2 import TemplateNotFound
# import moduleapp here because it uses deprecated features and we don't
# want to see the warnings
warnings.simplefilter('ignore', DeprecationWarning)
from moduleapp import app as moduleapp
warnings.simplefilter('default', DeprecationWarning)
class ModuleTestCase(FlaskTestCase):
@emits_module_deprecation_warning
def test_basic_module(self):
app = flask.Flask(__name__)
admin = flask.Module(__name__, 'admin', url_prefix='/admin')
@admin.route('/')
def admin_index():
return 'admin index'
@admin.route('/login')
def admin_login():
return 'admin login'
@admin.route('/logout')
def admin_logout():
return 'admin logout'
@app.route('/')
def index():
return 'the index'
app.register_module(admin)
c = app.test_client()
self.assert_equal(c.get('/').data, b'the index')
self.assert_equal(c.get('/admin/').data, b'admin index')
self.assert_equal(c.get('/admin/login').data, b'admin login')
self.assert_equal(c.get('/admin/logout').data, b'admin logout')
@emits_module_deprecation_warning
def test_default_endpoint_name(self):
app = flask.Flask(__name__)
mod = flask.Module(__name__, 'frontend')
def index():
return 'Awesome'
mod.add_url_rule('/', view_func=index)
app.register_module(mod)
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'Awesome')
with app.test_request_context():
self.assert_equal(flask.url_for('frontend.index'), '/')
@emits_module_deprecation_warning
def test_request_processing(self):
catched = []
app = flask.Flask(__name__)
admin = flask.Module(__name__, 'admin', url_prefix='/admin')
@admin.before_request
def before_admin_request():
catched.append('before-admin')
@admin.after_request
def after_admin_request(response):
catched.append('after-admin')
return response
@admin.route('/')
def admin_index():
return 'the admin'
@app.before_request
def before_request():
catched.append('before-app')
@app.after_request
def after_request(response):
catched.append('after-app')
return response
@app.route('/')
def index():
return 'the index'
app.register_module(admin)
c = app.test_client()
self.assert_equal(c.get('/').data, b'the index')
self.assert_equal(catched, ['before-app', 'after-app'])
del catched[:]
self.assert_equal(c.get('/admin/').data, b'the admin')
self.assert_equal(catched, ['before-app', 'before-admin',
'after-admin', 'after-app'])
@emits_module_deprecation_warning
def test_context_processors(self):
app = flask.Flask(__name__)
admin = flask.Module(__name__, 'admin', url_prefix='/admin')
@app.context_processor
def inject_all_regular():
return {'a': 1}
@admin.context_processor
def inject_admin():
return {'b': 2}
@admin.app_context_processor
def inject_all_module():
return {'c': 3}
@app.route('/')
def index():
return flask.render_template_string('{{ a }}{{ b }}{{ c }}')
@admin.route('/')
def admin_index():
return flask.render_template_string('{{ a }}{{ b }}{{ c }}')
app.register_module(admin)
c = app.test_client()
self.assert_equal(c.get('/').data, b'13')
self.assert_equal(c.get('/admin/').data, b'123')
@emits_module_deprecation_warning
def test_late_binding(self):
app = flask.Flask(__name__)
admin = flask.Module(__name__, 'admin')
@admin.route('/')
def index():
return '42'
app.register_module(admin, url_prefix='/admin')
self.assert_equal(app.test_client().get('/admin/').data, b'42')
@emits_module_deprecation_warning
def test_error_handling(self):
app = flask.Flask(__name__)
admin = flask.Module(__name__, 'admin')
@admin.app_errorhandler(404)
def not_found(e):
return 'not found', 404
@admin.app_errorhandler(500)
def internal_server_error(e):
return 'internal server error', 500
@admin.route('/')
def index():
flask.abort(404)
@admin.route('/error')
def error():
1 // 0
app.register_module(admin)
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, b'not found')
rv = c.get('/error')
self.assert_equal(rv.status_code, 500)
self.assert_equal(b'internal server error', rv.data)
def test_templates_and_static(self):
app = moduleapp
app.testing = True
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.data, b'Hello from the Frontend')
rv = c.get('/admin/')
self.assert_equal(rv.data, b'Hello from the Admin')
rv = c.get('/admin/index2')
self.assert_equal(rv.data, b'Hello from the Admin')
rv = c.get('/admin/static/test.txt')
self.assert_equal(rv.data.strip(), b'Admin File')
rv.close()
rv = c.get('/admin/static/css/test.css')
self.assert_equal(rv.data.strip(), b'/* nested file */')
rv.close()
with app.test_request_context():
self.assert_equal(flask.url_for('admin.static', filename='test.txt'),
'/admin/static/test.txt')
with app.test_request_context():
try:
flask.render_template('missing.html')
except TemplateNotFound as e:
self.assert_equal(e.name, 'missing.html')
else:
self.assert_true(0, 'expected exception')
with flask.Flask(__name__).test_request_context():
self.assert_equal(flask.render_template('nested/nested.txt'), 'I\'m nested')
def test_safe_access(self):
app = moduleapp
with app.test_request_context():
f = app.view_functions['admin.static']
try:
f('/etc/passwd')
except NotFound:
pass
else:
self.assert_true(0, 'expected exception')
try:
f('../__init__.py')
except NotFound:
pass
else:
self.assert_true(0, 'expected exception')
# testcase for a security issue that may exist on windows systems
import os
import ntpath
old_path = os.path
os.path = ntpath
try:
try:
f('..\\__init__.py')
except NotFound:
pass
else:
self.assert_true(0, 'expected exception')
finally:
os.path = old_path
@emits_module_deprecation_warning
def test_endpoint_decorator(self):
from werkzeug.routing import Submount, Rule
from flask import Module
app = flask.Flask(__name__)
app.testing = True
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
module = Module(__name__, __name__)
@module.endpoint('bar')
def bar():
return 'bar'
@module.endpoint('index')
def index():
return 'index'
app.register_module(module)
c = app.test_client()
self.assert_equal(c.get('/foo/').data, b'index')
self.assert_equal(c.get('/foo/bar').data, b'bar')
class BlueprintTestCase(FlaskTestCase):
def test_blueprint_specific_error_handling(self):
frontend = flask.Blueprint('frontend', __name__)
backend = flask.Blueprint('backend', __name__)
sideend = flask.Blueprint('sideend', __name__)
@frontend.errorhandler(403)
def frontend_forbidden(e):
return 'frontend says no', 403
@frontend.route('/frontend-no')
def frontend_no():
flask.abort(403)
@backend.errorhandler(403)
def backend_forbidden(e):
return 'backend says no', 403
@backend.route('/backend-no')
def backend_no():
flask.abort(403)
@sideend.route('/what-is-a-sideend')
def sideend_no():
flask.abort(403)
app = flask.Flask(__name__)
app.register_blueprint(frontend)
app.register_blueprint(backend)
app.register_blueprint(sideend)
@app.errorhandler(403)
def app_forbidden(e):
return 'application itself says no', 403
c = app.test_client()
self.assert_equal(c.get('/frontend-no').data, b'frontend says no')
self.assert_equal(c.get('/backend-no').data, b'backend says no')
self.assert_equal(c.get('/what-is-a-sideend').data, b'application itself says no')
def test_blueprint_url_definitions(self):
bp = flask.Blueprint('test', __name__)
@bp.route('/foo', defaults={'baz': 42})
def foo(bar, baz):
return '%s/%d' % (bar, baz)
@bp.route('/bar')
def bar(bar):
return text_type(bar)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/1', url_defaults={'bar': 23})
app.register_blueprint(bp, url_prefix='/2', url_defaults={'bar': 19})
c = app.test_client()
self.assert_equal(c.get('/1/foo').data, b'23/42')
self.assert_equal(c.get('/2/foo').data, b'19/42')
self.assert_equal(c.get('/1/bar').data, b'23')
self.assert_equal(c.get('/2/bar').data, b'19')
def test_blueprint_url_processors(self):
bp = flask.Blueprint('frontend', __name__, url_prefix='/<lang_code>')
@bp.url_defaults
def add_language_code(endpoint, values):
values.setdefault('lang_code', flask.g.lang_code)
@bp.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop('lang_code')
@bp.route('/')
def index():
return flask.url_for('.about')
@bp.route('/about')
def about():
return flask.url_for('.index')
app = flask.Flask(__name__)
app.register_blueprint(bp)
c = app.test_client()
self.assert_equal(c.get('/de/').data, b'/de/about')
self.assert_equal(c.get('/de/about').data, b'/de/')
def test_templates_and_static(self):
from blueprintapp import app
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.data, b'Hello from the Frontend')
rv = c.get('/admin/')
self.assert_equal(rv.data, b'Hello from the Admin')
rv = c.get('/admin/index2')
self.assert_equal(rv.data, b'Hello from the Admin')
rv = c.get('/admin/static/test.txt')
self.assert_equal(rv.data.strip(), b'Admin File')
rv.close()
rv = c.get('/admin/static/css/test.css')
self.assert_equal(rv.data.strip(), b'/* nested file */')
rv.close()
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
expected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == expected_max_age:
expected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = expected_max_age
rv = c.get('/admin/static/css/test.css')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, expected_max_age)
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
with app.test_request_context():
self.assert_equal(flask.url_for('admin.static', filename='test.txt'),
'/admin/static/test.txt')
with app.test_request_context():
try:
flask.render_template('missing.html')
except TemplateNotFound as e:
self.assert_equal(e.name, 'missing.html')
else:
self.assert_true(0, 'expected exception')
with flask.Flask(__name__).test_request_context():
self.assert_equal(flask.render_template('nested/nested.txt'), 'I\'m nested')
def test_default_static_cache_timeout(self):
app = flask.Flask(__name__)
class MyBlueprint(flask.Blueprint):
def get_send_file_max_age(self, filename):
return 100
blueprint = MyBlueprint('blueprint', __name__, static_folder='static')
app.register_blueprint(blueprint)
# try/finally, in case other tests use this app for Blueprint tests.
max_age_default = app.config['SEND_FILE_MAX_AGE_DEFAULT']
try:
with app.test_request_context():
unexpected_max_age = 3600
if app.config['SEND_FILE_MAX_AGE_DEFAULT'] == unexpected_max_age:
unexpected_max_age = 7200
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = unexpected_max_age
rv = blueprint.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 100)
rv.close()
finally:
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = max_age_default
def test_templates_list(self):
from blueprintapp import app
templates = sorted(app.jinja_env.list_templates())
self.assert_equal(templates, ['admin/index.html',
'frontend/index.html'])
def test_dotted_names(self):
frontend = flask.Blueprint('myapp.frontend', __name__)
backend = flask.Blueprint('myapp.backend', __name__)
@frontend.route('/fe')
def frontend_index():
return flask.url_for('myapp.backend.backend_index')
@frontend.route('/fe2')
def frontend_page2():
return flask.url_for('.frontend_index')
@backend.route('/be')
def backend_index():
return flask.url_for('myapp.frontend.frontend_index')
app = flask.Flask(__name__)
app.register_blueprint(frontend)
app.register_blueprint(backend)
c = app.test_client()
self.assert_equal(c.get('/fe').data.strip(), b'/be')
self.assert_equal(c.get('/fe2').data.strip(), b'/fe')
self.assert_equal(c.get('/be').data.strip(), b'/fe')
def test_dotted_names_from_app(self):
app = flask.Flask(__name__)
app.testing = True
test = flask.Blueprint('test', __name__)
@app.route('/')
def app_index():
return flask.url_for('test.index')
@test.route('/test/')
def index():
return flask.url_for('app_index')
app.register_blueprint(test)
with app.test_client() as c:
rv = c.get('/')
self.assert_equal(rv.data, b'/test/')
def test_empty_url_defaults(self):
bp = flask.Blueprint('bp', __name__)
@bp.route('/', defaults={'page': 1})
@bp.route('/page/<int:page>')
def something(page):
return str(page)
app = flask.Flask(__name__)
app.register_blueprint(bp)
c = app.test_client()
self.assert_equal(c.get('/').data, b'1')
self.assert_equal(c.get('/page/2').data, b'2')
def test_route_decorator_custom_endpoint(self):
bp = flask.Blueprint('bp', __name__)
@bp.route('/foo')
def foo():
return flask.request.endpoint
@bp.route('/bar', endpoint='bar')
def foo_bar():
return flask.request.endpoint
@bp.route('/bar/123', endpoint='123')
def foo_bar_foo():
return flask.request.endpoint
@bp.route('/bar/foo')
def bar_foo():
return flask.request.endpoint
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.request.endpoint
c = app.test_client()
self.assertEqual(c.get('/').data, b'index')
self.assertEqual(c.get('/py/foo').data, b'bp.foo')
self.assertEqual(c.get('/py/bar').data, b'bp.bar')
self.assertEqual(c.get('/py/bar/123').data, b'bp.123')
self.assertEqual(c.get('/py/bar/foo').data, b'bp.bar_foo')
def test_route_decorator_custom_endpoint_with_dots(self):
bp = flask.Blueprint('bp', __name__)
@bp.route('/foo')
def foo():
return flask.request.endpoint
try:
@bp.route('/bar', endpoint='bar.bar')
def foo_bar():
return flask.request.endpoint
except AssertionError:
pass
else:
raise AssertionError('expected AssertionError not raised')
try:
@bp.route('/bar/123', endpoint='bar.123')
def foo_bar_foo():
return flask.request.endpoint
except AssertionError:
pass
else:
raise AssertionError('expected AssertionError not raised')
def foo_foo_foo():
pass
self.assertRaises(
AssertionError,
lambda: bp.add_url_rule(
'/bar/123', endpoint='bar.123', view_func=foo_foo_foo
)
)
self.assertRaises(
AssertionError,
bp.route('/bar/123', endpoint='bar.123'),
lambda: None
)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
c = app.test_client()
self.assertEqual(c.get('/py/foo').data, b'bp.foo')
# The rules didn't actually make it through
rv = c.get('/py/bar')
assert rv.status_code == 404
rv = c.get('/py/bar/123')
assert rv.status_code == 404
def test_template_filter(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('my_reverse', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')
def test_add_template_filter(self):
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('my_reverse', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')
def test_template_filter_with_name(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('strrev')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('strrev', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')
def test_add_template_filter_with_name(self):
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'strrev')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('strrev', app.jinja_env.filters.keys())
self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')
def test_template_filter_with_template(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_template_filter_after_route_with_template(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter()
def super_reverse(s):
return s[::-1]
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_add_template_filter_with_template(self):
bp = flask.Blueprint('bp', __name__)
def super_reverse(s):
return s[::-1]
bp.add_app_template_filter(super_reverse)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_template_filter_with_name_and_template(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_filter('super_reverse')
def my_reverse(s):
return s[::-1]
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_add_template_filter_with_name_and_template(self):
bp = flask.Blueprint('bp', __name__)
def my_reverse(s):
return s[::-1]
bp.add_app_template_filter(my_reverse, 'super_reverse')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_filter.html', value='abcd')
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'dcba')
def test_template_test(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('is_boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['is_boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['is_boolean'](False))
def test_add_template_test(self):
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('is_boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['is_boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['is_boolean'](False))
def test_template_test_with_name(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['boolean'](False))
def test_add_template_test_with_name(self):
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
self.assert_in('boolean', app.jinja_env.tests.keys())
self.assert_equal(app.jinja_env.tests['boolean'], is_boolean)
self.assert_true(app.jinja_env.tests['boolean'](False))
def test_template_test_with_template(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_template_test_after_route_with_template(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test()
def boolean(value):
return isinstance(value, bool)
app.register_blueprint(bp, url_prefix='/py')
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_add_template_test_with_template(self):
bp = flask.Blueprint('bp', __name__)
def boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(boolean)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_template_test_with_name_and_template(self):
bp = flask.Blueprint('bp', __name__)
@bp.app_template_test('boolean')
def is_boolean(value):
return isinstance(value, bool)
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def test_add_template_test_with_name_and_template(self):
bp = flask.Blueprint('bp', __name__)
def is_boolean(value):
return isinstance(value, bool)
bp.add_app_template_test(is_boolean, 'boolean')
app = flask.Flask(__name__)
app.register_blueprint(bp, url_prefix='/py')
@app.route('/')
def index():
return flask.render_template('template_test.html', value=False)
rv = app.test_client().get('/')
self.assert_in(b'Success!', rv.data)
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(BlueprintTestCase))
suite.addTest(unittest.makeSuite(ModuleTestCase))
return suite
|
MaxStrange/ArtieInfant
|
refs/heads/master
|
scripts/spectrogram_verifier/verifier.py
|
1
|
"""
Verifies that all the spectrograms in a directory have the same dimensionality.
Prints out any that do not match the expected dimensionality.
"""
import imageio
import os
import sys
import tqdm
if __name__ == "__main__":
if len(sys.argv) != 3:
print("USAGE: python {} <path-to-dir> <remove: y/n>".format(sys.argv[0]))
exit(1)
elif not os.path.isdir(sys.argv[1]):
print("{} is not a valid directory.".format(sys.argv[1]))
exit(2)
remove = sys.argv[2].strip().lower() == 'y'
targetdir = sys.argv[1]
fpaths = [os.path.join(targetdir, fname) for fname in os.listdir(targetdir)]
for fpath in tqdm.tqdm(fpaths):
spec = imageio.imread(fpath)
if spec.shape != (241, 20):
print("{} has shape {}".format(fpath, spec.shape))
if remove:
os.remove(fpath)
|
huguesv/PTVS
|
refs/heads/master
|
Python/Product/Miniconda/Miniconda3-x64/Tools/scripts/gprof2html.py
|
28
|
#! /usr/bin/env python3
"""Transform gprof(1) output into useful HTML."""
import html
import os
import re
import sys
import webbrowser
header = """\
<html>
<head>
<title>gprof output (%s)</title>
</head>
<body>
<pre>
"""
trailer = """\
</pre>
</body>
</html>
"""
def add_escapes(filename):
with open(filename) as fp:
for line in fp:
yield html.escape(line)
def main():
filename = "gprof.out"
if sys.argv[1:]:
filename = sys.argv[1]
outputfilename = filename + ".html"
input = add_escapes(filename)
output = open(outputfilename, "w")
output.write(header % filename)
for line in input:
output.write(line)
if line.startswith(" time"):
break
labels = {}
for line in input:
m = re.match(r"(.* )(\w+)\n", line)
if not m:
output.write(line)
break
stuff, fname = m.group(1, 2)
labels[fname] = fname
output.write('%s<a name="flat:%s" href="#call:%s">%s</a>\n' %
(stuff, fname, fname, fname))
for line in input:
output.write(line)
if line.startswith("index % time"):
break
for line in input:
m = re.match(r"(.* )(\w+)(( <cycle.*>)? \[\d+\])\n", line)
if not m:
output.write(line)
if line.startswith("Index by function name"):
break
continue
prefix, fname, suffix = m.group(1, 2, 3)
if fname not in labels:
output.write(line)
continue
if line.startswith("["):
output.write('%s<a name="call:%s" href="#flat:%s">%s</a>%s\n' %
(prefix, fname, fname, fname, suffix))
else:
output.write('%s<a href="#call:%s">%s</a>%s\n' %
(prefix, fname, fname, suffix))
for line in input:
for part in re.findall(r"(\w+(?:\.c)?|\W+)", line):
if part in labels:
part = '<a href="#call:%s">%s</a>' % (part, part)
output.write(part)
output.write(trailer)
output.close()
webbrowser.open("file:" + os.path.abspath(outputfilename))
if __name__ == '__main__':
main()
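# Hedged usage note (program and file names below are illustrative): the script
# post-processes an existing gprof(1) report, so a typical invocation is
#
#   $ gprof ./myprogram gmon.out > gprof.out
#   $ python3 gprof2html.py gprof.out    # writes gprof.out.html and opens it
#
# With no argument, main() above falls back to reading "gprof.out" from the
# current directory.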
|
ebenezzerr/search-bot
|
refs/heads/master
|
plugins/canary.py
|
1
|
import time
outputs = []
def canary():
#NOTE: you must add a real channel ID for this to work
outputs.append(["D0FEYBP9V", "bot started: " + str(time.time())])
canary()
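# Hedged sketch (illustrative only; the drain-and-post behaviour of the
# surrounding bot framework is an assumption based on the plugin contract used
# above): plugins communicate by appending [channel_id, text] pairs to
# `outputs`, so a reusable helper could look like this.
def announce(channel_id, text):
    """Queue a message for the bot framework to post to `channel_id`."""
    outputs.append([channel_id, text])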
|
CyanogenMod/android_external_chromium_org
|
refs/heads/cm-12.0
|
tools/cr/cr/actions/gyp.py
|
59
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module to add gyp support to cr."""
import cr
import os
GYP_DEFINE_PREFIX = 'GYP_DEF_'
class GypPrepareOut(cr.PrepareOut):
"""A prepare action that runs gyp whenever you select an output directory."""
ENABLED = cr.Config.From(
GYP_GENERATORS='ninja',
GYP_GENERATOR_FLAGS='output_dir={CR_OUT_BASE} config={CR_BUILDTYPE}',
GYP_DEF_target_arch='{CR_ENVSETUP_ARCH}',
)
def UpdateContext(self):
# Collapse GYP_DEFINES from all GYP_DEF prefixes
gyp_defines = cr.context.Find('GYP_DEFINES') or ''
for key, value in cr.context.exported.items():
if key.startswith(GYP_DEFINE_PREFIX):
gyp_defines += ' %s=%s' % (key[len(GYP_DEFINE_PREFIX):], value)
cr.context['GYP_DEFINES'] = gyp_defines.strip()
if cr.context.verbose >= 1:
print cr.context.Substitute('GYP_DEFINES = {GYP_DEFINES}')
def Prepare(self):
if cr.context.verbose >= 1:
print cr.context.Substitute('Invoking gyp with {GYP_GENERATOR_FLAGS}')
cr.Host.Execute(
'{CR_SRC}/build/gyp_chromium',
'--depth={CR_SRC}',
'--check'
)
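if __name__ == '__main__':
    # Hedged illustrative sketch (not part of the cr plugin proper): shows how
    # the GYP_DEF_ prefix collapse in UpdateContext() above assembles a
    # GYP_DEFINES string, using a plain dict in place of cr.context.exported.
    exported = {'GYP_DEF_target_arch': 'x64', 'UNRELATED': '1'}
    gyp_defines = ''
    for key, value in exported.items():
        if key.startswith(GYP_DEFINE_PREFIX):
            gyp_defines += ' %s=%s' % (key[len(GYP_DEFINE_PREFIX):], value)
    print(gyp_defines.strip())  # -> target_arch=x64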
|
sudheesh001/oh-mainline
|
refs/heads/master
|
vendor/packages/twisted/doc/lore/howto/listings/lore/a_lore_plugin.py
|
21
|
from zope.interface import implements
from twisted.plugin import IPlugin
from twisted.lore.scripts.lore import IProcessor
class MyHTML(object):
implements(IPlugin, IProcessor)
name = "myhtml"
moduleName = "myhtml.factory"
|
dpgeorge/micropython
|
refs/heads/master
|
tests/basics/bytes_compare.py
|
117
|
print(b"" == b"")
print(b"" > b"")
print(b"" < b"")
print(b"" == b"1")
print(b"1" == b"")
print("==")
print(b"" > b"1")
print(b"1" > b"")
print(b"" < b"1")
print(b"1" < b"")
print(b"" >= b"1")
print(b"1" >= b"")
print(b"" <= b"1")
print(b"1" <= b"")
print(b"1" == b"1")
print(b"1" != b"1")
print(b"1" == b"2")
print(b"1" == b"10")
print(b"1" > b"1")
print(b"1" > b"2")
print(b"2" > b"1")
print(b"10" > b"1")
print(b"1/" > b"1")
print(b"1" > b"10")
print(b"1" > b"1/")
print(b"1" < b"1")
print(b"2" < b"1")
print(b"1" < b"2")
print(b"1" < b"10")
print(b"1" < b"1/")
print(b"10" < b"1")
print(b"1/" < b"1")
print(b"1" >= b"1")
print(b"1" >= b"2")
print(b"2" >= b"1")
print(b"10" >= b"1")
print(b"1/" >= b"1")
print(b"1" >= b"10")
print(b"1" >= b"1/")
print(b"1" <= b"1")
print(b"2" <= b"1")
print(b"1" <= b"2")
print(b"1" <= b"10")
print(b"1" <= b"1/")
print(b"10" <= b"1")
print(b"1/" <= b"1")
print(b'o' == b'\n')
|
anntzer/scipy
|
refs/heads/master
|
scipy/stats/_binned_statistic.py
|
12
|
import builtins
import numpy as np
from numpy.testing import suppress_warnings
from operator import index
from collections import namedtuple
__all__ = ['binned_statistic',
'binned_statistic_2d',
'binned_statistic_dd']
BinnedStatisticResult = namedtuple('BinnedStatisticResult',
('statistic', 'bin_edges', 'binnumber'))
def binned_statistic(x, values, statistic='mean',
bins=10, range=None):
"""
Compute a binned statistic for one or more sets of data.
This is a generalization of a histogram function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a set of sequences - each the same shape as
`x`. If `values` is a set of sequences, the statistic will be computed
on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'std' : compute the standard deviation within each bin. This
is implicitly calculated with ddof=0.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
* 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or sequence of scalars, optional
If `bins` is an int, it defines the number of equal-width bins in the
given range (10 by default). If `bins` is a sequence, it defines the
bin edges, including the rightmost edge, allowing for non-uniform bin
widths. Values in `x` that are smaller than the lowest bin edge are
assigned to bin number 0, values beyond the highest bin are assigned to
``bins[-1]``. If the bin edges are specified, the number of bins will
be (nx = len(bins)-1).
range : (float, float) or [(float, float)], optional
The lower and upper range of the bins. If not provided, range
is simply ``(x.min(), x.max())``. Values outside the range are
ignored.
Returns
-------
statistic : array
The values of the selected statistic in each bin.
bin_edges : array of dtype float
Return the bin edges ``(length(statistic)+1)``.
binnumber: 1-D ndarray of ints
Indices of the bins (corresponding to `bin_edges`) in which each value
of `x` belongs. Same length as `values`. A binnumber of `i` means the
corresponding value is between (bin_edges[i-1], bin_edges[i]).
See Also
--------
numpy.digitize, numpy.histogram, binned_statistic_2d, binned_statistic_dd
Notes
-----
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
First some basic examples:
Create two evenly spaced bins in the range of the given sample, and sum the
corresponding values in each of those bins:
>>> values = [1.0, 1.0, 2.0, 1.5, 3.0]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
BinnedStatisticResult(statistic=array([4. , 4.5]),
bin_edges=array([1., 4., 7.]), binnumber=array([1, 1, 1, 2, 2]))
Multiple arrays of values can also be passed. The statistic is calculated
on each set independently:
>>> values = [[1.0, 1.0, 2.0, 1.5, 3.0], [2.0, 2.0, 4.0, 3.0, 6.0]]
>>> stats.binned_statistic([1, 1, 2, 5, 7], values, 'sum', bins=2)
BinnedStatisticResult(statistic=array([[4. , 4.5],
[8. , 9. ]]), bin_edges=array([1., 4., 7.]),
binnumber=array([1, 1, 1, 2, 2]))
>>> stats.binned_statistic([1, 2, 1, 2, 4], np.arange(5), statistic='mean',
... bins=3)
BinnedStatisticResult(statistic=array([1., 2., 4.]),
bin_edges=array([1., 2., 3., 4.]),
binnumber=array([1, 2, 1, 2, 3]))
As a second example, we now generate some random data of sailing boat speed
as a function of wind speed, and then determine how fast our boat is for
certain wind speeds:
>>> rng = np.random.default_rng()
>>> windspeed = 8 * rng.random(500)
>>> boatspeed = .3 * windspeed**.5 + .2 * rng.random(500)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(windspeed,
... boatspeed, statistic='median', bins=[1,2,3,4,5,6,7])
>>> plt.figure()
>>> plt.plot(windspeed, boatspeed, 'b.', label='raw data')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=5,
... label='binned statistic of data')
>>> plt.legend()
Now we can use ``binnumber`` to select all datapoints with a windspeed
below 1:
>>> low_boatspeed = boatspeed[binnumber == 0]
As a final example, we will use ``bin_edges`` and ``binnumber`` to make a
plot of a distribution that shows the mean and distribution around that
mean per bin, on top of a regular histogram and the probability
distribution function:
>>> x = np.linspace(0, 5, num=500)
>>> x_pdf = stats.maxwell.pdf(x)
>>> samples = stats.maxwell.rvs(size=10000)
>>> bin_means, bin_edges, binnumber = stats.binned_statistic(x, x_pdf,
... statistic='mean', bins=25)
>>> bin_width = (bin_edges[1] - bin_edges[0])
>>> bin_centers = bin_edges[1:] - bin_width/2
>>> plt.figure()
>>> plt.hist(samples, bins=50, density=True, histtype='stepfilled',
... alpha=0.2, label='histogram of data')
>>> plt.plot(x, x_pdf, 'r-', label='analytical pdf')
>>> plt.hlines(bin_means, bin_edges[:-1], bin_edges[1:], colors='g', lw=2,
... label='binned statistic of data')
>>> plt.plot((binnumber - 0.5) * bin_width, x_pdf, 'g.', alpha=0.5)
>>> plt.legend(fontsize=10)
>>> plt.show()
"""
try:
N = len(bins)
except TypeError:
N = 1
if N != 1:
bins = [np.asarray(bins, float)]
if range is not None:
if len(range) == 2:
range = [range]
medians, edges, binnumbers = binned_statistic_dd(
[x], values, statistic, bins, range)
return BinnedStatisticResult(medians, edges[0], binnumbers)
BinnedStatistic2dResult = namedtuple('BinnedStatistic2dResult',
('statistic', 'x_edge', 'y_edge',
'binnumber'))
def binned_statistic_2d(x, y, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False):
"""
Compute a bidimensional binned statistic for one or more sets of data.
This is a generalization of a histogram2d function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values (or set of values) within each bin.
Parameters
----------
x : (N,) array_like
A sequence of values to be binned along the first dimension.
y : (N,) array_like
A sequence of values to be binned along the second dimension.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `x`, or a list of sequences - each with the same
shape as `x`. If `values` is such a list, the statistic will be
computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'std' : compute the standard deviation within each bin. This
is implicitly calculated with ddof=0.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
* 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : int or [int, int] or array_like or [array, array], optional
The bin specification:
* the number of bins for the two dimensions (nx = ny = bins),
* the number of bins in each dimension (nx, ny = bins),
* the bin edges for the two dimensions (x_edge = y_edge = bins),
* the bin edges in each dimension (x_edge, y_edge = bins).
If the bin edges are specified, the number of bins will be
(nx = len(x_edge)-1, ny = len(y_edge)-1).
range : (2,2) array_like, optional
The leftmost and rightmost edges of the bins along each dimension
(if not specified explicitly in the `bins` parameters):
[[xmin, xmax], [ymin, ymax]]. All values outside of this range will be
considered outliers and not tallied in the histogram.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (2,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section.
.. versionadded:: 0.17.0
Returns
-------
statistic : (nx, ny) ndarray
The values of the selected statistic in each two-dimensional bin.
x_edge : (nx + 1) ndarray
The bin edges along the first dimension.
y_edge : (ny + 1) ndarray
The bin edges along the second dimension.
binnumber : (N,) array of ints or (2,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogram2d, binned_statistic, binned_statistic_dd
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open. In other words, if
`bins` is ``[1, 2, 3, 4]``, then the first bin is ``[1, 2)`` (including 1,
but excluding 2) and the second ``[2, 3)``. The last bin, however, is
``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (2,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(D_edge[i-1], D_edge[i]), where 'D' is either 'x' or 'y'.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
Calculate the counts with explicit bin-edges:
>>> x = [0.1, 0.1, 0.1, 0.6]
>>> y = [2.1, 2.6, 2.1, 2.1]
>>> binx = [0.0, 0.5, 1.0]
>>> biny = [2.0, 2.5, 3.0]
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny])
>>> ret.statistic
array([[2., 1.],
[1., 0.]])
The bin in which each sample is placed is given by the `binnumber`
returned parameter. By default, these are the linearized bin indices:
>>> ret.binnumber
array([5, 6, 5, 9])
The bin indices can also be expanded into separate entries for each
dimension using the `expand_binnumbers` parameter:
>>> ret = stats.binned_statistic_2d(x, y, None, 'count', bins=[binx, biny],
... expand_binnumbers=True)
>>> ret.binnumber
array([[1, 1, 1, 2],
[1, 2, 1, 1]])
This shows that the first three elements fall in x-bin 1 and the fourth in
x-bin 2, and likewise for the y dimension.
"""
# This code is based on np.histogram2d
try:
N = len(bins)
except TypeError:
N = 1
if N != 1 and N != 2:
xedges = yedges = np.asarray(bins, float)
bins = [xedges, yedges]
medians, edges, binnumbers = binned_statistic_dd(
[x, y], values, statistic, bins, range,
expand_binnumbers=expand_binnumbers)
return BinnedStatistic2dResult(medians, edges[0], edges[1], binnumbers)
BinnedStatisticddResult = namedtuple('BinnedStatisticddResult',
('statistic', 'bin_edges',
'binnumber'))
def binned_statistic_dd(sample, values, statistic='mean',
bins=10, range=None, expand_binnumbers=False,
binned_statistic_result=None):
"""
Compute a multidimensional binned statistic for a set of data.
This is a generalization of a histogramdd function. A histogram divides
the space into bins, and returns the count of the number of points in
each bin. This function allows the computation of the sum, mean, median,
or other statistic of the values within each bin.
Parameters
----------
sample : array_like
Data to histogram passed as a sequence of N arrays of length D, or
as an (N,D) array.
values : (N,) array_like or list of (N,) array_like
The data on which the statistic will be computed. This must be
the same shape as `sample`, or a list of sequences - each with the
same shape as `sample`. If `values` is such a list, the statistic
will be computed on each independently.
statistic : string or callable, optional
The statistic to compute (default is 'mean').
The following statistics are available:
* 'mean' : compute the mean of values for points within each bin.
Empty bins will be represented by NaN.
* 'median' : compute the median of values for points within each
bin. Empty bins will be represented by NaN.
* 'count' : compute the count of points within each bin. This is
identical to an unweighted histogram. `values` array is not
referenced.
* 'sum' : compute the sum of values for points within each bin.
This is identical to a weighted histogram.
* 'std' : compute the standard deviation within each bin. This
is implicitly calculated with ddof=0. If the number of values
within a given bin is 0 or 1, the computed standard deviation value
will be 0 for the bin.
* 'min' : compute the minimum of values for points within each bin.
Empty bins will be represented by NaN.
* 'max' : compute the maximum of values for points within each bin.
Empty bins will be represented by NaN.
* function : a user-defined function which takes a 1D array of
values, and outputs a single numerical statistic. This function
will be called on the values in each bin. Empty bins will be
represented by function([]), or NaN if this returns an error.
bins : sequence or positive int, optional
The bin specification must be in one of the following forms:
* A sequence of arrays describing the bin edges along each dimension.
* The number of bins for each dimension (nx, ny, ... = bins).
* The number of bins for all dimensions (nx = ny = ... = bins).
range : sequence, optional
A sequence of lower and upper bin edges to be used if the edges are
not given explicitly in `bins`. Defaults to the minimum and maximum
values along each dimension.
expand_binnumbers : bool, optional
'False' (default): the returned `binnumber` is a shape (N,) array of
linearized bin indices.
'True': the returned `binnumber` is 'unraveled' into a shape (D,N)
ndarray, where each row gives the bin numbers in the corresponding
dimension.
See the `binnumber` returned value, and the `Examples` section of
`binned_statistic_2d`.
binned_statistic_result : BinnedStatisticddResult
Result of a previous call to the function in order to reuse bin edges
and bin numbers with new values and/or a different statistic.
To reuse bin numbers, `expand_binnumbers` must have been set to False
(the default).
.. versionadded:: 0.17.0
Returns
-------
statistic : ndarray, shape(nx1, nx2, nx3,...)
The values of the selected statistic in each bin.
bin_edges : list of ndarrays
A list of D arrays describing the (nxi + 1) bin edges for each
dimension.
binnumber : (N,) array of ints or (D,N) ndarray of ints
This assigns to each element of `sample` an integer that represents the
bin in which this observation falls. The representation depends on the
`expand_binnumbers` argument. See `Notes` for details.
See Also
--------
numpy.digitize, numpy.histogramdd, binned_statistic, binned_statistic_2d
Notes
-----
Binedges:
All but the last (righthand-most) bin is half-open in each dimension. In
other words, if `bins` is ``[1, 2, 3, 4]``, then the first bin is
``[1, 2)`` (including 1, but excluding 2) and the second ``[2, 3)``. The
last bin, however, is ``[3, 4]``, which *includes* 4.
`binnumber`:
This returned argument assigns to each element of `sample` an integer that
represents the bin in which it belongs. The representation depends on the
`expand_binnumbers` argument. If 'False' (default): The returned
`binnumber` is a shape (N,) array of linearized indices mapping each
element of `sample` to its corresponding bin (using row-major ordering).
If 'True': The returned `binnumber` is a shape (D,N) ndarray where
each row indicates bin placements for each dimension respectively. In each
dimension, a binnumber of `i` means the corresponding value is between
(bin_edges[D][i-1], bin_edges[D][i]), for each dimension 'D'.
.. versionadded:: 0.11.0
Examples
--------
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
>>> from mpl_toolkits.mplot3d import Axes3D
Take an array of 600 (x, y) coordinates as an example.
`binned_statistic_dd` can handle arrays of higher dimension `D`. But a plot
of dimension `D+1` is required.
>>> mu = np.array([0., 1.])
>>> sigma = np.array([[1., -0.5],[-0.5, 1.5]])
>>> multinormal = stats.multivariate_normal(mu, sigma)
>>> data = multinormal.rvs(size=600, random_state=235412)
>>> data.shape
(600, 2)
Create bins and count how many arrays fall in each bin:
>>> N = 60
>>> x = np.linspace(-3, 3, N)
>>> y = np.linspace(-3, 4, N)
>>> ret = stats.binned_statistic_dd(data, np.arange(600), bins=[x, y],
... statistic='count')
>>> bincounts = ret.statistic
Set the volume and the location of bars:
>>> dx = x[1] - x[0]
>>> dy = y[1] - y[0]
>>> x, y = np.meshgrid(x[:-1]+dx/2, y[:-1]+dy/2)
>>> z = 0
>>> bincounts = bincounts.ravel()
>>> x = x.ravel()
>>> y = y.ravel()
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111, projection='3d')
>>> with np.errstate(divide='ignore'): # silence random axes3d warning
... ax.bar3d(x, y, z, dx, dy, bincounts)
Reuse bin numbers and bin edges with new values:
>>> ret2 = stats.binned_statistic_dd(data, -np.arange(600),
... binned_statistic_result=ret,
... statistic='mean')
"""
known_stats = ['mean', 'median', 'count', 'sum', 'std', 'min', 'max']
if not callable(statistic) and statistic not in known_stats:
raise ValueError('invalid statistic %r' % (statistic,))
try:
bins = index(bins)
except TypeError:
# bins is not an integer
pass
# If bins was an integer-like object, now it is an actual Python int.
# NOTE: for _bin_edges(), see e.g. gh-11365
if isinstance(bins, int) and not np.isfinite(sample).all():
raise ValueError('%r contains non-finite values.' % (sample,))
# `Ndim` is the number of dimensions (e.g. `2` for `binned_statistic_2d`)
# `Dlen` is the length of elements along each dimension.
# This code is based on np.histogramdd
try:
# `sample` is an ND-array.
Dlen, Ndim = sample.shape
except (AttributeError, ValueError):
# `sample` is a sequence of 1D arrays.
sample = np.atleast_2d(sample).T
Dlen, Ndim = sample.shape
# Store initial shape of `values` to preserve it in the output
values = np.asarray(values)
input_shape = list(values.shape)
# Make sure that `values` is 2D to iterate over rows
values = np.atleast_2d(values)
Vdim, Vlen = values.shape
# Make sure `values` match `sample`
if(statistic != 'count' and Vlen != Dlen):
raise AttributeError('The number of `values` elements must match the '
'length of each `sample` dimension.')
try:
M = len(bins)
if M != Ndim:
raise AttributeError('The dimension of bins must be equal '
'to the dimension of the sample x.')
except TypeError:
bins = Ndim * [bins]
if binned_statistic_result is None:
nbin, edges, dedges = _bin_edges(sample, bins, range)
binnumbers = _bin_numbers(sample, nbin, edges, dedges)
else:
edges = binned_statistic_result.bin_edges
nbin = np.array([len(edges[i]) + 1 for i in builtins.range(Ndim)])
# +1 for outlier bins
dedges = [np.diff(edges[i]) for i in builtins.range(Ndim)]
binnumbers = binned_statistic_result.binnumber
result = np.empty([Vdim, nbin.prod()], float)
if statistic == 'mean':
result.fill(np.nan)
flatcount = np.bincount(binnumbers, None)
a = flatcount.nonzero()
for vv in builtins.range(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
result[vv, a] = flatsum[a] / flatcount[a]
elif statistic == 'std':
result.fill(0)
_calc_binned_statistic(Vdim, binnumbers, result, values, np.std)
elif statistic == 'count':
result.fill(0)
flatcount = np.bincount(binnumbers, None)
a = np.arange(len(flatcount))
result[:, a] = flatcount[np.newaxis, :]
elif statistic == 'sum':
result.fill(0)
for vv in builtins.range(Vdim):
flatsum = np.bincount(binnumbers, values[vv])
a = np.arange(len(flatsum))
result[vv, a] = flatsum
elif statistic == 'median':
result.fill(np.nan)
_calc_binned_statistic(Vdim, binnumbers, result, values, np.median)
elif statistic == 'min':
result.fill(np.nan)
_calc_binned_statistic(Vdim, binnumbers, result, values, np.min)
elif statistic == 'max':
result.fill(np.nan)
_calc_binned_statistic(Vdim, binnumbers, result, values, np.max)
elif callable(statistic):
with np.errstate(invalid='ignore'), suppress_warnings() as sup:
sup.filter(RuntimeWarning)
try:
null = statistic([])
except Exception:
null = np.nan
result.fill(null)
_calc_binned_statistic(Vdim, binnumbers, result, values, statistic,
is_callable=True)
# Shape into a proper matrix
result = result.reshape(np.append(Vdim, nbin))
# Remove outliers (indices 0 and -1 for each bin-dimension).
core = tuple([slice(None)] + Ndim * [slice(1, -1)])
result = result[core]
# Unravel binnumbers into an ndarray, each row the bins for each dimension
if(expand_binnumbers and Ndim > 1):
binnumbers = np.asarray(np.unravel_index(binnumbers, nbin))
if np.any(result.shape[1:] != nbin - 2):
raise RuntimeError('Internal Shape Error')
# Reshape to have output (`result`) match input (`values`) shape
result = result.reshape(input_shape[:-1] + list(nbin-2))
return BinnedStatisticddResult(result, edges, binnumbers)
def _calc_binned_statistic(Vdim, bin_numbers, result, values, stat_func,
is_callable=False):
unique_bin_numbers = np.unique(bin_numbers)
for vv in builtins.range(Vdim):
bin_map = _create_binned_data(bin_numbers, unique_bin_numbers,
values, vv)
for i in unique_bin_numbers:
# if the stat_func is callable, all results should be updated
# if the stat_func is np.std, calc std only when binned data is 2
# or more for speed up.
if is_callable or not (stat_func is np.std and
len(bin_map[i]) < 2):
result[vv, i] = stat_func(np.array(bin_map[i]))
def _create_binned_data(bin_numbers, unique_bin_numbers, values, vv):
""" Create hashmap of bin ids to values in bins
key: bin number
value: list of binned data
"""
bin_map = dict()
for i in unique_bin_numbers:
bin_map[i] = []
for i in builtins.range(len(bin_numbers)):
bin_map[bin_numbers[i]].append(values[vv, i])
return bin_map
def _bin_edges(sample, bins=None, range=None):
""" Create edge arrays
"""
Dlen, Ndim = sample.shape
nbin = np.empty(Ndim, int) # Number of bins in each dimension
edges = Ndim * [None] # Bin edges for each dim (will be 2D array)
dedges = Ndim * [None] # Spacing between edges (will be 2D array)
# Select range for each dimension
# Used only if number of bins is given.
if range is None:
smin = np.atleast_1d(np.array(sample.min(axis=0), float))
smax = np.atleast_1d(np.array(sample.max(axis=0), float))
else:
if len(range) != Ndim:
raise ValueError(
f"range given for {len(range)} dimensions; {Ndim} required")
smin = np.empty(Ndim)
smax = np.empty(Ndim)
for i in builtins.range(Ndim):
if range[i][1] < range[i][0]:
raise ValueError(
"In {}range, start must be <= stop".format(
f"dimension {i + 1} of " if Ndim > 1 else ""))
smin[i], smax[i] = range[i]
# Make sure the bins have a finite width.
for i in builtins.range(len(smin)):
if smin[i] == smax[i]:
smin[i] = smin[i] - .5
smax[i] = smax[i] + .5
# Preserve sample floating point precision in bin edges
edges_dtype = (sample.dtype if np.issubdtype(sample.dtype, np.floating)
else float)
# Create edge arrays
for i in builtins.range(Ndim):
if np.isscalar(bins[i]):
nbin[i] = bins[i] + 2 # +2 for outlier bins
edges[i] = np.linspace(smin[i], smax[i], nbin[i] - 1,
dtype=edges_dtype)
else:
edges[i] = np.asarray(bins[i], edges_dtype)
nbin[i] = len(edges[i]) + 1 # +1 for outlier bins
dedges[i] = np.diff(edges[i])
nbin = np.asarray(nbin)
return nbin, edges, dedges
def _bin_numbers(sample, nbin, edges, dedges):
"""Compute the bin number each sample falls into, in each dimension
"""
Dlen, Ndim = sample.shape
sampBin = [
np.digitize(sample[:, i], edges[i])
for i in range(Ndim)
]
# Using `digitize`, values that fall on an edge are put in the right bin.
# For the rightmost bin, we want values equal to the right
# edge to be counted in the last bin, and not as an outlier.
for i in range(Ndim):
# Find the rounding precision
dedges_min = dedges[i].min()
if dedges_min == 0:
raise ValueError('The smallest edge difference is numerically 0.')
decimal = int(-np.log10(dedges_min)) + 6
# Find which points are on the rightmost edge.
on_edge = np.where(np.around(sample[:, i], decimal) ==
np.around(edges[i][-1], decimal))[0]
# Shift these points one bin to the left.
sampBin[i][on_edge] -= 1
# Compute the sample indices in the flattened statistic matrix.
binnumbers = np.ravel_multi_index(sampBin, nbin)
return binnumbers
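if __name__ == "__main__":
    # Hedged illustrative check (not part of the SciPy API surface): demonstrates
    # the right-edge handling implemented in _bin_numbers() above -- a sample that
    # equals the rightmost bin edge is counted in the last (closed) bin rather
    # than treated as an outlier, so with edges [1, 2, 3, 4] both 3.0 and 4.0
    # land in the final bin.
    res = binned_statistic([1.0, 2.0, 3.0, 4.0], [1.0, 1.0, 1.0, 1.0],
                           statistic='count', bins=[1, 2, 3, 4])
    print(res.statistic)  # expected: [1. 1. 2.]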
|
t-abe/chainer
|
refs/heads/master
|
tests/chainer_tests/functions_tests/loss_tests/test_hinge.py
|
3
|
import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class TestHinge(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (10, 5)).astype(numpy.float32)
# Avoid values around -1.0 for stability
self.x[numpy.logical_and(-1.01 < self.x, self.x < -0.99)] = 0.5
self.t = numpy.random.randint(0, 5, (10,)).astype(numpy.int32)
def check_forward(self, x_data, t_data, norm):
x_val = chainer.Variable(x_data)
t_val = chainer.Variable(t_data)
loss = functions.hinge(x_val, t_val, norm)
self.assertEqual(loss.data.shape, ())
self.assertEqual(loss.data.dtype, numpy.float32)
loss_value = float(cuda.to_cpu(loss.data))
# Compute expected value
for i in six.moves.range(self.x.shape[0]):
self.x[i, self.t[i]] *= -1
for i in six.moves.range(self.x.shape[0]):
for j in six.moves.range(self.x.shape[1]):
self.x[i, j] = max(0, 1.0 + self.x[i, j])
loss_expect = 0
if norm == 'L1':
loss_expect = numpy.sum(self.x) / self.x.shape[0]
elif norm == 'L2':
loss_expect += numpy.sum(self.x ** 2) / self.x.shape[0]
self.assertAlmostEqual(loss_expect, loss_value, places=5)
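    # Hedged worked example of the reference computation above (illustrative
    # only): for a single row x = [0.3, -0.2] with target t = 0, the target
    # column is negated to give [-0.3, -0.2], the margins max(0, 1 + x) become
    # [0.7, 0.8], so the L1 loss is (0.7 + 0.8) / 1 = 1.5 and the L2 loss is
    # (0.7**2 + 0.8**2) / 1 = 1.13.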
@condition.retry(3)
def test_forward_cpu_l1(self):
self.check_forward(self.x, self.t, 'L1')
@condition.retry(3)
def test_forward_cpu_l2(self):
self.check_forward(self.x, self.t, 'L2')
@attr.gpu
@condition.retry(3)
def test_forward_gpu_l1(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), 'L1')
@attr.gpu
@condition.retry(3)
def test_forward_gpu_l2(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), 'L2')
def check_backward(self, x_data, t_data, norm):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
loss = functions.hinge(x, t, norm)
loss.backward()
self.assertEqual(None, t.grad)
func = loss.creator
f = lambda: func.forward((x.data, t.data))
gx, = gradient_check.numerical_grad(f, (x.data,), (1,), eps=0.01)
gradient_check.assert_allclose(gx, x.grad, atol=1e-4)
@condition.retry(3)
def test_backward_cpu_l1(self):
self.check_backward(self.x, self.t, 'L1')
@condition.retry(3)
def test_backward_cpu_l2(self):
self.check_backward(self.x, self.t, 'L2')
@attr.gpu
@condition.retry(3)
def test_backward_gpu_l1(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), 'L1')
@attr.gpu
@condition.retry(3)
def test_backward_gpu_l2(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), 'L2')
testing.run_module(__name__, __file__)
|
pierreg/tensorflow
|
refs/heads/master
|
tensorflow/python/training/adagrad_test.py
|
19
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for aggregate operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class AdagradOptimizerTest(tf.test.TestCase):
def doTestBasic(self, use_locking=False):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.1], dtype=dtype)
grads1 = tf.constant([0.01, 0.01], dtype=dtype)
ada_opt = tf.train.AdagradOptimizer(3.0,
initial_accumulator_value=0.1,
use_locking=use_locking)
ada_update = ada_opt.apply_gradients(zip(
[grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval())
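  # Hedged worked check of the expected constants above (illustrative only):
  # with learning rate 3.0, initial accumulator 0.1 and a constant gradient of
  # 0.1, each step applies var -= 3.0 * 0.1 / sqrt(acc), where the accumulator
  # grows by 0.1**2 per step (0.11, 0.12, 0.13). The three decrements are about
  # 0.9045, 0.8660 and 0.8321, summing to 2.6026, which takes var0 from
  # [1.0, 2.0] to roughly [-1.6026, -0.6026] as asserted.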
def testBasic(self):
self.doTestBasic(use_locking=False)
def testBasicLocked(self):
self.doTestBasic(use_locking=True)
def testTensorLearningRate(self):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.1], dtype=dtype)
grads1 = tf.constant([0.01, 0.01], dtype=dtype)
ada_opt = tf.train.AdagradOptimizer(
tf.constant(3.0),
initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(zip(
[grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval())
def testSparseBasic(self):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
var0 = tf.Variable([[1.0], [2.0]], dtype=dtype)
var1 = tf.Variable([[3.0], [4.0]], dtype=dtype)
grads0 = tf.IndexedSlices(
tf.constant([0.1], shape=[1, 1], dtype=dtype),
tf.constant([0]),
tf.constant([2, 1]))
grads1 = tf.IndexedSlices(
tf.constant([0.01], shape=[1, 1], dtype=dtype),
tf.constant([1]),
tf.constant([2, 1]))
ada_opt = tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(zip(
[grads0, grads1], [var0, var1]))
tf.initialize_all_variables().run()
# Fetch params to validate initial values
self.assertAllClose([[1.0], [2.0]], var0.eval())
self.assertAllClose([[3.0], [4.0]], var1.eval())
# Run 3 steps of adagrad
for _ in range(3):
ada_update.run()
# Validate updated params
self.assertAllCloseAccordingToType(
np.array([[-1.6026098728179932], [2.0]]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([[3.0], [3.715679168701172]]), var1.eval())
def testSparseStability(self):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
shape = [1, 6]
var0 = tf.Variable(
[[0.00872496, -0.106952, 0.110467, 0.226505, -0.0147257,
-0.0105945]],
dtype=dtype)
grads0 = tf.IndexedSlices(
tf.constant(
[[-5.91278e-05, 5.31673e-05, -2.5779e-06, 4.29153e-05,
-8.4877e-05, -9.48906e-05]],
shape=shape,
dtype=dtype),
tf.constant([0]),
tf.constant(shape))
ada_opt = tf.train.AdagradOptimizer(1.0, initial_accumulator_value=0.1)
ada_update = ada_opt.apply_gradients(zip([grads0], [var0]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
init = tf.initialize_all_variables()
for _ in range(100):
init.run()
ada_update.run()
self.assertAllCloseAccordingToType(
np.array([[0.1, 0.1, 0.1, 0.1, 0.1, 0.1]]), slot0.eval())
self.assertAllCloseAccordingToType(
np.array([[0.00891194, -0.10712013, 0.11047515, 0.22636929, -
0.0144573, -0.01029443]]), var0.eval())
def testSharing(self):
for dtype in [tf.half, tf.float32, tf.float64]:
with self.test_session():
var0 = tf.Variable([1.0, 2.0], dtype=dtype)
var1 = tf.Variable([3.0, 4.0], dtype=dtype)
grads0 = tf.constant([0.1, 0.1], dtype=dtype)
grads1 = tf.constant([0.01, 0.01], dtype=dtype)
ada_opt = tf.train.AdagradOptimizer(3.0)
# Apply the optimizer twice. Both applications will use
# the same accums.
ada_update1 = ada_opt.apply_gradients(zip(
[grads0, grads1], [var0, var1]))
ada_update2 = ada_opt.apply_gradients(zip(
[grads0, grads1], [var0, var1]))
self.assertEqual(["accumulator"], ada_opt.get_slot_names())
slot0 = ada_opt.get_slot(var0, "accumulator")
self.assertEquals(slot0.get_shape(), var0.get_shape())
slot1 = ada_opt.get_slot(var1, "accumulator")
self.assertEquals(slot1.get_shape(), var1.get_shape())
tf.initialize_all_variables().run()
# Fetch params to validate initial values.
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
# Mix the first and the second adagrad for 3 steps.
ada_update1.run()
ada_update2.run()
ada_update1.run()
# Validate updated params (the same as with only 1 Adagrad).
self.assertAllCloseAccordingToType(
np.array([-1.6026098728179932, -0.6026098728179932]), var0.eval())
self.assertAllCloseAccordingToType(
np.array([2.715679168701172, 3.715679168701172]), var1.eval())
if __name__ == "__main__":
tf.test.main()
|
nkrishnaswami/census
|
refs/heads/master
|
uscensus/data/__init__.py
|
1
|
from .discovery import DiscoveryInterface
|
looooo/pivy
|
refs/heads/master
|
scons/scons-local-1.2.0.d20090919/SCons/Tool/mssdk.py
|
2
|
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mssdk.py 4369 2009/09/19 15:58:29 scons"
"""engine.SCons.Tool.mssdk
Tool-specific initialization for Microsoft SDKs, both Platform
SDKs and Windows SDKs.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
from MSCommon import mssdk_exists, \
mssdk_setup_env
def generate(env):
"""Add construction variables for an MS SDK to an Environment."""
mssdk_setup_env(env)
def exists(env):
return mssdk_exists()
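# Hedged usage note (illustrative): the tool is normally pulled in through the
# generic selection mechanism mentioned in the module docstring, e.g.
#
#     env = Environment(tools=['mssdk', 'msvc'])
#
# which ends up calling generate(env) above to populate the MS SDK construction
# variables; exists() is what lets SCons detect whether an SDK is available.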
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Tokyo-Buffalo/tokyosouth
|
refs/heads/master
|
env/lib/python3.6/site-packages/twisted/pair/ip.py
|
13
|
# -*- test-case-name: twisted.pair.test.test_ip -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
#
"""Support for working directly with IP packets"""
import struct
import socket
from twisted.internet import protocol
from twisted.pair import raw
from zope.interface import implementer
class IPHeader:
def __init__(self, data):
(ihlversion, self.tos, self.tot_len, self.fragment_id, frag_off,
self.ttl, self.protocol, self.check, saddr, daddr) \
= struct.unpack("!BBHHHBBH4s4s", data[:20])
self.saddr = socket.inet_ntoa(saddr)
self.daddr = socket.inet_ntoa(daddr)
self.version = ihlversion & 0x0F
self.ihl = ((ihlversion & 0xF0) >> 4) << 2
self.fragment_offset = frag_off & 0x1FFF
self.dont_fragment = (frag_off & 0x4000 != 0)
self.more_fragments = (frag_off & 0x2000 != 0)
MAX_SIZE = 2**32
@implementer(raw.IRawPacketProtocol)
class IPProtocol(protocol.AbstractDatagramProtocol):
def __init__(self):
self.ipProtos = {}
def addProto(self, num, proto):
proto = raw.IRawDatagramProtocol(proto)
if num < 0:
raise TypeError('Added protocol must be positive or zero')
if num >= MAX_SIZE:
raise TypeError('Added protocol must fit in 32 bits')
if num not in self.ipProtos:
self.ipProtos[num] = []
self.ipProtos[num].append(proto)
def datagramReceived(self,
data,
partial,
dest,
source,
protocol):
header = IPHeader(data)
for proto in self.ipProtos.get(header.protocol, ()):
proto.datagramReceived(data=data[20:],
partial=partial,
source=header.saddr,
dest=header.daddr,
protocol=header.protocol,
version=header.version,
ihl=header.ihl,
tos=header.tos,
tot_len=header.tot_len,
fragment_id=header.fragment_id,
fragment_offset=header.fragment_offset,
dont_fragment=header.dont_fragment,
more_fragments=header.more_fragments,
ttl=header.ttl,
)
|
gocardless/gocardless-pro-python
|
refs/heads/master
|
gocardless_pro/services/subscriptions_service.py
|
1
|
# WARNING: Do not edit by hand, this file was generated by Crank:
#
# https://github.com/gocardless/crank
#
from . import base_service
from .. import resources
from ..paginator import Paginator
from .. import errors
class SubscriptionsService(base_service.BaseService):
"""Service class that provides access to the subscriptions
endpoints of the GoCardless Pro API.
"""
RESOURCE_CLASS = resources.Subscription
RESOURCE_NAME = 'subscriptions'
    def create(self, params=None, headers=None):
"""Create a subscription.
Creates a new subscription object
Args:
params (dict, optional): Request body.
Returns:
          Subscription
"""
path = '/subscriptions'
if params is not None:
params = {self._envelope_key(): params}
try:
response = self._perform_request('POST', path, params, headers,
retry_failures=True)
except errors.IdempotentCreationConflictError as err:
if self.raise_on_idempotency_conflict:
raise err
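            # The conflict means an earlier request with the same idempotency
            # key already created this subscription, so fetch and return it.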
return self.get(identity=err.conflicting_resource_id,
params=params,
headers=headers)
return self._resource_for(response)
    def list(self, params=None, headers=None):
"""List subscriptions.
Returns a [cursor-paginated](#api-usage-cursor-pagination) list of your
subscriptions.
Args:
params (dict, optional): Query string parameters.
Returns:
          ListResponse of Subscription instances
"""
path = '/subscriptions'
response = self._perform_request('GET', path, params, headers,
retry_failures=True)
return self._resource_for(response)
def all(self, params=None):
if params is None:
params = {}
return Paginator(self, params)
    def get(self, identity, params=None, headers=None):
"""Get a single subscription.
Retrieves the details of a single subscription.
Args:
identity (string): Unique identifier, beginning with "SB".
params (dict, optional): Query string parameters.
Returns:
          Subscription
"""
path = self._sub_url_params('/subscriptions/:identity', {
'identity': identity,
})
response = self._perform_request('GET', path, params, headers,
retry_failures=True)
return self._resource_for(response)
    def update(self, identity, params=None, headers=None):
"""Update a subscription.
Updates a subscription object.
This fails with:
- `validation_failed` if invalid data is provided when attempting to
update a subscription.
- `subscription_not_active` if the subscription is no longer active.
- `subscription_already_ended` if the subscription has taken all
payments.
- `mandate_payments_require_approval` if the amount is being changed
and the mandate requires approval.
- `number_of_subscription_amendments_exceeded` error if the
subscription amount has already been changed 10 times.
- `forbidden` if the amount is being changed, and the subscription was
created by an app and you are not authenticated as that app, or if the
subscription was not created by an app and you are authenticated as an
app
- `resource_created_by_another_app` if the app fee is being changed,
and the subscription was created by an app other than the app you are
authenticated as
Args:
identity (string): Unique identifier, beginning with "SB".
params (dict, optional): Request body.
Returns:
          Subscription
"""
path = self._sub_url_params('/subscriptions/:identity', {
'identity': identity,
})
if params is not None:
params = {self._envelope_key(): params}
response = self._perform_request('PUT', path, params, headers,
retry_failures=True)
return self._resource_for(response)
    def pause(self, identity, params=None, headers=None):
"""Pause a subscription.
Pause a subscription object.
No payments will be created until it is resumed.
        This can only be used when a subscription is collecting a fixed number of
payments (created using `count`),
when they continue forever (created without `count` or `end_date`) or
the subscription is paused for a number of cycles.
When `pause_cycles` is omitted the subscription is paused until the
[resume endpoint](#subscriptions-resume-a-subscription) is called.
If the subscription is collecting a fixed number of payments,
`end_date` will be set to `null`.
When paused indefinitely, `upcoming_payments` will be empty.
When `pause_cycles` is provided the subscription will be paused for the
number of cycles requested.
If the subscription is collecting a fixed number of payments,
`end_date` will be set to a new value.
When paused for a number of cycles, `upcoming_payments` will still
contain the upcoming charge dates.
This fails with:
- `forbidden` if the subscription was created by an app and you are not
authenticated as that app, or if the subscription was not created by an
app and you are authenticated as an app
- `validation_failed` if invalid data is provided when attempting to
pause a subscription.
- `subscription_paused_cannot_update_cycles` if the subscription is
already paused for a number of cycles and the request provides a value
        for `pause_cycles`.
- `subscription_cannot_be_paused` if the subscription cannot be paused.
- `subscription_already_ended` if the subscription has taken all
payments.
- `pause_cycles_must_be_greater_than_or_equal_to` if the provided value
for `pause_cycles` cannot be satisfied.
Args:
identity (string): Unique identifier, beginning with "SB".
params (dict, optional): Request body.
Returns:
          Subscription
"""
path = self._sub_url_params('/subscriptions/:identity/actions/pause', {
'identity': identity,
})
if params is not None:
params = {'data': params}
response = self._perform_request('POST', path, params, headers,
retry_failures=False)
return self._resource_for(response)
    def resume(self, identity, params=None, headers=None):
"""Resume a subscription.
Resume a subscription object.
        Payments will start to be created again based on the subscription's
        recurrence rules.
        The `charge_date` on the next payment will be the same as the
        subscription's `earliest_charge_date_after_resume`.
This fails with:
- `forbidden` if the subscription was created by an app and you are not
authenticated as that app, or if the subscription was not created by an
app and you are authenticated as an app
- `validation_failed` if invalid data is provided when attempting to
resume a subscription.
- `subscription_not_paused` if the subscription is not paused.
Args:
identity (string): Unique identifier, beginning with "SB".
params (dict, optional): Request body.
Returns:
          Subscription
"""
path = self._sub_url_params('/subscriptions/:identity/actions/resume', {
'identity': identity,
})
if params is not None:
params = {'data': params}
response = self._perform_request('POST', path, params, headers,
retry_failures=False)
return self._resource_for(response)
    def cancel(self, identity, params=None, headers=None):
"""Cancel a subscription.
Immediately cancels a subscription; no more payments will be created
under it. Any metadata supplied to this endpoint will be stored on the
payment cancellation event it causes.
This will fail with a cancellation_failed error if the subscription is
already cancelled or finished.
Args:
identity (string): Unique identifier, beginning with "SB".
params (dict, optional): Request body.
Returns:
          Subscription
"""
path = self._sub_url_params('/subscriptions/:identity/actions/cancel', {
'identity': identity,
})
if params is not None:
params = {'data': params}
response = self._perform_request('POST', path, params, headers,
retry_failures=False)
return self._resource_for(response)
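# Editor's illustrative sketch, not part of the Crank-generated client: the
# service above is normally reached through the top-level client rather than
# instantiated directly. The token, mandate ID and amounts are placeholders,
# and the parameter names are taken from the public API reference, so verify
# them against current documentation before relying on this.
if __name__ == '__main__':
    import gocardless_pro

    client = gocardless_pro.Client(access_token='sandbox_token_here',
                                   environment='sandbox')
    # create() wraps params in the 'subscriptions' envelope and returns a
    # single Subscription resource.
    sub = client.subscriptions.create(params={
        'amount': 2500,                        # minor units (pence)
        'currency': 'GBP',
        'interval_unit': 'monthly',
        'links': {'mandate': 'MD0000EXAMPLE'},
    })
    # pause()/resume() call the /actions endpoints defined above.
    client.subscriptions.pause(sub.id, params={'pause_cycles': 1})
    client.subscriptions.resume(sub.id)
    # all() wraps list() in a Paginator that fetches further pages lazily.
    for record in client.subscriptions.all():
        print(record.id, record.status)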
|
jasonbot/django
|
refs/heads/master
|
tests/utils_tests/test_safestring.py
|
278
|
from __future__ import unicode_literals
from django.template import Context, Template
from django.test import SimpleTestCase
from django.utils import html, six, text
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import lazy
from django.utils.safestring import (
EscapeData, SafeData, mark_for_escaping, mark_safe,
)
lazystr = lazy(force_text, six.text_type)
lazybytes = lazy(force_bytes, bytes)
class customescape(six.text_type):
def __html__(self):
# implement specific and obviously wrong escaping
# in order to be able to tell for sure when it runs
return self.replace('<', '<<').replace('>', '>>')
class SafeStringTest(SimpleTestCase):
def assertRenderEqual(self, tpl, expected, **context):
context = Context(context)
tpl = Template(tpl)
self.assertEqual(tpl.render(context), expected)
def test_mark_safe(self):
s = mark_safe('a&b')
self.assertRenderEqual('{{ s }}', 'a&b', s=s)
self.assertRenderEqual('{{ s|force_escape }}', 'a&b', s=s)
def test_mark_safe_object_implementing_dunder_html(self):
e = customescape('<a&b>')
s = mark_safe(e)
self.assertIs(s, e)
self.assertRenderEqual('{{ s }}', '<<a&b>>', s=s)
self.assertRenderEqual('{{ s|force_escape }}', '<a&b>', s=s)
def test_mark_safe_lazy(self):
s = lazystr('a&b')
b = lazybytes(b'a&b')
self.assertIsInstance(mark_safe(s), SafeData)
self.assertIsInstance(mark_safe(b), SafeData)
self.assertRenderEqual('{{ s }}', 'a&b', s=mark_safe(s))
def test_mark_safe_object_implementing_dunder_str(self):
class Obj(object):
def __str__(self):
return '<obj>'
s = mark_safe(Obj())
self.assertRenderEqual('{{ s }}', '<obj>', s=s)
def test_mark_safe_result_implements_dunder_html(self):
self.assertEqual(mark_safe('a&b').__html__(), 'a&b')
def test_mark_safe_lazy_result_implements_dunder_html(self):
self.assertEqual(mark_safe(lazystr('a&b')).__html__(), 'a&b')
def test_mark_for_escaping(self):
s = mark_for_escaping('a&b')
self.assertRenderEqual('{{ s }}', 'a&b', s=s)
self.assertRenderEqual('{{ s }}', 'a&b', s=mark_for_escaping(s))
def test_mark_for_escaping_object_implementing_dunder_html(self):
e = customescape('<a&b>')
s = mark_for_escaping(e)
self.assertIs(s, e)
self.assertRenderEqual('{{ s }}', '<<a&b>>', s=s)
self.assertRenderEqual('{{ s|force_escape }}', '<a&b>', s=s)
def test_mark_for_escaping_lazy(self):
s = lazystr('a&b')
b = lazybytes(b'a&b')
self.assertIsInstance(mark_for_escaping(s), EscapeData)
self.assertIsInstance(mark_for_escaping(b), EscapeData)
self.assertRenderEqual('{% autoescape off %}{{ s }}{% endautoescape %}', 'a&b', s=mark_for_escaping(s))
def test_mark_for_escaping_object_implementing_dunder_str(self):
class Obj(object):
def __str__(self):
return '<obj>'
s = mark_for_escaping(Obj())
self.assertRenderEqual('{{ s }}', '<obj>', s=s)
def test_add_lazy_safe_text_and_safe_text(self):
s = html.escape(lazystr('a'))
s += mark_safe('&b')
self.assertRenderEqual('{{ s }}', 'a&b', s=s)
s = html.escapejs(lazystr('a'))
s += mark_safe('&b')
self.assertRenderEqual('{{ s }}', 'a&b', s=s)
s = text.slugify(lazystr('a'))
s += mark_safe('&b')
self.assertRenderEqual('{{ s }}', 'a&b', s=s)
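# Editor's illustrative sketch, not part of Django's test suite: the behaviour
# the tests above pin down, shown without the template machinery (so no
# settings configuration is needed). conditional_escape() is the helper the
# template engine uses; it trusts any object exposing __html__(), which is
# exactly what mark_safe() provides.
if __name__ == '__main__':
    from django.utils.html import conditional_escape
    from django.utils.safestring import mark_safe

    plain = 'a&b'
    safe = mark_safe('a&b')
    print(conditional_escape(plain))   # 'a&amp;b' -- ordinary text is escaped
    print(conditional_escape(safe))    # 'a&b'     -- SafeData passes through
    print(safe.__html__())             # 'a&b'     -- the hook templates rely on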
|