| repo_name | ref | path | copies | content |
|---|---|---|---|---|
xifengchuo/openembedded
|
refs/heads/master
|
contrib/python/generate-manifest-2.4.py
|
45
|
#!/usr/bin/env python
# generate Python Manifest for the OpenEmbedded build system
# (C) 2002-2007 Michael 'Mickey' Lauer <mickey@Vanille.de>
# MIT license
import os
import sys
import time
VERSION = "2.4.4"
# increase when touching python-core
BASEREV = 2
__author__ = "Michael 'Mickey' Lauer <mickey@Vanille.de>"
__version__ = "20070721"
class MakefileMaker:
def __init__( self, outfile ):
"""initialize"""
self.packages = {}
self.sourcePrefix = "/lib/python%s/" % VERSION[:3]
self.targetPrefix = "${libdir}/python%s" % VERSION[:3]
self.output = outfile
self.out( "#" * 120 )
self.out( "### AUTO-GENERATED by '%s' [(C) 2002-2007 Michael 'Mickey' Lauer <mickey@Vanille.de>] on %s" % ( sys.argv[0], time.asctime() ) )
self.out( "###" )
self.out( "### Visit THE Python for Embedded Systems Site => http://www.Vanille.de/projects/python.spy" )
self.out( "###" )
self.out( "### Warning: Manual edits will be lost!" )
self.out( "###" )
self.out( "#" * 120 )
#
# helper functions
#
def out( self, data ):
"""print a line to the output file"""
print >> self.output, data
def setPrefix( self, sourcePrefix, targetPrefix ):
"""set a file prefix for addPackage files"""
self.sourcePrefix = sourcePrefix
self.targetPrefix = targetPrefix
def doProlog( self ):
self.out( """ """ )
self.out( "" )
def addPackage( self, revision, name, description, dependencies, filenames ):
"""add a package to the Makefile"""
if type( filenames ) == type( "" ):
filenames = filenames.split()
fullFilenames = []
for filename in filenames:
if filename[0] != "/":
fullFilenames.append( ( "%s%s" % ( self.sourcePrefix, filename ), "%s%s" % ( self.targetPrefix, filename ) ) )
else:
fullFilenames.append( ( filename, filename ) )
self.packages[name] = revision, description, dependencies, fullFilenames
def doBody( self ):
"""generate body of Makefile"""
global VERSION
#
# generate provides line
#
provideLine = 'PROVIDES+="'
for name in self.packages:
provideLine += "%s " % name
provideLine += '"'
self.out( provideLine )
self.out( "" )
#
# generate package line
#
packageLine = 'PACKAGES="'
for name in self.packages:
packageLine += "%s " % name
packageLine += '"'
self.out( packageLine )
self.out( "" )
#
# generate package variables
#
for name, data in self.packages.iteritems():
rev, desc, deps, files = data
#
# write out the description, revision and dependencies
#
self.out( 'DESCRIPTION_%s="%s"' % ( name, desc ) )
self.out( 'PR_%s="ml%d"' % ( name, rev + BASEREV ) )
self.out( 'RDEPENDS_%s="%s"' % ( name, deps.replace( ",", "" ) ) )
line = 'FILES_%s="' % name
#
# check which directories to make in the temporary directory
#
dirset = {} # if python had a set-datatype this would be sufficient. for now, we're using a dict instead.
for source, target in files:
dirset[os.path.dirname( target )] = True
#
# generate which files to copy for the target (-dfR because whole directories are also allowed)
#
for source, target in files:
line += "%s " % target
line += '"'
self.out( line )
self.out( "" )
def doEpilog( self ):
self.out( """""" )
self.out( "" )
def make( self ):
self.doProlog()
self.doBody()
self.doEpilog()
if __name__ == "__main__":
if len( sys.argv ) > 1:
os.popen( "rm -f ./%s" % sys.argv[1] )
outfile = file( sys.argv[1], "w" )
else:
outfile = sys.stdout
m = MakefileMaker( outfile )
# Add packages here. Only specify dlopen-style library dependencies here, no ldd-style dependencies!
# Parameters: revision, name, description, dependencies, filenames
#
m.setPrefix( "/", "/usr/" )
m.addPackage( 2, "python-core", "Python Interpreter and core modules (needed!)", "",
"lib/python2.4/__future__.* lib/python2.4/copy.* lib/python2.4/copy_reg.* lib/python2.4/ConfigParser.* " +
"lib/python2.4/getopt.* lib/python2.4/linecache.* lib/python2.4/new.* " +
"lib/python2.4/os.* lib/python2.4/posixpath.* " +
"lib/python2.4/warnings.* lib/python2.4/site.* lib/python2.4/stat.* " +
"lib/python2.4/UserDict.* lib/python2.4/UserList.* lib/python2.4/UserString.* " +
"lib/python2.4/lib-dynload/binascii.so lib/python2.4/lib-dynload/struct.so lib/python2.4/lib-dynload/time.so " +
"lib/python2.4/lib-dynload/xreadlines.so lib/python2.4/types.* bin/python*" )
m.addPackage( 0, "python-core-dbg", "Python core module debug information", "python-core",
"lib/python2.4/lib-dynload/.debug bin/.debug lib/.debug" )
m.addPackage( 0, "python-devel", "Python Development Package", "python-core",
"include lib/python2.4/config" ) # package
m.addPackage( 0, "python-idle", "Python Integrated Development Environment", "python-core, python-tkinter",
"bin/idle lib/python2.4/idlelib" ) # package
m.addPackage( 0, "python-pydoc", "Python Interactive Help Support", "python-core, python-lang, python-stringold, python-re",
"bin/pydoc lib/python2.4/pydoc.*" )
m.addPackage( 0, "python-smtpd", "Python Simple Mail Transport Daemon", "python-core python-netserver python-email python-mime",
"bin/smtpd.*" )
m.setPrefix( "/lib/python2.4/", "${libdir}/python2.4/" )
m.addPackage( 0, "python-audio", "Python Audio Handling", "python-core",
"wave.* chunk.* sndhdr.* lib-dynload/ossaudiodev.so lib-dynload/audioop.so" )
m.addPackage( 0, "python-bsddb", "Python Berkeley Database Bindings", "python-core",
"bsddb" ) # package
m.addPackage( 0, "python-codecs", "Python Codecs, Encodings & i18n Support", "python-core",
"codecs.* encodings gettext.* locale.* lib-dynload/_locale.so lib-dynload/unicodedata.so stringprep.* xdrlib.*" )
m.addPackage( 0, "python-compile", "Python Bytecode Compilation Support", "python-core",
"py_compile.* compileall.*" )
m.addPackage( 0, "python-compiler", "Python Compiler Support", "python-core",
"compiler" ) # package
m.addPackage( 0, "python-compression", "Python High Level Compression Support", "python-core, python-zlib",
"gzip.* zipfile.*" )
m.addPackage( 0, "python-crypt", "Python Basic Cryptographic and Hashing Support", "python-core",
"lib-dynload/crypt.so lib-dynload/md5.so lib-dynload/rotor.so lib-dynload/sha.so" )
m.addPackage( 0, "python-textutils", "Python Option Parsing, Text Wrapping and Comma-Separated-Value Support", "python-core, python-io, python-re, python-stringold",
"lib-dynload/_csv.so csv.* optparse.* textwrap.*" )
m.addPackage( 0, "python-curses", "Python Curses Support", "python-core",
"curses lib-dynload/_curses.so lib-dynload/_curses_panel.so" ) # package
m.addPackage( 0, "python-datetime", "Python Calendar and Time support", "python-core, python-codecs",
"_strptime.* calendar.* lib-dynload/datetime.so" )
m.addPackage( 0, "python-db", "Python File-Based Database Support", "python-core",
"anydbm.* dumbdbm.* whichdb.* " )
m.addPackage( 0, "python-debugger", "Python Debugger", "python-core, python-io, python-lang, python-re, python-stringold, python-shell",
"bdb.* pdb.*" )
m.addPackage( 0, "python-distutils", "Python Distribution Utilities", "python-core",
"config distutils" ) # package
m.addPackage( 0, "python-email", "Python Email Support", "python-core, python-io, python-re, python-mime, python-audio python-image",
"email" ) # package
m.addPackage( 0, "python-fcntl", "Python's fcntl Interface", "python-core",
"lib-dynload/fcntl.so" )
m.addPackage( 0, "python-hotshot", "Python Hotshot Profiler", "python-core",
"hotshot lib-dynload/_hotshot.so" )
m.addPackage( 0, "python-html", "Python HTML Processing", "python-core",
"formatter.* htmlentitydefs.* htmllib.* markupbase.* sgmllib.* " )
m.addPackage( 0, "python-gdbm", "Python GNU Database Support", "python-core",
"lib-dynload/gdbm.so" )
m.addPackage( 0, "python-image", "Python Graphical Image Handling", "python-core",
"colorsys.* imghdr.* lib-dynload/imageop.so lib-dynload/rgbimg.so" )
m.addPackage( 0, "python-io", "Python Low-Level I/O", "python-core, python-math",
"lib-dynload/_socket.so lib-dynload/_ssl.so lib-dynload/select.so lib-dynload/termios.so lib-dynload/cStringIO.so "
"pipes.* socket.* tempfile.* StringIO.* " )
m.addPackage( 0, "python-lang", "Python Low-Level Language Support", "python-core",
"lib-dynload/array.so lib-dynload/parser.so lib-dynload/operator.so lib-dynload/_weakref.so " +
"lib-dynload/itertools.so lib-dynload/collections.so lib-dynload/_bisect.so lib-dynload/_heapq.so " +
"atexit.* bisect.* code.* codeop.* dis.* heapq.* inspect.* keyword.* opcode.* repr.* token.* tokenize.* " +
"traceback.* linecache.* weakref.*" )
m.addPackage( 0, "python-logging", "Python Logging Support", "python-core",
"logging" ) # package
m.addPackage( 0, "python-lib-old-and-deprecated", "Python Deprecated Libraries", "python-core",
"lib-old" ) # package
m.addPackage( 0, "python-tkinter", "Python Tcl/Tk Bindings", "python-core",
"lib-dynload/_tkinter.so lib-tk" ) # package
m.addPackage( 0, "python-math", "Python Math Support", "python-core",
"lib-dynload/cmath.so lib-dynload/math.so lib-dynload/_random.so random.* sets.*" )
m.addPackage( 0, "python-mime", "Python MIME Handling APIs", "python-core, python-io",
"mimetools.* uu.* quopri.* rfc822.*" )
m.addPackage( 0, "python-mmap", "Python Memory-Mapped-File Support", "python-core, python-io",
"lib-dynload/mmap.so " )
m.addPackage( 0, "python-unixadmin", "Python Unix Administration Support", "python-core",
"lib-dynload/nis.so lib-dynload/grp.so lib-dynload/pwd.so getpass.*" )
m.addPackage( 0, "python-netclient", "Python Internet Protocol Clients", "python-core, python-datetime, python-io, python-lang, python-logging, python-mime",
"*Cookie*.* " +
"base64.* cookielib.* ftplib.* gopherlib.* hmac.* httplib.* mimetypes.* nntplib.* poplib.* smtplib.* telnetlib.* urllib.* urllib2.* urlparse.*" )
m.addPackage( 0, "python-netserver", "Python Internet Protocol Servers", "python-core, python-netclient",
"cgi.* BaseHTTPServer.* SimpleHTTPServer.* SocketServer.*" )
m.addPackage( 0, "python-pickle", "Python Persistence Support", "python-core, python-codecs, python-io, python-re",
"pickle.* shelve.* lib-dynload/cPickle.so" )
m.addPackage( 0, "python-pprint", "Python Pretty-Print Support", "python-core",
"pprint.*" )
m.addPackage( 0, "python-profile", "Python Basic Profiling Support", "python-core",
"profile.* pstats.*" )
m.addPackage( 0, "python-re", "Python Regular Expression APIs", "python-core",
"re.* sre.* sre_compile.* sre_constants* sre_parse.*" ) # _sre is builtin
m.addPackage( 0, "python-readline", "Python Readline Support", "python-core",
"lib-dynload/readline.so rlcompleter.*" )
m.addPackage( 0, "python-resource", "Python Resource Control Interface", "python-core",
"lib-dynload/resource.so" )
m.addPackage( 0, "python-shell", "Python Shell-Like Functionality", "python-core, python-re",
"cmd.* commands.* dircache.* fnmatch.* glob.* popen2.* shutil.*" )
m.addPackage( 0, "python-robotparser", "Python robots.txt parser", "python-core, python-netclient",
"robotparser.*")
m.addPackage( 0, "python-subprocess", "Python Subprocess Support", "python-core, python-io, python-re, python-fcntl, python-pickle",
"subprocess.*" )
m.addPackage( 0, "python-stringold", "Python String APIs [deprecated]", "python-core, python-re",
"lib-dynload/strop.so string.*" )
m.addPackage( 0, "python-syslog", "Python's syslog Interface", "python-core",
"lib-dynload/syslog.so" )
m.addPackage( 0, "python-terminal", "Python Terminal Controlling Support", "python-core, python-io",
"pty.* tty.*" )
m.addPackage( 0, "python-tests", "Python Tests", "python-core",
"test" ) # package
m.addPackage( 0, "python-threading", "Python Threading & Synchronization Support", "python-core, python-lang",
"_threading_local.* dummy_thread.* dummy_threading.* mutex.* threading.* Queue.*" )
m.addPackage( 0, "python-unittest", "Python Unit Testing Framework", "python-core, python-stringold, python-lang",
"unittest.*" )
m.addPackage( 0, "python-xml", "Python basic XML support.", "python-core, python-re",
"lib-dynload/pyexpat.so xml xmllib.*" ) # package
m.addPackage( 0, "python-xmlrpc", "Python XMLRPC Support", "python-core, python-xml, python-netserver, python-lang",
"xmlrpclib.* SimpleXMLRPCServer.*" )
m.addPackage( 0, "python-zlib", "Python zlib Support.", "python-core",
"lib-dynload/zlib.so" )
m.addPackage( 0, "python-mailbox", "Python Mailbox Format Support", "python-core, python-mime",
"mailbox.*" )
m.make()
|
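A minimal sketch of the path rule `addPackage` applies to the manifest entries above: names beginning with "/" pass through unchanged, while relative names get the current source/target prefixes prepended. The function name `expand` and the sample paths are illustrative, not from the script.

```python
# Sketch of addPackage's filename handling (mirrors its inner loop).
def expand(source_prefix, target_prefix, filenames):
    pairs = []
    for name in filenames.split():
        if name.startswith("/"):
            # Absolute paths are kept as-is for both source and target.
            pairs.append((name, name))
        else:
            # Relative names are joined onto the current prefixes.
            pairs.append((source_prefix + name, target_prefix + name))
    return pairs

print(expand("/lib/python2.4/", "${libdir}/python2.4/", "wave.* /usr/bin/foo"))
# [('/lib/python2.4/wave.*', '${libdir}/python2.4/wave.*'),
#  ('/usr/bin/foo', '/usr/bin/foo')]
```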
ThinkOpen-Solutions/odoo
|
refs/heads/stable
|
addons/hr_timesheet_sheet/wizard/hr_timesheet_current.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_timesheet_current_open(osv.osv_memory):
_name = 'hr.timesheet.current.open'
_description = 'hr.timesheet.current.open'
def open_timesheet(self, cr, uid, ids, context=None):
ts = self.pool.get('hr_timesheet_sheet.sheet')
if context is None:
context = {}
view_type = 'form,tree'
user_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id','=',uid)], context=context)
if not len(user_ids):
raise osv.except_osv(_('Error!'), _('Please create an employee and associate it with this user.'))
ids = ts.search(cr, uid, [('user_id','=',uid),('state','in',('draft','new')),('date_from','<=',time.strftime('%Y-%m-%d')), ('date_to','>=',time.strftime('%Y-%m-%d'))], context=context)
if len(ids) > 1:
view_type = 'tree,form'
domain = "[('id','in',["+','.join(map(str, ids))+"]),('user_id', '=', uid)]"
else:
    domain = "[('user_id', '=', uid)]"
value = {
'domain': domain,
'name': _('Open Timesheet'),
'view_type': 'form',
'view_mode': view_type,
'res_model': 'hr_timesheet_sheet.sheet',
'view_id': False,
'type': 'ir.actions.act_window'
}
if len(ids) == 1:
value['res_id'] = ids[0]
return value
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mmnelemane/nova
|
refs/heads/master
|
nova/db/sqlalchemy/migrate_repo/versions/277_add_fixed_ip_updated_index.py
|
73
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from sqlalchemy import Index, MetaData, Table
from nova.i18n import _LI
LOG = logging.getLogger(__name__)
INDEX_COLUMNS = ['deleted', 'allocated', 'updated_at']
INDEX_NAME = 'fixed_ips_%s_idx' % ('_'.join(INDEX_COLUMNS),)
def _get_table_index(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
table = Table('fixed_ips', meta, autoload=True)
for idx in table.indexes:
if idx.columns.keys() == INDEX_COLUMNS:
break
else:
idx = None
return meta, table, idx
def upgrade(migrate_engine):
meta, table, index = _get_table_index(migrate_engine)
if index:
LOG.info(_LI('Skipped adding %s because an equivalent index'
' already exists.'), INDEX_NAME)
return
columns = [getattr(table.c, col_name) for col_name in INDEX_COLUMNS]
index = Index(INDEX_NAME, *columns)
index.create(migrate_engine)
|
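The `_get_table_index` helper above leans on Python's for/else: the else clause runs only when the loop finishes without hitting a break, which is what lets `idx` default to None. A self-contained illustration of the idiom:

```python
# for/else: the else branch fires only if no break occurred,
# including when the iterable is empty.
def find_first_even(numbers):
    for n in numbers:
        if n % 2 == 0:
            break
    else:
        n = None
    return n

print(find_first_even([1, 3, 4, 6]))  # 4
print(find_first_even([1, 3, 5]))     # None
```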
bkach/TravelTerminal
|
refs/heads/master
|
program.py
|
1
|
from findLocation import findLocation
import sys
import datetime
import time
from search import search
from time import mktime
from datetime import datetime
reload(sys)
sys.setdefaultencoding('UTF8')
def printResults(results, start, num):
    # Clamp the upper bound to the number of results before printing.
    if num > len(results.items()) - 1:
        num = len(results.items()) - 1
    if start < len(results.items()) - 1:
        for i in range(start, num):
            print str(i) + ": " + \
                unicode(results.items()[i][0]).encode('utf-8')
    else:
        print "No More Results"
def station_choice(results,num):
choice = raw_input("Station Number: ")
if choice == "":
printResults(results,num,num+10)
return station_choice(results,num+10)
elif int(choice) < 0 or int(choice) >= num:
print "Out of bounds"
return station_choice(results,num)
else:
return choice
if len(sys.argv) == 1:
from_station = raw_input("From: Stockholm" + chr(8)*9)
if not from_station:
from_station = "Stockholm"
from_results = findLocation(from_station)
printResults(from_results,0,10)
from_choice = int(station_choice(from_results,10))
from_choice_name = from_results.items()[from_choice][0]
from_choice_id = from_results.items()[from_choice][1]
print "\t" + unicode(from_choice_name).encode('utf-8')
to_station = raw_input("To: Uppsala" + chr(8)*4)
if not to_station:
to_station = "Uppsala"
to_results = findLocation(to_station)
printResults(to_results,0,10)
to_choice = int(station_choice(to_results,10))
to_choice_name = to_results.items()[to_choice][0]
to_choice_id = to_results.items()[to_choice][1]
print "\t" + unicode(to_choice_name).encode('utf-8')
departureDate = raw_input("Departure Window Date : %s" \
% datetime.now().strftime("%Y-%m-%d") + chr(8)*10)
if not departureDate:
departureDate = datetime.now().strftime("%Y-%m-%d")
departureTime = raw_input("Departure Window Time: %s" \
% datetime.now().strftime("%H:%M") + chr(8)*5)
if not departureTime:
departureTime = datetime.now().strftime("%H:%M")
arrivalDate = ""
while arrivalDate == "":
arrivalDate = raw_input("Arrival Window Date (%s): " \
% datetime.now().strftime("%Y-%m-%d"))
arrivalTime = ""
while arrivalTime == "":
arrivalTime = raw_input("Arrival Window Time (%s): " \
% datetime.now().strftime("%H:%M"))
trips = search(from_choice_id, to_choice_id, departureDate, departureTime,
arrivalDate, arrivalTime)
else:
trips = search(sys.argv[1],sys.argv[2],sys.argv[3],sys.argv[4],sys.argv[5],sys.argv[6])
cheapest_choice = raw_input("Cheapest? y/n ")
if cheapest_choice == "y":
print
cheapest = 0
for i in range(len(trips)):
if (trips[i].totalPrice < trips[cheapest].totalPrice):
cheapest = i
print str(int(trips[cheapest].totalPrice)) + " sek " + \
datetime.fromtimestamp(mktime(trips[cheapest].segments[0].departureTime )).strftime("%Y-%m-%d %H:%M") + "\n\t" + trips[cheapest].URL
#time_choice = raw_input("Shortest Time? y/n")
#if time_choice == "y":
#cheapest = 0
#for i in range(len(trips)):
#if (trips[i].totalPrice < trips[cheapest].totalPrice):
#cheapest = i
|
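The index-tracking loop that picks the cheapest trip can equivalently be written with `min()` and a key function. A sketch, where `Trip` and the sample fares stand in for the script's real trip objects:

```python
import collections

# Illustrative stand-in for the search module's trip objects.
Trip = collections.namedtuple("Trip", ["totalPrice", "URL"])
trips = [Trip(450, "trip-a"), Trip(295, "trip-b"), Trip(310, "trip-c")]

# Equivalent to the "cheapest" index scan in the script above.
cheapest = min(trips, key=lambda t: t.totalPrice)
print("%d sek %s" % (cheapest.totalPrice, cheapest.URL))  # 295 sek trip-b
```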
CiscoSystems/vespa
|
refs/heads/master
|
neutron/tests/unit/test_metadata_namespace_proxy.py
|
13
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
import socket
import mock
import testtools
import webob
from neutron.agent.metadata import namespace_proxy as ns_proxy
from neutron.common import utils
from neutron.tests import base
class FakeConf(object):
admin_user = 'neutron'
admin_password = 'password'
admin_tenant_name = 'tenant'
auth_url = 'http://127.0.0.1'
auth_strategy = 'keystone'
auth_region = 'region'
nova_metadata_ip = '9.9.9.9'
nova_metadata_port = 8775
metadata_proxy_shared_secret = 'secret'
class TestUnixDomainHttpConnection(base.BaseTestCase):
def test_connect(self):
with mock.patch.object(ns_proxy, 'cfg') as cfg:
cfg.CONF.metadata_proxy_socket = '/the/path'
with mock.patch('socket.socket') as socket_create:
conn = ns_proxy.UnixDomainHTTPConnection('169.254.169.254',
timeout=3)
conn.connect()
socket_create.assert_has_calls([
mock.call(socket.AF_UNIX, socket.SOCK_STREAM),
mock.call().settimeout(3),
mock.call().connect('/the/path')]
)
self.assertEqual(conn.timeout, 3)
class TestNetworkMetadataProxyHandler(base.BaseTestCase):
def setUp(self):
super(TestNetworkMetadataProxyHandler, self).setUp()
self.log_p = mock.patch.object(ns_proxy, 'LOG')
self.log = self.log_p.start()
self.addCleanup(self.log_p.stop)
self.handler = ns_proxy.NetworkMetadataProxyHandler('router_id')
def test_call(self):
req = mock.Mock(headers={})
with mock.patch.object(self.handler, '_proxy_request') as proxy_req:
proxy_req.return_value = 'value'
retval = self.handler(req)
self.assertEqual(retval, 'value')
proxy_req.assert_called_once_with(req.remote_addr,
req.method,
req.path_info,
req.query_string,
req.body)
def test_no_argument_passed_to_init(self):
with testtools.ExpectedException(ValueError):
ns_proxy.NetworkMetadataProxyHandler()
def test_call_internal_server_error(self):
req = mock.Mock(headers={})
with mock.patch.object(self.handler, '_proxy_request') as proxy_req:
proxy_req.side_effect = Exception
retval = self.handler(req)
self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
self.assertEqual(len(self.log.mock_calls), 2)
self.assertTrue(proxy_req.called)
def test_proxy_request_router_200(self):
self.handler.router_id = 'router_id'
resp = mock.Mock(status=200)
with mock.patch('httplib2.Http') as mock_http:
mock_http.return_value.request.return_value = (resp, 'content')
retval = self.handler._proxy_request('192.168.1.1',
'GET',
'/latest/meta-data',
'',
'')
mock_http.assert_has_calls([
mock.call().request(
'http://169.254.169.254/latest/meta-data',
method='GET',
headers={
'X-Forwarded-For': '192.168.1.1',
'X-Neutron-Router-ID': 'router_id'
},
connection_type=ns_proxy.UnixDomainHTTPConnection,
body=''
)]
)
self.assertEqual(retval, 'content')
def test_proxy_request_network_200(self):
self.handler.network_id = 'network_id'
resp = mock.Mock(status=200)
with mock.patch('httplib2.Http') as mock_http:
mock_http.return_value.request.return_value = (resp, 'content')
retval = self.handler._proxy_request('192.168.1.1',
'GET',
'/latest/meta-data',
'',
'')
mock_http.assert_has_calls([
mock.call().request(
'http://169.254.169.254/latest/meta-data',
method='GET',
headers={
'X-Forwarded-For': '192.168.1.1',
'X-Neutron-Network-ID': 'network_id'
},
connection_type=ns_proxy.UnixDomainHTTPConnection,
body=''
)]
)
self.assertEqual(retval, 'content')
def test_proxy_request_network_404(self):
self.handler.network_id = 'network_id'
resp = mock.Mock(status=404)
with mock.patch('httplib2.Http') as mock_http:
mock_http.return_value.request.return_value = (resp, '')
retval = self.handler._proxy_request('192.168.1.1',
'GET',
'/latest/meta-data',
'',
'')
mock_http.assert_has_calls([
mock.call().request(
'http://169.254.169.254/latest/meta-data',
method='GET',
headers={
'X-Forwarded-For': '192.168.1.1',
'X-Neutron-Network-ID': 'network_id'
},
connection_type=ns_proxy.UnixDomainHTTPConnection,
body=''
)]
)
self.assertIsInstance(retval, webob.exc.HTTPNotFound)
def test_proxy_request_network_409(self):
self.handler.network_id = 'network_id'
resp = mock.Mock(status=409)
with mock.patch('httplib2.Http') as mock_http:
mock_http.return_value.request.return_value = (resp, '')
retval = self.handler._proxy_request('192.168.1.1',
'POST',
'/latest/meta-data',
'',
'')
mock_http.assert_has_calls([
mock.call().request(
'http://169.254.169.254/latest/meta-data',
method='POST',
headers={
'X-Forwarded-For': '192.168.1.1',
'X-Neutron-Network-ID': 'network_id'
},
connection_type=ns_proxy.UnixDomainHTTPConnection,
body=''
)]
)
self.assertIsInstance(retval, webob.exc.HTTPConflict)
def test_proxy_request_network_500(self):
self.handler.network_id = 'network_id'
resp = mock.Mock(status=500)
with mock.patch('httplib2.Http') as mock_http:
mock_http.return_value.request.return_value = (resp, '')
retval = self.handler._proxy_request('192.168.1.1',
'GET',
'/latest/meta-data',
'',
'')
mock_http.assert_has_calls([
mock.call().request(
'http://169.254.169.254/latest/meta-data',
method='GET',
headers={
'X-Forwarded-For': '192.168.1.1',
'X-Neutron-Network-ID': 'network_id'
},
connection_type=ns_proxy.UnixDomainHTTPConnection,
body=''
)]
)
self.assertIsInstance(retval, webob.exc.HTTPInternalServerError)
def test_proxy_request_network_418(self):
self.handler.network_id = 'network_id'
resp = mock.Mock(status=418)
with mock.patch('httplib2.Http') as mock_http:
mock_http.return_value.request.return_value = (resp, '')
with testtools.ExpectedException(Exception):
self.handler._proxy_request('192.168.1.1',
'GET',
'/latest/meta-data',
'',
'')
mock_http.assert_has_calls([
mock.call().request(
'http://169.254.169.254/latest/meta-data',
method='GET',
headers={
'X-Forwarded-For': '192.168.1.1',
'X-Neutron-Network-ID': 'network_id'
},
connection_type=ns_proxy.UnixDomainHTTPConnection,
body=''
)]
)
def test_proxy_request_network_exception(self):
self.handler.network_id = 'network_id'
mock.Mock(status=500)
with mock.patch('httplib2.Http') as mock_http:
mock_http.return_value.request.side_effect = Exception
with testtools.ExpectedException(Exception):
self.handler._proxy_request('192.168.1.1',
'GET',
'/latest/meta-data',
'',
'')
mock_http.assert_has_calls([
mock.call().request(
'http://169.254.169.254/latest/meta-data',
method='GET',
headers={
'X-Forwarded-For': '192.168.1.1',
'X-Neutron-Network-ID': 'network_id'
},
connection_type=ns_proxy.UnixDomainHTTPConnection,
body=''
)]
)
class TestProxyDaemon(base.BaseTestCase):
def test_init(self):
with mock.patch('neutron.agent.linux.daemon.Pidfile'):
pd = ns_proxy.ProxyDaemon('pidfile', 9697, 'net_id', 'router_id')
self.assertEqual(pd.router_id, 'router_id')
self.assertEqual(pd.network_id, 'net_id')
def test_run(self):
with mock.patch('neutron.agent.linux.daemon.Pidfile'):
with mock.patch('neutron.wsgi.Server') as Server:
pd = ns_proxy.ProxyDaemon('pidfile', 9697, 'net_id',
'router_id')
pd.run()
Server.assert_has_calls([
mock.call('neutron-network-metadata-proxy'),
mock.call().start(mock.ANY, 9697),
mock.call().wait()]
)
def test_main(self):
with mock.patch.object(ns_proxy, 'ProxyDaemon') as daemon:
with mock.patch('eventlet.monkey_patch') as eventlet:
with mock.patch.object(ns_proxy, 'config') as config:
with mock.patch.object(ns_proxy, 'cfg') as cfg:
with mock.patch.object(utils, 'cfg') as utils_cfg:
cfg.CONF.router_id = 'router_id'
cfg.CONF.network_id = None
cfg.CONF.metadata_port = 9697
cfg.CONF.pid_file = 'pidfile'
cfg.CONF.daemonize = True
utils_cfg.CONF.log_opt_values.return_value = None
ns_proxy.main()
self.assertTrue(eventlet.called)
self.assertTrue(config.setup_logging.called)
daemon.assert_has_calls([
mock.call('pidfile', 9697,
router_id='router_id',
network_id=None),
mock.call().start()]
)
def test_main_dont_fork(self):
with mock.patch.object(ns_proxy, 'ProxyDaemon') as daemon:
with mock.patch('eventlet.monkey_patch') as eventlet:
with mock.patch.object(ns_proxy, 'config') as config:
with mock.patch.object(ns_proxy, 'cfg') as cfg:
with mock.patch.object(utils, 'cfg') as utils_cfg:
cfg.CONF.router_id = 'router_id'
cfg.CONF.network_id = None
cfg.CONF.metadata_port = 9697
cfg.CONF.pid_file = 'pidfile'
cfg.CONF.daemonize = False
utils_cfg.CONF.log_opt_values.return_value = None
ns_proxy.main()
self.assertTrue(eventlet.called)
self.assertTrue(config.setup_logging.called)
daemon.assert_has_calls([
mock.call('pidfile', 9697,
router_id='router_id',
network_id=None),
mock.call().run()]
)
|
remh/dd-agent
|
refs/heads/master
|
tests/checks/mock/test_consul.py
|
18
|
import random
from tests.checks.common import AgentCheckTest, load_check
MOCK_CONFIG = {
'init_config': {},
'instances' : [{
'url': 'http://localhost:8500',
'catalog_checks': True,
}]
}
MOCK_CONFIG_SERVICE_WHITELIST = {
'init_config': {},
'instances' : [{
'url': 'http://localhost:8500',
'catalog_checks': True,
'service_whitelist': ['service_{0}'.format(k) for k in range(70)]
}]
}
MOCK_CONFIG_LEADER_CHECK = {
'init_config': {},
'instances' : [{
'url': 'http://localhost:8500',
'catalog_checks': True,
'new_leader_checks': True
}]
}
MOCK_BAD_CONFIG = {
'init_config': {},
'instances' : [{ # Multiple instances should cause it to fail
'url': 'http://localhost:8500',
'catalog_checks': True,
'new_leader_checks': True
}, {
'url': 'http://localhost:8501',
'catalog_checks': True,
'new_leader_checks': True
}]
}
class TestCheckConsul(AgentCheckTest):
CHECK_NAME = 'consul'
def mock_get_peers_in_cluster(self, instance):
return [
"10.0.2.14:8300",
"10.0.2.15:8300",
"10.0.2.16:8300"
]
def mock_get_services_in_cluster(self, instance):
return {
"service-1": [
"az-us-east-1a"
],
"service-2": [
"az-us-east-1a"
],
"service-3": [
"az-us-east-1a"
],
"service-4": [
"az-us-east-1a"
],
"service-5": [
"az-us-east-1a"
],
"service-6": [
"az-us-east-1a"
]
}
def mock_get_n_services_in_cluster(self, n):
dct = {}
for i in range(n):
k = "service_{0}".format(i)
dct[k] = []
return dct
def mock_get_local_config(self, instance):
return {
"Config": {
"AdvertiseAddr": "10.0.2.15",
"Datacenter": "dc1",
"Ports": {
"DNS": 8600,
"HTTP": 8500,
"HTTPS": -1,
"RPC": 8400,
"SerfLan": 8301,
"SerfWan": 8302,
"Server": 8300
},
}
}
def mock_get_nodes_in_cluster(self, instance):
return [
{
"Address": "10.0.2.15",
"Node": "node-1"
},
{
"Address": "10.0.2.25",
"Node": "node-2"
},
{
"Address": "10.0.2.35",
"Node": "node-2"
},
]
def mock_get_nodes_with_service(self, instance, service):
def _get_random_ip():
rand_int = int(15 * random.random()) + 10
return "10.0.2.{0}".format(rand_int)
return [
{
"Address": _get_random_ip(),
"Node": "node-1",
"ServiceAddress": "",
"ServiceID": service,
"ServiceName": service,
"ServicePort": 80,
"ServiceTags": [
"az-us-east-1a"
]
}
]
def mock_get_cluster_leader_A(self, instance):
return '10.0.2.15:8300'
def mock_get_cluster_leader_B(self, instance):
return 'My New Leader'
def _get_consul_mocks(self):
return {
'get_services_in_cluster': self.mock_get_services_in_cluster,
'get_nodes_with_service': self.mock_get_nodes_with_service,
'get_peers_in_cluster': self.mock_get_peers_in_cluster,
'_get_local_config': self.mock_get_local_config,
'_get_cluster_leader': self.mock_get_cluster_leader_A
}
def test_bad_config(self):
self.assertRaises(Exception, self.run_check, MOCK_BAD_CONFIG)
def test_get_nodes_with_service(self):
self.run_check(MOCK_CONFIG, mocks=self._get_consul_mocks())
self.assertMetric('consul.catalog.nodes_up', value=1, tags=['consul_datacenter:dc1', 'consul_service_id:service-1'])
def test_get_peers_in_cluster(self):
mocks = self._get_consul_mocks()
# When node is leader
self.run_check(MOCK_CONFIG, mocks=mocks)
self.assertMetric('consul.peers', value=3, tags=['consul_datacenter:dc1', 'mode:leader'])
mocks['_get_cluster_leader'] = self.mock_get_cluster_leader_B
# When node is follower
self.run_check(MOCK_CONFIG, mocks=mocks)
self.assertMetric('consul.peers', value=3, tags=['consul_datacenter:dc1', 'mode:follower'])
def test_get_services_on_node(self):
self.run_check(MOCK_CONFIG, mocks=self._get_consul_mocks())
self.assertMetric('consul.catalog.services_up', value=6, tags=['consul_datacenter:dc1', 'consul_node_id:node-1'])
def test_cull_services_list(self):
self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_LEADER_CHECK, self.DEFAULT_AGENT_CONFIG)
# Pad num_services to kick in truncation logic
num_services = self.check.MAX_SERVICES + 20
# Big whitelist
services = self.mock_get_n_services_in_cluster(num_services)
whitelist = ['service_{0}'.format(k) for k in range(num_services)]
self.assertEqual(len(self.check._cull_services_list(services, whitelist)), self.check.MAX_SERVICES)
# Whitelist < MAX_SERVICES should spit out the whitelist
services = self.mock_get_n_services_in_cluster(num_services)
whitelist = ['service_{0}'.format(k) for k in range(self.check.MAX_SERVICES-1)]
self.assertEqual(set(self.check._cull_services_list(services, whitelist)), set(whitelist))
# No whitelist, still triggers truncation
whitelist = []
self.assertEqual(len(self.check._cull_services_list(services, whitelist)), self.check.MAX_SERVICES)
# Num. services < MAX_SERVICES should be no-op in absence of whitelist
num_services = self.check.MAX_SERVICES - 1
services = self.mock_get_n_services_in_cluster(num_services)
self.assertEqual(len(self.check._cull_services_list(services, whitelist)), num_services)
# Num. services < MAX_SERVICES should spit out only the whitelist when one is defined
num_services = self.check.MAX_SERVICES - 1
whitelist = ['service_1', 'service_2', 'service_3']
services = self.mock_get_n_services_in_cluster(num_services)
self.assertEqual(set(self.check._cull_services_list(services, whitelist)), set(whitelist))
def test_new_leader_event(self):
self.check = load_check(self.CHECK_NAME, MOCK_CONFIG_LEADER_CHECK, self.DEFAULT_AGENT_CONFIG)
self.check._last_known_leader = 'My Old Leader'
mocks = self._get_consul_mocks()
mocks['_get_cluster_leader'] = self.mock_get_cluster_leader_B
self.run_check(MOCK_CONFIG_LEADER_CHECK, mocks=mocks)
self.assertEqual(len(self.events), 1)
event = self.events[0]
self.assertEqual(event['event_type'], 'consul.new_leader')
self.assertIn('prev_consul_leader:My Old Leader', event['tags'])
self.assertIn('curr_consul_leader:My New Leader', event['tags'])
|
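The culling tests above pin down the behaviour of `_cull_services_list` without showing it. A rough sketch of logic consistent with those assertions — not the check's actual implementation; `MAX_SERVICES = 50` and truncation via random sampling are assumptions:

```python
import random

MAX_SERVICES = 50  # assumption; the real cap lives on the check class

def cull_services_list(services, whitelist, max_services=MAX_SERVICES):
    # Prefer the whitelist when one is given; never keep more than the cap.
    if whitelist:
        candidates = [name for name in whitelist if name in services]
    else:
        candidates = list(services)
    if len(candidates) > max_services:
        candidates = random.sample(candidates, max_services)
    return candidates

services = dict(("service_%d" % i, []) for i in range(70))
print(len(cull_services_list(services, [])))  # 50
print(sorted(cull_services_list(services, ["service_1", "service_2"])))
# ['service_1', 'service_2']
```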
prakritish/ansible
|
refs/heads/devel
|
test/units/module_utils/json_utils/__init__.py
|
12133432
| |
camptocamp/QGIS
|
refs/heads/master
|
python/plugins/processing/admintools/geoserver/__init__.py
|
12133432
| |
sgzsh269/django
|
refs/heads/master
|
tests/version/__init__.py
|
12133432
| |
wji/plenarnavi_backend
|
refs/heads/master
|
api/__init__.py
|
12133432
| |
since2014/gitosis
|
refs/heads/master
|
gitosis/test/test_group.py
|
29
|
from nose.tools import eq_ as eq, assert_raises
from ConfigParser import RawConfigParser
from gitosis import group
def test_no_emptyConfig():
cfg = RawConfigParser()
gen = group.getMembership(config=cfg, user='jdoe')
eq(gen.next(), 'all')
assert_raises(StopIteration, gen.next)
def test_no_emptyGroup():
cfg = RawConfigParser()
cfg.add_section('group hackers')
gen = group.getMembership(config=cfg, user='jdoe')
eq(gen.next(), 'all')
assert_raises(StopIteration, gen.next)
def test_no_notListed():
cfg = RawConfigParser()
cfg.add_section('group hackers')
cfg.set('group hackers', 'members', 'wsmith')
gen = group.getMembership(config=cfg, user='jdoe')
eq(gen.next(), 'all')
assert_raises(StopIteration, gen.next)
def test_yes_simple():
cfg = RawConfigParser()
cfg.add_section('group hackers')
cfg.set('group hackers', 'members', 'jdoe')
gen = group.getMembership(config=cfg, user='jdoe')
eq(gen.next(), 'hackers')
eq(gen.next(), 'all')
assert_raises(StopIteration, gen.next)
def test_yes_leading():
cfg = RawConfigParser()
cfg.add_section('group hackers')
cfg.set('group hackers', 'members', 'jdoe wsmith')
gen = group.getMembership(config=cfg, user='jdoe')
eq(gen.next(), 'hackers')
eq(gen.next(), 'all')
assert_raises(StopIteration, gen.next)
def test_yes_trailing():
cfg = RawConfigParser()
cfg.add_section('group hackers')
cfg.set('group hackers', 'members', 'wsmith jdoe')
gen = group.getMembership(config=cfg, user='jdoe')
eq(gen.next(), 'hackers')
eq(gen.next(), 'all')
assert_raises(StopIteration, gen.next)
def test_yes_middle():
cfg = RawConfigParser()
cfg.add_section('group hackers')
cfg.set('group hackers', 'members', 'wsmith jdoe danny')
gen = group.getMembership(config=cfg, user='jdoe')
eq(gen.next(), 'hackers')
eq(gen.next(), 'all')
assert_raises(StopIteration, gen.next)
def test_yes_recurse_one():
cfg = RawConfigParser()
cfg.add_section('group hackers')
cfg.set('group hackers', 'members', 'wsmith @smackers')
cfg.add_section('group smackers')
cfg.set('group smackers', 'members', 'danny jdoe')
gen = group.getMembership(config=cfg, user='jdoe')
eq(gen.next(), 'smackers')
eq(gen.next(), 'hackers')
eq(gen.next(), 'all')
assert_raises(StopIteration, gen.next)
def test_yes_recurse_one_ordering():
cfg = RawConfigParser()
cfg.add_section('group smackers')
cfg.set('group smackers', 'members', 'danny jdoe')
cfg.add_section('group hackers')
cfg.set('group hackers', 'members', 'wsmith @smackers')
gen = group.getMembership(config=cfg, user='jdoe')
eq(gen.next(), 'smackers')
eq(gen.next(), 'hackers')
eq(gen.next(), 'all')
assert_raises(StopIteration, gen.next)
def test_yes_recurse_three():
cfg = RawConfigParser()
cfg.add_section('group hackers')
cfg.set('group hackers', 'members', 'wsmith @smackers')
cfg.add_section('group smackers')
cfg.set('group smackers', 'members', 'danny @snackers')
cfg.add_section('group snackers')
cfg.set('group snackers', 'members', '@whackers foo')
cfg.add_section('group whackers')
cfg.set('group whackers', 'members', 'jdoe')
gen = group.getMembership(config=cfg, user='jdoe')
eq(gen.next(), 'whackers')
eq(gen.next(), 'snackers')
eq(gen.next(), 'smackers')
eq(gen.next(), 'hackers')
eq(gen.next(), 'all')
assert_raises(StopIteration, gen.next)
def test_yes_recurse_junk():
cfg = RawConfigParser()
cfg.add_section('group hackers')
cfg.set('group hackers', 'members', '@notexist @smackers')
cfg.add_section('group smackers')
cfg.set('group smackers', 'members', 'jdoe')
gen = group.getMembership(config=cfg, user='jdoe')
eq(gen.next(), 'smackers')
eq(gen.next(), 'hackers')
eq(gen.next(), 'all')
assert_raises(StopIteration, gen.next)
def test_yes_recurse_loop():
cfg = RawConfigParser()
cfg.add_section('group hackers')
cfg.set('group hackers', 'members', '@smackers')
cfg.add_section('group smackers')
cfg.set('group smackers', 'members', '@hackers jdoe')
gen = group.getMembership(config=cfg, user='jdoe')
eq(gen.next(), 'smackers')
eq(gen.next(), 'hackers')
eq(gen.next(), 'all')
assert_raises(StopIteration, gen.next)
def test_no_recurse_loop():
cfg = RawConfigParser()
cfg.add_section('group hackers')
cfg.set('group hackers', 'members', '@smackers')
cfg.add_section('group smackers')
cfg.set('group smackers', 'members', '@hackers')
gen = group.getMembership(config=cfg, user='jdoe')
eq(gen.next(), 'all')
assert_raises(StopIteration, gen.next)
|
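The expected orderings above come from a membership walk over the group config. A rough sketch of that walk, simplified to a plain dict of "group name -> space-separated members" instead of the real RawConfigParser sections:

```python
# Yield each group that (transitively) contains the user, innermost
# first, then the implicit 'all' group; the found set breaks cycles.
def get_membership(groups, user):
    found, frontier = set(), set([user])
    while frontier:
        member = frontier.pop()
        # Direct membership is listed by name; group inclusion uses '@name'.
        token = member if member == user else '@' + member
        for name, members in groups.items():
            if token in members.split() and name not in found:
                found.add(name)
                frontier.add(name)
                yield name
    yield 'all'

groups = {'hackers': 'wsmith @smackers', 'smackers': 'danny jdoe'}
print(list(get_membership(groups, 'jdoe')))  # ['smackers', 'hackers', 'all']
```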
OpenNingia/l5r-character-manager
|
refs/heads/master
|
l5rcm/dal/query.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Daniele Simonetti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
def get_clan(storage, ident):
try:
return [x for x in storage.clans if x.id == ident][0]
except:
return None
def get_family(storage, ident):
try:
return [x for x in storage.families if x.id == ident][0]
except:
return None
def get_school(storage, ident):
try:
return [x for x in storage.schools if x.id == ident][0]
except Exception as e:
print(e)
return None
def get_base_schools(storage):
def is_base_school(school):
return (len(school.require) == 0 and
'advanced' not in school.tags and
'alternate' not in school.tags)
try:
return [x for x in storage.schools if is_base_school(x)]
except:
return None
def get_school_tech(school_obj, rank):
try:
return [x for x in school_obj.techs if x.rank == rank][0]
except:
return None
def get_tech(storage, ident):
for sc in storage.schools:
tech = [x for x in sc.techs if x.id == ident]
if len(tech): return sc, tech[0]
return None, None
def get_skill(storage, ident):
try:
return [x for x in storage.skills if x.id == ident][0]
except:
return None
def get_skills(storage, categ):
return [x for x in storage.skills if x.type == categ]
def get_spells(storage, ring, mastery):
return [x for x in storage.spells if x.element == ring and x.mastery == mastery]
def get_maho_spells(storage, ring, mastery):
return [x for x in storage.spells if x.element == ring and x.mastery == mastery and 'maho' in x.tags]
def get_mastery_ability_rule(storage, ident, value):
try:
skill = get_skill(storage, ident)
return [x for x in skill.mastery_abilities if x.rank == value][0].rule
except:
return None
def get_kata(storage, ident):
try:
return [x for x in storage.katas if x.id == ident][0]
except:
return None
def get_kiho(storage, ident):
try:
return [x for x in storage.kihos if x.id == ident][0]
except:
return None
def get_spell(storage, ident):
try:
return [x for x in storage.spells if x.id == ident][0]
except:
return None
def get_merit(storage, ident):
try:
return [x for x in storage.merits if x.id == ident][0]
except:
return None
def get_flaw(storage, ident):
try:
return [x for x in storage.flaws if x.id == ident][0]
except:
return None
def get_weapon(storage, name):
try:
return [x for x in storage.weapons if x.name == name][0]
except:
return None
def get_armor(storage, name):
try:
return [x for x in storage.armors if x.name == name][0]
except:
return None
def get_weapon_effect(storage, ident):
try:
return [x for x in storage.weapon_effects if x.id == ident][0]
except:
return None
def get_ring(storage, ident):
try:
return [x for x in storage.rings if x.id == ident][0]
except:
return None
def get_trait(storage, ident):
try:
return [x for x in storage.traits if x.id == ident][0]
except:
return None
class DataQuery(object):
def __init__(self, data):
self.d = data
def get_clan(self, ident):
return get_clan(self.d, ident)
def get_family(self, ident):
return get_family(self.d, ident)
def get_school(self, ident):
return get_school(self.d, ident)
def get_base_schools(self):
return get_base_schools(self.d)
def get_school_tech(self, school_id, rank):
school_obj = self.get_school(school_id)
return get_school_tech(school_obj, rank)
def get_tech(self, ident):
return get_tech(self.d, ident)
def get_skill(self, ident):
return get_skill(self.d, ident)
def get_skills(self, categ):
return get_skills(self.d, categ)
def get_spells(self, ring, mastery):
return get_spells(self.d, ring, mastery)
def get_maho_spells(self, ring, mastery):
return get_maho_spells(self.d, ring, mastery)
def get_mastery_ability_rule(self, ident, value):
return get_mastery_ability_rule(self.d, ident, value)
def get_kata(self, ident):
return get_kata(self.d, ident)
def get_kiho(self, ident):
return get_kiho(self.d, ident)
def get_spell(self, ident):
return get_spell(self.d, ident)
def get_merit(self, ident):
return get_merit(self.d, ident)
def get_flaw(self, ident):
return get_flaw(self.d, ident)
def get_weapon(self, name):
return get_weapon(self.d, name)
def get_armor(self, name):
return get_armor(self.d, name)
def get_weapon_effect(self, ident):
return get_weapon_effect(self.d, ident)
def get_ring(self, ident):
return get_ring(self.d, ident)
def get_trait(self, ident):
return get_trait(self.d, ident)
|
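Each lookup above builds a full list, indexes `[0]`, and catches everything. An equivalent pattern with `next()` and a default avoids both the extra list and the bare except; `Thing` here is just an illustrative stand-in for the storage records:

```python
# next() with a default returns None when no element matches,
# without materialising the whole filtered list.
def first_by_id(items, ident):
    return next((x for x in items if x.id == ident), None)

class Thing(object):
    def __init__(self, ident):
        self.id = ident

print(first_by_id([Thing(1), Thing(2)], 2).id)  # 2
print(first_by_id([Thing(1), Thing(2)], 9))     # None
```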
openlabs/nereid-project
|
refs/heads/develop
|
application.py
|
3
|
#!/usr/bin/env python
import os
import datetime
import mimetypes
from nereid import Nereid
from werkzeug.contrib.sessions import FilesystemSessionStore
from werkzeug.wsgi import SharedDataMiddleware
from nereid.contrib.locale import Babel
from nereid.sessions import Session
from raven.contrib.flask import Sentry
CWD = os.path.abspath(os.path.dirname(__file__))
DATABASE_NAME = os.environ.get('TRYTOND_DB_NAME', 'nereid_project')
SECRET_PATH = os.environ.get('SECRET_PATH', '.secret')
from trytond.config import config
config.update_etc()
APP_CONFIG = dict(
# The name of database
DATABASE_NAME=DATABASE_NAME,
# If the application is to be configured in the debug mode
DEBUG=False,
# The location where the translations of this template are stored
TRANSLATIONS_PATH='i18n',
SECRET_KEY=open(SECRET_PATH).read(),
WTF_CSRF_ENABLED=False,
)
# Create a new application
app = Nereid(static_folder='%s/static/' % CWD, static_url_path='/static')
# Update the configuration with the above config values
app.config.update(APP_CONFIG)
# Initialise the app, connect to cache and backend
app.initialise()
app.jinja_env.filters[
'float_to_time'] = lambda hours: "%dh %dm" % (hours, (hours * 60) % 60)
app.jinja_env.globals.update({
'datetime': datetime,
'guess_mimetype': mimetypes.guess_type,
})
# Set up the filesystem session store.
# This won't work if you scale to more than one server.
# Use something like a Redis or memcached session store instead.
app.session_interface.session_store = FilesystemSessionStore(
'/tmp', session_class=Session
)
Babel(app)
sentry = Sentry(app)
app.wsgi_app = SharedDataMiddleware(app.wsgi_app, {
'/app': os.path.join(os.path.dirname(__file__), 'ng-app/app')
})
if __name__ == '__main__':
app.debug = True
app.run('0.0.0.0')
|
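A quick check of the `float_to_time` filter's arithmetic as registered above: the `%d` conversion truncates to whole hours, and `(hours * 60) % 60` recovers the minutes.

```python
# Same expression as the Jinja filter registered in the app above.
float_to_time = lambda hours: "%dh %dm" % (hours, (hours * 60) % 60)
print(float_to_time(1.5))   # 1h 30m
print(float_to_time(2.25))  # 2h 15m
```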
krafczyk/root
|
refs/heads/master
|
interpreter/llvm/src/utils/llvm-build/llvmbuild/util.py
|
143
|
import os
import sys
def _write_message(kind, message):
program = os.path.basename(sys.argv[0])
sys.stderr.write('%s: %s: %s\n' % (program, kind, message))
note = lambda message: _write_message('note', message)
warning = lambda message: _write_message('warning', message)
error = lambda message: _write_message('error', message)
fatal = lambda message: (_write_message('fatal error', message), sys.exit(1))
__all__ = ['note', 'warning', 'error', 'fatal']
|
Vassy/odoo
|
refs/heads/master
|
addons/hr_recruitment/report/hr_recruitment_report.py
|
56
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
from .. import hr_recruitment
from openerp.addons.decimal_precision import decimal_precision as dp
class hr_recruitment_report(osv.Model):
_name = "hr.recruitment.report"
_description = "Recruitments Statistics"
_auto = False
_rec_name = 'date_create'
_order = 'date_create desc'
_columns = {
'user_id': fields.many2one('res.users', 'User', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'date_create': fields.datetime('Create Date', readonly=True),
'date_last_stage_update': fields.datetime('Last Stage Update', readonly=True),
'date_closed': fields.date('Closed', readonly=True),
'job_id': fields.many2one('hr.job', 'Applied Job',readonly=True),
'stage_id': fields.many2one ('hr.recruitment.stage', 'Stage'),
'type_id': fields.many2one('hr.recruitment.degree', 'Degree'),
'department_id': fields.many2one('hr.department','Department',readonly=True),
'priority': fields.selection(hr_recruitment.AVAILABLE_PRIORITIES, 'Appreciation'),
'salary_prop' : fields.float("Salary Proposed", digits_compute=dp.get_precision('Account')),
'salary_prop_avg' : fields.float("Avg. Proposed Salary", group_operator="avg", digits_compute=dp.get_precision('Account')),
'salary_exp' : fields.float("Salary Expected", digits_compute=dp.get_precision('Account')),
'salary_exp_avg' : fields.float("Avg. Expected Salary", group_operator="avg", digits_compute=dp.get_precision('Account')),
'partner_id': fields.many2one('res.partner', 'Partner',readonly=True),
'available': fields.float("Availability"),
'delay_close': fields.float('Avg. Delay to Close', digits=(16,2), readonly=True, group_operator="avg",
help="Number of Days to close the project issue"),
'last_stage_id': fields.many2one ('hr.recruitment.stage', 'Last Stage'),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'hr_recruitment_report')
cr.execute("""
create or replace view hr_recruitment_report as (
select
min(s.id) as id,
s.create_date as date_create,
date_trunc('day',s.date_closed) as date_closed,
date_trunc('day',s.date_last_stage_update) as date_last_stage_update,
to_char(s.create_date, 'YYYY') as year,
to_char(s.create_date, 'MM') as month,
to_char(s.create_date, 'YYYY-MM-DD') as day,
s.partner_id,
s.company_id,
s.user_id,
s.job_id,
s.type_id,
sum(s.availability) as available,
s.department_id,
s.priority,
s.stage_id,
s.last_stage_id,
sum(salary_proposed) as salary_prop,
(sum(salary_proposed)/count(*)) as salary_prop_avg,
sum(salary_expected) as salary_exp,
(sum(salary_expected)/count(*)) as salary_exp_avg,
extract('epoch' from (s.write_date-s.create_date))/(3600*24) as delay_close,
count(*) as nbr
from hr_applicant s
group by
date_trunc('day',s.create_date),
date_trunc('day',s.date_closed),
s.date_open,
s.create_date,
s.write_date,
s.date_closed,
s.date_last_stage_update,
s.partner_id,
s.company_id,
s.user_id,
s.stage_id,
s.last_stage_id,
s.type_id,
s.priority,
s.job_id,
s.department_id
)
""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
mastbaum/rat-pac
|
refs/heads/master
|
python/ratproc/ntuple.py
|
7
|
from ratproc.base import Processor
from rat import ROOT, RAT, ratiter
from rat.parser import create_evaluation_tree
from array import array
import operator
import itertools
class NtupleWriter(object):
def __init__(self, ntuple, field_selectors):
self.ntuple = ntuple
self.field_selectors = field_selectors
self.field_contents = array('f', [0.0]*len(self.field_selectors))
self.count = 0
self.eval_tree = create_evaluation_tree(*field_selectors)
def fill(self, ds):
for row in self.eval_tree.eval(ds):
for i, value in enumerate(row):
if value is None:
value = -9999999.0
self.field_contents[i] = value
self.ntuple.Fill(self.field_contents)
self.count += 1
def write(self):
self.ntuple.Write()
class Ntuple(Processor):
def __init__(self, filename, *ntuples):
self.filename = filename
self.ntuple_names = ntuples
self.first_event = True
def create_ntuple(self, name):
db = RAT.DB.Get()
lntuple = db.GetLink("NTUPLE", name)
# Python lists are better than STL vectors
fields = list(lntuple.GetSArray("fields"))
# Deinterleave field names and selectors
field_names = fields[::2]
field_selectors = fields[1::2]
assert len(field_names) == len(field_selectors)
N = ROOT.TNtuple(name, "RAT reduced ntuple",
":".join(field_names))
return NtupleWriter(N, field_selectors)
def dsevent(self, ds):
if self.first_event:
print 'ntuple: Writing to', self.filename
self.f = ROOT.TFile.Open(self.filename, "RECREATE")
self.ntuples = [ self.create_ntuple(name)
for name in self.ntuple_names ]
self.first_event = False
for writer in self.ntuples:
writer.fill(ds)
return Processor.OK
def finish(self):
self.f.cd()
for writer in self.ntuples:
print 'ntuple: Wrote %d entries to %s' % (writer.count, writer.ntuple.GetName())
writer.write()
self.f.Close()
|
bclau/nova
|
refs/heads/master
|
nova/netconf.py
|
17
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
from oslo.config import cfg
from nova import utils
CONF = cfg.CONF
def _get_my_ip():
"""
Returns the actual ip of the local machine.
This code figures out what source address would be used if some traffic
were to be sent out to some well known address on the Internet. In this
case, a Google DNS server is used, but the specific address does not
matter much. No traffic is actually sent.
"""
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return utils.get_my_ipv4_address()
netconf_opts = [
cfg.StrOpt('my_ip',
default=_get_my_ip(),
help='ip address of this host'),
cfg.StrOpt('host',
default=socket.gethostname(),
help='Name of this node. This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, or IP address. '
'However, the node name must be valid within '
'an AMQP key, and if using ZeroMQ, a valid '
'hostname, FQDN, or IP address'),
cfg.BoolOpt('use_ipv6',
default=False,
help='use ipv6'),
]
CONF.register_opts(netconf_opts)
|
saydulk/django
|
refs/heads/master
|
django/contrib/admin/decorators.py
|
558
|
def register(*models, **kwargs):
"""
Registers the given model(s) classes and wrapped ModelAdmin class with
admin site:
@register(Author)
class AuthorAdmin(admin.ModelAdmin):
pass
A kwarg of `site` can be passed as the admin site, otherwise the default
admin site will be used.
"""
from django.contrib.admin import ModelAdmin
from django.contrib.admin.sites import site, AdminSite
def _model_admin_wrapper(admin_class):
admin_site = kwargs.pop('site', site)
if not isinstance(admin_site, AdminSite):
raise ValueError('site must subclass AdminSite')
if not issubclass(admin_class, ModelAdmin):
raise ValueError('Wrapped class must subclass ModelAdmin.')
admin_site.register(models, admin_class=admin_class)
return admin_class
return _model_admin_wrapper
|
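A usage sketch of the `site` kwarg that the wrapper pops above; `Author`, `myapp`, and the "name" field are hypothetical, and `my_site` stands in for any custom AdminSite:

```python
from django.contrib import admin
from django.contrib.admin import AdminSite

from myapp.models import Author  # hypothetical app/model

my_site = AdminSite(name="my_admin")

# Registers AuthorAdmin on my_site instead of the default admin site.
@admin.register(Author, site=my_site)
class AuthorAdmin(admin.ModelAdmin):
    list_display = ("name",)  # hypothetical model field
```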
ThiagoGarciaAlves/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/relativeImportsToModulesInSameMovedPackageNotUpdated/before/src/pkg/subpkg/m1.py
|
76
|
from . import m2
import m3
import subsubpkg as foo
from . import subsubpkg as bar
from .subsubpkg import m4
from .. import m5
print(m2, m3, m4, m5, foo, bar)
|
aerickson/ansible
|
refs/heads/devel
|
lib/ansible/modules/web_infrastructure/ansible_tower/tower_group.py
|
33
|
#!/usr/bin/python
#coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_group
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower group.
description:
- Create, update, or destroy Ansible Tower groups. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- The name to use for the group.
required: True
description:
description:
- The description to use for the group.
required: False
default: null
inventory:
description:
- Inventory the group should be made a member of.
required: True
variables:
description:
- Variables to use for the group, use '@' for a file.
required: False
default: null
credential:
description:
- Credential to use for the group.
required: False
default: null
source:
description:
- The source to use for this group.
required: False
default: null
choices: ["manual", "file", "ec2", "rax", "vmware", "gce", "azure", "azure_rm", "openstack", "satellite6" , "cloudforms", "custom"]
source_regions:
description:
- Regions for cloud provider.
required: False
default: null
source_vars:
description:
- Override variables from source with variables from this field.
required: False
default: null
instance_filters:
description:
- Comma-separated list of filter expressions for matching hosts.
required: False
default: null
group_by:
description:
- Limit groups automatically created from inventory source.
required: False
default: null
source_script:
description:
- Inventory script to be used when group type is "custom".
required: False
default: null
overwrite:
description:
        - Delete child groups and hosts not found in source.
required: False
default: False
overwrite_vars:
description:
- Override vars in child groups and hosts with those from external source.
required: False
default: null
update_on_launch:
description:
- Refresh inventory data from its source each time a job is run.
required: False
default: False
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
tower_host:
description:
- URL to your Tower instance.
required: False
default: null
tower_username:
description:
- Username for your Tower instance.
required: False
default: null
tower_password:
description:
- Password for your Tower instance.
required: False
default: null
tower_verify_ssl:
description:
        - Whether to verify SSL certificates when connecting to Tower. If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
required: False
default: True
tower_config_file:
description:
- Path to the Tower config file. See notes.
required: False
default: null
requirements:
- "python >= 2.6"
- "ansible-tower-cli >= 3.0.2"
notes:
    - If no I(config_file) is provided, we will attempt to use the tower-cli
      library defaults to find your Tower host information.
- I(config_file) should contain Tower configuration in the following format
host=hostname
username=username
password=password
'''
EXAMPLES = '''
- name: Add tower group
tower_group:
name: localhost
description: "Local Host Group"
inventory: "Local Inventory"
state: present
tower_config_file: "~/tower_cli.cfg"
'''
try:
import os
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def main():
module = AnsibleModule(
argument_spec = dict(
name = dict(required=True),
description = dict(),
inventory = dict(required=True),
variables = dict(),
credential = dict(),
source = dict(choices=["manual", "file", "ec2", "rax", "vmware",
"gce", "azure", "azure_rm", "openstack",
"satellite6" , "cloudforms", "custom"], default="manual"),
source_regions = dict(),
source_vars = dict(),
instance_filters = dict(),
group_by = dict(),
source_script = dict(),
overwrite = dict(type='bool', default=False),
overwrite_vars = dict(),
        update_on_launch = dict(type='bool', default=False),
tower_host = dict(),
tower_username = dict(),
tower_password = dict(no_log=True),
tower_verify_ssl = dict(type='bool', default=True),
tower_config_file = dict(type='path'),
state = dict(choices=['present', 'absent'], default='present'),
),
supports_check_mode=True
)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
name = module.params.get('name')
inventory = module.params.get('inventory')
credential = module.params.get('credential')
state = module.params.get('state')
variables = module.params.get('variables')
if variables:
if variables.startswith('@'):
filename = os.path.expanduser(variables[1:])
variables = module.contents_from_file(filename)
json_output = {'group': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
group = tower_cli.get_resource('group')
try:
params = module.params.copy()
params['create_on_missing'] = True
params['variables'] = variables
inv_res = tower_cli.get_resource('inventory')
inv = inv_res.get(name=inventory)
params['inventory'] = inv['id']
if credential:
cred_res = tower_cli.get_resource('credential')
cred = cred_res.get(name=credential)
params['credential'] = cred['id']
if state == 'present':
result = group.modify(**params)
json_output['id'] = result['id']
elif state == 'absent':
result = group.delete(**params)
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update the group, inventory not found: {0}'.format(excinfo), changed=False)
        except (exc.ConnectionError, exc.BadRequest) as excinfo:
module.fail_json(msg='Failed to update the group: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
|
python-dirbtuves/Misago
|
refs/heads/ubuntu-lt
|
misago/apps/profiles/followers/urls.py
|
3
|
from django.conf.urls import patterns, url
def register_profile_urls(first=False):
urlpatterns = []
if first:
urlpatterns += patterns('misago.apps.profiles.followers.views',
url(r'^$', 'followers', name="user"),
url(r'^$', 'followers', name="user_followers"),
url(r'^(?P<page>[1-9]([0-9]+)?)/$', 'followers', name="user_followers"),
)
else:
urlpatterns += patterns('misago.apps.profiles.followers.views',
url(r'^followers/$', 'followers', name="user_followers"),
url(r'^followers/(?P<page>[1-9]([0-9]+)?)/$', 'followers', name="user_followers"),
)
return urlpatterns
|
polinom/djangopeople
|
refs/heads/master
|
djangopeople/djangopeople/groupedselect.py
|
1
|
from django import forms
from django.forms.util import flatatt
from django.utils.encoding import smart_unicode
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
# From http://www.djangosnippets.org/snippets/200/
# widget for select with optional opt groups
# modified from ticket 3442
# not sure if it's better but it doesn't force all options to be grouped
# Example:
# groceries = ((False, (('milk','milk'), (-1,'eggs'))),
# ('fruit', ((0,'apple'), (1,'orange'))),
# ('', (('yum','beer'), )),
# )
# grocery_list = GroupedChoiceField(choices=groceries)
# Renders:
# <select name="grocery_list" id="id_grocery_list">
# <option value="milk">milk</option>
# <option value="-1">eggs</option>
# <optgroup label="fruit">
# <option value="0">apple</option>
# <option value="1">orange</option>
# </optgroup>
# <option value="yum">beer</option>
# </select>
class GroupedSelect(forms.Select):
def render(self, name, value, attrs=None, choices=()):
if value is None:
value = ''
final_attrs = self.build_attrs(attrs, name=name)
output = [u'<select%s>' % flatatt(final_attrs)]
str_value = smart_unicode(value)
for group_label, group in self.choices:
if group_label: # should belong to an optgroup
group_label = smart_unicode(group_label)
output.append(u'<optgroup label="%s">' % escape(group_label))
for k, v in group:
option_value = smart_unicode(k)
option_label = smart_unicode(v)
selected_html = ((option_value == str_value) and
u' selected="selected"' or '')
output.append(u'<option value="%s"%s>%s</option>' % (
escape(option_value), selected_html,
escape(option_label)
))
if group_label:
output.append(u'</optgroup>')
output.append(u'</select>')
return mark_safe(u'\n'.join(output))
# field for grouped choices, handles cleaning of funky choice tuple
class GroupedChoiceField(forms.ChoiceField):
def __init__(self, choices=(), required=True, widget=GroupedSelect,
label=None, initial=None, help_text=None):
super(forms.ChoiceField, self).__init__(required, widget, label,
initial, help_text)
self.choices = choices
def clean(self, value):
"""
Validates that the input is in self.choices.
"""
value = super(forms.ChoiceField, self).clean(value)
if value in (None, ''):
value = u''
value = smart_unicode(value)
if value == u'':
return value
valid_values = []
for group_label, group in self.choices:
valid_values += [str(k) for k, v in group]
if value not in valid_values:
raise forms.ValidationError(
_(u'Select a valid choice. That choice is not one of the '
'available choices.')
)
return value
|
cgstudiomap/cgstudiomap
|
refs/heads/develop
|
main/eggs/PyChart-1.39-py2.7.egg/pychart/range_plot_doc.py
|
143
|
# automatically generated by generate_docs.py.
doc=" "
|
Ikergune/firos
|
refs/heads/master
|
scripts/include/pubsub/contextbroker/cbQueryBuilder.py
|
1
|
# MIT License
#
# Copyright (c) <2015> <Ikergune, Etxetar>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import urllib2
from include.logger import Log
from include.constants import DATA_CONTEXTBROKER
from include.pubsub.iPubSub import IqueryBuilder
class CbQueryBuilder(IqueryBuilder):
    ## \brief Query data from the context broker
def findById(self, entity_id, data_type="ROBOT", isPattern=False):
## \brief Get entity data from context broker
        # \param entity_id entity name (may be a regular expression)
        # \param data_type entity type
        # \param isPattern whether entity_id is a pattern (False by default)
url = "http://{}:{}/NGSI10/queryContext".format(DATA_CONTEXTBROKER["ADDRESS"], DATA_CONTEXTBROKER["PORT"])
data = {
"entities": [
{
"type": data_type,
"isPattern": "true" if isPattern else "false",
"id": entity_id
}
]
}
return self._sendRequest(url, json.dumps(data))
def _sendRequest(self, url, data, method=None):
        ## \brief Send a request to the context broker
        # \param url URL to send the request to
        # \param data JSON payload to send with the request
        # \param method optional HTTP method override (with a request body
        #        supplied, urllib2 defaults to POST)
try:
request = urllib2.Request(url, data, {'Content-Type': 'application/json', 'Accept': 'application/json'})
if method is not None:
request.get_method = lambda: method
response = urllib2.urlopen(request)
data = response.read()
response_body = json.loads(data)
response.close()
return response_body
except Exception as ex:
Log("ERROR", ex.reason)
return None
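# A minimal usage sketch (illustrative names; assumes DATA_CONTEXTBROKER is
# configured with "ADDRESS" and "PORT" and that the broker knows an entity
# "turtle1" of type "ROBOT"):
#
#   builder = CbQueryBuilder()
#   response = builder.findById("turtle1", data_type="ROBOT")
#   if response is not None:
#       print response["contextResponses"]  # NGSI10 responses typically use this key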
|
AltSchool/django-allauth
|
refs/heads/master
|
allauth/socialaccount/providers/daum/__init__.py
|
12133432
| |
go-bears/nupic
|
refs/heads/master
|
src/nupic/bindings/proto/__init__.py
|
12133432
| |
davgibbs/django
|
refs/heads/master
|
django/conf/locale/is/__init__.py
|
12133432
| |
gangadhar-kadam/sapphire_app
|
refs/heads/1310
|
stock/report/requested_items_to_be_transferred/__init__.py
|
12133432
| |
craynot/django
|
refs/heads/master
|
django/views/static.py
|
300
|
"""
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
from __future__ import unicode_literals
import mimetypes
import os
import posixpath
import re
import stat
from django.http import (
FileResponse, Http404, HttpResponse, HttpResponseNotModified,
HttpResponseRedirect,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.http import http_date, parse_http_date
from django.utils.six.moves.urllib.parse import unquote
from django.utils.translation import ugettext as _, ugettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
"""
Serve static files below a given point in the directory structure.
To use, put a URL pattern such as::
from django.views.static import serve
url(r'^(?P<path>.*)$', serve, {'document_root': '/path/to/my/files/'})
in your URLconf. You must provide the ``document_root`` param. You may
also set ``show_indexes`` to ``True`` if you'd like to serve a basic index
of the directory. This index view will use the template hardcoded below,
but if you'd like to override it, you can create a template called
``static/directory_index.html``.
"""
path = posixpath.normpath(unquote(path))
path = path.lstrip('/')
newpath = ''
for part in path.split('/'):
if not part:
# Strip empty path components.
continue
drive, part = os.path.splitdrive(part)
head, part = os.path.split(part)
if part in (os.curdir, os.pardir):
# Strip '.' and '..' in path.
continue
newpath = os.path.join(newpath, part).replace('\\', '/')
if newpath and path != newpath:
return HttpResponseRedirect(newpath)
fullpath = os.path.join(document_root, newpath)
if os.path.isdir(fullpath):
if show_indexes:
return directory_index(newpath, fullpath)
raise Http404(_("Directory indexes are not allowed here."))
if not os.path.exists(fullpath):
raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
# Respect the If-Modified-Since header.
statobj = os.stat(fullpath)
if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
statobj.st_mtime, statobj.st_size):
return HttpResponseNotModified()
content_type, encoding = mimetypes.guess_type(fullpath)
content_type = content_type or 'application/octet-stream'
response = FileResponse(open(fullpath, 'rb'), content_type=content_type)
response["Last-Modified"] = http_date(statobj.st_mtime)
if stat.S_ISREG(statobj.st_mode):
response["Content-Length"] = statobj.st_size
if encoding:
response["Content-Encoding"] = encoding
return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% if directory != "/" %}
<li><a href="../">../</a></li>
{% endif %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = ugettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
try:
t = loader.select_template([
'static/directory_index.html',
'static/directory_index',
])
except TemplateDoesNotExist:
t = Engine().from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)
files = []
for f in os.listdir(fullpath):
if not f.startswith('.'):
if os.path.isdir(os.path.join(fullpath, f)):
f += '/'
files.append(f)
c = Context({
'directory': path + '/',
'file_list': files,
})
return HttpResponse(t.render(c))
def was_modified_since(header=None, mtime=0, size=0):
"""
Was something modified since the user last downloaded it?
header
This is the value of the If-Modified-Since header. If this is None,
I'll just return True.
mtime
This is the modification time of the item we're talking about.
size
This is the size of the item we're talking about.
"""
try:
if header is None:
raise ValueError
matches = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
re.IGNORECASE)
header_mtime = parse_http_date(matches.group(1))
header_len = matches.group(3)
if header_len and int(header_len) != size:
raise ValueError
if int(mtime) > header_mtime:
raise ValueError
except (AttributeError, ValueError, OverflowError):
return True
return False
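# Illustrative example: the header may carry the non-standard "length"
# parameter some browsers send, e.g.
#
#   was_modified_since('Sat, 29 Oct 1994 19:43:31 GMT; length=120',
#                      mtime=783459811, size=120)
#
# returns False (not modified) because the size matches and the mtime does
# not exceed the header date; any mismatch or parse failure returns True.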
|
kaleidos/intranet
|
refs/heads/master
|
backend/cmsutils/templatetags/datefilters.py
|
3
|
from django import template
from django.conf import settings
from django.utils.translation import get_language
register = template.Library()
## ("short format", "long format") as strptime format
DATEFORMAT_TRANSLATIONS_DEFAULT = {
'es': ("%d/%m/%Y", ),
'fr': ("%d/%m/%Y", ),
'en': ("%Y-%m-%d", ),
'de': ("%d.%m.%Y", ),
}
DATEFORMAT_MAP = {
'short': 0,
# 'long': 1,
}
def get_date_format(format, showtime=False):
formatindex = DATEFORMAT_MAP[format]
lang = get_language()
dateformats = getattr(settings, 'DATEFORMAT_TRANSLATIONS', DATEFORMAT_TRANSLATIONS_DEFAULT)
if lang in dateformats:
datetimeformat = dateformats[lang][formatindex]
else:
datetimeformat = dateformats[settings.LANGUAGE_CODE][formatindex]
if showtime:
datetimeformat += " %H:%M"
return datetimeformat
@register.filter
def transdate(value, format='short'):
"""
    Use transdate to format a date according to the language selected in
    the session.
Usage:
{% load datefilters %}
{{ datefield|transdate }}
"""
if value:
format = get_date_format(format)
return value.strftime(format)
@register.filter
def transdatetime(value, format='short'):
"""
    Use transdatetime to format a datetime (date plus time) according to
    the language selected in the session.
    Usage:
    {% load datefilters %}
    {{ datetimefield|transdatetime }}
"""
if value:
dateformat = get_date_format(format, showtime=True)
return value.strftime(dateformat)
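# Illustrative template usage (assumes the active language is 'es' and the
# value is datetime.date(2009, 5, 17)):
#
#   {{ value|transdate }}       ->  "17/05/2009"
#   {{ value|transdatetime }}   ->  "17/05/2009 09:30" for a matching datetime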
|
francis-liberty/PHD
|
refs/heads/master
|
tests/test_srt.py
|
1
|
import unittest
import os
import parser.srt as psrt
class TestSRT(unittest.TestCase):
def setUp(self):
pass
def test_parse(self):
dr = os.path.dirname(__file__)
file_path = os.path.join(dr, '../data/test.srt')
texts = psrt.parse(file_path)
print texts
|
inspirehep/invenio
|
refs/heads/prod
|
modules/bibdocfile/lib/icon_migration_kit.py
|
23
|
## This file is part of Invenio.
## Copyright (C) 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
This script updates the filesystem and database structure WRT icons.
In particular it will move all the icons information out of bibdoc_bibdoc
tables and into the normal bibdoc + subformat infrastructure.
"""
import sys
from datetime import datetime
from invenio.textutils import wrap_text_in_a_box, wait_for_user
from invenio.bibtask import check_running_process_user
from invenio.dbquery import run_sql, OperationalError
from invenio.bibdocfile import BibDoc
from invenio.config import CFG_LOGDIR, CFG_SITE_SUPPORT_EMAIL
from invenio.bibdocfilecli import cli_fix_marc
from invenio.errorlib import register_exception
from invenio.intbitset import intbitset
from invenio.search_engine import record_exists
def retrieve_bibdoc_bibdoc():
return run_sql('SELECT id_bibdoc1, id_bibdoc2 from bibdoc_bibdoc')
def get_recid_from_docid(docid):
return run_sql('SELECT id_bibrec FROM bibrec_bibdoc WHERE id_bibdoc=%s', (docid, ))
def backup_tables(drop=False):
"""This function create a backup of bibrec_bibdoc, bibdoc and bibdoc_bibdoc tables. Returns False in case dropping of previous table is needed."""
if drop:
run_sql('DROP TABLE bibdoc_bibdoc_backup_for_icon')
try:
run_sql("""CREATE TABLE bibdoc_bibdoc_backup_for_icon (KEY id_bibdoc1(id_bibdoc1),
KEY id_bibdoc2(id_bibdoc2)) SELECT * FROM bibdoc_bibdoc""")
except OperationalError, e:
if not drop:
return False
raise e
return True
def fix_bibdoc_bibdoc(id_bibdoc1, id_bibdoc2, logfile):
"""
Migrate an icon.
"""
try:
the_bibdoc = BibDoc.create_instance(id_bibdoc1)
except Exception, err:
msg = "WARNING: when opening docid %s: %s" % (id_bibdoc1, err)
print >> logfile, msg
print msg
return True
try:
msg = "Fixing icon for the document %s" % (id_bibdoc1, )
print msg,
print >> logfile, msg,
the_icon = BibDoc.create_instance(id_bibdoc2)
for a_file in the_icon.list_latest_files():
the_bibdoc.add_icon(a_file.get_full_path(), format=a_file.get_format())
the_icon.delete()
run_sql("DELETE FROM bibdoc_bibdoc WHERE id_bibdoc1=%s AND id_bibdoc2=%s", (id_bibdoc1, id_bibdoc2))
print "OK"
print >> logfile, "OK"
return True
except Exception, err:
print "ERROR: %s" % err
print >> logfile, "ERROR: %s" % err
register_exception()
return False
def main():
"""Core loop."""
check_running_process_user()
logfilename = '%s/fulltext_files_migration_kit-%s.log' % (CFG_LOGDIR, datetime.today().strftime('%Y%m%d%H%M%S'))
try:
logfile = open(logfilename, 'w')
except IOError, e:
print wrap_text_in_a_box('NOTE: it\'s impossible to create the log:\n\n %s\n\nbecause of:\n\n %s\n\nPlease run this migration kit as the same user who runs Invenio (e.g. Apache)' % (logfilename, e), style='conclusion', break_long=False)
sys.exit(1)
bibdoc_bibdoc = retrieve_bibdoc_bibdoc()
    print wrap_text_in_a_box("""This script migrates the filesystem structure used to store icon files to the new, stricter structure.
This script must not be run during normal Invenio operations.
It is safe to run this script. No file will be deleted.
Anyway it is recommended to run a backup of the filesystem structure just in case.
A backup of the database tables involved will be automatically performed.""", style='important')
if not bibdoc_bibdoc:
print wrap_text_in_a_box("No need for migration", style='conclusion')
return
print "%s icons will be migrated/fixed." % len(bibdoc_bibdoc)
wait_for_user()
print "Backing up database tables"
try:
if not backup_tables():
print wrap_text_in_a_box("""It appears that is not the first time that you run this script.
Backup tables have been already created by a previous run.
In order for the script to go further they need to be removed.""", style='important')
wait_for_user()
print "Backing up database tables (after dropping previous backup)",
backup_tables(drop=True)
print "-> OK"
else:
print "-> OK"
except Exception, e:
print wrap_text_in_a_box("Unexpected error while backing up tables. Please, do your checks: %s" % e, style='conclusion')
sys.exit(1)
to_fix_marc = intbitset()
print "Created a complete log file into %s" % logfilename
try:
try:
for id_bibdoc1, id_bibdoc2 in bibdoc_bibdoc:
try:
record_does_exist = True
recids = get_recid_from_docid(id_bibdoc1)
if not recids:
print "Skipping %s" % id_bibdoc1
continue
for recid in recids:
if record_exists(recid[0]) > 0:
to_fix_marc.add(recid[0])
else:
record_does_exist = False
if not fix_bibdoc_bibdoc(id_bibdoc1, id_bibdoc2, logfile):
if record_does_exist:
raise StandardError("Error when correcting document ID %s" % id_bibdoc1)
except Exception, err:
print >> logfile, "ERROR: %s" % err
print wrap_text_in_a_box("DONE", style='conclusion')
except:
logfile.close()
register_exception()
print wrap_text_in_a_box(
title = "INTERRUPTED BECAUSE OF ERROR!",
body = """Please see the log file %s for what was the status prior to the error. Contact %s in case of problems, attaching the log.""" % (logfilename, CFG_SITE_SUPPORT_EMAIL),
style = 'conclusion')
sys.exit(1)
finally:
print "Scheduling FIX-MARC to synchronize MARCXML for updated records."
cli_fix_marc(options={}, explicit_recid_set=to_fix_marc)
if __name__ == '__main__':
main()
|
fpy171/django
|
refs/heads/master
|
django/conf/locale/vi/__init__.py
|
12133432
| |
rockneurotiko/django
|
refs/heads/master
|
tests/dbshell/__init__.py
|
12133432
| |
dsfsdgsbngfggb/odoo
|
refs/heads/8.0
|
addons/hr_gamification/wizard/__init__.py
|
388
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import grant_badge
|
smenon8/AlgDataStruct_practice
|
refs/heads/master
|
practice_problems/sample.py
|
2
|
from itertools import product
k,m = input().split()
k,m = int(k),int(m)
a = []
for lno in range(k):
a.append(input().split())
ad = []
for l in a:
    l = list(map(int, l[1:]))
ad.append(l)
al = product(*ad)
maxim = []
for i in al:
i = [x**2 for x in i]
    summ = sum(i) % m
maxim.append(summ)
print(max(maxim))
|
MarcosCommunity/odoo
|
refs/heads/marcos-8.0
|
comunity_modules/website_countdown/models/countdown.py
|
1
|
# -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp.osv import osv, orm, fields
from openerp.addons.base.ir.ir_qweb import HTMLSafe
class Countdown(orm.AbstractModel):
_name = 'website.qweb.field.countdown'
_inherit = 'website.qweb.field'
def record_to_html(self, cr, uid, field_name, record, options=None, context=None):
if context is None:
context = {}
html = self.pool["ir.ui.view"].render(cr, uid, "website_countdown.front_countdown", {'countdown_date':record[field_name], 'options':options}, engine='ir.qweb', context=context).decode('utf8')
return HTMLSafe(html)
|
Jeremy-WEI/python-mode
|
refs/heads/develop
|
pymode/libs2/rope/refactor/change_signature.py
|
59
|
import copy
import rope.base.exceptions
from rope.base import pyobjects, taskhandle, evaluate, worder, codeanalyze, utils
from rope.base.change import ChangeContents, ChangeSet
from rope.refactor import occurrences, functionutils
class ChangeSignature(object):
def __init__(self, project, resource, offset):
self.pycore = project.pycore
self.resource = resource
self.offset = offset
self._set_name_and_pyname()
if self.pyname is None or self.pyname.get_object() is None or \
not isinstance(self.pyname.get_object(), pyobjects.PyFunction):
raise rope.base.exceptions.RefactoringError(
'Change method signature should be performed on functions')
def _set_name_and_pyname(self):
self.name = worder.get_name_at(self.resource, self.offset)
this_pymodule = self.pycore.resource_to_pyobject(self.resource)
self.primary, self.pyname = evaluate.eval_location2(
this_pymodule, self.offset)
if self.pyname is None:
return
pyobject = self.pyname.get_object()
if isinstance(pyobject, pyobjects.PyClass) and \
'__init__' in pyobject:
self.pyname = pyobject['__init__']
self.name = '__init__'
pyobject = self.pyname.get_object()
self.others = None
if self.name == '__init__' and \
isinstance(pyobject, pyobjects.PyFunction) and \
isinstance(pyobject.parent, pyobjects.PyClass):
pyclass = pyobject.parent
self.others = (pyclass.get_name(),
pyclass.parent[pyclass.get_name()])
def _change_calls(self, call_changer, in_hierarchy=None, resources=None,
handle=taskhandle.NullTaskHandle()):
if resources is None:
resources = self.pycore.get_python_files()
changes = ChangeSet('Changing signature of <%s>' % self.name)
job_set = handle.create_jobset('Collecting Changes', len(resources))
finder = occurrences.create_finder(
self.pycore, self.name, self.pyname, instance=self.primary,
in_hierarchy=in_hierarchy and self.is_method())
if self.others:
name, pyname = self.others
constructor_finder = occurrences.create_finder(
self.pycore, name, pyname, only_calls=True)
finder = _MultipleFinders([finder, constructor_finder])
for file in resources:
job_set.started_job(file.path)
change_calls = _ChangeCallsInModule(
self.pycore, finder, file, call_changer)
changed_file = change_calls.get_changed_module()
if changed_file is not None:
changes.add_change(ChangeContents(file, changed_file))
job_set.finished_job()
return changes
def get_args(self):
"""Get function arguments.
Return a list of ``(name, default)`` tuples for all but star
and double star arguments. For arguments that don't have a
default, `None` will be used.
"""
return self._definfo().args_with_defaults
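        # Illustrative (defaults are kept in their source-text form): for
        # ``def f(a, b=1, *args, **kwds)`` this returns roughly
        # ``[('a', None), ('b', '1')]``.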
def is_method(self):
pyfunction = self.pyname.get_object()
return isinstance(pyfunction.parent, pyobjects.PyClass)
@utils.deprecated('Use `ChangeSignature.get_args()` instead')
def get_definition_info(self):
return self._definfo()
def _definfo(self):
return functionutils.DefinitionInfo.read(self.pyname.get_object())
@utils.deprecated()
def normalize(self):
changer = _FunctionChangers(
self.pyname.get_object(), self.get_definition_info(),
[ArgumentNormalizer()])
return self._change_calls(changer)
@utils.deprecated()
def remove(self, index):
changer = _FunctionChangers(
self.pyname.get_object(), self.get_definition_info(),
[ArgumentRemover(index)])
return self._change_calls(changer)
@utils.deprecated()
def add(self, index, name, default=None, value=None):
changer = _FunctionChangers(
self.pyname.get_object(), self.get_definition_info(),
[ArgumentAdder(index, name, default, value)])
return self._change_calls(changer)
@utils.deprecated()
def inline_default(self, index):
changer = _FunctionChangers(
self.pyname.get_object(), self.get_definition_info(),
[ArgumentDefaultInliner(index)])
return self._change_calls(changer)
@utils.deprecated()
def reorder(self, new_ordering):
changer = _FunctionChangers(
self.pyname.get_object(), self.get_definition_info(),
[ArgumentReorderer(new_ordering)])
return self._change_calls(changer)
def get_changes(self, changers, in_hierarchy=False, resources=None,
task_handle=taskhandle.NullTaskHandle()):
"""Get changes caused by this refactoring
        `changers` is a list of `_ArgumentChanger`\s. If `in_hierarchy`
        is `True` the changers are applied to all matching methods in
        the class hierarchy.
`resources` can be a list of `rope.base.resource.File`\s that
should be searched for occurrences; if `None` all python files
in the project are searched.
"""
function_changer = _FunctionChangers(self.pyname.get_object(),
self._definfo(), changers)
return self._change_calls(function_changer, in_hierarchy,
resources, task_handle)
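# A minimal usage sketch (assumes an open rope ``project`` and an ``offset``
# pointing at a function name inside ``mod.py``; names are illustrative):
#
#   signature = ChangeSignature(project, project.get_resource('mod.py'), offset)
#   changes = signature.get_changes([ArgumentReorderer([1, 0])])
#   project.do(changes)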
class _FunctionChangers(object):
def __init__(self, pyfunction, definition_info, changers=None):
self.pyfunction = pyfunction
self.definition_info = definition_info
self.changers = changers
self.changed_definition_infos = self._get_changed_definition_infos()
def _get_changed_definition_infos(self):
result = []
definition_info = self.definition_info
result.append(definition_info)
for changer in self.changers:
definition_info = copy.deepcopy(definition_info)
changer.change_definition_info(definition_info)
result.append(definition_info)
return result
def change_definition(self, call):
return self.changed_definition_infos[-1].to_string()
def change_call(self, primary, pyname, call):
call_info = functionutils.CallInfo.read(
primary, pyname, self.definition_info, call)
mapping = functionutils.ArgumentMapping(self.definition_info, call_info)
for definition_info, changer in zip(self.changed_definition_infos, self.changers):
changer.change_argument_mapping(definition_info, mapping)
return mapping.to_call_info(self.changed_definition_infos[-1]).to_string()
class _ArgumentChanger(object):
def change_definition_info(self, definition_info):
pass
def change_argument_mapping(self, definition_info, argument_mapping):
pass
class ArgumentNormalizer(_ArgumentChanger):
pass
class ArgumentRemover(_ArgumentChanger):
def __init__(self, index):
self.index = index
def change_definition_info(self, call_info):
if self.index < len(call_info.args_with_defaults):
del call_info.args_with_defaults[self.index]
elif self.index == len(call_info.args_with_defaults) and \
call_info.args_arg is not None:
call_info.args_arg = None
elif (self.index == len(call_info.args_with_defaults) and
call_info.args_arg is None and call_info.keywords_arg is not None) or \
(self.index == len(call_info.args_with_defaults) + 1 and
call_info.args_arg is not None and call_info.keywords_arg is not None):
call_info.keywords_arg = None
def change_argument_mapping(self, definition_info, mapping):
if self.index < len(definition_info.args_with_defaults):
            name = definition_info.args_with_defaults[self.index][0]
if name in mapping.param_dict:
del mapping.param_dict[name]
class ArgumentAdder(_ArgumentChanger):
def __init__(self, index, name, default=None, value=None):
self.index = index
self.name = name
self.default = default
self.value = value
def change_definition_info(self, definition_info):
for pair in definition_info.args_with_defaults:
if pair[0] == self.name:
raise rope.base.exceptions.RefactoringError(
'Adding duplicate parameter: <%s>.' % self.name)
definition_info.args_with_defaults.insert(self.index,
(self.name, self.default))
def change_argument_mapping(self, definition_info, mapping):
if self.value is not None:
mapping.param_dict[self.name] = self.value
class ArgumentDefaultInliner(_ArgumentChanger):
def __init__(self, index):
self.index = index
self.remove = False
def change_definition_info(self, definition_info):
if self.remove:
definition_info.args_with_defaults[self.index] = \
(definition_info.args_with_defaults[self.index][0], None)
def change_argument_mapping(self, definition_info, mapping):
default = definition_info.args_with_defaults[self.index][1]
name = definition_info.args_with_defaults[self.index][0]
if default is not None and name not in mapping.param_dict:
mapping.param_dict[name] = default
class ArgumentReorderer(_ArgumentChanger):
def __init__(self, new_order, autodef=None):
"""Construct an `ArgumentReorderer`
Note that the `new_order` is a list containing the new
position of parameters; not the position each parameter
is going to be moved to. (changed in ``0.5m4``)
For example changing ``f(a, b, c)`` to ``f(c, a, b)``
requires passing ``[2, 0, 1]`` and *not* ``[1, 2, 0]``.
The `autodef` (automatic default) argument, forces rope to use
it as a default if a default is needed after the change. That
happens when an argument without default is moved after
another that has a default value. Note that `autodef` should
be a string or `None`; the latter disables adding automatic
default.
"""
self.new_order = new_order
self.autodef = autodef
def change_definition_info(self, definition_info):
new_args = list(definition_info.args_with_defaults)
for new_index, index in enumerate(self.new_order):
new_args[new_index] = definition_info.args_with_defaults[index]
seen_default = False
for index, (arg, default) in enumerate(list(new_args)):
if default is not None:
seen_default = True
if seen_default and default is None and self.autodef is not None:
new_args[index] = (arg, self.autodef)
definition_info.args_with_defaults = new_args
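# Worked example for ArgumentReorderer (values illustrative): with
# ``args_with_defaults == [('a', None), ('b', None), ('c', None)]`` and
# ``new_order == [2, 0, 1]``, change_definition_info() rewrites the list to
# ``[('c', None), ('a', None), ('b', None)]``, i.e. ``f(a, b, c)`` becomes
# ``f(c, a, b)`` as described in the docstring above.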
class _ChangeCallsInModule(object):
def __init__(self, pycore, occurrence_finder, resource, call_changer):
self.pycore = pycore
self.occurrence_finder = occurrence_finder
self.resource = resource
self.call_changer = call_changer
def get_changed_module(self):
word_finder = worder.Worder(self.source)
change_collector = codeanalyze.ChangeCollector(self.source)
for occurrence in self.occurrence_finder.find_occurrences(self.resource):
if not occurrence.is_called() and not occurrence.is_defined():
continue
start, end = occurrence.get_primary_range()
begin_parens, end_parens = word_finder.get_word_parens_range(end - 1)
if occurrence.is_called():
primary, pyname = occurrence.get_primary_and_pyname()
changed_call = self.call_changer.change_call(
primary, pyname, self.source[start:end_parens])
else:
changed_call = self.call_changer.change_definition(
self.source[start:end_parens])
if changed_call is not None:
change_collector.add_change(start, end_parens, changed_call)
return change_collector.get_changed()
@property
@utils.saveit
def pymodule(self):
return self.pycore.resource_to_pyobject(self.resource)
@property
@utils.saveit
def source(self):
if self.resource is not None:
return self.resource.read()
else:
return self.pymodule.source_code
@property
@utils.saveit
def lines(self):
return self.pymodule.lines
class _MultipleFinders(object):
def __init__(self, finders):
self.finders = finders
def find_occurrences(self, resource=None, pymodule=None):
all_occurrences = []
for finder in self.finders:
all_occurrences.extend(finder.find_occurrences(resource, pymodule))
all_occurrences.sort(self._cmp_occurrences)
return all_occurrences
def _cmp_occurrences(self, o1, o2):
return cmp(o1.get_primary_range(), o2.get_primary_range())
|
gibiansky/tensorflow
|
refs/heads/master
|
tensorflow/python/ops/gradient_checker.py
|
33
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradient checker for any ops, graphs.
The gradient checker verifies numerically that an op/graph properly
computes the gradients
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients
from tensorflow.python.platform import tf_logging as logging
def _product(t):
if isinstance(t, int):
return t
else:
y = 1
for x in t:
y *= x
return y
def _extra_feeds(extra_feed_dict, new_feeds):
if not extra_feed_dict:
return new_feeds
r = {}
r.update(extra_feed_dict)
r.update(new_feeds)
return r
def _compute_theoretical_jacobian(x, x_shape, x_data, dy, dy_shape, dx,
extra_feed_dict):
"""Computes the theoretical Jacobian for dy/dx.
Computes the theoretical Jacobian using the ops generated by
compute_gradient().
Args:
x: the tensor "x".
x_shape: the dimensions of x as a tuple or an array of ints.
    x_data: a numpy array as the input data for x
dy: the tensor "dy".
dy_shape: the dimensions of dy as a tuple or an array of ints.
dx: Tensor or IndexedSlices representing dx
extra_feed_dict: dict that allows fixing specified tensor values
during the jacobian calculation.
Returns:
A 2-d numpy array representing the Jacobian for dy/dx. It has "x_size" rows
and "dy_size" columns where "x_size" is the number of elements in x and
"dy_size" is the number of elements in dy.
Raises:
ValueError: If `dy` is empty but the gradient is nonzero.
"""
# Complex vectors are treated as vectors of twice as many reals.
if x.dtype.is_complex:
x_shape = tuple(x_shape) + (2,)
dy_factor = 2 if dy.dtype.is_complex else 1
# To compute the jacobian, we treat x and y as one-dimensional vectors.
x_size = _product(x_shape)
x_val_size = _product(x_shape[1:]) # This is used for sparse gradients
dy_size = _product(dy_shape) * dy_factor
# Allocate 2-D Jacobian, with x dimensions smashed into the first
# dimension and y dimensions smashed into the second.
jacobian = np.zeros((x_size, dy_size),
dtype=x.dtype.real_dtype.as_numpy_dtype)
  # For each entry of dy, we set it to 1 and everything else to 0, and
  # compute the backprop -- this gives us one column of the Jacobian matrix.
dy_data = np.zeros(dy_shape, dtype=dy.dtype.as_numpy_dtype)
dy_data_flat = dy_data.ravel().view(dy.dtype.real_dtype.as_numpy_dtype)
sess = ops.get_default_session()
for col in range(dy_size):
dy_data_flat[col] = 1
if isinstance(dx, ops.IndexedSlices):
backprop_indices, backprop_values = sess.run(
[dx.indices, dx.values],
feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))
for i, v in zip(backprop_indices, backprop_values):
r_begin = i * x_val_size
r_end = r_begin + x_val_size
jacobian[r_begin:r_end, col] += v.flat
else:
assert isinstance(dx, ops.Tensor), "dx = " + str(dx)
backprop = sess.run(
dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))
jacobian[:, col] = backprop.ravel().view(jacobian.dtype)
dy_data_flat[col] = 0
# If the output is empty, run the gradients at least once and make sure
# they produce zeros.
if not dy_size:
backprop = sess.run(
dx, feed_dict=_extra_feeds(extra_feed_dict, {x: x_data, dy: dy_data}))
if backprop.shape != x_data.shape:
raise ValueError("Empty gradient has wrong shape: expected %s, got %s" %
(x_data.shape, backprop.shape))
if np.any(backprop):
raise ValueError("Empty tensor with nonzero gradients")
logging.vlog(1, "Theoretical Jacobian =\n%s", jacobian)
return jacobian
def _compute_numeric_jacobian(x, x_shape, x_data, y, y_shape, delta,
extra_feed_dict):
"""Computes the numeric Jacobian for dy/dx.
Computes the numeric Jacobian by slightly perturbing the inputs and
measuring the differences on the output.
Args:
x: the tensor "x".
x_shape: the dimensions of x as a tuple or an array of ints.
x_data: a numpy array as the input data for x
y: the tensor "y".
y_shape: the dimensions of y as a tuple or an array of ints.
delta: the amount of perturbation we give to the input
extra_feed_dict: dict that allows fixing specified tensor values
during the jacobian calculation.
Returns:
A 2-d numpy array representing the Jacobian for dy/dx. It has "x_size" rows
and "y_size" columns where "x_size" is the number of elements in x and
"y_size" is the number of elements in y.
"""
# To compute the jacobian, we treat x and y as one-dimensional vectors
x_size = _product(x_shape) * (2 if x.dtype.is_complex else 1)
y_size = _product(y_shape) * (2 if y.dtype.is_complex else 1)
x_dtype = x.dtype.real_dtype.as_numpy_dtype
y_dtype = y.dtype.real_dtype.as_numpy_dtype
# Make sure we have the right types
x_data = np.asarray(x_data, dtype=x.dtype.as_numpy_dtype)
scale = np.asarray(2 * delta, dtype=y_dtype)[()]
jacobian = np.zeros((x_size, y_size), dtype=x_dtype)
  # For each entry of x, we slightly perturb it by adding and subtracting a
  # delta, then compute the difference between the outputs. This gives us one
  # row of the Jacobian matrix.
for row in range(x_size):
x_pos = x_data.copy()
x_neg = x_data.copy()
x_pos.ravel().view(x_dtype)[row] += delta
y_pos = y.eval(feed_dict=_extra_feeds(extra_feed_dict, {x: x_pos}))
x_neg.ravel().view(x_dtype)[row] -= delta
y_neg = y.eval(feed_dict=_extra_feeds(extra_feed_dict, {x: x_neg}))
diff = (y_pos - y_neg) / scale
jacobian[row, :] = diff.ravel().view(y_dtype)
logging.vlog(1, "Numeric Jacobian =\n%s", jacobian)
return jacobian
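# The central-difference scheme above, in isolation (a hypothetical helper,
# not part of the original file): for y = x ** 2 the numeric Jacobian is
# approximately diag(2 * x).
def _numeric_jacobian_demo(x=None, delta=1e-3):
  """Standalone sketch of the central-difference Jacobian used above."""
  if x is None:
    x = np.array([1.0, 2.0])
  jacobian = np.zeros((x.size, x.size))
  for row in range(x.size):
    # Perturb one input entry at a time in both directions.
    x_pos, x_neg = x.copy(), x.copy()
    x_pos[row] += delta
    x_neg[row] -= delta
    jacobian[row, :] = (x_pos ** 2 - x_neg ** 2) / (2 * delta)
  return jacobian  # ~[[2., 0.], [0., 4.]] for the default x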
def _compute_dx_and_dy(x, y, y_shape):
"""Returns a node to compute gradient of x wrt y."""
# We make up a dy so that we can compute the gradients. We don't really use
# the value of dy -- we will always feed it. We need to add an identity node
# so that we can always feed it properly. Otherwise, for the Add operation,
# dx is the same as dy and we cannot fetch the tensor that we are feeding.
with x.graph.as_default():
dy_orig = constant_op.constant(1.0, shape=y_shape, dtype=y.dtype)
dy = array_ops.identity(dy_orig)
# We compute the gradients for x wrt. y
grads = gradients.gradients(y, x, dy)
assert len(grads) == 1
return grads[0], dy_orig
def _compute_gradient(x,
x_shape,
dx,
y,
y_shape,
dy,
x_init_value=None,
delta=1e-3,
extra_feed_dict=None):
"""Computes the theoretical and numerical jacobian."""
t = dtypes.as_dtype(x.dtype)
allowed_types = [dtypes.float16, dtypes.float32, dtypes.float64,
dtypes.complex64, dtypes.complex128]
assert t.base_dtype in allowed_types, "Don't support type %s for x" % t.name
t2 = dtypes.as_dtype(y.dtype)
assert t2.base_dtype in allowed_types, "Don't support type %s for y" % t2.name
if x_init_value is not None:
i_shape = list(x_init_value.shape)
assert(list(x_shape) == i_shape), "x_shape = %s, init_data shape = %s" % (
x_shape, i_shape)
x_data = x_init_value
else:
if t == dtypes.float16:
dtype = np.float16
elif t == dtypes.float32:
dtype = np.float32
else:
dtype = np.float64
x_data = np.asfarray(np.random.random_sample(x_shape), dtype=dtype)
jacob_t = _compute_theoretical_jacobian(
x, x_shape, x_data, dy, y_shape, dx, extra_feed_dict=extra_feed_dict)
jacob_n = _compute_numeric_jacobian(
x, x_shape, x_data, y, y_shape, delta, extra_feed_dict=extra_feed_dict)
return jacob_t, jacob_n
def _compute_gradient_list(x,
x_shape,
y,
y_shape,
x_init_value=None,
delta=1e-3,
init_targets=None,
extra_feed_dict=None):
"""Compute gradients for a list of x values."""
assert isinstance(x, list)
dx, dy = zip(*[_compute_dx_and_dy(xi, y, y_shape) for xi in x])
if init_targets is not None:
assert isinstance(init_targets, (list, tuple))
for init in init_targets:
init.run()
if x_init_value is None:
x_init_value = [None] * len(x)
ret = [_compute_gradient(xi, x_shapei, dxi, y, y_shape, dyi, x_init_valuei,
delta, extra_feed_dict=extra_feed_dict)
for xi, x_shapei, dxi, dyi, x_init_valuei in zip(x, x_shape, dx, dy,
x_init_value)]
return ret
def compute_gradient(x,
x_shape,
y,
y_shape,
x_init_value=None,
delta=1e-3,
init_targets=None,
extra_feed_dict=None):
"""Computes and returns the theoretical and numerical Jacobian.
If `x` or `y` is complex, the Jacobian will still be real but the
corresponding Jacobian dimension(s) will be twice as large. This is required
  even if both input and output are complex since TensorFlow graphs are not
necessarily holomorphic, and may have gradients not expressible as complex
numbers. For example, if `x` is complex with shape `[m]` and `y` is complex
with shape `[n]`, each Jacobian `J` will have shape `[m * 2, n * 2]` with
J[:m, :n] = d(Re y)/d(Re x)
J[:m, n:] = d(Im y)/d(Re x)
J[m:, :n] = d(Re y)/d(Im x)
J[m:, n:] = d(Im y)/d(Im x)
Args:
x: a tensor or list of tensors
x_shape: the dimensions of x as a tuple or an array of ints. If x is a list,
then this is the list of shapes.
y: a tensor
y_shape: the dimensions of y as a tuple or an array of ints.
x_init_value: (optional) a numpy array of the same shape as "x"
representing the initial value of x. If x is a list, this should be a list
of numpy arrays. If this is none, the function will pick a random tensor
as the initial value.
delta: (optional) the amount of perturbation.
init_targets: list of targets to run to initialize model params.
TODO(mrry): remove this argument.
extra_feed_dict: dict that allows fixing specified tensor values
during the Jacobian calculation.
Returns:
Two 2-d numpy arrays representing the theoretical and numerical
Jacobian for dy/dx. Each has "x_size" rows and "y_size" columns
where "x_size" is the number of elements in x and "y_size" is the
number of elements in y. If x is a list, returns a list of two numpy arrays.
"""
if extra_feed_dict is None:
extra_feed_dict = {}
if isinstance(x, list):
return _compute_gradient_list(x, x_shape, y, y_shape, x_init_value, delta,
init_targets, extra_feed_dict=extra_feed_dict)
else:
if init_targets is not None:
assert isinstance(init_targets, (list, tuple))
for init in init_targets:
init.run()
dx, dy = _compute_dx_and_dy(x, y, y_shape)
ret = _compute_gradient(x, x_shape, dx, y, y_shape, dy, x_init_value, delta,
extra_feed_dict=extra_feed_dict)
return ret
def compute_gradient_error(x,
x_shape,
y,
y_shape,
x_init_value=None,
delta=1e-3,
init_targets=None,
extra_feed_dict=None):
"""Computes the gradient error.
Computes the maximum error for dy/dx between the computed Jacobian and the
numerically estimated Jacobian.
  This function will modify the tensors passed in as it adds more operations
  and hence changes the consumers of the operations of the input tensors.
This function adds operations to the current session. To compute the error
using a particular device, such as a GPU, use the standard methods for
setting a device (e.g. using with sess.graph.device() or setting a device
function in the session constructor).
Args:
x: a tensor or list of tensors
x_shape: the dimensions of x as a tuple or an array of ints. If x is a list,
then this is the list of shapes.
y: a tensor
y_shape: the dimensions of y as a tuple or an array of ints.
x_init_value: (optional) a numpy array of the same shape as "x"
representing the initial value of x. If x is a list, this should be a list
of numpy arrays. If this is none, the function will pick a random tensor
as the initial value.
delta: (optional) the amount of perturbation.
init_targets: list of targets to run to initialize model params.
TODO(mrry): Remove this argument.
extra_feed_dict: dict that allows fixing specified tensor values
during the Jacobian calculation.
Returns:
The maximum error in between the two Jacobians.
"""
grad = compute_gradient(x, x_shape, y, y_shape, x_init_value, delta,
init_targets, extra_feed_dict=extra_feed_dict)
if isinstance(grad, tuple):
grad = [grad]
error = 0
for j_t, j_n in grad:
if j_t.size or j_n.size: # Handle zero size tensors correctly
error = np.maximum(error, np.fabs(j_t - j_n).max())
return error
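# A minimal usage sketch (assumes TF 1.x graph mode; values illustrative):
#
#   import tensorflow as tf
#   with tf.Session():
#     x = tf.constant([[1.0, 2.0]], dtype=tf.float64)
#     y = x * x
#     err = tf.test.compute_gradient_error(x, [1, 2], y, [1, 2])
#     assert err < 1e-8  # theoretical and numeric Jacobians agree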
|
jromang/clop
|
refs/heads/master
|
LaTeX/2008-06-02-CLOP/old-plots/plot_parameters.py
|
1
|
from qlr import *
def build_tikz(p, scale):
#########################################################################
"build tikz object"
#########################################################################
if p.GetDimensions() == 1:
tikz = CTikZOneD()
tikz.SetScale(5.5)
elif p.GetDimensions() == 2:
tikz = CTikZTwoD()
tikz.SetScale(2.3)
tikz.SetContourResolution(40)
if scale != 0:
tikz.SetScale(scale)
return tikz
def set_graphical_parameters(tikz):
#########################################################################
"set graphical parameters"
#########################################################################
my_dash = "dash pattern = on 12pt off 2pt"
tikz.SetStyle("Confidence", 0.10, 0.10, 0.80, "")
tikz.SetStyle("Posterior", 0.50, 1.00, 0.50, "")
tikz.SetStyle("MAP", 0.60, 0.10, 0.00, "thick")
tikz.SetStyle("Expected", 0.90, 0.00, 0.10, "thick," + my_dash)
tikz.SetStyle("True", 1.00, 0.00, 0.00, "thick,densely dotted")
tikz.SetStyle("Contour", 0.80, 0.80, 1.00, "")
# tikz.SetGrayscale(True)
#
# Accuracy parameters
#
tikz.SetCircleN(2)
tikz.SetSplineSamples(5000)
tikz.SetSplineD(0.0018)
def full_plot(exp, tikz, seed, expected, confidence, plot_true, plot_samples):
#########################################################################
"full plot"
#########################################################################
rnd = Random(seed)
tikz.Prolog()
tikz.BeginClip()
if exp.p.GetDimensions() == 2:
tikz.Contour(exp.p, 12)
if plot_samples:
tikz.Results(exp.res)
if exp.p.GetDimensions() == 1:
reg = exp.reg
tikz.Posterior(20, reg, rnd)
if confidence > 0:
tikz.Confidence(reg, confidence)
tikz.Confidence(reg, -confidence)
tikz.MAP(reg)
if expected > 0:
tikz.Expected(reg, expected, seed)
if plot_true:
tikz.True(exp.p)
tikz.EndClip()
if plot_samples:
tikz.Frame(exp.res.GetSamples())
else:
tikz.Frame(0)
tikz.Key()
tikz.Epilog()
def do_tikz(exp,
scale = 0,
seed = 0,
expected = 0,
confidence = 0,
plot_true = False,
plot_samples = True):
#########################################################################
"plot exp with TikZ"
#########################################################################
tikz = build_tikz(exp.p, scale)
set_graphical_parameters(tikz)
full_plot(exp, tikz, seed, expected, confidence, plot_true, plot_samples)
def tikz_problem(p, scale = 0):
#########################################################################
"plot 1D problem with TikZ"
#########################################################################
tikz = build_tikz(p, scale)
tikz.SetStyle("True", 1.00, 0.00, 0.00, "thick")
tikz.Prolog()
tikz.BeginClip()
tikz.True(p)
tikz.EndClip()
tikz.Frame(0)
tikz.Epilog()
|
fengbaicanhe/intellij-community
|
refs/heads/master
|
python/testData/completion/superClassAttributesNoCompletionInFunc.after.py
|
166
|
class Foo:
attribute = 1
class Boo(Foo):
def f(self):
attr<caret>
|
kmoocdev2/edx-platform
|
refs/heads/real_2019
|
openedx/core/djangoapps/zendesk_proxy/v1/__init__.py
|
12133432
| |
adedayo/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/localflavor/in_/__init__.py
|
12133432
| |
procangroup/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/embargo/migrations/__init__.py
|
12133432
| |
poiati/django
|
refs/heads/master
|
django/conf/locale/sq/__init__.py
|
12133432
| |
edry/edx-platform
|
refs/heads/master
|
lms/djangoapps/instructor_task/__init__.py
|
12133432
| |
marcydoty/geraldo
|
refs/heads/master
|
site/newsite/site-geraldo/django/contrib/auth/handlers/modpython.py
|
436
|
from mod_python import apache
import os
def authenhandler(req, **kwargs):
"""
Authentication handler that checks against Django's auth database.
"""
# mod_python fakes the environ, and thus doesn't process SetEnv. This fixes
# that so that the following import works
os.environ.update(req.subprocess_env)
# apache 2.2 requires a call to req.get_basic_auth_pw() before
# req.user and friends are available.
req.get_basic_auth_pw()
# check for PythonOptions
_str_to_bool = lambda s: s.lower() in ('1', 'true', 'on', 'yes')
options = req.get_options()
permission_name = options.get('DjangoPermissionName', None)
staff_only = _str_to_bool(options.get('DjangoRequireStaffStatus', "on"))
superuser_only = _str_to_bool(options.get('DjangoRequireSuperuserStatus', "off"))
settings_module = options.get('DJANGO_SETTINGS_MODULE', None)
if settings_module:
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
from django.contrib.auth.models import User
from django import db
db.reset_queries()
# check that the username is valid
kwargs = {'username': req.user, 'is_active': True}
if staff_only:
kwargs['is_staff'] = True
if superuser_only:
kwargs['is_superuser'] = True
try:
try:
user = User.objects.get(**kwargs)
except User.DoesNotExist:
return apache.HTTP_UNAUTHORIZED
# check the password and any permission given
if user.check_password(req.get_basic_auth_pw()):
if permission_name:
if user.has_perm(permission_name):
return apache.OK
else:
return apache.HTTP_UNAUTHORIZED
else:
return apache.OK
else:
return apache.HTTP_UNAUTHORIZED
finally:
db.connection.close()
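# Representative Apache configuration for this handler (illustrative; adjust
# the location, settings module and auth realm for your deployment):
#
#   <Location "/example/">
#       AuthType Basic
#       AuthName "example.com"
#       Require valid-user
#       SetEnv DJANGO_SETTINGS_MODULE mysite.settings
#       PythonAuthenHandler django.contrib.auth.handlers.modpython
#   </Location>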
|
kingosticks/mopidy
|
refs/heads/develop
|
tests/file/conftest.py
|
4
|
import pytest
@pytest.fixture
def file_config():
return {"file": {}}
@pytest.fixture
def file_library(file_config):
# Import library, thus scanner, thus gobject as late as possible to avoid
    # hard-to-track import errors during conftest setup.
from mopidy.file import library
return library.FileLibraryProvider(backend=None, config=file_config)
|
Stanford-Online/edx-platform
|
refs/heads/master
|
openedx/core/djangoapps/schedules/management/__init__.py
|
12133432
| |
manqala/erpnext
|
refs/heads/develop
|
erpnext/maintenance/report/maintenance_schedules/__init__.py
|
12133432
| |
AlexanderMatveenko/omim
|
refs/heads/master
|
3party/protobuf/examples/list_people.py
|
429
|
#! /usr/bin/python
# See README.txt for information and build instructions.
import addressbook_pb2
import sys
# Iterates through all people in the AddressBook and prints info about them.
def ListPeople(address_book):
for person in address_book.person:
print "Person ID:", person.id
print " Name:", person.name
if person.HasField('email'):
print " E-mail address:", person.email
for phone_number in person.phone:
if phone_number.type == addressbook_pb2.Person.MOBILE:
print " Mobile phone #:",
elif phone_number.type == addressbook_pb2.Person.HOME:
print " Home phone #:",
elif phone_number.type == addressbook_pb2.Person.WORK:
print " Work phone #:",
print phone_number.number
# Main procedure: Reads the entire address book from a file and prints all
# the information inside.
if len(sys.argv) != 2:
print "Usage:", sys.argv[0], "ADDRESS_BOOK_FILE"
sys.exit(-1)
address_book = addressbook_pb2.AddressBook()
# Read the existing address book.
f = open(sys.argv[1], "rb")
address_book.ParseFromString(f.read())
f.close()
ListPeople(address_book)
|
delph-in/pydmrs
|
refs/heads/master
|
examples/examples_dmrs.py
|
1
|
from pydmrs.components import Pred, GPred, RealPred, Sortinfo, EventSortinfo, InstanceSortinfo
from pydmrs.core import Node, Link, DictDmrs
def the():
dmrs = DictDmrs()
dmrs.add_node(Node(pred=RealPred('the', 'q'))) # node id set automatically
return dmrs
def the_cat():
dmrs = DictDmrs(surface='the cat')
dmrs.add_node(Node(nodeid=1, pred=RealPred('the', 'q'), cfrom=0, cto=3))
dmrs.add_node(Node(nodeid=2, pred=RealPred('cat', 'n', '1'), cfrom=4, cto=7,
sortinfo=InstanceSortinfo(pers='3', num='sg',
ind='+'))) # underspecified sortinfo
dmrs.add_link(Link(start=1, end=2, rargname='RSTR', post='H'))
return dmrs
def the_mouse():
dmrs = DictDmrs(surface='the mouse')
dmrs.add_node(Node(nodeid=1, pred=RealPred('the', 'q'), cfrom=0, cto=3))
dmrs.add_node(Node(nodeid=2, pred=RealPred('mouse', 'n', '1'), cfrom=4, cto=9,
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+')))
dmrs.add_link(Link(start=1, end=2, rargname='RSTR', post='H'))
return dmrs
def dog_cat():
dmrs = DictDmrs(surface='dog cat')
dmrs.add_node(Node(pred=RealPred('dog', 'n', '1'), cfrom=0, cto=3,
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+')))
dmrs.add_node(Node(pred=RealPred('cat', 'n', '1'), cfrom=4, cto=7,
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+')))
return dmrs
def the_dog_chases_the_cat():
return DictDmrs(
surface='the dog chases the cat',
nodes=[Node(nodeid=1, pred=RealPred('the', 'q'), cfrom=0, cto=3),
Node(nodeid=2, pred=RealPred('dog', 'n', '1'), cfrom=4, cto=7,
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+')),
Node(nodeid=3, pred=RealPred('chase', 'v', '1'), cfrom=8, cto=14,
sortinfo=EventSortinfo(sf='prop', tense='pres', mood='indicative')),
Node(nodeid=4, pred=RealPred('the', 'q'), cfrom=15, cto=18),
Node(nodeid=5, pred=RealPred('cat', 'n', '1'), cfrom=19, cto=22,
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+'))],
links=[Link(start=1, end=2, rargname='RSTR', post='H'),
Link(start=3, end=2, rargname='ARG1', post='NEQ'),
Link(start=3, end=5, rargname='ARG2', post='NEQ'),
Link(start=4, end=5, rargname='RSTR', post='H')],
index=3,
top=3)
def the_cat_chases_the_dog():
return DictDmrs(
surface='the cat chases the dog',
nodes=[Node(nodeid=1, pred=RealPred('the', 'q'), cfrom=0, cto=3),
Node(nodeid=2, pred=RealPred('cat', 'n', '1'), cfrom=4, cto=7,
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+')),
Node(nodeid=3, pred=RealPred('chase', 'v', '1'), cfrom=8, cto=14,
sortinfo=EventSortinfo(sf='prop', tense='pres', mood='indicative')),
Node(nodeid=4, pred=RealPred('the', 'q'), cfrom=15, cto=18),
Node(nodeid=5, pred=RealPred('dog', 'n', '1'), cfrom=19, cto=22,
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+'))],
links=[Link(start=1, end=2, rargname='RSTR', post='H'),
Link(start=3, end=2, rargname='ARG1', post='NEQ'),
Link(start=3, end=5, rargname='ARG2', post='NEQ'),
Link(start=4, end=5, rargname='RSTR', post='H')],
index=3,
top=3)
def the_dog_chases_the_mouse():
return DictDmrs(
nodes=[Node(nodeid=1, pred=RealPred('the', 'q')),
Node(nodeid=2, pred=RealPred('dog', 'n', '1'),
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+')),
Node(nodeid=3, pred=RealPred('chase', 'v', '1'),
sortinfo=EventSortinfo(sf='prop', tense='pres', mood='indicative')),
Node(nodeid=4, pred=RealPred('the', 'q')),
Node(nodeid=5, pred=RealPred('mouse', 'n', '1'),
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+'))],
links=[Link(start=1, end=2, rargname='RSTR', post='H'),
Link(start=3, end=2, rargname='ARG1', post='NEQ'),
Link(start=3, end=5, rargname='ARG2', post='NEQ'),
Link(start=4, end=5, rargname='RSTR', post='H')],
index=3,
top=3)
def the_dog_chases_the_cat_and_the_mouse():
return DictDmrs(
nodes=[Node(nodeid=1, pred=RealPred('the', 'q')),
Node(nodeid=2, pred=RealPred('dog', 'n', '1'),
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+')),
Node(nodeid=3, pred=RealPred('chase', 'v', '1'),
sortinfo=EventSortinfo(sf='prop', tense='pres', mood='indicative')),
Node(nodeid=4, pred=RealPred('the', 'q')),
Node(nodeid=5, pred=RealPred('cat', 'n', '1'),
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+')),
Node(nodeid=6, pred=GPred('udef_q')),
Node(nodeid=7, pred=RealPred('and', 'c'),
sortinfo=InstanceSortinfo(pers='3', num='pl')),
Node(nodeid=8, pred=RealPred('the', 'q')),
Node(nodeid=9, pred=RealPred('mouse', 'n', '1'),
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+'))],
links=[Link(start=1, end=2, rargname='RSTR', post='H'),
Link(start=3, end=2, rargname='ARG1', post='NEQ'),
Link(start=3, end=7, rargname='ARG2', post='NEQ'),
Link(start=4, end=5, rargname='RSTR', post='H'),
Link(start=6, end=7, rargname='RSTR', post='H'),
Link(start=7, end=5, rargname='L-INDEX', post='NEQ'),
Link(start=7, end=9, rargname='R-INDEX', post='NEQ'),
Link(start=8, end=9, rargname='RSTR', post='H')],
index=3,
top=3)
def the_dog_chases_the_cat_and_the_cat_chases_the_mouse():
return DictDmrs(
nodes=[Node(nodeid=1, pred=RealPred('the', 'q')),
Node(nodeid=2, pred=RealPred('dog', 'n', '1'),
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+')),
Node(nodeid=3, pred=RealPred('chase', 'v', '1'),
sortinfo=EventSortinfo(sf='prop', tense='pres', mood='indicative')),
Node(nodeid=4, pred=RealPred('the', 'q')),
Node(nodeid=5, pred=RealPred('cat', 'n', '1'),
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+')),
Node(nodeid=6, pred=RealPred('and', 'c'),
sortinfo=InstanceSortinfo(pers='3', num='pl')),
Node(nodeid=7, pred=RealPred('the', 'q')),
Node(nodeid=8, pred=RealPred('cat', 'n', '1'),
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+')),
Node(nodeid=9, pred=RealPred('chase', 'v', '1'),
sortinfo=EventSortinfo(sf='prop', tense='pres', mood='indicative')),
Node(nodeid=10, pred=RealPred('the', 'q')),
Node(nodeid=11, pred=RealPred('mouse', 'n', '1'),
sortinfo=InstanceSortinfo(pers='3', num='sg', ind='+'))],
links=[Link(start=1, end=2, rargname='RSTR', post='H'),
Link(start=3, end=2, rargname='ARG1', post='NEQ'),
Link(start=3, end=5, rargname='ARG2', post='NEQ'),
Link(start=4, end=5, rargname='RSTR', post='H'),
Link(start=6, end=3, rargname='L-INDEX', post='NEQ'),
Link(start=6, end=3, rargname='L-HNDL', post='H'),
Link(start=6, end=9, rargname='R-INDEX', post='NEQ'),
Link(start=6, end=9, rargname='R-HNDL', post='H'),
Link(start=7, end=8, rargname='RSTR', post='H'),
Link(start=9, end=8, rargname='ARG1', post='NEQ'),
Link(start=9, end=11, rargname='ARG2', post='NEQ'),
Link(start=10, end=11, rargname='RSTR', post='H')],
index=6,
top=6)
def predsort():
dmrs = DictDmrs()
dmrs.add_node(Node(pred=Pred(), sortinfo=Sortinfo())) # underspecified predicate and sortinfo
return dmrs
def noun():
dmrs = DictDmrs()
dmrs.add_node(
Node(pred=RealPred('?', 'n', 'unknown'), sortinfo=Sortinfo())) # underspecified noun and sortinfo
return dmrs
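if __name__ == '__main__':
    # Hedged demo (not part of the original module): build two of the example
    # graphs defined above and print their surface strings.
    for build in (the_cat, the_dog_chases_the_cat):
        print(build.__name__, build().surface)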
|
Thor77/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/lynda.py
|
7
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
compat_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
urlencode_postdata,
)
class LyndaBaseIE(InfoExtractor):
_SIGNIN_URL = 'https://www.lynda.com/signin'
_PASSWORD_URL = 'https://www.lynda.com/signin/password'
_USER_URL = 'https://www.lynda.com/signin/user'
_ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to provide lynda.com account credentials.'
_NETRC_MACHINE = 'lynda'
def _real_initialize(self):
self._login()
@staticmethod
def _check_error(json_string, key_or_keys):
keys = [key_or_keys] if isinstance(key_or_keys, compat_str) else key_or_keys
for key in keys:
error = json_string.get(key)
if error:
raise ExtractorError('Unable to login: %s' % error, expected=True)
def _login_step(self, form_html, fallback_action_url, extra_form_data, note, referrer_url):
action_url = self._search_regex(
r'<form[^>]+action=(["\'])(?P<url>.+?)\1', form_html,
'post url', default=fallback_action_url, group='url')
if not action_url.startswith('http'):
action_url = compat_urlparse.urljoin(self._SIGNIN_URL, action_url)
form_data = self._hidden_inputs(form_html)
form_data.update(extra_form_data)
try:
response = self._download_json(
action_url, None, note,
data=urlencode_postdata(form_data),
headers={
'Referer': referrer_url,
'X-Requested-With': 'XMLHttpRequest',
})
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 500:
response = self._parse_json(e.cause.read().decode('utf-8'), None)
self._check_error(response, ('email', 'password'))
raise
self._check_error(response, 'ErrorMessage')
return response, action_url
def _login(self):
username, password = self._get_login_info()
if username is None:
return
# Step 1: download signin page
signin_page = self._download_webpage(
self._SIGNIN_URL, None, 'Downloading signin page')
# Already logged in
if any(re.search(p, signin_page) for p in (
r'isLoggedIn\s*:\s*true', r'logout\.aspx', r'>Log out<')):
return
# Step 2: submit email
signin_form = self._search_regex(
r'(?s)(<form[^>]+data-form-name=["\']signin["\'][^>]*>.+?</form>)',
signin_page, 'signin form')
signin_page, signin_url = self._login_step(
signin_form, self._PASSWORD_URL, {'email': username},
'Submitting email', self._SIGNIN_URL)
# Step 3: submit password
password_form = signin_page['body']
self._login_step(
password_form, self._USER_URL, {'email': username, 'password': password},
'Submitting password', signin_url)
class LyndaIE(LyndaBaseIE):
IE_NAME = 'lynda'
IE_DESC = 'lynda.com videos'
_VALID_URL = r'https?://www\.lynda\.com/(?:[^/]+/[^/]+/\d+|player/embed)/(?P<id>\d+)'
_TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]'
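# e.g. matches bracketed timecodes such as '[00:00:05.12]' or '[00:00:05,12]'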
_TESTS = [{
'url': 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html',
# md5 is unstable
'info_dict': {
'id': '114408',
'ext': 'mp4',
'title': 'Using the exercise files',
'duration': 68
}
}, {
'url': 'https://www.lynda.com/player/embed/133770?tr=foo=1;bar=g;fizz=rt&fs=0',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
video = self._download_json(
'http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id,
video_id, 'Downloading video JSON')
if 'Status' in video:
raise ExtractorError(
'lynda returned error: %s' % video['Message'], expected=True)
if video.get('HasAccess') is False:
self.raise_login_required('Video %s is only available for members' % video_id)
video_id = compat_str(video.get('ID') or video_id)
duration = int_or_none(video.get('DurationInSeconds'))
title = video['Title']
formats = []
fmts = video.get('Formats')
if fmts:
formats.extend([{
'url': f['Url'],
'ext': f.get('Extension'),
'width': int_or_none(f.get('Width')),
'height': int_or_none(f.get('Height')),
'filesize': int_or_none(f.get('FileSize')),
'format_id': compat_str(f.get('Resolution')) if f.get('Resolution') else None,
} for f in fmts if f.get('Url')])
prioritized_streams = video.get('PrioritizedStreams')
if prioritized_streams:
for prioritized_stream_id, prioritized_stream in prioritized_streams.items():
formats.extend([{
'url': video_url,
'width': int_or_none(format_id),
'format_id': '%s-%s' % (prioritized_stream_id, format_id),
} for format_id, video_url in prioritized_stream.items()])
self._check_formats(formats, video_id)
self._sort_formats(formats)
subtitles = self.extract_subtitles(video_id)
return {
'id': video_id,
'title': title,
'duration': duration,
'subtitles': subtitles,
'formats': formats
}
def _fix_subtitles(self, subs):
srt = ''
seq_counter = 0
for pos in range(0, len(subs) - 1):
seq_current = subs[pos]
m_current = re.match(self._TIMECODE_REGEX, seq_current['Timecode'])
if m_current is None:
continue
seq_next = subs[pos + 1]
m_next = re.match(self._TIMECODE_REGEX, seq_next['Timecode'])
if m_next is None:
continue
appear_time = m_current.group('timecode')
disappear_time = m_next.group('timecode')
text = seq_current['Caption'].strip()
if text:
seq_counter += 1
srt += '%s\r\n%s --> %s\r\n%s\r\n\r\n' % (seq_counter, appear_time, disappear_time, text)
if srt:
return srt
def _get_subtitles(self, video_id):
url = 'http://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id
subs = self._download_json(url, None, False)
if subs:
return {'en': [{'ext': 'srt', 'data': self._fix_subtitles(subs)}]}
else:
return {}
class LyndaCourseIE(LyndaBaseIE):
IE_NAME = 'lynda:course'
IE_DESC = 'lynda.com online courses'
# A course link is identical to the welcome/introduction video link of the
# same course, so we recognize it as a course link
_VALID_URL = r'https?://(?:www|m)\.lynda\.com/(?P<coursepath>[^/]+/[^/]+/(?P<courseid>\d+))-\d\.html'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
course_path = mobj.group('coursepath')
course_id = mobj.group('courseid')
course = self._download_json(
'http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
course_id, 'Downloading course JSON')
if course.get('Status') == 'NotFound':
raise ExtractorError(
'Course %s does not exist' % course_id, expected=True)
unaccessible_videos = 0
entries = []
# Might want to extract videos right here from video['Formats'] as it seems
# 'Formats' is no longer provided by the single video API
for chapter in course['Chapters']:
for video in chapter.get('Videos', []):
if video.get('HasAccess') is False:
unaccessible_videos += 1
continue
video_id = video.get('ID')
if video_id:
entries.append({
'_type': 'url_transparent',
'url': 'http://www.lynda.com/%s/%s-4.html' % (course_path, video_id),
'ie_key': LyndaIE.ie_key(),
'chapter': chapter.get('Title'),
'chapter_number': int_or_none(chapter.get('ChapterIndex')),
'chapter_id': compat_str(chapter.get('ID')),
})
if unaccessible_videos > 0:
self._downloader.report_warning(
'%s videos are only available for members (or paid members) and will not be downloaded. '
% unaccessible_videos + self._ACCOUNT_CREDENTIALS_HINT)
course_title = course.get('Title')
course_description = course.get('Description')
return self.playlist_result(entries, course_id, course_title, course_description)
|
immanetize/supernova
|
refs/heads/master
|
tests/test_credentials.py
|
5
|
import keyring.backend
import pytest
import six
from supernova import credentials, utils
class TestKeyring(keyring.backend.KeyringBackend):
"""A test keyring which always outputs same password
"""
priority = 1
def set_password(self, servicename, username, password):
pass
def get_password(self, servicename, username):
return "password from TestKeyring"
def delete_password(self, servicename, username, password):
pass
class TestCredentials(object):
def test_get_user_password(self):
keyring.set_keyring(TestKeyring())
result = credentials.get_user_password('prod', 'prodpass', force=True)
assert result[0] == 'prod:prodpass'
if six.PY3:
assert result[1] == b'password from TestKeyring'
else:
assert result[1] == 'password from TestKeyring'
def test_get_user_password_failure(self, monkeypatch):
def mockreturn(path):
return False
monkeypatch.setattr(credentials, "password_get", mockreturn)
keyring.set_keyring(TestKeyring())
result = credentials.get_user_password('prod', 'prodpass', force=True)
assert not result
def test_reject_confirmation(self, monkeypatch):
def mockreturn(path):
return False
monkeypatch.setattr(utils, "confirm_credential_display", mockreturn)
keyring.set_keyring(TestKeyring())
result = credentials.get_user_password('prod', 'prodpass')
assert result is None
def test_password_get(self):
keyring.set_keyring(TestKeyring())
result = credentials.password_get('user')
if six.PY3:
assert result == b'password from TestKeyring'
else:
assert result == 'password from TestKeyring'
def test_password_get_failure(self, monkeypatch):
def mockreturn(self, username, password):
return None
monkeypatch.setattr(TestKeyring, "get_password", mockreturn)
with pytest.raises(LookupError) as excinfo:
credentials.password_get('global:something')
assert "Couldn't find a credential" in str(excinfo.value)
def test_password_set(self):
keyring.set_keyring(TestKeyring())
result = credentials.password_set('user', 'password')
assert result
def test_password_set_failure(self, monkeypatch):
def mockreturn(system, username, password):
return False
monkeypatch.setattr(keyring, "set_password", mockreturn)
keyring.set_keyring(TestKeyring())
result = credentials.password_set('user', 'password')
assert not result
def test_invalid_environment(self):
nova_env = "non-existent"
nova_creds = {'dfw': None}
with pytest.raises(KeyError) as excinfo:
credentials.prep_nova_creds(nova_env, nova_creds)
assert "was not found" in str(excinfo.value)
def test_uppercase_credentials(self):
nova_env = 'prod'
nova_creds = {
'prod': {
'key': 'value'
}
}
result = credentials.prep_nova_creds(nova_env, nova_creds)
assert result[0][0] == 'KEY'
def test_lowercase_credentials(self):
nova_env = 'prod'
nova_creds = {
'prod': {
'http_proxy': 'value'
}
}
result = credentials.prep_nova_creds(nova_env, nova_creds)
assert result[0][0] == 'http_proxy'
def test_retrieve_values_from_keyring(self):
nova_env = 'prod'
nova_creds = {
'prod': {
'OS_PASSWORD': 'USE_KEYRING'
}
}
result = credentials.prep_nova_creds(nova_env, nova_creds)
if six.PY3:
assert result[0][1] == b"password from TestKeyring"
else:
assert result[0][1] == "password from TestKeyring"
def test_retrieve_values_from_keyring_failure(self, monkeypatch):
def mockreturn(nova_env, param, value):
return ('username', False)
monkeypatch.setattr(credentials, "pull_env_credential", mockreturn)
nova_env = 'prod'
nova_creds = {
'prod': {
'OS_PASSWORD': 'USE_KEYRING'
}
}
with pytest.raises(LookupError) as excinfo:
credentials.prep_nova_creds(nova_env, nova_creds)
assert "No matching credentials" in str(excinfo.value)
def test_pull_env_credential_global(self):
keyring.set_keyring(TestKeyring())
result = credentials.pull_env_credential('prod',
'OS_PASSWORD',
'USE_KEYRING["prodpass"]'
)
assert isinstance(result, tuple)
assert result[0] == 'global:prodpass'
if six.PY3:
assert result[1] == b'password from TestKeyring'
else:
assert result[1] == 'password from TestKeyring'
def test_pull_env_credential_old_style(self):
keyring.set_keyring(TestKeyring())
result = credentials.pull_env_credential('prod',
'OS_PASSWORD',
'USE_KEYRING'
)
assert isinstance(result, tuple)
assert result[0] == 'prod:OS_PASSWORD'
if six.PY3:
assert result[1] == b'password from TestKeyring'
else:
assert result[1] == 'password from TestKeyring'
def test_set_user_password(self):
keyring.set_keyring(TestKeyring())
environment = "prod"
parameter = "prodpass"
password = "supersecurepassword"
result = credentials.set_user_password(environment, parameter,
password)
assert result
|
ktaneishi/deepchem
|
refs/heads/master
|
contrib/atomicconv/feat/atomicnet_coordinates.py
|
7
|
"""
Atomic coordinate featurizer.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Joseph Gomes"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import numpy as np
from rdkit import Chem
from deepchem.feat import Featurizer
from deepchem.feat import ComplexFeaturizer
from deepchem.utils import pad_array
def get_cells(coords, neighbor_cutoff):
"""Computes cells given molecular coordinates.
Parameters
----------
coords: np.array
Cartesian coordinates [Angstrom]
neighbor_cutoff: float
Threshold distance [Angstroms] for counting neighbors.
Returns
-------
x_bins: list
List contains tuples of x_cell boundaries
y_bins: list
List contains tuples of y_cell boundaries
z_bins: list
List contains tuples of z_cell boundaries
"""
x_max, x_min = np.amax(coords[:, 0]), np.amin(coords[:, 0])
y_max, y_min = np.amax(coords[:, 1]), np.amin(coords[:, 1])
z_max, z_min = np.amax(coords[:, 2]), np.amin(coords[:, 2])
# Compute cells for this molecule. O(constant)
x_bins, y_bins, z_bins = [], [], []
x_current, y_current, z_current = x_min, y_min, z_min
# min == max if the molecule is planar in some direction;
# we should still create a bin in that case
if not x_min == x_max:
while x_current < x_max:
x_bins.append((x_current, x_current + neighbor_cutoff))
x_current += neighbor_cutoff
else:
x_bins.append((x_current, x_current + neighbor_cutoff))
if not y_min == y_max:
while y_current < y_max:
y_bins.append((y_current, y_current + neighbor_cutoff))
y_current += neighbor_cutoff
else:
y_bins.append((y_current, y_current + neighbor_cutoff))
if not z_min == z_max:
while z_current < z_max:
z_bins.append((z_current, z_current + neighbor_cutoff))
z_current += neighbor_cutoff
else:
z_bins.append((z_current, z_current + neighbor_cutoff))
return x_bins, y_bins, z_bins
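# Hedged sanity check (illustrative values, not from the original source):
# with x-coordinates spanning [0.0, 2.5] and neighbor_cutoff=1.0, the loop
# above yields x_bins == [(0.0, 1.0), (1.0, 2.0), (2.0, 3.0)].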
def put_atoms_in_cells(coords, x_bins, y_bins, z_bins):
"""Place each atom into cells. O(N) runtime.
Parameters
----------
coords: np.ndarray
(N, 3) array where N is number of atoms
x_bins: list
List of (cell_start, cell_end) for x-coordinate
y_bins: list
List of (cell_start, cell_end) for y-coordinate
z_bins: list
List of (cell_start, cell_end) for z-coordinate
Returns
-------
cell_to_atoms: dict
Dict elements contain atom indices for cell
atom_to_cell: dict
Dict elements contain cell indices for atom
"""
N = coords.shape[0]
cell_to_atoms = {}
atom_to_cell = {}
for x_ind in range(len(x_bins)):
for y_ind in range(len(y_bins)):
for z_ind in range(len(z_bins)):
cell_to_atoms[(x_ind, y_ind, z_ind)] = []
for atom in range(N):
x_coord, y_coord, z_coord = coords[atom]
x_ind, y_ind, z_ind = None, None, None
for ind, (x_cell_min, x_cell_max) in enumerate(x_bins):
if x_coord >= x_cell_min and x_coord <= x_cell_max:
x_ind = ind
break
if x_ind is None:
raise ValueError("No x-cell found!")
for ind, (y_cell_min, y_cell_max) in enumerate(y_bins):
if y_coord >= y_cell_min and y_coord <= y_cell_max:
y_ind = ind
break
if y_ind is None:
raise ValueError("No y-cell found!")
for ind, (z_cell_min, z_cell_max) in enumerate(z_bins):
if z_coord >= z_cell_min and z_coord <= z_cell_max:
z_ind = ind
break
if z_ind is None:
raise ValueError("No z-cell found!")
cell_to_atoms[(x_ind, y_ind, z_ind)].append(atom)
atom_to_cell[atom] = (x_ind, y_ind, z_ind)
return cell_to_atoms, atom_to_cell
def compute_neighbor_cell_map(N_x, N_y, N_z):
"""Compute neighbors of cells in grid.
Parameters
----------
N_x: int
Number of grid cells in x-dimension.
N_y: int
Number of grid cells in y-dimension.
N_z: int
Number of grid cells in z-dimension.
Returns
-------
neighbor_cell_map: dict
Dict elements contain neighbor cell indices
"""
#TODO(JSG): Implement non-PBC version. For now this seems fine ..
neighbor_cell_map = {}
for x_ind in range(N_x):
for y_ind in range(N_y):
for z_ind in range(N_z):
neighbors = []
offsets = [-1, 0, +1]
# Note neighbors contains self!
for x_offset in offsets:
for y_offset in offsets:
for z_offset in offsets:
neighbors.append(((x_ind + x_offset) % N_x, (y_ind + y_offset) %
N_y, (z_ind + z_offset) % N_z))
neighbor_cell_map[(x_ind, y_ind, z_ind)] = neighbors
return neighbor_cell_map
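# Hedged illustration (grid size assumed): for a 3x3x3 grid the modulo
# arithmetic above wraps around the box, so cell (0, 0, 0) counts
# (2, 2, 2) among its 27 neighbors (which include the cell itself).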
def get_coords(mol):
"""Gets coordinates in Angstrom for RDKit mol.
Parameters
----------
mol: rdkit.Chem.rdchem.mol
Molecule
Returns
-------
coords: np.array
Cartesian coordinates [Angstrom]
"""
N = mol.GetNumAtoms()
coords = np.zeros((N, 3))
coords_raw = [mol.GetConformer(0).GetAtomPosition(i) for i in range(N)]
for atom in range(N):
coords[atom, 0] = coords_raw[atom].x
coords[atom, 1] = coords_raw[atom].y
coords[atom, 2] = coords_raw[atom].z
return coords
class NeighborListAtomicCoordinates(Featurizer):
"""
Adjacency List of neighbors in 3-space
Neighbors determined by user-defined distance cutoff [in Angstrom].
https://en.wikipedia.org/wiki/Cell_list
Ref: http://www.cs.cornell.edu/ron/references/1989/Calculations%20of%20a%20List%20of%20Neighbors%20in%20Molecular%20Dynamics%20Si.pdf
Example:
>>> n_atoms = 6
>>> n_neighbors = 6
>>> cutoff = 12.0
>>> boxsize = None
>>> input_file = "test.sdf"
>>> tasks = ["energy"]
>>> featurizers = NeighborListAtomicCoordinates(n_atoms, n_neighbors, cutoff, boxsize)
>>> featurizer = dc.data.SDFLoader(tasks, smiles_field="smiles", mol_field="mol",
...                                featurizer=featurizers)
>>> dataset = featurizer.featurize(input_file)
"""
def __init__(self,
max_num_atoms,
max_num_neighbors,
neighbor_cutoff,
boxsize=None):
"""Initialize NeighborListAtomicCoordinates featurizer.
Parameters
----------
max_num_atoms: int
Maximum number of atoms.
max_num_neighbors: int
Maximum number of neighbors per atom.
neighbor_cutoff: float
Threshold distance [Angstroms] for counting neighbors.
boxsize: float, optional (default None)
Size of periodic box. If None, no periodic boundary conditions.
"""
if boxsize is not None and boxsize < 2 * neighbor_cutoff:
raise ValueError("boxsize must be greater than 2*neighbor_cutoff")
self.max_num_atoms = max_num_atoms
self.max_num_neighbors = max_num_neighbors
self.neighbor_cutoff = neighbor_cutoff
self.boxsize = boxsize
self.dtype = object
def _featurize(self, mol):
"""Compute neighbor list.
Parameters
----------
mol: rdkit.Chem.rdchem.mol
Molecule
"""
N = mol.GetNumAtoms()
coords = get_coords(mol)
x_bins, y_bins, z_bins = get_cells(coords, self.neighbor_cutoff)
# Associate each atom with cell it belongs to. O(N)
cell_to_atoms, atom_to_cell = put_atoms_in_cells(coords, x_bins, y_bins,
z_bins)
# Associate each cell with its neighbor cells. Assumes periodic boundary
# conditions, so does wraparound. O(constant)
N_x, N_y, N_z = len(x_bins), len(y_bins), len(z_bins)
neighbor_cell_map = compute_neighbor_cell_map(N_x, N_y, N_z)
# For each atom, loop through all atoms in its cell and neighboring cells.
# Accept as neighbors only those within threshold. This computation should be
# O(Nm), where m is the number of atoms within a set of neighboring-cells.
neighbor_list = {}
if self.boxsize is not None:
for atom in range(N):
cell = atom_to_cell[atom]
neighbor_cells = neighbor_cell_map[cell]
neighbor_list[atom] = set()
for neighbor_cell in neighbor_cells:
atoms_in_cell = cell_to_atoms[neighbor_cell]
for neighbor_atom in atoms_in_cell:
if neighbor_atom == atom:
continue
dist = np.linalg.norm(coords[atom] - coords[neighbor_atom])
dist = dist - self.boxsize * np.round(dist / self.boxsize)
if dist < self.neighbor_cutoff:
neighbor_list[atom].add((neighbor_atom, dist))
# Sort neighbors by distance
closest_neighbors = sorted(
list(neighbor_list[atom]), key=lambda elt: elt[1])
closest_neighbors = [nbr for (nbr, dist) in closest_neighbors]
# Pick up to max_num_neighbors
closest_neighbors = closest_neighbors[:self.max_num_neighbors]
neighbor_list[atom] = closest_neighbors
else:
for atom in range(N):
cell = atom_to_cell[atom]
neighbor_cells = neighbor_cell_map[cell]
neighbor_list[atom] = set()
for neighbor_cell in neighbor_cells:
atoms_in_cell = cell_to_atoms[neighbor_cell]
for neighbor_atom in atoms_in_cell:
if neighbor_atom == atom:
continue
dist = np.linalg.norm(coords[atom] - coords[neighbor_atom])
if dist < self.neighbor_cutoff:
neighbor_list[atom].add((neighbor_atom, dist))
closest_neighbors = sorted(
list(neighbor_list[atom]), key=lambda elt: elt[1])
closest_neighbors = [nbr for (nbr, dist) in closest_neighbors]
closest_neighbors = closest_neighbors[:self.max_num_neighbors]
neighbor_list[atom] = closest_neighbors
Z = pad_array(
np.array([atom.GetAtomicNum()
for atom in mol.GetAtoms()]), self.max_num_atoms)
coords = pad_array(coords, (self.max_num_atoms, 3))
return (coords, neighbor_list, Z)
class ComplexNeighborListFragmentAtomicCoordinates(ComplexFeaturizer):
"""
Adjacency list of neighbors for protein-ligand complexes in 3-space.
Neighbors determined by user-defined distance cutoff.
Currently only compatible with pdb files.
Example:
>>> frag1_n_atoms = 3
>>> frag2_n_atoms = 3
>>> complex_n_atoms = 6
>>> n_neighbors = 6
>>> cutoff = 12.0
>>> boxsize = None
>>> featurizer = ComplexNeighborListFragmentAtomicCoordinates(frag1_n_atoms,
...     frag2_n_atoms, complex_n_atoms, n_neighbors, cutoff, boxsize)
>>> frag1 = "frag1.pdb"
>>> frag2 = "frag2.pdb"
>>> feature = featurizer._featurize_complex(str(frag1), str(frag2))
"""
def __init__(self,
frag1_num_atoms,
frag2_num_atoms,
complex_num_atoms,
max_num_neighbors,
neighbor_cutoff=12.0,
boxsize=None):
"""Initialize ComplexNeighborListFragmentAtomicCoordinates featurizer
Parameters
----------
frag1_num_atoms: int
Maximum number of atoms in frag1
frag2_num_atoms: int
Maximum number of atoms in frag2
complex_num_atoms: int
Maximum number of atoms in complex
max_num_neighbors: int
Maximum number of neighbors per atom
neighbor_cutoff: float
Threshold distance [Angstroms] for counting neighbors.
boxsize: float, optional (default None)
Size of periodic box. If None, no periodic boundary conditions.
"""
self.frag1_num_atoms = frag1_num_atoms
self.frag2_num_atoms = frag2_num_atoms
self.complex_num_atoms = complex_num_atoms
self.max_num_neighbors = max_num_neighbors
self.neighbor_cutoff = neighbor_cutoff
self.boxsize = boxsize
# Type of data created by this featurizer
self.dtype = object
self.frag1_featurizer = NeighborListAtomicCoordinates(
self.frag1_num_atoms, self.max_num_neighbors, self.neighbor_cutoff,
self.boxsize)
self.frag2_featurizer = NeighborListAtomicCoordinates(
self.frag2_num_atoms, self.max_num_neighbors, self.neighbor_cutoff,
self.boxsize)
self.complex_featurizer = NeighborListAtomicCoordinates(
self.complex_num_atoms, self.max_num_neighbors, self.neighbor_cutoff,
self.boxsize)
def _featurize_complex(self, frag1_pdb_file, frag2_pdb_file):
"""Featurize fragments and complex.
Parameters
----------
frag1_pdb_file: string
Location of frag1_pdb_file.
frag2_pdb_file: string
Location of frag2_pdb_file.
Returns
-------
retval: tuple
Tuple containing coordinates, neighbor list, and atomic number for
fragment 1, fragment 2, and complex
"""
try:
frag1_mol = Chem.MolFromPDBFile(
frag1_pdb_file, sanitize=False, removeHs=False)
frag2_mol = Chem.MolFromPDBFile(
frag2_pdb_file, sanitize=False, removeHs=False)
except:
frag1_mol = None
frag2_mol = None
if frag1_mol and frag2_mol:
frag1_coords, frag1_neighbor_list, frag1_z = self.frag1_featurizer._featurize(
frag1_mol)
frag2_coords, frag2_neighbor_list, frag2_z = self.frag2_featurizer._featurize(
frag2_mol)
complex_mol = Chem.rdmolops.CombineMols(frag1_mol, frag2_mol)
complex_coords, complex_neighbor_list, complex_z = self.complex_featurizer._featurize(
complex_mol)
return (frag1_coords, frag1_neighbor_list, frag1_z, frag2_coords,
frag2_neighbor_list, frag2_z, complex_coords,
complex_neighbor_list, complex_z)
else:
print("failed to featurize")
return (None, None, None, None, None, None, None, None, None)
|
silenci/neutron
|
refs/heads/master
|
neutron/db/rbac_db_models.py
|
20
|
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import sqlalchemy as sa
from sqlalchemy.orm import validates
from neutron.common import exceptions as n_exc
from neutron.db import model_base
class InvalidActionForType(n_exc.InvalidInput):
message = _("Invalid action '%(action)s' for object type "
"'%(object_type)s'. Valid actions: %(valid_actions)s")
class RBACColumns(model_base.HasId, model_base.HasTenant):
"""Mixin that object-specific RBAC tables should inherit.
All RBAC tables should inherit directly from this one because
the RBAC code uses the __subclasses__() method to discover the
RBAC types.
"""
# the target_tenant is the subject that the policy will affect. this may
# also be a wildcard '*' to indicate all tenants or it may be a role if
# neutron gets better integration with keystone
target_tenant = sa.Column(sa.String(255), nullable=False)
action = sa.Column(sa.String(255), nullable=False)
@abc.abstractproperty
def object_type(self):
# this determines the name that users will use in the API
# to reference the type. sub-classes should set their own
pass
__table_args__ = (
sa.UniqueConstraint('target_tenant', 'object_id', 'action'),
model_base.BASEV2.__table_args__
)
@validates('action')
def _validate_action(self, key, action):
if action not in self.get_valid_actions():
raise InvalidActionForType(
action=action, object_type=self.object_type,
valid_actions=self.get_valid_actions())
return action
@abc.abstractmethod
def get_valid_actions(self):
# object tables need to override this to return an iterable
# of the valid actions for RBAC entries
pass
def get_type_model_map():
return {table.object_type: table for table in RBACColumns.__subclasses__()}
class NetworkRBAC(RBACColumns, model_base.BASEV2):
"""RBAC table for networks."""
object_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
nullable=False)
object_type = 'network'
def get_valid_actions(self):
return ('access_as_shared',)
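# Hedged illustration (hypothetical model, not part of the original module):
# RBAC support for a new resource only requires another subclass of
# RBACColumns; get_type_model_map() then discovers it via __subclasses__().
#
#   class QosPolicyRBAC(RBACColumns, model_base.BASEV2):
#       """RBAC table for QoS policies."""
#       object_id = sa.Column(sa.String(36),
#                             sa.ForeignKey('qos_policies.id',
#                                           ondelete="CASCADE"),
#                             nullable=False)
#       object_type = 'qos_policy'
#
#       def get_valid_actions(self):
#           return ('access_as_shared',)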
|
ziir/lumbergh
|
refs/heads/master
|
vendor-local/lib/python/south/tests/db.py
|
12
|
import datetime
from south.db import db, generic
from django.db import connection, models, IntegrityError
from south.tests import unittest, skipUnless
# Create a list of error classes from the various database libraries
errors = []
try:
from psycopg2 import ProgrammingError
errors.append(ProgrammingError)
except ImportError:
pass
errors = tuple(errors)
try:
from south.db import mysql
except ImportError:
mysql = None
class TestOperations(unittest.TestCase):
"""
Tests if the various DB abstraction calls work.
Can only test a limited amount due to DB differences.
"""
def setUp(self):
db.debug = False
db.clear_deferred_sql()
db.start_transaction()
def tearDown(self):
db.rollback_transaction()
def test_create(self):
"""
Test creation of tables.
"""
cursor = connection.cursor()
# It needs to take at least 2 args
self.assertRaises(TypeError, db.create_table)
self.assertRaises(TypeError, db.create_table, "test1")
# Empty tables (i.e. with no columns) are not allowed, so create at least one column
db.create_table("test1", [('email_confirmed', models.BooleanField(default=False))])
# And should exist
cursor.execute("SELECT * FROM test1")
# Make sure we can't do the same query on a nonexistent table
try:
cursor.execute("SELECT * FROM nottheretest1")
except:
pass
else:
self.fail("Non-existent table could be selected!")
def test_delete(self):
"""
Test deletion of tables.
"""
cursor = connection.cursor()
db.create_table("test_deltable", [('email_confirmed', models.BooleanField(default=False))])
db.delete_table("test_deltable")
# Make sure it went
try:
cursor.execute("SELECT * FROM test_deltable")
except:
pass
else:
self.fail("Just-deleted table could be selected!")
def test_nonexistent_delete(self):
"""
Test deletion of nonexistent tables.
"""
try:
db.delete_table("test_nonexistdeltable")
except:
pass
else:
self.fail("Non-existent table could be deleted!")
def test_foreign_keys(self):
"""
Tests foreign key creation, especially uppercase (see #61)
"""
Test = db.mock_model(model_name='Test', db_table='test5a',
db_tablespace='', pk_field_name='ID',
pk_field_type=models.AutoField, pk_field_args=[])
db.create_table("test5a", [('ID', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True))])
db.create_table("test5b", [
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('UNIQUE', models.ForeignKey(Test)),
])
db.execute_deferred_sql()
@skipUnless(db.supports_foreign_keys, 'Foreign keys can only be deleted on '
'engines that support them.')
def test_recursive_foreign_key_delete(self):
"""
Test that recursive foreign keys are deleted correctly (see #1065)
"""
Test = db.mock_model(model_name='Test', db_table='test_rec_fk_del',
db_tablespace='', pk_field_name='id',
pk_field_type=models.AutoField, pk_field_args=[])
db.create_table('test_rec_fk_del', [
('id', models.AutoField(primary_key=True, auto_created=True)),
('fk', models.ForeignKey(Test)),
])
db.execute_deferred_sql()
db.delete_foreign_key('test_rec_fk_del', 'fk_id')
def test_rename(self):
"""
Test column renaming
"""
cursor = connection.cursor()
db.create_table("test_rn", [('spam', models.BooleanField(default=False))])
# Make sure we can select the column
cursor.execute("SELECT spam FROM test_rn")
# Rename it
db.rename_column("test_rn", "spam", "eggs")
cursor.execute("SELECT eggs FROM test_rn")
db.commit_transaction()
db.start_transaction()
try:
cursor.execute("SELECT spam FROM test_rn")
except:
pass
else:
self.fail("Just-renamed column could be selected!")
db.rollback_transaction()
db.delete_table("test_rn")
db.start_transaction()
def test_dry_rename(self):
"""
Test column renaming while --dry-run is turned on (should do nothing)
See ticket #65
"""
cursor = connection.cursor()
db.create_table("test_drn", [('spam', models.BooleanField(default=False))])
# Make sure we can select the column
cursor.execute("SELECT spam FROM test_drn")
# Rename it
db.dry_run = True
db.rename_column("test_drn", "spam", "eggs")
db.dry_run = False
cursor.execute("SELECT spam FROM test_drn")
db.commit_transaction()
db.start_transaction()
try:
cursor.execute("SELECT eggs FROM test_drn")
except:
pass
else:
self.fail("Dry-renamed new column could be selected!")
db.rollback_transaction()
db.delete_table("test_drn")
db.start_transaction()
def test_table_rename(self):
"""
Test table renaming
"""
cursor = connection.cursor()
db.create_table("testtr", [('spam', models.BooleanField(default=False))])
# Make sure we can select the column
cursor.execute("SELECT spam FROM testtr")
# Rename it
db.rename_table("testtr", "testtr2")
cursor.execute("SELECT spam FROM testtr2")
db.commit_transaction()
db.start_transaction()
try:
cursor.execute("SELECT spam FROM testtr")
except:
pass
else:
self.fail("Just-renamed column could be selected!")
db.rollback_transaction()
db.delete_table("testtr2")
db.start_transaction()
def test_percents_in_defaults(self):
"""
Test that % in a default gets escaped to %%.
"""
try:
db.create_table("testpind", [('cf', models.CharField(max_length=255, default="It should be 2%!"))])
except IndexError:
self.fail("% was not properly escaped in column SQL.")
db.delete_table("testpind")
def test_index(self):
"""
Test the index operations
"""
db.create_table("test3", [
('SELECT', models.BooleanField(default=False)),
('eggs', models.IntegerField(unique=True)),
])
db.execute_deferred_sql()
# Add an index on that column
db.create_index("test3", ["SELECT"])
# Add another index on two columns
db.create_index("test3", ["SELECT", "eggs"])
# Delete them both
db.delete_index("test3", ["SELECT"])
db.delete_index("test3", ["SELECT", "eggs"])
# Delete the unique index/constraint
if db.backend_name != "sqlite3":
db.delete_unique("test3", ["eggs"])
db.delete_table("test3")
def test_primary_key(self):
"""
Test the primary key operations
"""
db.create_table("test_pk", [
('id', models.IntegerField(primary_key=True)),
('new_pkey', models.IntegerField()),
('eggs', models.IntegerField(unique=True)),
])
db.execute_deferred_sql()
# Remove the default primary key, and make eggs it
db.delete_primary_key("test_pk")
db.create_primary_key("test_pk", "new_pkey")
# Try inserting a now-valid row pair
db.execute("INSERT INTO test_pk (id, new_pkey, eggs) VALUES (1, 2, 3)")
db.execute("INSERT INTO test_pk (id, new_pkey, eggs) VALUES (1, 3, 4)")
db.delete_table("test_pk")
def test_primary_key_implicit(self):
"""
Tests that changing primary key implicitly fails.
"""
db.create_table("test_pki", [
('id', models.IntegerField(primary_key=True)),
('new_pkey', models.IntegerField()),
('eggs', models.IntegerField(unique=True)),
])
db.execute_deferred_sql()
# Fiddle with alter_column to attempt to make it remove the primary key
db.alter_column("test_pki", "id", models.IntegerField())
db.alter_column("test_pki", "new_pkey", models.IntegerField(primary_key=True))
# Try inserting a should-be-valid row pair
db.execute("INSERT INTO test_pki (id, new_pkey, eggs) VALUES (1, 2, 3)")
db.execute("INSERT INTO test_pki (id, new_pkey, eggs) VALUES (2, 2, 4)")
db.delete_table("test_pki")
def test_add_columns(self):
"""
Test adding columns
"""
db.create_table("test_addc", [
('spam', models.BooleanField(default=False)),
('eggs', models.IntegerField()),
])
# Add a column
db.add_column("test_addc", "add1", models.IntegerField(default=3), keep_default=False)
# Add a FK with keep_default=False (#69)
User = db.mock_model(model_name='User', db_table='auth_user', db_tablespace='', pk_field_name='id', pk_field_type=models.AutoField, pk_field_args=[], pk_field_kwargs={})
# insert some data so we can test the default value of the added fkey
db.execute("INSERT INTO test_addc (spam, eggs, add1) VALUES (%s, 1, 2)", [False])
db.add_column("test_addc", "user", models.ForeignKey(User, null=True), keep_default=False)
db.execute_deferred_sql()
# try selecting from the user_id column to make sure it was actually created
val = db.execute("SELECT user_id FROM test_addc")[0][0]
self.assertEquals(val, None)
db.delete_column("test_addc", "add1")
db.delete_table("test_addc")
def test_add_nullbool_column(self):
"""
Test adding NullBoolean columns
"""
db.create_table("test_addnbc", [
('spam', models.BooleanField(default=False)),
('eggs', models.IntegerField()),
])
# Add a column
db.add_column("test_addnbc", "add1", models.NullBooleanField())
# Add a column with a default
db.add_column("test_addnbc", "add2", models.NullBooleanField(default=True))
# insert some data so we can test the default values of the added column
db.execute("INSERT INTO test_addnbc (spam, eggs) VALUES (%s, 1)", [False])
# try selecting from the new columns to make sure they were properly created
false, null, true = db.execute("SELECT spam,add1,add2 FROM test_addnbc")[0][0:3]
self.assertTrue(true)
self.assertEquals(null, None)
self.assertEquals(false, False)
db.delete_table("test_addnbc")
def test_alter_columns(self):
"""
Test altering columns
"""
db.create_table("test_alterc", [
('spam', models.BooleanField(default=False)),
('eggs', models.IntegerField()),
])
db.execute_deferred_sql()
# Change eggs to be a FloatField
db.alter_column("test_alterc", "eggs", models.FloatField())
db.execute_deferred_sql()
db.delete_table("test_alterc")
db.execute_deferred_sql()
def test_alter_char_default(self):
"""
Test altering column defaults with char fields
"""
db.create_table("test_altercd", [
('spam', models.CharField(max_length=30)),
('eggs', models.IntegerField()),
])
# Change spam default
db.alter_column("test_altercd", "spam", models.CharField(max_length=30, default="loof"))
def test_mysql_defaults(self):
"""
Test MySQL default handling for BLOB and TEXT.
"""
db.create_table("test_altermyd", [
('spam', models.BooleanField(default=False)),
('eggs', models.TextField()),
])
# Change eggs to be a FloatField
db.alter_column("test_altermyd", "eggs", models.TextField(null=True))
db.delete_table("test_altermyd")
def test_alter_column_postgres_multiword(self):
"""
Tests altering columns with multiple words in Postgres types (issue #125)
e.g. 'timestamp with time zone', look at django/db/backends/postgresql/creation.py
"""
db.create_table("test_multiword", [
('col_datetime', models.DateTimeField(null=True)),
('col_integer', models.PositiveIntegerField(null=True)),
('col_smallint', models.PositiveSmallIntegerField(null=True)),
('col_float', models.FloatField(null=True)),
])
# test if 'double precision' is preserved
db.alter_column('test_multiword', 'col_float', models.FloatField('float', null=True))
# test if 'CHECK ("%(column)s" >= 0)' is stripped
db.alter_column('test_multiword', 'col_integer', models.PositiveIntegerField(null=True))
db.alter_column('test_multiword', 'col_smallint', models.PositiveSmallIntegerField(null=True))
# test if 'with timezone' is preserved
if db.backend_name == "postgres":
db.execute("INSERT INTO test_multiword (col_datetime) VALUES ('2009-04-24 14:20:55+02')")
db.alter_column('test_multiword', 'col_datetime', models.DateTimeField(auto_now=True))
assert db.execute("SELECT col_datetime = '2009-04-24 14:20:55+02' FROM test_multiword")[0][0]
db.delete_table("test_multiword")
def test_alter_constraints(self):
"""
Tests that going from a PositiveIntegerField to an IntegerField drops
the constraint on the database.
"""
# Only applies to databases that support CHECK constraints
if not db.has_check_constraints:
return
# Make the test table
db.create_table("test_alterc", [
('num', models.PositiveIntegerField()),
])
db.execute_deferred_sql()
# Add in some test values
db.execute("INSERT INTO test_alterc (num) VALUES (1)")
db.execute("INSERT INTO test_alterc (num) VALUES (2)")
# Ensure that adding a negative number is bad
db.commit_transaction()
db.start_transaction()
try:
db.execute("INSERT INTO test_alterc (num) VALUES (-3)")
except:
db.rollback_transaction()
else:
self.fail("Could insert a negative integer into a PositiveIntegerField.")
# Alter it to a normal IntegerField
db.alter_column("test_alterc", "num", models.IntegerField())
db.execute_deferred_sql()
# It should now work
db.execute("INSERT INTO test_alterc (num) VALUES (-3)")
db.delete_table("test_alterc")
# We need to match up for tearDown
db.start_transaction()
def test_unique(self):
"""
Tests creating/deleting unique constraints.
"""
# SQLite backend doesn't support this yet.
if db.backend_name == "sqlite3":
return
db.create_table("test_unique2", [
('id', models.AutoField(primary_key=True)),
])
db.create_table("test_unique", [
('spam', models.BooleanField(default=False)),
('eggs', models.IntegerField()),
('ham', models.ForeignKey(db.mock_model('Unique2', 'test_unique2'))),
])
db.execute_deferred_sql()
# Add a constraint
db.create_unique("test_unique", ["spam"])
db.execute_deferred_sql()
# Shouldn't do anything during dry-run
db.dry_run = True
db.delete_unique("test_unique", ["spam"])
db.dry_run = False
db.delete_unique("test_unique", ["spam"])
db.create_unique("test_unique", ["spam"])
# Special preparations for Sql Server
if db.backend_name == "pyodbc":
db.execute("SET IDENTITY_INSERT test_unique2 ON;")
db.execute("INSERT INTO test_unique2 (id) VALUES (1)")
db.execute("INSERT INTO test_unique2 (id) VALUES (2)")
db.commit_transaction()
db.start_transaction()
# Test it works
TRUE = (True,)
FALSE = (False,)
db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 0, 1)", TRUE)
db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 1, 2)", FALSE)
try:
db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 2, 1)", FALSE)
except:
db.rollback_transaction()
else:
self.fail("Could insert non-unique item.")
# Drop that, add one only on eggs
db.delete_unique("test_unique", ["spam"])
db.execute("DELETE FROM test_unique")
db.create_unique("test_unique", ["eggs"])
db.start_transaction()
# Test similarly
db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 0, 1)", TRUE)
db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 1, 2)", FALSE)
try:
db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 1, 1)", TRUE)
except:
db.rollback_transaction()
else:
self.fail("Could insert non-unique item.")
# Drop those, test combined constraints
db.delete_unique("test_unique", ["eggs"])
db.execute("DELETE FROM test_unique")
db.create_unique("test_unique", ["spam", "eggs", "ham_id"])
db.start_transaction()
# Test similarly
db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 0, 1)", TRUE)
db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 1, 1)", FALSE)
try:
db.execute("INSERT INTO test_unique (spam, eggs, ham_id) VALUES (%s, 0, 1)", TRUE)
except:
db.rollback_transaction()
else:
self.fail("Could insert non-unique pair.")
db.delete_unique("test_unique", ["spam", "eggs", "ham_id"])
db.start_transaction()
def test_alter_unique(self):
"""
Tests that unique constraints are not affected when
altering columns (that's handled by create_/delete_unique)
"""
db.create_table("test_alter_unique", [
('spam', models.IntegerField()),
('eggs', models.IntegerField(unique=True)),
])
db.execute_deferred_sql()
# Make sure the unique constraint is created
db.execute('INSERT INTO test_alter_unique (spam, eggs) VALUES (0, 42)')
db.commit_transaction()
db.start_transaction()
try:
db.execute("INSERT INTO test_alter_unique (spam, eggs) VALUES (1, 42)")
except:
pass
else:
self.fail("Could insert the same integer twice into a unique field.")
db.rollback_transaction()
# Alter without unique=True (should not affect anything)
db.alter_column("test_alter_unique", "eggs", models.IntegerField())
# Insertion should still fail
db.start_transaction()
try:
db.execute("INSERT INTO test_alter_unique (spam, eggs) VALUES (1, 42)")
except:
pass
else:
self.fail("Could insert the same integer twice into a unique field after alter_column with unique=False.")
db.rollback_transaction()
# Delete the unique index/constraint
if db.backend_name != "sqlite3":
db.delete_unique("test_alter_unique", ["eggs"])
db.delete_table("test_alter_unique")
db.start_transaction()
def test_capitalised_constraints(self):
"""
Under PostgreSQL at least, capitalised constraints must be quoted.
"""
db.create_table("test_capconst", [
('SOMECOL', models.PositiveIntegerField(primary_key=True)),
])
# Alter it so it's not got the check constraint
db.alter_column("test_capconst", "SOMECOL", models.IntegerField())
def test_text_default(self):
"""
MySQL cannot have blank defaults on TEXT columns.
"""
db.create_table("test_textdef", [
('textcol', models.TextField(blank=True)),
])
def test_text_to_char(self):
"""
On Oracle, you can't simply ALTER TABLE MODIFY a textfield to a charfield
"""
value = "kawabanga"
db.create_table("test_text_to_char", [
('textcol', models.TextField()),
])
db.execute_deferred_sql()
db.execute("INSERT INTO test_text_to_char VALUES (%s)", [value])
db.alter_column("test_text_to_char", "textcol", models.CharField(max_length=100))
db.execute_deferred_sql()
after = db.execute("select * from test_text_to_char")[0][0]
self.assertEqual(value, after, "Change from text to char altered value [ %s != %s ]" % (`value`,`after`))
def test_char_to_text(self):
"""
On Oracle, you can't simply ALTER TABLE MODIFY a charfield to a textfield either
"""
value = "agnabawak"
db.create_table("test_char_to_text", [
('textcol', models.CharField(max_length=100)),
])
db.execute_deferred_sql()
db.execute("INSERT INTO test_char_to_text VALUES (%s)", [value])
db.alter_column("test_char_to_text", "textcol", models.TextField())
db.execute_deferred_sql()
after = db.execute("select * from test_char_to_text")[0][0]
after = unicode(after) # Oracle text fields return a sort of lazy string -- force evaluation
self.assertEqual(value, after, "Change from char to text altered value [ %s != %s ]" % (`value`,`after`))
def test_datetime_default(self):
"""
Test that defaults are created correctly for datetime columns
"""
end_of_world = datetime.datetime(2012, 12, 21, 0, 0, 1)
try:
from django.utils import timezone
except ImportError:
pass
else:
from django.conf import settings
if getattr(settings, 'USE_TZ', False):
end_of_world = end_of_world.replace(tzinfo=timezone.utc)
db.create_table("test_datetime_def", [
('col0', models.IntegerField(null=True)),
('col1', models.DateTimeField(default=end_of_world)),
('col2', models.DateTimeField(null=True)),
])
db.execute_deferred_sql()
db.alter_column("test_datetime_def", "col2", models.DateTimeField(default=end_of_world))
db.add_column("test_datetime_def", "col3", models.DateTimeField(default=end_of_world))
db.execute_deferred_sql()
# There should not be a default in the database for col1
db.commit_transaction()
db.start_transaction()
self.assertRaises(
IntegrityError,
db.execute, "insert into test_datetime_def (col0) values (null)"
)
db.rollback_transaction()
db.start_transaction()
# There should be for the others
db.execute("insert into test_datetime_def (col0, col1) values (null, %s)", [end_of_world])
ends = db.execute("select col1,col2,col3 from test_datetime_def")[0]
self.failUnlessEqual(len(ends), 3)
for e in ends:
self.failUnlessEqual(e, end_of_world)
def test_add_unique_fk(self):
"""
Test adding a ForeignKey with unique=True or a OneToOneField
"""
db.create_table("test_add_unique_fk", [
('spam', models.BooleanField(default=False))
])
db.add_column("test_add_unique_fk", "mock1", models.ForeignKey(db.mock_model('Mock', 'mock'), null=True, unique=True))
db.add_column("test_add_unique_fk", "mock2", models.OneToOneField(db.mock_model('Mock', 'mock'), null=True))
db.delete_table("test_add_unique_fk")
def test_column_constraint(self):
"""
Tests that the value constraint of PositiveIntegerField is enforced on
the database level.
"""
if not db.has_check_constraints:
return
db.create_table("test_column_constraint", [
('spam', models.PositiveIntegerField()),
])
db.execute_deferred_sql()
# Make sure we can't insert negative values
db.commit_transaction()
db.start_transaction()
try:
db.execute("INSERT INTO test_column_constraint VALUES (-42)")
except:
pass
else:
self.fail("Could insert a negative value into a PositiveIntegerField.")
db.rollback_transaction()
# remove constraint
db.alter_column("test_column_constraint", "spam", models.IntegerField())
db.execute_deferred_sql()
# make sure the insertion works now
db.execute('INSERT INTO test_column_constraint VALUES (-42)')
db.execute('DELETE FROM test_column_constraint')
# add it back again
db.alter_column("test_column_constraint", "spam", models.PositiveIntegerField())
db.execute_deferred_sql()
# it should fail again
db.start_transaction()
try:
db.execute("INSERT INTO test_column_constraint VALUES (-42)")
except:
pass
else:
self.fail("Could insert a negative value after changing an IntegerField to a PositiveIntegerField.")
db.rollback_transaction()
db.delete_table("test_column_constraint")
db.start_transaction()
def test_sql_defaults(self):
"""
Test that sql default value is correct for non-string field types.
Datetimes are handled in test_datetime_default.
"""
class CustomField(models.CharField):
__metaclass__ = models.SubfieldBase
description = 'CustomField'
def get_default(self):
if self.has_default():
if callable(self.default):
return self.default()
return self.default
return super(CustomField, self).get_default()
def get_prep_value(self, value):
if not value:
return value
return ','.join(map(str, value))
def to_python(self, value):
if not value or isinstance(value, list):
return value
return map(int, value.split(','))
false_value = db.has_booleans and 'False' or '0'
defaults = (
(models.CharField(default='sukasuka'), 'DEFAULT \'sukasuka'),
(models.BooleanField(default=False), 'DEFAULT %s' % false_value),
(models.IntegerField(default=42), 'DEFAULT 42'),
(CustomField(default=[2012, 2018, 2021, 2036]), 'DEFAULT \'2012,2018,2021,2036')
)
for field, sql_test_str in defaults:
sql = db.column_sql('fish', 'YAAAAAAZ', field)
if sql_test_str not in sql:
self.fail("default sql value was not properly generated for field %r.\nSql was %s" % (field, sql))
def test_make_added_foreign_key_not_null(self):
# Table for FK to target
User = db.mock_model(model_name='User', db_table='auth_user', db_tablespace='', pk_field_name='id', pk_field_type=models.AutoField, pk_field_args=[], pk_field_kwargs={})
# Table with no foreign key
db.create_table("test_fk", [
('eggs', models.IntegerField()),
])
db.execute_deferred_sql()
# Add foreign key
db.add_column("test_fk", 'foreik', models.ForeignKey(User, null=True),
keep_default = False)
db.execute_deferred_sql()
# Make the FK null
db.alter_column("test_fk", "foreik_id", models.ForeignKey(User))
db.execute_deferred_sql()
class TestCacheGeneric(unittest.TestCase):
base_ops_cls = generic.DatabaseOperations
def setUp(self):
class CacheOps(self.base_ops_cls):
def __init__(self):
self._constraint_cache = {}
self.cache_filled = 0
self.settings = {'NAME': 'db'}
def _fill_constraint_cache(self, db, table):
self.cache_filled += 1
self._constraint_cache.setdefault(db, {})
self._constraint_cache[db].setdefault(table, {})
@generic.invalidate_table_constraints
def clear_con(self, table):
pass
@generic.copy_column_constraints
def cp_column(self, table, column_old, column_new):
pass
@generic.delete_column_constraints
def rm_column(self, table, column):
pass
@generic.copy_column_constraints
@generic.delete_column_constraints
def mv_column(self, table, column_old, column_new):
pass
def _get_setting(self, attr):
return self.settings[attr]
self.CacheOps = CacheOps
def test_cache(self):
ops = self.CacheOps()
self.assertEqual(0, ops.cache_filled)
self.assertFalse(ops.lookup_constraint('db', 'table'))
self.assertEqual(1, ops.cache_filled)
self.assertFalse(ops.lookup_constraint('db', 'table'))
self.assertEqual(1, ops.cache_filled)
ops.clear_con('table')
self.assertEqual(1, ops.cache_filled)
self.assertFalse(ops.lookup_constraint('db', 'table'))
self.assertEqual(2, ops.cache_filled)
self.assertFalse(ops.lookup_constraint('db', 'table', 'column'))
self.assertEqual(2, ops.cache_filled)
cache = ops._constraint_cache
cache['db']['table']['column'] = 'constraint'
self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column'))
self.assertEqual([('column', 'constraint')], ops.lookup_constraint('db', 'table'))
self.assertEqual(2, ops.cache_filled)
# invalidate_table_constraints
ops.clear_con('new_table')
self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column'))
self.assertEqual(2, ops.cache_filled)
self.assertFalse(ops.lookup_constraint('db', 'new_table'))
self.assertEqual(3, ops.cache_filled)
# delete_column_constraints
cache['db']['table']['column'] = 'constraint'
self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column'))
ops.rm_column('table', 'column')
self.assertEqual([], ops.lookup_constraint('db', 'table', 'column'))
self.assertEqual([], ops.lookup_constraint('db', 'table', 'noexist_column'))
# copy_column_constraints
cache['db']['table']['column'] = 'constraint'
self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column'))
ops.cp_column('table', 'column', 'column_new')
self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column_new'))
self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column'))
# copy + delete
cache['db']['table']['column'] = 'constraint'
self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column'))
ops.mv_column('table', 'column', 'column_new')
self.assertEqual('constraint', ops.lookup_constraint('db', 'table', 'column_new'))
self.assertEqual([], ops.lookup_constraint('db', 'table', 'column'))
def test_valid(self):
ops = self.CacheOps()
# none of these should vivify a table into a valid state
self.assertFalse(ops._is_valid_cache('db', 'table'))
self.assertFalse(ops._is_valid_cache('db', 'table'))
ops.clear_con('table')
self.assertFalse(ops._is_valid_cache('db', 'table'))
ops.rm_column('table', 'column')
self.assertFalse(ops._is_valid_cache('db', 'table'))
# these should change the cache state
ops.lookup_constraint('db', 'table')
self.assertTrue(ops._is_valid_cache('db', 'table'))
ops.lookup_constraint('db', 'table', 'column')
self.assertTrue(ops._is_valid_cache('db', 'table'))
ops.clear_con('table')
self.assertFalse(ops._is_valid_cache('db', 'table'))
def test_valid_implementation(self):
# generic fills the cache on a per-table basis
ops = self.CacheOps()
self.assertFalse(ops._is_valid_cache('db', 'table'))
self.assertFalse(ops._is_valid_cache('db', 'other_table'))
ops.lookup_constraint('db', 'table')
self.assertTrue(ops._is_valid_cache('db', 'table'))
self.assertFalse(ops._is_valid_cache('db', 'other_table'))
ops.lookup_constraint('db', 'other_table')
self.assertTrue(ops._is_valid_cache('db', 'table'))
self.assertTrue(ops._is_valid_cache('db', 'other_table'))
ops.clear_con('table')
self.assertFalse(ops._is_valid_cache('db', 'table'))
self.assertTrue(ops._is_valid_cache('db', 'other_table'))
if mysql:
class TestCacheMysql(TestCacheGeneric):
base_ops_cls = mysql.DatabaseOperations
def test_valid_implementation(self):
# mysql fills the cache on a per-db basis
ops = self.CacheOps()
self.assertFalse(ops._is_valid_cache('db', 'table'))
self.assertFalse(ops._is_valid_cache('db', 'other_table'))
ops.lookup_constraint('db', 'table')
self.assertTrue(ops._is_valid_cache('db', 'table'))
self.assertTrue(ops._is_valid_cache('db', 'other_table'))
ops.lookup_constraint('db', 'other_table')
self.assertTrue(ops._is_valid_cache('db', 'table'))
self.assertTrue(ops._is_valid_cache('db', 'other_table'))
ops.clear_con('table')
self.assertFalse(ops._is_valid_cache('db', 'table'))
self.assertTrue(ops._is_valid_cache('db', 'other_table'))
|
ltyscu/ReducingWastedEvaluationsCGP
|
refs/heads/master
|
plotter.py
|
2
|
'''
Takes file names from the output/ folder and parses the information into
readable values and produces a graph. Use this module as an executable to
process all information for a single problem, such as:
python plotter.py output/multiply*
Do not mix problems in a single run. The graph will be saved to a .eps file
named after the problem used.
NOTE: You CANNOT use pypy for this as pylab is currently unsupported. Use
python 2.7 instead.
'''
from pylab import show, loglog, legend, savefig, xlabel, ylabel, nan
import json
import sys
from os import path
from collections import defaultdict
from main import combine_results
from util import wilcoxon_signed_rank, linecycler, colorcycler
# Dictionary converter from original name to name used in paper
pretty_name = {"normal": "Normal",
"single": "Single",
"skip": "Skip",
"accumulate": "Accumulate"}
# Specifies what order lines should appear in graphs
order = {'normal': 1,
'skip': 2,
'accumulate': 3,
'single': 4}
if __name__ == '__main__':
# Run through all of the files gathering different seeds into lists
groupings = defaultdict(list)
filecount = 0
for filename in sys.argv[1:]:
base = path.basename(filename)
try:
problem, nodes, rate, version, _ = base.split('_')
with open(filename, 'r') as f:
data = json.load(f)
groupings[problem, int(nodes),
float(rate), version].append(data[1])
filecount += 1
except ValueError:
print filename, "FAILED"
print 'Files Successfully Loaded', filecount
    # Find line information and best configurations
lines = defaultdict(list)
rates = set()
bests = defaultdict(list)
for key, results in groupings.iteritems():
problem, nodes, rate, version = key
if version != 'single':
rates.add(rate)
combined = combine_results(results)
toplot = nan
normal = nan
# Only gather data if median is less than the maximum
if combined['evals'][0] < 10000000:
toplot = combined['evals'][0]
if combined['normal'][0] < 10000000:
normal = combined['normal'][0]
lines[version].append((rate, toplot))
# Only include in bests if fully successful
if combined['success'][0] == 1:
bests[version].append((toplot, rate, combined, results))
if version == 'skip':
lines['normal'].append((rate, normal))
# Ensure that normal was fully successful
if max([result['normal'] for result in results]) < 10000000:
bests['normal'].append((normal, rate, combined, results))
# Expand Single across all rates used
try:
lines['single'] = [(rate, lines['single'][0][1])
for rate in sorted(rates)]
except IndexError:
pass
# Plot the lines using the 'order' order
for version, line in sorted(lines.iteritems(), key=lambda X: order[X[0]]):
try:
X, Y = zip(*sorted(line))
except ValueError:
print version, line
continue
loglog(X, Y, label=pretty_name[version], linestyle=next(linecycler),
linewidth=2.5, color=next(colorcycler))
legend(loc='best')
xlabel("Mutation Rate")
ylabel("Median Evaluations until Success")
statify = {}
print '\tBests'
print 'version, mutation rate, (evals, deviation),',
print 'genes not including output'
for version, data in bests.iteritems():
score, rate, combined, results = min(datum for datum
in data if datum[0] is not nan)
pretty = pretty_name[version]
genes = combined['phenotype'][0] * 3
if version != 'normal':
print pretty, rate, combined['evals'], genes
statify[version] = [result['evals'] for result in results]
else:
print pretty, rate, combined['normal'], genes
statify['normal'] = [result['normal'] for result in results]
print "\nStatistical Tests"
for version, data in statify.iteritems():
print "%s with Normal" % pretty_name[version],
print wilcoxon_signed_rank(statify['normal'], data)
savefig(problem + ".eps", dpi=300)
show()
|
Spiderlover/Toontown
|
refs/heads/master
|
toontown/suit/DistributedGridGoon.py
|
5
|
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from toontown.battle.BattleProps import *
from direct.directnotify import DirectNotifyGlobal
import DistributedGoon
from toontown.toonbase import ToontownGlobals
from toontown.coghq import MovingPlatform
class DistributedGridGoon(DistributedGoon.DistributedGoon):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedGoon')
def __init__(self, cr, type = 'sg'):
try:
self.DistributedGridGoon_initialized
        except AttributeError:
self.DistributedGridGoon_initialized = 1
DistributedGoon.DistributedGoon.__init__(self, cr, type)
def generate(self):
DistributedGoon.DistributedGoon.generate(self)
self.ignore(self.uniqueName('wallHit'))
self.mazeWalkTrack = None
return
def delete(self):
if self.mazeWalkTrack:
self.mazeWalkTrack.pause()
del self.mazeWalkTrack
DistributedGoon.DistributedGoon.delete(self)
def setH(self, h):
self.h = h
def setPathPts(self, xi, yi, zi, xf, yf, zf):
self.notify.debug('setPathPts')
if self.mazeWalkTrack:
self.mazeWalkTrack.pause()
del self.mazeWalkTrack
self.mazeWalkTrack = None
curPos = Point3(xi, yi, zi)
nextPos = Point3(xf, yf, zf)
distance = Vec3(curPos - nextPos).length()
duration = distance / self.velocity
self.mazeWalkTrack = Sequence(Func(self.headsUp, nextPos[0], nextPos[1], nextPos[2]), LerpPosInterval(self, duration=duration, pos=nextPos, startPos=curPos), name=self.uniqueName('mazeWalkTrack'))
self.mazeWalkTrack.start()
return
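    # For illustration: a 12-unit path with self.velocity == 4 yields
    # duration == 3.0, i.e. the goon faces the target and lerps there over 3s.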
def enterWalk(self, avId = None, ts = 0):
pass
def exitWalk(self):
pass
|
sajuptpm/neutron-ipam
|
refs/heads/stable/icehouse
|
neutron/services/loadbalancer/agent/agent_manager.py
|
4
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
from oslo.config import cfg
from neutron.agent import rpc as agent_rpc
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import topics
from neutron import context
from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import periodic_task
from neutron.plugins.common import constants
from neutron.services.loadbalancer.agent import agent_api
LOG = logging.getLogger(__name__)
OPTS = [
cfg.MultiStrOpt(
'device_driver',
default=['neutron.services.loadbalancer.drivers'
'.haproxy.namespace_driver.HaproxyNSDriver'],
help=_('Drivers used to manage loadbalancing devices'),
),
]
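# Illustrative agent configuration for the MultiStrOpt above (the second
# driver path is hypothetical; repeated keys accumulate into a list):
#
#   [DEFAULT]
#   device_driver = neutron.services.loadbalancer.drivers.haproxy.namespace_driver.HaproxyNSDriver
#   device_driver = example.lbaas.drivers.OtherNSDriver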
class DeviceNotFoundOnAgent(n_exc.NotFound):
msg = _('Unknown device with pool_id %(pool_id)s')
class LbaasAgentManager(periodic_task.PeriodicTasks):
RPC_API_VERSION = '2.0'
# history
# 1.0 Initial version
# 1.1 Support agent_updated call
# 2.0 Generic API for agent based drivers
# - modify/reload/destroy_pool methods were removed;
# - added methods to handle create/update/delete for every lbaas
# object individually;
def __init__(self, conf):
self.conf = conf
self.context = context.get_admin_context_without_session()
self.plugin_rpc = agent_api.LbaasAgentApi(
topics.LOADBALANCER_PLUGIN,
self.context,
self.conf.host
)
self._load_drivers()
self.agent_state = {
'binary': 'neutron-lbaas-agent',
'host': conf.host,
'topic': topics.LOADBALANCER_AGENT,
'configurations': {'device_drivers': self.device_drivers.keys()},
'agent_type': n_const.AGENT_TYPE_LOADBALANCER,
'start_flag': True}
self.admin_state_up = True
self._setup_state_rpc()
self.needs_resync = False
# pool_id->device_driver_name mapping used to store known instances
self.instance_mapping = {}
def _load_drivers(self):
self.device_drivers = {}
for driver in self.conf.device_driver:
try:
driver_inst = importutils.import_object(
driver,
self.conf,
self.plugin_rpc
)
except ImportError:
msg = _('Error importing loadbalancer device driver: %s')
raise SystemExit(msg % driver)
driver_name = driver_inst.get_name()
if driver_name not in self.device_drivers:
self.device_drivers[driver_name] = driver_inst
else:
msg = _('Multiple device drivers with the same name found: %s')
raise SystemExit(msg % driver_name)
def _setup_state_rpc(self):
self.state_rpc = agent_rpc.PluginReportStateAPI(
topics.LOADBALANCER_PLUGIN)
report_interval = self.conf.AGENT.report_interval
if report_interval:
heartbeat = loopingcall.FixedIntervalLoopingCall(
self._report_state)
heartbeat.start(interval=report_interval)
def _report_state(self):
try:
instance_count = len(self.instance_mapping)
self.agent_state['configurations']['instances'] = instance_count
self.state_rpc.report_state(self.context,
self.agent_state)
self.agent_state.pop('start_flag', None)
except Exception:
LOG.exception(_("Failed reporting state!"))
def initialize_service_hook(self, started_by):
self.sync_state()
@periodic_task.periodic_task
def periodic_resync(self, context):
if self.needs_resync:
self.needs_resync = False
self.sync_state()
@periodic_task.periodic_task(spacing=6)
def collect_stats(self, context):
for pool_id, driver_name in self.instance_mapping.items():
driver = self.device_drivers[driver_name]
try:
stats = driver.get_stats(pool_id)
if stats:
self.plugin_rpc.update_pool_stats(pool_id, stats)
except Exception:
LOG.exception(_('Error updating statistics on pool %s'),
pool_id)
self.needs_resync = True
def sync_state(self):
known_instances = set(self.instance_mapping.keys())
try:
ready_instances = set(self.plugin_rpc.get_ready_devices())
for deleted_id in known_instances - ready_instances:
self._destroy_pool(deleted_id)
for pool_id in ready_instances:
self._reload_pool(pool_id)
except Exception:
LOG.exception(_('Unable to retrieve ready devices'))
self.needs_resync = True
self.remove_orphans()
def _get_driver(self, pool_id):
if pool_id not in self.instance_mapping:
raise DeviceNotFoundOnAgent(pool_id=pool_id)
driver_name = self.instance_mapping[pool_id]
return self.device_drivers[driver_name]
def _reload_pool(self, pool_id):
try:
logical_config = self.plugin_rpc.get_logical_device(pool_id)
driver_name = logical_config['driver']
if driver_name not in self.device_drivers:
LOG.error(_('No device driver '
'on agent: %s.'), driver_name)
self.plugin_rpc.update_status(
'pool', pool_id, constants.ERROR)
return
self.device_drivers[driver_name].deploy_instance(logical_config)
self.instance_mapping[pool_id] = driver_name
self.plugin_rpc.pool_deployed(pool_id)
except Exception:
LOG.exception(_('Unable to deploy instance for pool: %s'), pool_id)
self.needs_resync = True
def _destroy_pool(self, pool_id):
driver = self._get_driver(pool_id)
try:
driver.undeploy_instance(pool_id)
del self.instance_mapping[pool_id]
self.plugin_rpc.pool_destroyed(pool_id)
except Exception:
LOG.exception(_('Unable to destroy device for pool: %s'), pool_id)
self.needs_resync = True
def remove_orphans(self):
for driver_name in self.device_drivers:
pool_ids = [pool_id for pool_id in self.instance_mapping
if self.instance_mapping[pool_id] == driver_name]
try:
self.device_drivers[driver_name].remove_orphans(pool_ids)
except NotImplementedError:
pass # Not all drivers will support this
def _handle_failed_driver_call(self, operation, obj_type, obj_id, driver):
LOG.exception(_('%(operation)s %(obj)s %(id)s failed on device driver '
'%(driver)s'),
{'operation': operation.capitalize(), 'obj': obj_type,
'id': obj_id, 'driver': driver})
self.plugin_rpc.update_status(obj_type, obj_id, constants.ERROR)
def create_vip(self, context, vip):
driver = self._get_driver(vip['pool_id'])
try:
driver.create_vip(vip)
except Exception:
self._handle_failed_driver_call('create', 'vip', vip['id'],
driver.get_name())
else:
self.plugin_rpc.update_status('vip', vip['id'], constants.ACTIVE)
def update_vip(self, context, old_vip, vip):
driver = self._get_driver(vip['pool_id'])
try:
driver.update_vip(old_vip, vip)
except Exception:
self._handle_failed_driver_call('update', 'vip', vip['id'],
driver.get_name())
else:
self.plugin_rpc.update_status('vip', vip['id'], constants.ACTIVE)
def delete_vip(self, context, vip):
driver = self._get_driver(vip['pool_id'])
driver.delete_vip(vip)
def create_pool(self, context, pool, driver_name):
if driver_name not in self.device_drivers:
LOG.error(_('No device driver on agent: %s.'), driver_name)
self.plugin_rpc.update_status('pool', pool['id'], constants.ERROR)
return
driver = self.device_drivers[driver_name]
try:
driver.create_pool(pool)
except Exception:
self._handle_failed_driver_call('create', 'pool', pool['id'],
driver.get_name())
else:
self.instance_mapping[pool['id']] = driver_name
self.plugin_rpc.update_status('pool', pool['id'], constants.ACTIVE)
def update_pool(self, context, old_pool, pool):
driver = self._get_driver(pool['id'])
try:
driver.update_pool(old_pool, pool)
except Exception:
self._handle_failed_driver_call('update', 'pool', pool['id'],
driver.get_name())
else:
self.plugin_rpc.update_status('pool', pool['id'], constants.ACTIVE)
def delete_pool(self, context, pool):
driver = self._get_driver(pool['id'])
driver.delete_pool(pool)
del self.instance_mapping[pool['id']]
def create_member(self, context, member):
driver = self._get_driver(member['pool_id'])
try:
driver.create_member(member)
except Exception:
self._handle_failed_driver_call('create', 'member', member['id'],
driver.get_name())
else:
self.plugin_rpc.update_status('member', member['id'],
constants.ACTIVE)
def update_member(self, context, old_member, member):
driver = self._get_driver(member['pool_id'])
try:
driver.update_member(old_member, member)
except Exception:
self._handle_failed_driver_call('update', 'member', member['id'],
driver.get_name())
else:
self.plugin_rpc.update_status('member', member['id'],
constants.ACTIVE)
def delete_member(self, context, member):
driver = self._get_driver(member['pool_id'])
driver.delete_member(member)
def create_pool_health_monitor(self, context, health_monitor, pool_id):
driver = self._get_driver(pool_id)
assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']}
try:
driver.create_pool_health_monitor(health_monitor, pool_id)
except Exception:
self._handle_failed_driver_call(
'create', 'health_monitor', assoc_id, driver.get_name())
else:
self.plugin_rpc.update_status(
'health_monitor', assoc_id, constants.ACTIVE)
def update_pool_health_monitor(self, context, old_health_monitor,
health_monitor, pool_id):
driver = self._get_driver(pool_id)
assoc_id = {'pool_id': pool_id, 'monitor_id': health_monitor['id']}
try:
driver.update_pool_health_monitor(old_health_monitor,
health_monitor,
pool_id)
except Exception:
self._handle_failed_driver_call(
'update', 'health_monitor', assoc_id, driver.get_name())
else:
self.plugin_rpc.update_status(
'health_monitor', assoc_id, constants.ACTIVE)
def delete_pool_health_monitor(self, context, health_monitor, pool_id):
driver = self._get_driver(pool_id)
driver.delete_pool_health_monitor(health_monitor, pool_id)
def agent_updated(self, context, payload):
"""Handle the agent_updated notification event."""
if payload['admin_state_up'] != self.admin_state_up:
self.admin_state_up = payload['admin_state_up']
if self.admin_state_up:
self.needs_resync = True
else:
for pool_id in self.instance_mapping.keys():
LOG.info(_("Destroying pool %s due to agent disabling"),
pool_id)
self._destroy_pool(pool_id)
LOG.info(_("Agent_updated by server side %s!"), payload)
|
hsoft/pdfmasher
|
refs/heads/master
|
hscommon/tests/__init__.py
|
12133432
| |
noironetworks/python-neutronclient
|
refs/heads/master
|
neutronclient/osc/__init__.py
|
12133432
| |
lixt/lily2-gem5
|
refs/heads/master
|
ext/ply/test/lex_module_import.py
|
174
|
# -----------------------------------------------------------------------------
# lex_module_import.py
#
# A lexer defined in a module, but built in lex_module.py
# -----------------------------------------------------------------------------
tokens = (
'NAME','NUMBER',
'PLUS','MINUS','TIMES','DIVIDE','EQUALS',
'LPAREN','RPAREN',
)
# Tokens
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_EQUALS = r'='
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
def t_NUMBER(t):
r'\d+'
try:
t.value = int(t.value)
except ValueError:
print("Integer value too large %s" % t.value)
t.value = 0
return t
t_ignore = " \t"
def t_newline(t):
r'\n+'
t.lineno += t.value.count("\n")
def t_error(t):
print("Illegal character '%s'" % t.value[0])
t.lexer.skip(1)
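# Illustrative build step (a sketch; per the header this module only defines
# the rules and lex_module.py performs the actual build):
#
#   import ply.lex as lex
#   import lex_module_import
#   lexer = lex.lex(module=lex_module_import)
#   lexer.input("x = 3 + 4")
#   for tok in lexer:
#       print(tok)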
|
elmerdpadilla/iv
|
refs/heads/8.0
|
addons/l10n_multilang/account.py
|
348
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
#in this file, we mostly add the tag translate=True on existing fields that we now want to be translated
class account_account_template(osv.osv):
_inherit = 'account.account.template'
_columns = {
'name': fields.char('Name', required=True, select=True, translate=True),
}
class account_account(osv.osv):
_inherit = 'account.account'
_columns = {
'name': fields.char('Name', required=True, select=True, translate=True),
}
class account_tax(osv.osv):
_inherit = 'account.tax'
_columns = {
'name': fields.char('Tax Name', required=True, select=True, translate=True),
}
class account_tax_template(osv.osv):
_inherit = 'account.tax.template'
_columns = {
'name': fields.char('Tax Name', required=True, select=True, translate=True),
}
class account_tax_code_template(osv.osv):
_inherit = 'account.tax.code.template'
_columns = {
'name': fields.char('Tax Case Name', required=True, translate=True),
}
class account_chart_template(osv.osv):
_inherit = 'account.chart.template'
_columns={
'name': fields.char('Name', required=True, translate=True),
'spoken_languages': fields.char('Spoken Languages', help="State here the languages for which the translations of templates could be loaded at the time of installation of this localization module and copied in the final object when generating them from templates. You must provide the language codes separated by ';'"),
}
_order = 'name'
class account_fiscal_position(osv.osv):
_inherit = 'account.fiscal.position'
_columns = {
'name': fields.char('Fiscal Position', required=True, translate=True),
'note': fields.text('Notes', translate=True),
}
class account_fiscal_position_template(osv.osv):
_inherit = 'account.fiscal.position.template'
_columns = {
'name': fields.char('Fiscal Position Template', required=True, translate=True),
'note': fields.text('Notes', translate=True),
}
class account_journal(osv.osv):
_inherit = 'account.journal'
_columns = {
'name': fields.char('Journal Name', required=True, translate=True),
}
class account_analytic_account(osv.osv):
_inherit = 'account.analytic.account'
_columns = {
'name': fields.char('Account Name', required=True, translate=True),
}
class account_analytic_journal(osv.osv):
_inherit = 'account.analytic.journal'
_columns = {
'name': fields.char('Journal Name', required=True, translate=True),
}
|
slac207/cs207project
|
refs/heads/master
|
timeseries/tests/test_lazy.py
|
2
|
from pytest import raises
from timeseries.lazy import lazy_add, lazy_mul, LazyOperation, lazy
def test_type():
assert isinstance(lazy_add(2,3),LazyOperation)==True
assert isinstance(lazy_mul(2,3),LazyOperation)==True
def test_operation():
assert lazy_add(3,4).eval()==7
assert lazy_mul(3,4).eval()==12
def test_composition():
assert lazy_add(3,lazy_add(3,2)).eval()==8
assert lazy_mul(3,lazy_mul(2,5)).eval()==30
assert lazy_add(4,lazy_mul(3,-1)).eval()==1
assert lazy_mul(3,lazy_add(-1,3)).eval()==6
@lazy
def lazy_add_kw(x,y,z=0):
if z == 0:
return -99
return x+y
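# A @lazy-decorated call returns a LazyOperation thunk; nothing runs until
# .eval() is called (behaviour the tests below rely on). Sketch:
#   op = lazy_add_kw(2, 3, z=1)   # no work performed yet
#   op.eval()                     # -> 5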
def test_kw():
assert lazy_add_kw(2,3).eval()==-99
assert lazy_add_kw(2,3,z=0).eval()==-99
assert lazy_add_kw(2,3,z=1).eval()==5
def test_kw_composition():
assert lazy_add_kw(2,3,z=lazy_add_kw(2,3,z=1)).eval()==5
assert lazy_add_kw(2,3,z=lazy_add_kw(2,-2,z=1)).eval()==-99
|
antb/TPT----My-old-mod
|
refs/heads/master
|
src/python/stdlib/test/test_getargs.py
|
130
|
"""
Test the internal getargs.c implementation
PyArg_ParseTuple() is defined here.
The test here is not intended to test all of the module, just the
single case that failed between 2.1 and 2.2a2.
"""
# marshal.loads() uses PyArg_ParseTuple(args, "s#:loads")
# The s code will cause a Unicode conversion to occur. This test
# verifies that the error is propagated properly from the C code back to
# Python.
import marshal
import unittest
from test import test_support
class GetArgsTest(unittest.TestCase):
# If the encoding succeeds using the current default encoding,
# this test will fail because it does not test the right part of the
# PyArg_ParseTuple() implementation.
def test_with_marshal(self):
arg = unicode(r'\222', 'unicode-escape')
self.assertRaises(UnicodeError, marshal.loads, arg)
def test_main():
test_support.run_unittest(GetArgsTest)
if __name__ == '__main__':
test_main()
|
erramuzpe/NeuroVault
|
refs/heads/master
|
neurovault/apps/users/tests/test_oauth.py
|
4
|
from django.test import TestCase, Client
from django.http import HttpResponse
from django.core.urlresolvers import reverse
from django.conf.urls import patterns, url, include
from django.contrib.auth import get_user_model
from rest_framework import permissions
from rest_framework.views import APIView
from oauth2_provider.ext.rest_framework import OAuth2Authentication
UserModel = get_user_model()
class MockView(APIView):
permission_classes = (permissions.IsAuthenticated,)
def get(self, request):
return HttpResponse({'a': 1, 'b': 2, 'c': 3})
class OAuth2View(MockView):
authentication_classes = [OAuth2Authentication]
urlpatterns = patterns(
'',
url(r'^oauth2/', include('oauth2_provider.urls')),
url(r'^oauth2-test/$', OAuth2View.as_view()),
url(r'^accounts/', include('neurovault.apps.users.urls')),
)
class TestPersonalAccessTokens(TestCase):
urls = 'neurovault.apps.users.tests.test_oauth'
def setUp(self):
self.user_password = "l0n6 l1v3 7h3 k1n6!"
self.user = UserModel.objects.create_user("bernardo",
"bernardo@example.com",
self.user_password)
self.client = Client()
def tearDown(self):
self.user.delete()
def _create_authorization_header(self, token):
return "Bearer {0}".format(token)
def test_authentication_empty(self):
response = self.client.get("/oauth2-test/")
self.assertEqual(response.status_code, 401)
def test_authentication_denied(self):
auth = self._create_authorization_header("fake-token")
response = self.client.get("/oauth2-test/", HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 401)
def test_authentication_allow(self):
self.client.login(username=self.user, password=self.user_password)
response = self.client.post(reverse('token_create'))
self.assertEqual(response.status_code, 302)
access_token = self.user.accesstoken_set.first()
auth = self._create_authorization_header(access_token)
response = self.client.get("/oauth2-test/", HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
|
chrta/simulavr
|
refs/heads/master
|
regress/modtest/adc_gain.py
|
3
|
from simtestutil import SimTestCase, SimTestLoader
import pysimulavr
class TestCase(SimTestCase):
    ADC_CLOCK = 8000 # ADC clock period in ns, i.e. 125kHz
adc0_pin = {
"atmega16": "A0",
"atmega644": "A0",
"at90can32": "F0",
"atmega128": "F0",
}
adc1_pin = {
"atmega16": "A1",
"atmega644": "A1",
"at90can32": "F1",
"atmega128": "F1",
}
def assertComplete(self, pValue, nValue, refValue):
v = self.sim.getWordByName(self.dev, "adc_value")
c = self.sim.getByteByName(self.dev, "complete")
e = int((((pValue - nValue) * 200) / refValue) * 512) & 0x3ff
self.assertEqual(v, e, "expected adc value is 0x%x, got 0x%x" % (e, v))
self.assertEqual(c, 1, "conversion is completed")
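    # Worked example of the expression above: the first call below,
    # assertComplete(1.32, 1.32, 2.56), reduces to
    # int(((1.32 - 1.32) * 200 / 2.56) * 512) & 0x3ff == 0, i.e. equal
    # differential inputs must read back as ADC word 0.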
def test_00(self):
"""check adc conversion, differential channel with gain 200"""
self.assertDevice()
self.assertStartTime()
# create analog pin as injector and connect to ADC1
a1pin = pysimulavr.Pin(1.3305) # set to 1.3305V level
net1 = pysimulavr.Net()
net1.Add(a1pin)
net1.Add(self.dev.GetPin(self.adc1_pin[self.processorName])) # connect to ADC1
# create analog pin as injector and connect to ADC0
a2pin = pysimulavr.Pin(1.32) # set to 1.32V level
net2 = pysimulavr.Net()
net2.Add(a2pin)
net2.Add(self.dev.GetPin(self.adc0_pin[self.processorName])) # connect to ADC0
# skip initialisation
self.assertInitDone()
        # check that we are not in the idle loop ...
self.assertEqual(self.sim.getByteByName(self.dev, "in_loop"), 0, "not in idle loop")
# run
self.sim.doRun(self.sim.getCurrentTime() + (15 * self.ADC_CLOCK))
        # check that we are now in the idle loop ...
self.assertEqual(self.sim.getByteByName(self.dev, "in_loop"), 1, "in idle loop")
c = self.sim.getByteByName(self.dev, "complete")
self.assertEqual(c, 0, "conversion is not completed")
# run, first conversion, channel A1 / A0
self.sim.doRun(self.sim.getCurrentTime() + (12 * self.ADC_CLOCK))
# get ADC value
self.assertComplete(1.32, 1.32, 2.56)
# start next conversion
self.sim.setByteByName(self.dev, "complete", 2)
# run, further conversion, channel A3 / A1
self.sim.doRun(self.sim.getCurrentTime() + (15 * self.ADC_CLOCK))
# get ADC value
self.assertComplete(1.3305, 1.32, 2.56)
if __name__ == '__main__':
from unittest import TextTestRunner
tests = SimTestLoader("adc_diff_atmega16.elf").loadTestsFromTestCase(TestCase)
TextTestRunner(verbosity = 2).run(tests)
# EOF
|
florian-f/sklearn
|
refs/heads/master
|
sklearn/neighbors/graph.py
|
14
|
"""Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD, (C) INRIA, University of Amsterdam
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def kneighbors_graph(X, n_neighbors, mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.todense()
matrix([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors).fit(X)
return X.kneighbors_graph(X._fit_X, n_neighbors, mode=mode)
def radius_neighbors_graph(X, radius, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
    Neighborhoods are restricted to the points at a distance lower than
radius.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.todense()
matrix([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius).fit(X)
return X.radius_neighbors_graph(X._fit_X, radius, mode)
|
rohitwaghchaure/erpnext_develop
|
refs/heads/develop
|
erpnext/docs/user/manual/en/selling/setup/__init__.py
|
12133432
| |
tupolev/plugin.video.mitele
|
refs/heads/master
|
lib/youtube_dl/extractor/thestar.py
|
14
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .brightcove import BrightcoveLegacyIE
from ..compat import compat_parse_qs
class TheStarIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?thestar\.com/(?:[^/]+/)*(?P<id>.+)\.html'
_TEST = {
'url': 'http://www.thestar.com/life/2016/02/01/mankind-why-this-woman-started-a-men-s-skincare-line.html',
'md5': '2c62dd4db2027e35579fefb97a8b6554',
'info_dict': {
'id': '4732393888001',
'ext': 'mp4',
'title': 'Mankind: Why this woman started a men\'s skin care line',
'description': 'Robert Cribb talks to Young Lee, the founder of Uncle Peter\'s MAN.',
'uploader_id': '794267642001',
'timestamp': 1454353482,
'upload_date': '20160201',
},
'params': {
# m3u8 download
'skip_download': True,
}
}
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/794267642001/default_default/index.html?videoId=%s'
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
brightcove_legacy_url = BrightcoveLegacyIE._extract_brightcove_url(webpage)
brightcove_id = compat_parse_qs(brightcove_legacy_url)['@videoPlayer'][0]
return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id)
|
vebin/Wox
|
refs/heads/master
|
PythonHome/Lib/site-packages/requests/packages/urllib3/util/timeout.py
|
303
|
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
def current_time():
"""
    Retrieve the current time; this function is mocked out in unit testing.
"""
return time.time()
_Default = object()
# The default timeout to use for socket connections. This is the attribute used
# by httplib to define the default timeout
class Timeout(object):
"""
Utility object for storing timeout values.
Example usage:
.. code-block:: python
timeout = urllib3.util.Timeout(connect=2.0, read=7.0)
pool = HTTPConnectionPool('www.google.com', 80, timeout=timeout)
pool.request(...) # Etc, etc
:param connect:
The maximum amount of time to wait for a connection attempt to a server
to succeed. Omitting the parameter will default the connect timeout to
the system default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time to wait between consecutive
read operations for a response from the server. Omitting
the parameter will default the read timeout to the system
default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response. Specifically, Python's DNS resolver does not obey the
timeout specified on the socket. Other factors that can affect total
request time include high CPU load, high swap, the program running at a
low priority level, or other behaviors. The observed running time for
urllib3 to return a response may be greater than the value passed to
`total`.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
        the case; if a server streams one byte every fifteen seconds, a timeout
        of 20 seconds will never trigger, even though the request will
        take several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, 'connect')
self._read = self._validate_timeout(read, 'read')
self.total = self._validate_timeout(total, 'total')
self._start_connect = None
def __str__(self):
return '%s(connect=%r, read=%r, total=%r)' % (
type(self).__name__, self._connect, self._read, self.total)
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is used
for clear error messages
:return: the value
:raises ValueError: if the type is not an integer or a float, or if it
is a numeric value less than zero
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
try:
float(value)
except (TypeError, ValueError):
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
try:
if value < 0:
raise ValueError("Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than 0." % (name, value))
except TypeError: # Python 3
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect(), and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value passed
to this function.
:param timeout: The legacy timeout value
:type timeout: integer, float, sentinel default object, or None
:return: a Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
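    # Sketch of the legacy conversion above: Timeout.from_float(3.0) behaves
    # like Timeout(connect=3.0, read=3.0), mirroring the single-number
    # httplib convention.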
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read,
total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: the elapsed time
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError("Can't get connect duration for timer "
"that has not started.")
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: the connect timeout
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: the value to use for the read timeout
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (self.total is not None and
self.total is not self.DEFAULT_TIMEOUT and
self._read is not None and
self._read is not self.DEFAULT_TIMEOUT):
# in case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(),
self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
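    # Worked example of the clamping above (illustrative values): with
    # total=10, read=5 and a connect phase that took 3s, the read timeout is
    # min(10 - 3, 5) = 5; had connect taken 7s it would be min(10 - 7, 5) = 3.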
|
dennis-sheil/commandergenius
|
refs/heads/sdl_android
|
project/jni/python/src/Tools/scripts/finddiv.py
|
102
|
#! /usr/bin/env python
"""finddiv - a grep-like tool that looks for division operators.
Usage: finddiv [-l] file_or_directory ...
For directory arguments, all files in the directory whose name ends in
.py are processed, and subdirectories are processed recursively.
This actually tokenizes the files to avoid false hits in comments or
string literals.
By default, this prints all lines containing a / or /= operator, in
grep -n style. With the -l option specified, it prints the filename
of files that contain at least one / or /= operator.
"""
import os
import sys
import getopt
import tokenize
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "lh")
except getopt.error, msg:
usage(msg)
return 2
if not args:
usage("at least one file argument is required")
return 2
listnames = 0
for o, a in opts:
if o == "-h":
print __doc__
return
if o == "-l":
listnames = 1
exit = None
for filename in args:
x = process(filename, listnames)
exit = exit or x
return exit
def usage(msg):
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.stderr.write("Usage: %s [-l] file ...\n" % sys.argv[0])
sys.stderr.write("Try `%s -h' for more information.\n" % sys.argv[0])
def process(filename, listnames):
if os.path.isdir(filename):
return processdir(filename, listnames)
try:
fp = open(filename)
except IOError, msg:
sys.stderr.write("Can't open: %s\n" % msg)
return 1
g = tokenize.generate_tokens(fp.readline)
lastrow = None
for type, token, (row, col), end, line in g:
if token in ("/", "/="):
if listnames:
print filename
break
if row != lastrow:
lastrow = row
print "%s:%d:%s" % (filename, row, line),
fp.close()
def processdir(dir, listnames):
try:
names = os.listdir(dir)
except os.error, msg:
sys.stderr.write("Can't list directory: %s\n" % dir)
return 1
files = []
for name in names:
fn = os.path.join(dir, name)
if os.path.normcase(fn).endswith(".py") or os.path.isdir(fn):
files.append(fn)
files.sort(lambda a, b: cmp(os.path.normcase(a), os.path.normcase(b)))
exit = None
for fn in files:
x = process(fn, listnames)
exit = exit or x
return exit
if __name__ == "__main__":
sys.exit(main())
|
xodus7/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/reshape_test.py
|
24
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Reshape Bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops.bijectors.reshape import Reshape
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions.bijector_test_util import assert_bijective_and_finite
from tensorflow.python.platform import test
class _ReshapeBijectorTest(object):
"""Base class for testing the reshape transformation.
Methods defined in this class call a method self.build_shapes() that
is implemented by subclasses defined below, returning respectively
ReshapeBijectorTestStatic: static shapes,
ReshapeBijectorTestDynamic: shape placeholders of known ndims, and
ReshapeBijectorTestDynamicNdims: shape placeholders of unspecified ndims,
so that each test in this base class is automatically run over all
three cases. The subclasses also implement assertRaisesError to test
for either Python exceptions (in the case of static shapes) or
TensorFlow op errors (dynamic shapes).
"""
def setUp(self):
self._rng = np.random.RandomState(42)
def testBijector(self):
"""Do a basic sanity check of forward, inverse, jacobian."""
expected_x = np.random.randn(4, 3, 2)
expected_y = np.reshape(expected_x, [4, 6])
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([3, 2], [6,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
(x_,
y_,
fldj_,
ildj_) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
bijector.forward_log_det_jacobian(expected_x, event_ndims=2),
bijector.inverse_log_det_jacobian(expected_y, event_ndims=2),
), feed_dict=feed_dict)
self.assertEqual("reshape", bijector.name)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
self.assertAllClose(0., fldj_, rtol=1e-6, atol=0)
self.assertAllClose(0., ildj_, rtol=1e-6, atol=0)
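  # Reshape is volume-preserving, which is why both log-det-Jacobians above
  # are expected to be exactly 0.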
def testEventShapeTensor(self):
"""Test event_shape_tensor methods when even ndims may be dynamic."""
shape_in_static = [2, 3]
shape_out_static = [6,]
shape_in, shape_out, feed_dict = self.build_shapes(shape_in_static,
shape_out_static)
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in, validate_args=True)
# using the _tensor methods, we should always get a fully-specified
# result since these are evaluated at graph runtime.
with self.cached_session() as sess:
(shape_out_,
shape_in_) = sess.run((
bijector.forward_event_shape_tensor(shape_in),
bijector.inverse_event_shape_tensor(shape_out),
), feed_dict=feed_dict)
self.assertAllEqual(shape_out_static, shape_out_)
self.assertAllEqual(shape_in_static, shape_in_)
def testScalarReshape(self):
"""Test reshaping to and from a scalar shape ()."""
expected_x = np.random.randn(4, 3, 1)
expected_y = np.reshape(expected_x, [4, 3])
expected_x_scalar = np.random.randn(1,)
expected_y_scalar = expected_x_scalar[0]
shape_in, shape_out, feed_dict = self.build_shapes([], [1,])
with self.cached_session() as sess:
bijector = Reshape(
event_shape_out=shape_in,
event_shape_in=shape_out, validate_args=True)
(x_,
y_,
x_scalar_,
y_scalar_
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
bijector.inverse(expected_y_scalar),
bijector.forward(expected_x_scalar),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
self.assertAllClose(expected_y_scalar, y_scalar_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x_scalar, x_scalar_, rtol=1e-6, atol=0)
def testMultipleUnspecifiedDimensionsOpError(self):
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [4, -1, -1,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(
"elements must have at most one `-1`."):
sess.run(bijector.forward_event_shape_tensor(shape_in),
feed_dict=feed_dict)
# pylint: disable=invalid-name
def _testInvalidDimensionsOpError(self, expected_error_message):
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [1, 2, -2,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(expected_error_message):
sess.run(bijector.forward_event_shape_tensor(shape_in),
feed_dict=feed_dict)
# pylint: enable=invalid-name
def testValidButNonMatchingInputOpError(self):
x = np.random.randn(4, 3, 2)
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, 3], [1, 6, 1,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
      # Here we pass in a tensor (x) whose shape is compatible with
      # the output shape, so tf.reshape itself raises no error, but
      # which does not match the expected input shape.
with self.assertRaisesError(
"Input `event_shape` does not match `event_shape_in`."):
sess.run(bijector.forward(x),
feed_dict=feed_dict)
def testValidButNonMatchingInputPartiallySpecifiedOpError(self):
x = np.random.randn(4, 3, 2)
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([2, -1], [1, 6, 1,])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(
"Input `event_shape` does not match `event_shape_in`."):
sess.run(bijector.forward(x),
feed_dict=feed_dict)
# pylint: disable=invalid-name
def _testInputOutputMismatchOpError(self, expected_error_message):
x1 = np.random.randn(4, 2, 3)
x2 = np.random.randn(4, 1, 1, 5)
with self.cached_session() as sess:
shape_in, shape_out, fd_mismatched = self.build_shapes([2, 3],
[1, 1, 5])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
with self.assertRaisesError(expected_error_message):
sess.run(bijector.forward(x1), feed_dict=fd_mismatched)
with self.assertRaisesError(expected_error_message):
sess.run(bijector.inverse(x2), feed_dict=fd_mismatched)
# pylint: enable=invalid-name
def testOneShapePartiallySpecified(self):
expected_x = np.random.randn(4, 6)
expected_y = np.reshape(expected_x, [4, 2, 3])
with self.cached_session() as sess:
# one of input/output shapes is partially specified
shape_in, shape_out, feed_dict = self.build_shapes([-1,], [2, 3])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
(x_,
y_,
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
def testBothShapesPartiallySpecified(self):
expected_x = np.random.randn(4, 2, 3)
expected_y = np.reshape(expected_x, [4, 3, 2])
with self.cached_session() as sess:
shape_in, shape_out, feed_dict = self.build_shapes([-1, 3], [-1, 2])
bijector = Reshape(
event_shape_out=shape_out,
event_shape_in=shape_in,
validate_args=True)
(x_,
y_,
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
def testDefaultVectorShape(self):
expected_x = np.random.randn(4, 4)
expected_y = np.reshape(expected_x, [4, 2, 2])
with self.cached_session() as sess:
_, shape_out, feed_dict = self.build_shapes([-1,], [-1, 2])
bijector = Reshape(shape_out,
validate_args=True)
(x_,
y_,
) = sess.run((
bijector.inverse(expected_y),
bijector.forward(expected_x),
), feed_dict=feed_dict)
self.assertAllClose(expected_y, y_, rtol=1e-6, atol=0)
self.assertAllClose(expected_x, x_, rtol=1e-6, atol=0)
def build_shapes(self, *args, **kwargs):
raise NotImplementedError("Subclass failed to implement `build_shapes`.")
class ReshapeBijectorTestStatic(test.TestCase, _ReshapeBijectorTest):
def build_shapes(self, shape_in, shape_out):
shape_in_static = shape_in
shape_out_static = shape_out
feed_dict = {}
return shape_in_static, shape_out_static, feed_dict
def assertRaisesError(self, msg):
return self.assertRaisesRegexp(Exception, msg)
def testEventShape(self):
shape_in_static = tensor_shape.TensorShape([2, 3])
shape_out_static = tensor_shape.TensorShape([6,])
bijector = Reshape(
event_shape_out=shape_out_static,
event_shape_in=shape_in_static, validate_args=True)
# test that forward_ and inverse_event_shape do sensible things
# when shapes are statically known.
self.assertEqual(
bijector.forward_event_shape(shape_in_static),
shape_out_static)
self.assertEqual(
bijector.inverse_event_shape(shape_out_static),
shape_in_static)
def testBijectiveAndFinite(self):
x = np.random.randn(4, 2, 3)
y = np.reshape(x, [4, 1, 2, 3])
with self.cached_session():
bijector = Reshape(
event_shape_in=[2, 3],
event_shape_out=[1, 2, 3],
validate_args=True)
assert_bijective_and_finite(
bijector, x, y, event_ndims=2, rtol=1e-6, atol=0)
def testInvalidDimensionsOpError(self):
self._testInvalidDimensionsOpError(
"Invalid value in tensor used for shape: -2")
def testInputOutputMismatchOpError(self):
self._testInputOutputMismatchOpError("Cannot reshape a tensor with")
class ReshapeBijectorTestDynamic(test.TestCase, _ReshapeBijectorTest):
def build_shapes(self, shape_in, shape_out):
shape_in_ph = array_ops.placeholder(shape=(len(shape_in),),
dtype=dtypes.int32)
shape_out_ph = array_ops.placeholder(shape=(len(shape_out),),
dtype=dtypes.int32)
feed_dict = {shape_in_ph: shape_in, shape_out_ph: shape_out}
return shape_in_ph, shape_out_ph, feed_dict
def assertRaisesError(self, msg):
return self.assertRaisesOpError(msg)
def testInvalidDimensionsOpError(self):
self._testInvalidDimensionsOpError(
"elements must be either positive integers or `-1`.")
def testInputOutputMismatchOpError(self):
self._testInputOutputMismatchOpError("Input to reshape is a tensor with")
class ReshapeBijectorTestDynamicNdims(test.TestCase, _ReshapeBijectorTest):
def build_shapes(self, shape_in, shape_out):
shape_in_ph = array_ops.placeholder(shape=None, dtype=dtypes.int32)
shape_out_ph = array_ops.placeholder(shape=None, dtype=dtypes.int32)
feed_dict = {shape_in_ph: shape_in, shape_out_ph: shape_out}
return shape_in_ph, shape_out_ph, feed_dict
def assertRaisesError(self, msg):
return self.assertRaisesOpError(msg)
def testInvalidDimensionsOpError(self):
self._testInvalidDimensionsOpError(
"elements must be either positive integers or `-1`.")
def testInputOutputMismatchOpError(self):
self._testInputOutputMismatchOpError("Input to reshape is a tensor with")
if __name__ == "__main__":
test.main()
|
aurelieladier/openturns
|
refs/heads/master
|
python/test/t_LaplaceFactory_std.py
|
4
|
#! /usr/bin/env python
from __future__ import print_function
from openturns import *
TESTPREAMBLE()
RandomGenerator.SetSeed(0)
try:
distribution = Laplace(2.5, -1.3)
size = 10000
sample = distribution.getSample(size)
factory = LaplaceFactory()
estimatedDistribution = factory.build(sample)
print("distribution=", repr(distribution))
print("Estimated distribution=", repr(estimatedDistribution))
estimatedDistribution = factory.build()
print("Default distribution=", estimatedDistribution)
estimatedDistribution = factory.build(
distribution.getParameter())
print("Distribution from parameters=", estimatedDistribution)
estimatedLaplace = factory.buildAsLaplace(sample)
print("Laplace =", distribution)
print("Estimated laplace=", estimatedLaplace)
estimatedLaplace = factory.buildAsLaplace()
print("Default laplace=", estimatedLaplace)
estimatedLaplace = factory.buildAsLaplace(
distribution.getParameter())
print("Laplace from parameters=", estimatedLaplace)
except:
import sys
print("t_LaplaceFactory_std.py", sys.exc_info()[0], sys.exc_info()[1])
|
christophercrouzet/bana
|
refs/heads/master
|
tests/OpenMaya/test_MPoint.py
|
2
|
#!/usr/bin/env mayapy
import os
import sys
import unittest
import maya.standalone
from maya import OpenMaya
_HERE = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(_HERE, *((os.pardir,) * 2))))
import bana
bana.initialize()
maya.standalone.initialize()
class MPointTest(unittest.TestCase):
def test__str__(self):
self.assertEqual(str(OpenMaya.MPoint()), "[0.0, 0.0, 0.0, 1.0]")
def testBnGet(self):
point = OpenMaya.MPoint(1.0, 2.0, 3.0, 4.0)
self.assertEqual(point.bnGet(), [1.0, 2.0, 3.0, 4.0])
if __name__ == '__main__':
from tests.run import run
run('__main__')
|
intervigilium/android_kernel_motorola_ghost
|
refs/heads/cm-12.1
|
tools/perf/scripts/python/net_dropmonitor.py
|
4235
|
# Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
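# /proc/kallsyms lines look like "ffffffff81000000 T _text": an address, a
# type character, and a symbol name. get_kallsyms_table() keeps the
# (address, name) pairs sorted by address so get_sym() can map a drop
# location back to "symbol + offset".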
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
linecount = 0
for line in f:
linecount = linecount+1
f.seek(0)
except:
return
j = 0
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
j = j +1
if ((j % 100) == 0):
print "\r" + str(j) + "/" + str(linecount),
kallsyms.append({ 'loc': loc, 'name' : name})
print "\r" + str(j) + "/" + str(linecount)
kallsyms.sort()
return
def get_sym(sloc):
	loc = int(sloc)
	# walk backwards to find the symbol at or below loc: the drop location
	# lies inside that symbol, so report "name + offset" rather than the
	# next symbol up
	for i in reversed(kallsyms):
		if i['loc'] <= loc:
			return (i['name'], loc - i['loc'])
	return (None, 0)
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, protocol, location):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
|
alphonzo79/ShaveKeeper
|
refs/heads/master
|
ProductDataCompiler/src/main/com/rowley/shavekeeper/productdatacompiler/utils/__init__.py
|
12133432
| |
mmcdermo/helpinghand
|
refs/heads/master
|
server/venv/lib/python2.7/site-packages/django/conf/locale/lt/__init__.py
|
12133432
| |
valtech-mooc/edx-platform
|
refs/heads/master
|
common/djangoapps/course_modes/helpers.py
|
12133432
| |
pixelgremlins/ztruck
|
refs/heads/master
|
dj/lib/python2.7/site-packages/django/conf/locale/ml/__init__.py
|
12133432
| |
andlima/olheiro
|
refs/heads/master
|
olheiro.py
|
1
|
#!/usr/bin/python
import os
import mercado
ARQUIVO_MOD = 'olheiro.mod'
ARQUIVO_DAT = 'olheiro.dat'
GLPSOL_EXEC = 'glpsol -m %s -d %s' % (ARQUIVO_MOD, ARQUIVO_DAT)
LISTA_CONJUNTOS = ['S_Jogadores', 'S_Posicoes', 'S_Formacoes']
LISTA_PARAMETROS = ['P_Patrimonio', 'P_Preco', 'P_Media',
'Pe_Posicao', 'P_Quantidade']
DEFAULT = {'P_Patrimonio': 0.0, 'P_Preco': 0.0,
'P_Media': 0.0, 'P_Quantidade': 0.0}
FORMACOES = {
'343': {
'TEC': 1,
'GOL': 1,
'ZAG': 3,
'MEI': 4,
'ATA': 3,
},
'352': {
'TEC': 1,
'GOL': 1,
'ZAG': 3,
'MEI': 5,
'ATA': 2,
},
'433': {
'TEC': 1,
'GOL': 1,
'ZAG': 2,
'LAT': 2,
'MEI': 3,
'ATA': 3,
},
'442': {
'TEC': 1,
'GOL': 1,
'ZAG': 2,
'LAT': 2,
'MEI': 4,
'ATA': 2,
},
'451': {
'TEC': 1,
'GOL': 1,
'ZAG': 2,
'LAT': 2,
'MEI': 5,
'ATA': 1,
},
}
def gravaConjunto(arq, nome, conteudo):
    '''Writes the contents of a set in DAT format.'''
_conteudo = ' '.join(map(lambda z: str(z), conteudo))
arq.write('set %s := %s;\n' % (nome, _conteudo))
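# For example (a sketch), gravaConjunto(arq, 'S_Formacoes', ["'343'", "'442'"])
# writes the line: set S_Formacoes := '343' '442';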
def gravaParametro(arq, nome, conteudo, default_=None):
    '''Writes the contents of a parameter in DAT format;
    the parameter may or may not have a default
    value (argument 'default_').'''
arq.write('param %s ' % nome)
if default_ is not None:
arq.write('default %s ' % str(default_))
arq.write(':= \n')
for item in conteudo:
arq.write(' ')
for i in item:
arq.write('%s ' % str(i))
arq.write('\n')
arq.write(';\n')
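# For example (a sketch), gravaParametro(arq, 'P_Preco', [("'X'", 9.5)], 0.0)
# writes:
#     param P_Preco default 0.0 :=
#       'X' 9.5
#     ;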
def identify(j):
return ("'%s : %s'" % (j.apelido, j.clube.slug)).encode('iso-8859-1')
if __name__ == '__main__':
cenario = mercado.busca_mercado()
    print ('Filtering: keeping only players with "probable" status and '
           'at least three games.')
cenario.jogadores = [j for j in cenario.jogadores
if (j.status_id == 7 and
j.jogos >= 3)]
data = {}
data['S_Jogadores'] = [identify(j) for j in cenario.jogadores]
data['S_Posicoes'] = list(set("'%s'" % j.posicao
for j in cenario.jogadores))
data['S_Formacoes'] = ["'%s'" % f for f in FORMACOES.keys()]
data['P_Patrimonio'] = [(raw_input('Patrimonio: '),)]
data['P_Preco'] = [(identify(j), j.preco) for j in cenario.jogadores]
data['P_Media'] = [(identify(j), j.media) for j in cenario.jogadores]
data['Pe_Posicao'] = [(identify(j), j.posicao) for j in cenario.jogadores]
    # the outer loop must bind f before FORMACOES[f] is indexed
    data['P_Quantidade'] = [
        ("'%s'" % f, "'%s'" % p, FORMACOES[f][p])
        for f in FORMACOES.keys()
        for p in FORMACOES[f].keys()]
arq = open(ARQUIVO_DAT, 'w')
for conj in LISTA_CONJUNTOS:
gravaConjunto(arq, conj, data.get(conj, []))
for param in LISTA_PARAMETROS:
gravaParametro(arq, param, data.get(param, []),
DEFAULT.get(param))
arq.close()
output = os.popen(GLPSOL_EXEC)
skip = True
for linha in output:
if 'Model has been successfully processed' in linha:
break
if 'Resultado da otimizacao' in linha:
skip = False
if not skip:
print linha,
|
iambibhas/django
|
refs/heads/master
|
tests/urlpatterns_reverse/included_urls2.py
|
47
|
"""
These URL patterns are included in two different ways in the main urls.py, with
an extra argument present in one case. Thus, there are two different ways for
each name to resolve and Django must distinguish the possibilities based on the
argument list.
"""
from django.conf.urls import url
from .views import empty_view
urlpatterns = [
url(r'^part/(?P<value>\w+)/$', empty_view, name="part"),
url(r'^part2/(?:(?P<value>\w+)/)?$', empty_view, name="part2"),
]
|
shootsoft/practice
|
refs/heads/master
|
LeetCode/python/061-090/082-remove-duplicates-from-sorted-list-ii/solution.py
|
1
|
__author__ = 'yinjun'
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
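# Example: given 1->2->3->3->4->4->5 the answer is 1->2->5; given
# 1->1->1->2->3 the answer is 2->3 (every value that appears more than
# once is removed entirely).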
class Solution:
# @param head, a ListNode
# @return a ListNode
    def deleteDuplicates(self, head):
        h = ListNode(0)  # dummy head makes it easy to drop leading duplicates
        h.next = head
        c = h            # c: last node confirmed unique (tail of the result)
        a = h.next       # a: current candidate node
        if a is None or a.next is None:
            return h.next
        b = a.next       # b: runner that scans past duplicates of a
        while b is not None:
            find = a.val == b.val
            while b is not None and a.val == b.val:
                b = b.next
            if find:
                # a had duplicates: unlink the whole run
                c.next = b
                a = b
                if b is not None:
                    b = b.next
            else:
                # a is unique: keep it and advance all three pointers
                c = a
                a = a.next
                b = a.next
        return h.next
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
|
Microsoft/PTVS
|
refs/heads/master
|
Python/Product/Miniconda/Miniconda3-x64/Lib/unittest/test/test_skipping.py
|
25
|
import unittest
from unittest.test.support import LoggingResult
class Test_TestSkipping(unittest.TestCase):
def test_skipping(self):
class Foo(unittest.TestCase):
def test_skip_me(self):
self.skipTest("skip")
events = []
result = LoggingResult(events)
test = Foo("test_skip_me")
test.run(result)
self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
self.assertEqual(result.skipped, [(test, "skip")])
# Try letting setUp skip the test now.
class Foo(unittest.TestCase):
def setUp(self):
self.skipTest("testing")
def test_nothing(self): pass
events = []
result = LoggingResult(events)
test = Foo("test_nothing")
test.run(result)
self.assertEqual(events, ['startTest', 'addSkip', 'stopTest'])
self.assertEqual(result.skipped, [(test, "testing")])
self.assertEqual(result.testsRun, 1)
def test_skipping_subtests(self):
class Foo(unittest.TestCase):
def test_skip_me(self):
with self.subTest(a=1):
with self.subTest(b=2):
self.skipTest("skip 1")
self.skipTest("skip 2")
self.skipTest("skip 3")
events = []
result = LoggingResult(events)
test = Foo("test_skip_me")
test.run(result)
self.assertEqual(events, ['startTest', 'addSkip', 'addSkip',
'addSkip', 'stopTest'])
self.assertEqual(len(result.skipped), 3)
subtest, msg = result.skipped[0]
self.assertEqual(msg, "skip 1")
self.assertIsInstance(subtest, unittest.TestCase)
self.assertIsNot(subtest, test)
subtest, msg = result.skipped[1]
self.assertEqual(msg, "skip 2")
self.assertIsInstance(subtest, unittest.TestCase)
self.assertIsNot(subtest, test)
self.assertEqual(result.skipped[2], (test, "skip 3"))
def test_skipping_decorators(self):
op_table = ((unittest.skipUnless, False, True),
(unittest.skipIf, True, False))
for deco, do_skip, dont_skip in op_table:
class Foo(unittest.TestCase):
@deco(do_skip, "testing")
def test_skip(self): pass
@deco(dont_skip, "testing")
def test_dont_skip(self): pass
test_do_skip = Foo("test_skip")
test_dont_skip = Foo("test_dont_skip")
suite = unittest.TestSuite([test_do_skip, test_dont_skip])
events = []
result = LoggingResult(events)
suite.run(result)
self.assertEqual(len(result.skipped), 1)
expected = ['startTest', 'addSkip', 'stopTest',
'startTest', 'addSuccess', 'stopTest']
self.assertEqual(events, expected)
self.assertEqual(result.testsRun, 2)
self.assertEqual(result.skipped, [(test_do_skip, "testing")])
self.assertTrue(result.wasSuccessful())
def test_skip_class(self):
@unittest.skip("testing")
class Foo(unittest.TestCase):
def test_1(self):
record.append(1)
record = []
result = unittest.TestResult()
test = Foo("test_1")
suite = unittest.TestSuite([test])
suite.run(result)
self.assertEqual(result.skipped, [(test, "testing")])
self.assertEqual(record, [])
def test_skip_non_unittest_class(self):
@unittest.skip("testing")
class Mixin:
def test_1(self):
record.append(1)
class Foo(Mixin, unittest.TestCase):
pass
record = []
result = unittest.TestResult()
test = Foo("test_1")
suite = unittest.TestSuite([test])
suite.run(result)
self.assertEqual(result.skipped, [(test, "testing")])
self.assertEqual(record, [])
def test_expected_failure(self):
class Foo(unittest.TestCase):
@unittest.expectedFailure
def test_die(self):
self.fail("help me!")
events = []
result = LoggingResult(events)
test = Foo("test_die")
test.run(result)
self.assertEqual(events,
['startTest', 'addExpectedFailure', 'stopTest'])
self.assertEqual(result.expectedFailures[0][0], test)
self.assertTrue(result.wasSuccessful())
def test_expected_failure_with_wrapped_class(self):
@unittest.expectedFailure
class Foo(unittest.TestCase):
def test_1(self):
self.assertTrue(False)
events = []
result = LoggingResult(events)
test = Foo("test_1")
test.run(result)
self.assertEqual(events,
['startTest', 'addExpectedFailure', 'stopTest'])
self.assertEqual(result.expectedFailures[0][0], test)
self.assertTrue(result.wasSuccessful())
def test_expected_failure_with_wrapped_subclass(self):
class Foo(unittest.TestCase):
def test_1(self):
self.assertTrue(False)
@unittest.expectedFailure
class Bar(Foo):
pass
events = []
result = LoggingResult(events)
test = Bar("test_1")
test.run(result)
self.assertEqual(events,
['startTest', 'addExpectedFailure', 'stopTest'])
self.assertEqual(result.expectedFailures[0][0], test)
self.assertTrue(result.wasSuccessful())
def test_expected_failure_subtests(self):
# A failure in any subtest counts as the expected failure of the
# whole test.
class Foo(unittest.TestCase):
@unittest.expectedFailure
def test_die(self):
with self.subTest():
# This one succeeds
pass
with self.subTest():
self.fail("help me!")
with self.subTest():
# This one doesn't get executed
self.fail("shouldn't come here")
events = []
result = LoggingResult(events)
test = Foo("test_die")
test.run(result)
self.assertEqual(events,
['startTest', 'addSubTestSuccess',
'addExpectedFailure', 'stopTest'])
self.assertEqual(len(result.expectedFailures), 1)
self.assertIs(result.expectedFailures[0][0], test)
self.assertTrue(result.wasSuccessful())
def test_unexpected_success(self):
class Foo(unittest.TestCase):
@unittest.expectedFailure
def test_die(self):
pass
events = []
result = LoggingResult(events)
test = Foo("test_die")
test.run(result)
self.assertEqual(events,
['startTest', 'addUnexpectedSuccess', 'stopTest'])
self.assertFalse(result.failures)
self.assertEqual(result.unexpectedSuccesses, [test])
self.assertFalse(result.wasSuccessful())
def test_unexpected_success_subtests(self):
# Success in all subtests counts as the unexpected success of
# the whole test.
class Foo(unittest.TestCase):
@unittest.expectedFailure
def test_die(self):
with self.subTest():
# This one succeeds
pass
with self.subTest():
# So does this one
pass
events = []
result = LoggingResult(events)
test = Foo("test_die")
test.run(result)
self.assertEqual(events,
['startTest',
'addSubTestSuccess', 'addSubTestSuccess',
'addUnexpectedSuccess', 'stopTest'])
self.assertFalse(result.failures)
self.assertEqual(result.unexpectedSuccesses, [test])
self.assertFalse(result.wasSuccessful())
def test_skip_doesnt_run_setup(self):
class Foo(unittest.TestCase):
wasSetUp = False
wasTornDown = False
def setUp(self):
Foo.wasSetUp = True
            def tearDown(self):
Foo.wasTornDown = True
@unittest.skip('testing')
def test_1(self):
pass
result = unittest.TestResult()
test = Foo("test_1")
suite = unittest.TestSuite([test])
suite.run(result)
self.assertEqual(result.skipped, [(test, "testing")])
self.assertFalse(Foo.wasSetUp)
self.assertFalse(Foo.wasTornDown)
def test_decorated_skip(self):
def decorator(func):
def inner(*a):
return func(*a)
return inner
class Foo(unittest.TestCase):
@decorator
@unittest.skip('testing')
def test_1(self):
pass
result = unittest.TestResult()
test = Foo("test_1")
suite = unittest.TestSuite([test])
suite.run(result)
self.assertEqual(result.skipped, [(test, "testing")])
if __name__ == "__main__":
unittest.main()
|
wevote/WeVoteServer
|
refs/heads/develop
|
import_export_wikipedia/views.py
|
2
|
# import_export_wikipedia/views.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
# See also import_export_wikipedia/views_admin.py for views used in the admin area
|
sjperkins/tensorflow
|
refs/heads/master
|
tensorflow/python/debug/examples/debug_fibonacci.py
|
58
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Demo of the tfdbg curses UI: A TF network computing Fibonacci sequence."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python import debug as tf_debug
FLAGS = None
def main(_):
sess = tf.Session()
# Construct the TensorFlow network.
n0 = tf.Variable(
np.ones([FLAGS.tensor_size] * 2), dtype=tf.int32, name="node_00")
n1 = tf.Variable(
np.ones([FLAGS.tensor_size] * 2), dtype=tf.int32, name="node_01")
for i in xrange(2, FLAGS.length):
n0, n1 = n1, tf.add(n0, n1, name="node_%.2d" % i)
sess.run(tf.global_variables_initializer())
# Wrap the TensorFlow Session object for debugging.
if FLAGS.debug:
sess = tf_debug.LocalCLIDebugWrapperSession(sess)
def has_negative(_, tensor):
return np.any(tensor < 0)
sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
sess.add_tensor_filter("has_negative", has_negative)
print("Fibonacci number at position %d: %d" %
(FLAGS.length, int(sess.run(n1))))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.register("type", "bool", lambda v: v.lower() == "true")
parser.add_argument(
"--tensor_size",
type=int,
default=1,
help="""\
Size of tensor. E.g., if the value is 30, the tensors will have shape
[30, 30].\
""")
parser.add_argument(
"--length",
type=int,
default=20,
help="Length of the fibonacci sequence to compute.")
parser.add_argument(
"--ui_type",
type=str,
default="curses",
help="Command-line user interface type (curses | readline)")
parser.add_argument(
"--debug",
dest="debug",
action="store_true",
help="Use TensorFlow Debugger (tfdbg).")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
|
mpavone/vCloudPy
|
refs/heads/master
|
src/vCloud.py
|
1
|
# vCloudPy: VMWare vCloud Automation for Python Devops
# Copyright (c) 2014 Martino Pavone. All Rights Reserved.
#
# Licensed under the MIT License , (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://opensource.org/licenses/MIT
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import requests
import time
import xml.etree.ElementTree as ET
class vCloud(object):
def __init__(self):
self.login = None
self.headers = None
self.endpoint = None
self.org_href = None
self.status = {'code':0, 'description':'ok', 'xml':'', 'request':None}
def __extract_info__(self, content, select=None):
ret = {}
root = ET.fromstring(content)
if select == None :
for k,v in root.attrib.iteritems():
ret[k] = str(v)
else :
for k in select :
if root.attrib.has_key(k) : ret[k] = root.attrib[k]
ret['xml'] = content
return ret
def __extract_info_for_type__(self, content, type, filter=None, select=None):
ret = []
root = ET.fromstring(content)
for child in root:
if child.attrib.has_key('type') :
if child.attrib['type'] == type :
include = True
if filter is not None :
for f in filter:
if child.attrib.has_key(f) :
if child.attrib[f] != filter[f] :
include = False
break
else :
include = False
break
if include :
retItem = {};
if select is None :
for k,v in child.attrib.iteritems():
retItem[k] = str(v)
else :
for k in select :
if child.attrib.has_key(k) : retItem[k] = child.attrib[k]
ret.append(retItem)
if len(ret) == 1 :
ret = ret[0]
elif len(ret) == 0 :
ret = {}
return ret
def __extract_info_for_element__(self, content, element, subelement=None, filter=None, select=None):
ret = []
root = ET.fromstring(content)
for child in root:
if child.tag == element:
if subelement == None :
include = True
if filter is not None :
for f in filter:
if child.attrib.has_key(f) :
if child.attrib[f] != filter[f] :
include = False
break
else :
include = False
break
if include :
retItem = {};
if select is None :
for k,v in child.attrib.iteritems():
retItem[k] = str(v)
else :
for k in select :
if child.attrib.has_key(k) : retItem[k] = child.attrib[k]
ret.append(retItem)
else :
for subchild in child :
include = True
if filter is not None :
for f in filter:
if subchild.attrib.has_key(f) :
if subchild.attrib[f] != filter[f] :
include = False
break
else :
include = False
break
if include :
retItem = {};
if select is None :
for k,v in subchild.attrib.iteritems():
retItem[k] = str(v)
else :
for k in select :
if subchild.attrib.has_key(k) : retItem[k] = subchild.attrib[k]
ret.append(retItem)
if len(ret) == 1 :
ret = ret[0]
elif len(ret) == 0 :
ret = {}
return ret
def connect(self, org, username, password, endpoint):
'Connect to vCloud server'
self.endpoint = endpoint
self.login = {'Accept':'application/*+xml;version=5.1', \
'Authorization':'Basic '+ base64.b64encode(username + "@" + org + ":" + password)}
p = requests.post(self.endpoint + 'sessions', headers = self.login)
self.headers = {'Accept':'application/*+xml;version=5.1'}
for k,v in p.headers.iteritems():
if k == "x-vcloud-authorization" : self.headers[k]=v
self.get_org()
def is_connected(self):
        'Check if the connection to vCloud is established'
        is_connected = (self.headers is not None)
        return is_connected
def disconnect(self):
self.login = None
self.headers = None
self.endpoint = None
self.org_href = None
#TODO: logout from vCloud
def __set_status__(self, code, description, xml=None, request=None) :
self.status['code']=code
self.status['description']=description
self.status['xml']=xml
        if request is None :
            self.status['request']=None
        else :
            req = {}
            req['url']=request.url
            req['method']=request.method
            req['headers']=request.headers
            req['body']=request.body
            self.status['request']=req
def get_request (self, href) :
is_ok = True
if not(self.is_connected()) :
            self.__set_status__(code=-1, description='Not Connected')
            result = None
            is_ok = False
if is_ok :
result = requests.get(href, data=None, headers = self.headers)
            #TODO: the description must be taken from the attribute Message
self.__set_status__ (result.status_code, result.reason, xml=result.content, request=result.request )
is_ok = result.ok
if not(is_ok) : result = None
return result
def post_request (self, href, data=None, headers=None) :
is_ok = True
if not(self.is_connected()) :
self.__set_status__(code=-1, description='Not Connected')
is_ok = False
if is_ok :
if headers == None :
post_headers = self.headers
else :
post_headers = dict(headers.items() + self.headers.items())
result = requests.post(href, data=data, headers=post_headers)
            #TODO: the description must be taken from the attribute Message
self.__set_status__ (result.status_code, result.reason, xml=result.content, request=result.request )
is_ok = result.ok
if not(is_ok) : result = None
return result
def delete_request (self, href) :
is_ok = True
if not(self.is_connected()) :
            self.__set_status__(code=-1, description='Not Connected')
            result = None
            is_ok = False
if is_ok :
result = requests.delete(href, data=None, headers = self.headers)
            #TODO: the description must be taken from the attribute Message
self.__set_status__ (result.status_code, result.reason, xml=result.content, request=result.request )
is_ok = result.ok
if not(is_ok) : result = None
return result
def get_status(self):
return self.status
#TODO: multi-organization
def get_org(self):
is_ok = True
g= self.get_request(self.endpoint + 'org')
is_ok = not(g==None)
result = {}
if is_ok :
root = ET.fromstring(g.content)
for child in root:
for k,v in child.attrib.iteritems():
result[k] = str(v)
self.org_href = result["href"]
g= self.get_request(self.org_href)
is_ok = not(g==None)
if is_ok :
root = ET.fromstring(g.content)
for child in root:
if child.tag == '{http://www.vmware.com/vcloud/v1.5}FullName':
result['FullName'] = child.text
if not(is_ok) : result = None
return result
def get_vdc(self, filter=None, select=None ):
is_ok = True
g= self.get_request(self.org_href)
is_ok = not(g==None)
if is_ok :
result = self.__extract_info_for_type__(g.content, 'application/vnd.vmware.vcloud.vdc+xml', filter, select)
if not(is_ok) : result = None
return result
def get_catalog(self, filter=None, select=None):
is_ok = True
g= self.get_request(self.org_href)
is_ok = not(g==None)
if is_ok :
result = self.__extract_info_for_type__(g.content, 'application/vnd.vmware.vcloud.catalog+xml', filter, select)
if not(is_ok) : result = None
return result
def get_catalogItem(self, catalog_href, filter=None, select=None):
is_ok = True
g= self.get_request(catalog_href)
is_ok = not(g==None)
if is_ok :
result = self.__extract_info_for_element__(g.content, '{http://www.vmware.com/vcloud/v1.5}CatalogItems', '{http://www.vmware.com/vcloud/v1.5}CatalogItem', filter=filter, select=select )
if not(is_ok) : result = None
return result
def get_storageProfiles(self, vdc_href, filter=None, select=None):
is_ok = True
g= self.get_request(vdc_href)
is_ok = not(g==None)
if is_ok :
result = self.__extract_info_for_element__(g.content, '{http://www.vmware.com/vcloud/v1.5}VdcStorageProfiles', '{http://www.vmware.com/vcloud/v1.5}VdcStorageProfile', filter=filter, select=select )
if not(is_ok) : result = None
return result
def get_network(self, filter=None):
is_ok = True
g= self.get_request(self.org_href)
is_ok = not(g==None)
if is_ok :
result = self.__extract_info_for_type__(g.content, 'application/vnd.vmware.vcloud.orgNetwork+xml', filter)
if not(is_ok) : result = None
return result
def get_vapp(self, filter=None, select=None):
is_ok = True
g= self.get_request(self.endpoint + 'vApps/query')
is_ok = not(g==None)
if is_ok :
result = self.__extract_info_for_element__(g.content, '{http://www.vmware.com/vcloud/v1.5}VAppRecord', filter=filter, select=select )
if not(is_ok) : result = None
return result
def get_vapp_templates(self, filter=None, select=None):
is_ok = True
g= self.get_request(self.endpoint + '/vAppTemplates/query')
is_ok = not(g==None)
if is_ok :
result = self.__extract_info_for_element__(g.content, '{http://www.vmware.com/vcloud/v1.5}VAppTemplateRecord', filter=filter, select=select )
if not(is_ok) : result = None
return result
def get_vm(self, filter=None, select=None):
is_ok = True
g= self.get_request(self.endpoint + 'vms/query')
is_ok = not(g==None)
if filter == None :
filter = {}
filter['isVAppTemplate']='false'
if is_ok :
result = self.__extract_info_for_element__(g.content, '{http://www.vmware.com/vcloud/v1.5}VMRecord', filter=filter, select=select )
if not(is_ok) : result = None
return result
def delete_vapp(self, vm_href):
is_ok = True
delete= self.delete_request(vm_href)
is_ok = not(delete==None)
if is_ok :
select = ['name', 'id', 'href']
result = self.__extract_info__(delete.text, select=select)
if not(is_ok) : result = None
return result
def undeploy_vapp(self, vapp_href):
is_ok = True
post_headers={}
post_headers['Content-Type']='application/vnd.vmware.vcloud.undeployVAppParams+xml'
xml = """<?xml version="1.0" encoding="UTF-8"?>
<UndeployVAppParams
xmlns="http://www.vmware.com/vcloud/v1.5">
<UndeployPowerAction>powerOff</UndeployPowerAction>
</UndeployVAppParams>"""
post = self.post_request (vapp_href + '/action/undeploy', data=xml, headers=post_headers)
is_ok = not(post==None)
if is_ok :
select = ['name', 'id', 'href']
result = self.__extract_info__(post.text, select=select)
if not(is_ok) : result = None
return result
#TODO: method __recompose_vapp_to_remove__
def __recompose_vapp_to_add__(self, vapp_href, item_href, description, source_delete=False ):
is_ok = True
post_headers={}
post_headers['Content-Type']='application/vnd.vmware.vcloud.recomposeVAppParams+xml'
if (source_delete) :
source_delete_text = "true"
else :
source_delete_text = "false"
xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<RecomposeVAppParams
xmlns="http://www.vmware.com/vcloud/v1.5"
xmlns:ns2="http://schemas.dmtf.org/ovf/envelope/1"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
xmlns:environment_1="http://schemas.dmtf.org/ovf/environment/1">
<Description> "%s" </Description>
<SourcedItem sourceDelete="%s">
<Source href="%s"/>
</SourcedItem>
<AllEULAsAccepted>true</AllEULAsAccepted>
</RecomposeVAppParams>""" % (description, source_delete_text, item_href)
post = self.post_request (vapp_href + '/action/recomposeVApp', data=xml, headers=post_headers)
is_ok = not(post==None)
if is_ok :
select = ['name', 'id', 'href']
result = self.__extract_info__(post.text, select=select)
if not(is_ok) : result = None
return result
def copy_vm(self, vapp_href, vm_href, description):
return self.__recompose_vapp_to_add__(vapp_href, vm_href, description, False )
def copy_vapp(self, vapp_dest_href, vapp_source_href, description):
return self.__recompose_vapp_to_add__(vapp_dest_href, vapp_source_href, description, False )
def move_vm(self, vapp_href, vm_href, description):
return self.__recompose_vapp_to_add__(vapp_href, vm_href, description, True )
def move_vapp(self, vapp_dest_href, vapp_source_href, description):
return self.__recompose_vapp_to_add__(vapp_dest_href, vapp_source_href, description, True )
def new_vapp_from_template(self, vdc_href, vapp_template_href, new_vapp_name, new_vapp_description ='' ):
is_ok = True
post_headers={}
post_headers['Content-Type']='application/vnd.vmware.vcloud.instantiateVAppTemplateParams+xml'
xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<InstantiateVAppTemplateParams
xmlns="http://www.vmware.com/vcloud/v1.5"
xmlns:ovf="http://schemas.dmtf.org/ovf/envelope/1"
name="%s"
deploy="true"
powerOn="true">
<Description>%s</Description>
<Source href="%s"/>
</InstantiateVAppTemplateParams>""" % (new_vapp_name, new_vapp_description, vapp_template_href)
post = self.post_request(vdc_href + '/action/instantiateVAppTemplate', data=xml, headers=post_headers)
is_ok = not(post==None)
if is_ok :
select = ['name', 'id', 'href']
result = self.__extract_info_for_element__(post.text, '{http://www.vmware.com/vcloud/v1.5}Tasks', '{http://www.vmware.com/vcloud/v1.5}Task', select=select )
if not(is_ok) : result = None
return result
def get_task(self, task_id):
is_ok = True
get= self.get_request(self.org_href)
is_ok = not(get==None)
if is_ok :
tasks = self.__extract_info_for_type__(get.content, 'application/vnd.vmware.vcloud.tasksList+xml')
is_ok = not(tasks==None)
if is_ok :
if type(tasks) == list :
for task in tasks :
get_task = self.get_request(task['href'])
select = ['name', 'id', 'href']
filter ={'id':task_id}
result = self.__extract_info_for_element__(get_task.content, '{http://www.vmware.com/vcloud/v1.5}Task', filter=filter, select=select )
if result != {} : break
else :
get_task = self.get_request(tasks['href'])
is_ok = not(get_task==None)
if is_ok :
select = ['name', 'id', 'href']
filter ={'id':task_id}
result = self.__extract_info_for_element__(get_task.content, '{http://www.vmware.com/vcloud/v1.5}Task', filter=filter, select=select )
if not(is_ok) : result = None
return result
def wait_task(self, task_href, print_progress=False):
is_ok = True
x=0
while (x == 0):
time.sleep(5)
get = self.get_request(task_href)
is_ok = not(get==None)
if is_ok :
root = ET.fromstring(get.content)
found_progress = False
for child in root.iter():
if child.tag == "{http://www.vmware.com/vcloud/v1.5}Progress" :
if print_progress : print("Progress:"+ child.text)
if child.text == '100' : x = 1
found_progress = True
if not(found_progress) : x=1
if is_ok :
time.sleep(5)
result = True
if not(is_ok) : result = None
return result
def stop_vm(self, href):
post = self.post_request(href + '/power/action/powerOff')
select = ['name', 'id', 'href']
result = self.__extract_info_for_element__(post.text, '{http://www.vmware.com/vcloud/v1.5}Tasks', '{http://www.vmware.com/vcloud/v1.5}Task', select=select )
return result
def reboot_vm(self, href):
post = self.post_request(href + '/power/action/reboot')
select = ['name', 'id', 'href']
result = self.__extract_info_for_element__(post.text, '{http://www.vmware.com/vcloud/v1.5}Tasks', '{http://www.vmware.com/vcloud/v1.5}Task', select=select )
return result
def reset_vm(self, href):
post = self.post_request(href + '/power/action/reset')
select = ['name', 'id', 'href']
result = self.__extract_info_for_element__(post.text, '{http://www.vmware.com/vcloud/v1.5}Tasks', '{http://www.vmware.com/vcloud/v1.5}Task', select=select )
return result
def shutdown_vm(self, href):
post = self.post_request(href + '/power/action/shutdown')
select = ['name', 'id', 'href']
result = self.__extract_info_for_element__(post.text, '{http://www.vmware.com/vcloud/v1.5}Tasks', '{http://www.vmware.com/vcloud/v1.5}Task', select=select )
return result
def suspend_vm(self, href):
post = self.post_request(href + '/power/action/suspend')
select = ['name', 'id', 'href']
result = self.__extract_info_for_element__(post.text, '{http://www.vmware.com/vcloud/v1.5}Tasks', '{http://www.vmware.com/vcloud/v1.5}Task', select=select )
return result
def start_vapp(self, href):
post = self.post_request(href + '/power/action/powerOn')
select = ['name', 'id', 'href']
result = self.__extract_info_for_element__(post.text, '{http://www.vmware.com/vcloud/v1.5}Tasks', '{http://www.vmware.com/vcloud/v1.5}Task', select=select )
return result
def stop_vapp(self, href):
post = self.post_request(href + '/power/action/powerOff')
select = ['name', 'id', 'href']
result = self.__extract_info_for_element__(post.text, '{http://www.vmware.com/vcloud/v1.5}Tasks', '{http://www.vmware.com/vcloud/v1.5}Task', select=select )
return result
def reboot_vapp(self, href):
post = self.post_request(href + '/power/action/reboot')
select = ['name', 'id', 'href']
result = self.__extract_info_for_element__(post.text, '{http://www.vmware.com/vcloud/v1.5}Tasks', '{http://www.vmware.com/vcloud/v1.5}Task', select=select )
return result
def reset_vapp(self, href):
post = self.post_request(href + '/power/action/reset')
select = ['name', 'id', 'href']
result = self.__extract_info_for_element__(post.text, '{http://www.vmware.com/vcloud/v1.5}Tasks', '{http://www.vmware.com/vcloud/v1.5}Task', select=select )
return result
def shutdown_vapp(self, href):
post = self.post_request(href + '/power/action/shutdown')
select = ['name', 'id', 'href']
result = self.__extract_info_for_element__(post.text, '{http://www.vmware.com/vcloud/v1.5}Tasks', '{http://www.vmware.com/vcloud/v1.5}Task', select=select )
return result
def suspend_vapp(self, href):
post = self.post_request(href + '/power/action/suspend')
select = ['name', 'id', 'href']
result = self.__extract_info_for_element__(post.text, '{http://www.vmware.com/vcloud/v1.5}Tasks', '{http://www.vmware.com/vcloud/v1.5}Task', select=select )
return result
#TODO: method create_snapshot
#TODO: method revert_snapshot
def get_item(self, href):
is_ok = True
g= self.get_request(href)
is_ok = not(g==None)
if is_ok :
result = self.__extract_info__(g.content)
if not(is_ok) : result = None
return result
def print_item(self, item, select=None, table=False):
if item == None : return
cols={}
if select is None :
if type(item) is list :
cols = item[0].keys()
else :
cols = item.keys()
else :
cols = select
f = ''
if table :
for k in cols :
f = f + '{0[' + k +']:15} '
n = dict(zip(cols, cols))
print (f.format(n))
if type(item) is list :
for i in item :
f=''
for k in cols :
if i.has_key(k) :
if table :
f = f + '{0[' + k +']:15} '
else :
f = f + k + ': {0[' + k +']:15} \n'
else :
if table :
f = f + ' '
else :
f = f + k + ': '
print (f.format(i))
else :
for k in cols :
if item.has_key(k) :
f = k + ': {0[' + k +']:15} '
print (f.format(item))
else :
f = ' '
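# A minimal usage sketch (org, credentials and endpoint are placeholders):
#     vcd = vCloud()
#     vcd.connect('MyOrg', 'admin', 'secret', 'https://vcloud.example.com/api/')
#     if vcd.is_connected():
#         vcd.print_item(vcd.get_vapp(select=['name', 'href']), table=True)
#         vcd.disconnect()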
|
lrowe/selenium
|
refs/heads/master
|
py/selenium/webdriver/common/by.py
|
69
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The By implementation.
"""
class By(object):
"""
Set of supported locator strategies.
"""
ID = "id"
XPATH = "xpath"
LINK_TEXT = "link text"
PARTIAL_LINK_TEXT = "partial link text"
NAME = "name"
TAG_NAME = "tag name"
CLASS_NAME = "class name"
CSS_SELECTOR = "css selector"
@classmethod
def is_valid(cls, by):
for attr in dir(cls):
if by == getattr(cls, attr):
return True
return False
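# Typical usage (a sketch, assuming a WebDriver instance named `driver`):
#     from selenium.webdriver.common.by import By
#     element = driver.find_element(By.CSS_SELECTOR, "div.content a[href]")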
|
zhouyejoe/spark
|
refs/heads/master
|
dev/sparktestsupport/shellutils.py
|
42
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import shutil
import subprocess
import sys
subprocess_check_output = subprocess.check_output
subprocess_check_call = subprocess.check_call
def exit_from_command_with_retcode(cmd, retcode):
if retcode < 0:
print("[error] running", ' '.join(cmd), "; process was terminated by signal", -retcode)
else:
print("[error] running", ' '.join(cmd), "; received return code", retcode)
sys.exit(int(os.environ.get("CURRENT_BLOCK", 255)))
def rm_r(path):
"""
Given an arbitrary path, properly remove it with the correct Python construct if it exists.
From: http://stackoverflow.com/a/9559881
"""
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.exists(path):
os.remove(path)
def run_cmd(cmd, return_output=False):
"""
Given a command as a list of arguments will attempt to execute the command
and, on failure, print an error message and exit.
"""
if not isinstance(cmd, list):
cmd = cmd.split()
try:
if return_output:
return subprocess_check_output(cmd)
else:
return subprocess_check_call(cmd)
except subprocess.CalledProcessError as e:
exit_from_command_with_retcode(e.cmd, e.returncode)
def is_exe(path):
"""
Check if a given path is an executable file.
From: http://stackoverflow.com/a/377028
"""
return os.path.isfile(path) and os.access(path, os.X_OK)
def which(program):
"""
Find and return the given program by its absolute path or 'None' if the program cannot be found.
From: http://stackoverflow.com/a/377028
"""
fpath = os.path.split(program)[0]
if fpath:
if is_exe(program):
return program
else:
for path in os.environ.get("PATH").split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
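# For example (a sketch), which("java") returns an absolute path such as
# "/usr/bin/java" when java is on the PATH, and None otherwise.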
|
oinopion/django
|
refs/heads/master
|
tests/sites_tests/__init__.py
|
12133432
| |
tdsymonds/djangocms-flexslider
|
refs/heads/master
|
djangocms_flexslider/__init__.py
|
12133432
| |
Vixionar/django
|
refs/heads/master
|
tests/migrations/migrations_test_apps/migrated_app/migrations/__init__.py
|
12133432
| |
indro/t2c
|
refs/heads/master
|
apps/external_apps/ajax_validation/urls.py
|
12133432
| |
pwz3n0/buck
|
refs/heads/master
|
third-party/py/twitter-commons/src/python/twitter/common/python/testing.py
|
23
|
import contextlib
import os
import random
import tempfile
from textwrap import dedent
import zipfile
from .common import safe_mkdir, safe_mkdtemp, safe_rmtree
from .installer import EggInstaller
from .util import DistributionHelper
@contextlib.contextmanager
def temporary_dir():
td = tempfile.mkdtemp()
try:
yield td
finally:
safe_rmtree(td)
@contextlib.contextmanager
def create_layout(*filelist):
with temporary_dir() as td:
for fl in filelist:
for fn in fl:
with open(os.path.join(td, fn), 'w') as fp:
fp.write('junk')
yield td
def random_bytes(length):
return ''.join(
map(chr, (random.randint(ord('a'), ord('z')) for _ in range(length)))).encode('utf-8')
@contextlib.contextmanager
def temporary_content(content_map, interp=None, seed=31337):
"""Write content to disk where content is map from string => (int, string).
If target is int, write int random bytes. Otherwise write contents of string."""
random.seed(seed)
interp = interp or {}
with temporary_dir() as td:
for filename, size_or_content in content_map.items():
safe_mkdir(os.path.dirname(os.path.join(td, filename)))
with open(os.path.join(td, filename), 'wb') as fp:
if isinstance(size_or_content, int):
fp.write(random_bytes(size_or_content))
else:
fp.write((size_or_content % interp).encode('utf-8'))
yield td
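# A usage sketch: 'a.bin' gets 16 random bytes, 'b.txt' gets the interpolated
# string "hello world".
#     with temporary_content({'a.bin': 16, 'b.txt': 'hello %(who)s'},
#                            interp={'who': 'world'}) as td:
#         ...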
def yield_files(directory):
for root, _, files in os.walk(directory):
for f in files:
filename = os.path.join(root, f)
rel_filename = os.path.relpath(filename, directory)
yield filename, rel_filename
def write_zipfile(directory, dest, reverse=False):
with contextlib.closing(zipfile.ZipFile(dest, 'w')) as zf:
for filename, rel_filename in sorted(yield_files(directory), reverse=reverse):
zf.write(filename, arcname=rel_filename)
return dest
PROJECT_CONTENT = {
'setup.py': dedent('''
from setuptools import setup
setup(
name=%(project_name)r,
version='0.0.0',
zip_safe=%(zip_safe)r,
packages=['my_package'],
package_data={'my_package': ['package_data/*.dat']},
)
'''),
'my_package/__init__.py': 0,
'my_package/my_module.py': 'def do_something():\n print("hello world!")\n',
'my_package/package_data/resource1.dat': 1000,
'my_package/package_data/resource2.dat': 1000,
}
@contextlib.contextmanager
def make_distribution(name='my_project', installer_impl=EggInstaller, zipped=False, zip_safe=True):
interp = {'project_name': name, 'zip_safe': zip_safe}
with temporary_content(PROJECT_CONTENT, interp=interp) as td:
installer = installer_impl(td)
dist_location = installer.bdist()
if zipped:
yield DistributionHelper.distribution_from_path(dist_location)
else:
with temporary_dir() as td:
extract_path = os.path.join(td, os.path.basename(dist_location))
with contextlib.closing(zipfile.ZipFile(dist_location)) as zf:
zf.extractall(extract_path)
yield DistributionHelper.distribution_from_path(extract_path)
|
ramosian-glider/kasan
|
refs/heads/kasan_slab_slub
|
scripts/gdb/linux/__init__.py
|
2010
|
# nothing to do for the initialization of this package
|
RedHatQE/cfme_tests
|
refs/heads/master
|
cfme/fixtures/rdb.py
|
1
|
"""Rdb: Remote debugger
Given the following configuration in conf/rdb.yaml::
breakpoints:
- subject: Brief explanation of a problem
exceptions:
- cfme.exceptions.ImportableExampleException
- BuiltinException (e.g. ValueError)
recipients:
- user@example.com
Any time an exception listed in a breakpoint's "exceptions" list is raised in :py:func:`rdb_catch`
context in the course of a test run, a remote debugger will be started on a random port, and the
users listed in "recipients" will be emailed instructions to access the remote debugger via telnet.
The exceptions will be imported, so their fully-qualified importable path is required.
Exceptions without a module path are assumed to be builtins.
An Rdb instance can be used just like a :py:class:`Pdb <python:Pdb>` instance.
Additionally, a signal handler has been set up to allow for triggering Rdb during a test run. To
invoke it, ``kill -USR1`` a test-running process and Rdb will start up. No emails are sent when
operating in this mode, so check the py.test console for the endpoint address.
By default, Rdb assumes that there is a working MTA available on localhost, but this can
be configured in ``conf['env']['smtp']['server']``.
Note:
This is very insecure, and should be used as a last resort for debugging elusive failures.
"""
from __future__ import print_function
import os
import signal
import smtplib
import socket
import sys
from contextlib import contextmanager
from email.mime.text import MIMEText
from importlib import import_module
from pdb import Pdb
from textwrap import dedent
from cfme.fixtures.pytest_store import store
from cfme.fixtures.pytest_store import write_line
from cfme.utils import conf
from cfme.utils.log import logger
_breakpoint_exceptions = {}
# defaults
smtp_conf = {
'server': '127.0.0.1'
}
# Update defaults from conf
smtp_conf.update(conf.env.get('smtp', {}))
for breakpoint in (conf.rdb.get('breakpoints') or []):
for i, exc_name in enumerate(breakpoint['exceptions']):
split_exc = exc_name.rsplit('.', 1)
if len(split_exc) == 1:
# If no module is given to import from, assume builtin
split_exc = ['__builtin__', exc_name]
exc = getattr(import_module(split_exc[0]), split_exc[1])
# stash exceptions for easy matching in exception handlers
_breakpoint_exceptions[exc] = breakpoint
def rdb_handle_signal(signal, frame):
# only registered for USR1, no need to inspect the signal,
# just hand the frame off to Rdb
Rdb('Debugger started on user signal').set_trace(frame)
signal.signal(signal.SIGUSR1, rdb_handle_signal)
# XXX: Pdb (and its bases) are old-style classobjs, so don't use super
class Rdb(Pdb):
"""Remote Debugger
When set_trace is called, it will open a socket on a random unprivileged port connected to a
Pdb debugging session. This session can be accessed via telnet, and will end when "continue"
is called in the Pdb session.
"""
def __init__(self, prompt_msg=''):
self._prompt_msg = str(prompt_msg)
self._stdout = sys.stdout
self._stdin = sys.stdin
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind to random port
self.sock.bind(('0.0.0.0', 0))
def do_continue(self, arg):
sys.stdout = self._stdout
sys.stdin = self._stdin
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
self.set_continue()
return 1
do_c = do_cont = do_continue
def interaction(self, *args, **kwargs):
        print(self._prompt_msg, file=self.stdout)
Pdb.interaction(self, *args, **kwargs)
def set_trace(self, *args, **kwargs):
"""Start a pdb debugger available via telnet, and optionally email people the endpoint
The endpoint will always be seen in the py.test runner output.
Keyword Args:
recipients: A list where, if set, an email will be sent to email addresses
in this list.
subject: If set, an optional custom email subject
"""
host, port = self.sock.getsockname()
endpoint = 'host {} port {}'.format(store.my_ip_address, port)
recipients = kwargs.pop('recipients', None)
if recipients:
# write and send an email
subject = kwargs.pop('subject', 'RDB Breakpoint: Manually invoked')
body = dedent("""\
A py.test run encountered an error. The remote debugger is running
on {} (TCP), waiting for telnet connection.
""").format(endpoint)
try:
smtp_server = smtp_conf['server']
smtp = smtplib.SMTP(smtp_server)
msg = MIMEText(body)
msg['Subject'] = subject
msg['To'] = ', '.join(recipients)
smtp.sendmail('rdb-breakpoint@example.com', recipients, msg.as_string())
except socket.error:
logger.critical("Couldn't send email")
msg = 'Remote debugger listening on {}'.format(endpoint)
logger.critical(msg)
write_line(msg, red=True, bold=True)
self.sock.listen(1)
(client_socket, address) = self.sock.accept()
client_fh = client_socket.makefile('rw')
Pdb.__init__(self, completekey='tab', stdin=client_fh, stdout=client_fh)
sys.stdout = sys.stdin = client_fh
Pdb.set_trace(self, *args, **kwargs)
msg = 'Debugger on {} shut down'.format(endpoint)
logger.critical(msg)
write_line(msg, green=True, bold=True)
def send_breakpoint_email(exctype, msg=''):
job_name = os.environ.get('JOB_NAME', 'Non-jenkins')
breakpoint = _breakpoint_exceptions[exctype]
subject = 'RDB Breakpoint: {} {}'.format(job_name, breakpoint['subject'])
rdb = Rdb(msg)
rdb.set_trace(subject=subject, recipients=breakpoint['recipients'])
def pytest_internalerror(excrepr, excinfo):
if excinfo.type in _breakpoint_exceptions:
msg = "A py.test internal error has triggered RDB:\n"
msg += str(excrepr)
send_breakpoint_email(excinfo.type, msg)
@contextmanager
def rdb_catch():
"""Context Manager used to wrap mysterious failures for remote debugging."""
try:
yield
except tuple(_breakpoint_exceptions) as exc:
send_breakpoint_email(type(exc))
|
PatidarWeb/poedit
|
refs/heads/master
|
deps/boost/tools/build/src/util/regex.py
|
34
|
# (C) Copyright David Abrahams 2001. Permission to copy, use, modify, sell and
# distribute this software is granted provided this copyright notice appears in
# all copies. This software is provided "as is" without express or implied
# warranty, and with no claim as to its suitability for any purpose.
import re
def transform (list, pattern, indices = [1]):
""" Matches all elements of 'list' agains the 'pattern'
and returns a list of the elements indicated by indices of
all successfull matches. If 'indices' is omitted returns
a list of first paranthethised groups of all successfull
matches.
"""
result = []
for e in list:
m = re.match (pattern, e)
if m:
for i in indices:
result.append (m.group (i))
return result
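# For example (a sketch), collecting the stems of all '.cpp' entries:
#     transform(['a.cpp', 'b.h', 'c.cpp'], r'(.*)\.cpp$')  # -> ['a', 'c']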
|
3L3N4/volatility
|
refs/heads/master
|
volatility/plugins/linux/banner.py
|
58
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.obj as obj
import volatility.debug as debug
import volatility.plugins.linux.flags as linux_flags
import volatility.plugins.linux.common as linux_common
import volatility.plugins.linux.pslist as linux_pslist
class linux_banner(linux_common.AbstractLinuxCommand):
""" Prints the Linux banner information """
def calculate(self):
linux_common.set_plugin_members(self)
banner_addr = self.addr_space.profile.get_symbol("linux_banner")
if banner_addr:
banner = obj.Object("String", offset = banner_addr, vm = self.addr_space, length = 256)
else:
debug.error("linux_banner symbol not found. Please report this as a bug on the issue tracker: https://code.google.com/p/volatility/issues/list")
yield banner.strip()
def render_text(self, outfd, data):
for banner in data:
outfd.write("{0:s}\n".format(banner))
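# Example output (exact contents vary by kernel build):
#   Linux version 3.2.0-4-amd64 (gcc version 4.6.3) #1 SMP ...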
|
yebeloved/idapython
|
refs/heads/master
|
build.py
|
15
|
#!/usr/bin/env python
#---------------------------------------------------------------------
# IDAPython - Python plugin for Interactive Disassembler
#
# (c) The IDAPython Team <idapython@googlegroups.com>
#
# All rights reserved.
#
# For detailed copyright information see the file COPYING in
# the root of the distribution archive.
#---------------------------------------------------------------------
# build.py - Custom build script
#---------------------------------------------------------------------
import os
import platform
import shutil
import sys
import types
import zipfile
import glob
from distutils import sysconfig
# Start of user configurable options
VERBOSE = True
IDA_MAJOR_VERSION = 6
IDA_MINOR_VERSION = 7
if 'IDA' in os.environ:
IDA_SDK = os.environ['IDA']
else:
IDA_SDK = os.path.join("..", "..", "include")
if not os.path.exists(IDA_SDK):
IDA_SDK = os.path.join("..", "swigsdk-versions", ("%d.%d" % (IDA_MAJOR_VERSION, IDA_MINOR_VERSION)))
assert os.path.exists(IDA_SDK), "Could not find IDA SDK include path"
# End of user configurable options
# IDAPython version
VERSION_MAJOR = 1
VERSION_MINOR = 7
VERSION_PATCH = 1
# Determine Python version
PYTHON_MAJOR_VERSION = int(platform.python_version()[0])
PYTHON_MINOR_VERSION = int(platform.python_version()[2])
# Find Python headers
PYTHON_INCLUDE_DIRECTORY = sysconfig.get_config_var('INCLUDEPY')
S_EA64 = 'ea64'
S_WITH_HEXRAYS = 'with-hexrays'
S_NO_OPT = 'no-opt'
# Swig command-line parameters
SWIG_OPTIONS = '-modern -python -threads -c++ -w451 -shadow -D__GNUC__'
# Common macros for all compilations
COMMON_MACROS = [
("VER_MAJOR", "%d" % VERSION_MAJOR),
("VER_MINOR", "%d" % VERSION_MINOR),
("VER_PATCH", "%d" % VERSION_PATCH),
"__IDP__",
("MAXSTR", "1024"),
"USE_DANGEROUS_FUNCTIONS",
"USE_STANDARD_FILE_FUNCTIONS" ]
# Common includes for all compilations
COMMON_INCLUDES = [ ".", "swig" ]
# -----------------------------------------------------------------------
# List files for the binary distribution
BINDIST_MANIFEST = [
"README.txt",
"COPYING.txt",
"CHANGES.txt",
"AUTHORS.txt",
"STATUS.txt",
"python.cfg",
"docs/notes.txt",
"examples/chooser.py",
"examples/colours.py",
"examples/ex_idphook_asm.py",
"examples/ex_uirequests.py",
"examples/debughook.py",
"examples/ex_cli.py",
"examples/ex1.idc",
"examples/ex_custdata.py",
"examples/ex1_idaapi.py",
"examples/ex1_idautils.py",
"examples/hotkey.py",
"examples/structure.py",
"examples/ex_gdl_qflow_chart.py",
"examples/ex_strings.py",
"examples/ex_actions.py",
"examples/ex_func_chooser.py",
"examples/ex_choose2.py",
"examples/ex_debug_names.py",
"examples/ex_graph.py",
"examples/ex_hotkey.py",
"examples/ex_patch.py",
"examples/ex_expr.py",
"examples/ex_timer.py",
"examples/ex_dbg.py",
"examples/ex_custview.py",
"examples/ex_prefix_plugin.py",
"examples/ex_pyside.py",
"examples/ex_pyqt.py",
"examples/ex_askusingform.py",
"examples/ex_uihook.py",
"examples/ex_idphook_asm.py",
"examples/ex_imports.py"
]
# -----------------------------------------------------------------------
# List files for the source distribution (appended to binary list)
SRCDIST_MANIFEST = [
"BUILDING.txt",
"python.cpp",
"basetsd.h",
"build.py",
"python.cfg",
"swig/allins.i",
"swig/area.i",
"swig/auto.i",
"swig/bytes.i",
"swig/dbg.i",
"swig/diskio.i",
"swig/entry.i",
"swig/enum.i",
"swig/expr.i",
"swig/fixup.i",
"swig/frame.i",
"swig/funcs.i",
"swig/gdl.i",
"swig/ida.i",
"swig/idaapi.i",
"swig/idd.i",
"swig/idp.i",
"swig/ints.i",
"swig/kernwin.i",
"swig/lines.i",
"swig/loader.i",
"swig/moves.i",
"swig/nalt.i",
"swig/name.i",
"swig/netnode.i",
"swig/offset.i",
"swig/pro.i",
"swig/queue.i",
"swig/search.i",
"swig/segment.i",
"swig/srarea.i",
"swig/strlist.i",
"swig/struct.i",
"swig/typeconv.i",
"swig/typeinf.i",
"swig/ua.i",
"swig/xref.i",
"swig/graph.i",
"swig/fpro.i",
"swig/hexrays.i",
]
# -----------------------------------------------------------------------
def parse_options(args):
"""Parse arguments and returned a dictionary of options"""
no_opt = '--' + S_NO_OPT in sys.argv
ea64 = '--' + S_EA64 in sys.argv
with_hexrays = '--' + S_WITH_HEXRAYS in sys.argv
return {
S_EA64: ea64,
S_WITH_HEXRAYS: with_hexrays,
S_NO_OPT: no_opt
}
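# For example, invoking the script as "python build.py --ea64 --with-hexrays"
# yields {'ea64': True, 'with-hexrays': True, 'no-opt': False}.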
# -----------------------------------------------------------------------
class BuilderBase:
""" Base class for builders """
def __init__(self):
pass
def compile(self, source, objectname=None, includes=[], macros=[]):
"""
Compile the source file
"""
allmacros = []
allmacros.extend(COMMON_MACROS)
allmacros.extend(self.basemacros)
allmacros.extend(macros)
macrostring = self._build_command_string(allmacros, self.macro_delimiter)
allincludes = []
allincludes.extend(COMMON_INCLUDES)
allincludes.extend(includes)
includestring = self._build_command_string(allincludes, self.include_delimiter)
if not objectname:
objectname = source + self.object_extension
cmdstring = "%s %s %s %s %s %s" % (self.compiler,
self.compiler_parameters,
self.compiler_out_string(objectname),
self.compiler_in_string(source + self.source_extension),
includestring,
macrostring)
if VERBOSE:
print cmdstring
return os.system(cmdstring)
def link(self, objects, outfile, libpaths=[], libraries=[], extra_parameters=None):
""" Link the binary from objects and libraries """
cmdstring = "%s %s %s" % (self.linker,
self.linker_parameters,
self.linker_out_string(outfile))
for objectfile in objects:
cmdstring = "%s %s" % (cmdstring, objectfile + self.object_extension)
for libpath in libpaths:
cmdstring = "%s %s%s" % (cmdstring, self.libpath_delimiter, libpath)
for library in libraries:
cmdstring = "%s %s" % (cmdstring, library)
if extra_parameters:
cmdstring = "%s %s" % (cmdstring, extra_parameters)
if VERBOSE: print cmdstring
return os.system(cmdstring)
def _build_command_string(self, macros, argument_delimiter):
macrostring = ""
for item in macros:
if type(item) == types.TupleType:
macrostring += '%s%s="%s" ' % (argument_delimiter, item[0], item[1])
else:
macrostring += '%s%s ' % (argument_delimiter, item)
return macrostring
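        # e.g. _build_command_string([("MAXSTR", "1024"), "__IDP__"], "-D")
        #      returns '-DMAXSTR="1024" -D__IDP__ '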
# -----------------------------------------------------------------------
class GCCBuilder(BuilderBase):
""" Generic GCC compiler class """
def __init__(self):
self.include_delimiter = "-I"
self.macro_delimiter = "-D"
self.libpath_delimiter = "-L"
self.compiler_parameters = "-fpermissive -Wno-write-strings"
self.linker_parameters = "-shared"
self.basemacros = [ ]
self.compiler = "g++ -m32"
self.linker = "g++ -m32"
self.source_extension = ".cpp"
self.object_extension = ".o"
def compiler_in_string(self, filename):
return "-c %s" % filename
def compiler_out_string(self, filename):
return "-o %s" % filename
def linker_out_string(self, filename):
return "-o %s" % filename
# -----------------------------------------------------------------------
class MSVCBuilder(BuilderBase):
""" Generic Visual C compiler class """
def __init__(self):
self.include_delimiter = "/I"
self.macro_delimiter = "/D"
self.libpath_delimiter = "/LIBPATH:"
self.compiler_parameters = "/nologo /EHsc"
self.linker_parameters = "/nologo /dll /export:PLUGIN"
self.basemacros = [ "WIN32",
"_USRDLL",
"__NT__" ]
self.compiler = "cl"
self.linker = "link"
self.source_extension = ".cpp"
self.object_extension = ".obj"
def compiler_in_string(self, filename):
return "/c %s" % filename
def compiler_out_string(self, filename):
return "/Fo%s" % filename
def linker_out_string(self, filename):
return "/out:%s" % filename
# -----------------------------------------------------------------------
def build_distribution(manifest, distrootdir, ea64, nukeold):
""" Create a distibution to a directory and a ZIP file """
# (Re)create the output directory
if os.path.exists(distrootdir):
if nukeold:
shutil.rmtree(distrootdir)
os.makedirs(distrootdir)
else:
os.makedirs(distrootdir)
# Also make a ZIP archive of the build
zippath = distrootdir + ".zip"
zip = zipfile.ZipFile(zippath, nukeold and "w" or "a", zipfile.ZIP_DEFLATED)
# Copy files, one by one
for f in manifest:
if type(f) == types.TupleType:
srcfilepath = f[0]
srcfilename = os.path.basename(srcfilepath)
dstdir = os.path.join(distrootdir, f[1])
dstfilepath = os.path.join(dstdir, srcfilename)
else:
srcfilepath = f
srcfilename = os.path.basename(f)
srcdir = os.path.dirname(f)
if srcdir == "":
dstdir = distrootdir
else:
dstdir = os.path.join(distrootdir, srcdir)
if not os.path.exists(dstdir):
os.makedirs(dstdir)
dstfilepath = os.path.join(dstdir, srcfilename)
shutil.copyfile(srcfilepath, dstfilepath)
zf.write(dstfilepath)
zf.close()
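# Manifest entries are either plain relative paths (copied to the same
# relative directory) or (source, destdir) tuples; note that the ea64
# argument is currently unused by this helper. A hypothetical call:
#   build_distribution(["README.txt", ("python.plw", "plugins")],
#                      "idapython-dist", ea64=False, nukeold=True)
# copies README.txt into idapython-dist/ and python.plw into
# idapython-dist/plugins/, then mirrors everything into idapython-dist.zip.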
# -----------------------------------------------------------------------
def build_plugin(
platform,
idasdkdir,
plugin_name,
options):
""" Build the plugin from the SWIG wrapper and plugin main source """
global SWIG_OPTIONS
# Get the arguments
ea64 = options[S_EA64]
with_hexrays = options[S_WITH_HEXRAYS]
# Path to the IDA SDK headers
ida_include_directory = os.path.join(idasdkdir, "include")
builder = None
# Platform-specific settings for the Linux build
if platform == "linux":
builder = GCCBuilder()
platform_macros = [ "__LINUX__" ]
python_libpath = os.path.join(sysconfig.EXEC_PREFIX, "lib")
python_library = "-Bdynamic -lpython%d.%d" % (PYTHON_MAJOR_VERSION, PYTHON_MINOR_VERSION)
ida_libpath = os.path.join(idasdkdir, "lib", ea64 and "x86_linux_gcc_64" or "x86_linux_gcc_32")
ida_lib = ""
extra_link_parameters = " -s"
builder.compiler_parameters += " -O2"
# Platform-specific settings for the Windows build
elif platform == "win32":
builder = MSVCBuilder()
platform_macros = [ "__NT__" ]
python_libpath = os.path.join(sysconfig.EXEC_PREFIX, "libs")
python_library = "python%d%d.lib" % (PYTHON_MAJOR_VERSION, PYTHON_MINOR_VERSION)
ida_libpath = os.path.join(idasdkdir, "lib", ea64 and "x86_win_vc_64" or "x86_win_vc_32")
ida_lib = "ida.lib"
SWIG_OPTIONS += " -D__NT__ "
extra_link_parameters = ""
if not options[S_NO_OPT]:
builder.compiler_parameters += " -Ox"
# Platform-specific settings for the Mac OS X build
elif platform == "macosx":
builder = GCCBuilder()
builder.linker_parameters = "-dynamiclib"
platform_macros = [ "__MAC__" ]
python_libpath = "."
python_library = "-framework Python"
ida_libpath = os.path.join(idasdkdir, "lib", ea64 and "x86_mac_gcc_64" or "x86_mac_gcc_32")
ida_lib = ea64 and "-lida64" or "-lida"
extra_link_parameters = " -s"
builder.compiler_parameters += " -O3"
assert builder, "Unknown platform! No idea how to build here..."
# Enable EA64 for the compiler if necessary
if ea64:
platform_macros.append("__EA64__")
# Build with Hex-Rays decompiler
if with_hexrays:
platform_macros.append("WITH_HEXRAYS")
SWIG_OPTIONS += ' -DWITH_HEXRAYS '
platform_macros.append("NDEBUG")
if '--no-early-load' not in sys.argv:
platform_macros.append("PLUGINFIX")
# Turn off obsolete functions
#platform_macros.append("NO_OBSOLETE_FUNCS")
# Build the wrapper from the interface files
ea64flag = ea64 and "-D__EA64__" or ""
swigcmd = "swig %s -Iswig -o idaapi.cpp %s -I%s idaapi.i" % (SWIG_OPTIONS, ea64flag, ida_include_directory)
if VERBOSE:
    print swigcmd
res = os.system(swigcmd)
assert res == 0, "Failed to build the wrapper with SWIG"
# If we are running on windows, we have to patch some directors'
# virtual methods, so they have the right calling convention.
# Without that, compilation just won't succeed.
if platform == "win32":
res = os.system("python patch_directors_cc.py -f idaapi.h")
assert res == 0, "Failed to patch directors' calling conventions"
# Compile the wrapper
res = builder.compile("idaapi",
includes=[ PYTHON_INCLUDE_DIRECTORY, ida_include_directory ],
macros=platform_macros)
assert res == 0, "Failed to build the wrapper module"
# Compile the main plugin source
res = builder.compile("python",
includes=[ PYTHON_INCLUDE_DIRECTORY, ida_include_directory ],
macros=platform_macros)
assert res == 0, "Failed to build the main plugin object"
# Link the final binary
res = builder.link( ["idaapi", "python"],
plugin_name,
[ python_libpath, ida_libpath ],
[ python_library, ida_lib ],
extra_link_parameters)
assert res == 0, "Failed to link the plugin binary"
# -----------------------------------------------------------------------
def detect_platform(ea64):
# Detect the platform
system = platform.system()
if system == "Windows" or system == "Microsoft":
system = "Windows"
platform_string = "win32"
plugin_name = ea64 and "python.p64" or "python.plw"
elif system == "Linux":
platform_string = "linux"
plugin_name = ea64 and "python.plx64" or "python.plx"
elif system == "Darwin":
platform_string = "macosx"
plugin_name = ea64 and "python.pmc64" or "python.pmc"
else:
print "Unknown platform!"
sys.exit(-1)
return (system, platform_string, plugin_name)
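# For example, detect_platform(False) returns ("Linux", "linux", "python.plx")
# on a Linux host, while detect_platform(True) returns
# ("Windows", "win32", "python.p64") on a Windows host.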
# -----------------------------------------------------------------------
def build_binary_package(options, nukeold):
ea64 = options[S_EA64]
system, platform_string, plugin_name = detect_platform(ea64)
BINDISTDIR = "idapython-%d.%d.%d_ida%d.%d_py%d.%d_%s" % (VERSION_MAJOR,
VERSION_MINOR,
VERSION_PATCH,
IDA_MAJOR_VERSION,
IDA_MINOR_VERSION,
PYTHON_MAJOR_VERSION,
PYTHON_MINOR_VERSION,
platform_string)
# Build the plugin
build_plugin(platform_string, IDA_SDK, plugin_name, options)
# Build the binary distribution
binmanifest = []
if nukeold:
binmanifest.extend(BINDIST_MANIFEST)
if not ea64 or nukeold:
binmanifest.extend([(x, "python") for x in ("python/init.py", "python/idc.py", "python/idautils.py", "idaapi.py")])
binmanifest.append((plugin_name, "plugins"))
build_distribution(binmanifest, BINDISTDIR, ea64, nukeold)
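# The resulting directory and ZIP name encode every version component, e.g.
# (with hypothetical version numbers) idapython-1.2.0_ida5.4_py2.5_linux.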
# -----------------------------------------------------------------------
def build_source_package():
""" Build a directory and a ZIP file with all the sources """
SRCDISTDIR = "idapython-%d.%d.%d" % (VERSION_MAJOR,
VERSION_MINOR,
VERSION_PATCH)
# Build the source distribution
srcmanifest = []
srcmanifest.extend(BINDIST_MANIFEST)
srcmanifest.extend(SRCDIST_MANIFEST)
srcmanifest.extend([(x, "python") for x in ("python/init.py", "python/idc.py", "python/idautils.py")])
build_distribution(srcmanifest, SRCDISTDIR, ea64=False, nukeold=True)
# -----------------------------------------------------------------------
def gen_docs(z=False):
print "Generating documentation....."
old_dir = os.getcwd()
try:
curdir = os.getcwd() + os.sep
docdir = 'idapython-reference-%d.%d.%d' % (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
sys.path.append(curdir + 'python')
sys.path.append(curdir + 'tools')
sys.path.append(curdir + 'docs')
import epydoc.cli
import swigdocs
os.chdir('docs')
PYWRAPS_FN = 'pywraps'
swigdocs.gen_docs(outfn = PYWRAPS_FN + '.py')
epydoc.cli.optparse.sys.argv = [ 'epydoc',
'--no-sourcecode',
'-u', 'http://code.google.com/p/idapython/',
'--navlink', '<a href="http://www.hex-rays.com/idapro/idapython_docs/">IDAPython Reference</a>',
'--no-private',
'--simple-term',
'-o', docdir,
'--html',
'idc', 'idautils', PYWRAPS_FN, 'idaapi']
# Generate the documentation
epydoc.cli.cli()
print "Documentation generated!"
# Clean temp files
for f in [PYWRAPS_FN + '.py', PYWRAPS_FN + '.pyc']:
if os.path.exists(f):
os.unlink(f)
if z:
    zippath = docdir + '-doc.zip'
    zf = zipfile.ZipFile(zippath, "w", zipfile.ZIP_DEFLATED)
    for fn in glob.glob(docdir + os.sep + '*'):
        zf.write(fn)
    zf.close()
    print "Documentation compressed to", zippath
except Exception, e:
print 'Failed to generate documentation:', e
finally:
os.chdir(old_dir)
return
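# For instance, invoking this script with "--doc --zip" ends up calling
# gen_docs(z=True) from main() below, leaving both the generated
# documentation directory and a matching -doc.zip archive behind.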
# -----------------------------------------------------------------------
def usage():
print """IDAPython build script.
Available switches:
--doc:
Generate documentation into the 'docs' directory
--zip:
Used with the '--doc' switch; compresses the generated documentation
--ea64:
Also builds the 64-bit version of the plugin
--with-hexrays:
Build with the Hex-Rays Decompiler wrappings
--no-early-load:
The plugin will be compiled as a normal plugin;
this switch disables processor, plugin and loader scripts
"""
# -----------------------------------------------------------------------
def main():
if '--help' in sys.argv:
return usage()
elif '--doc' in sys.argv:
return gen_docs(z='--zip' in sys.argv)
# Parse options
options = parse_options(sys.argv)
ea64 = options[S_EA64]
# Always build the non __EA64__ version
options[S_EA64] = False
build_binary_package(options, nukeold=True)
# Rebuild package with __EA64__ if needed
if ea64:
options[S_EA64] = True
build_binary_package(options, nukeold=False)
# Always build the source package
build_source_package()
# -----------------------------------------------------------------------
if __name__ == "__main__":
main()