| repo_name | ref | path | copies | content |
|---|---|---|---|---|
| nbeaver/numpy | refs/heads/master | tools/swig/test/setup.py | 113 |
#! /usr/bin/env python
from __future__ import division, print_function
# System imports
from distutils.core import *
from distutils import sysconfig
# Third-party modules - we depend on numpy for everything
import numpy
# Obtain the numpy include directory.
numpy_include = numpy.get_include()
# Array extension module
_Array = Extension("_Array",
["Array_wrap.cxx",
"Array1.cxx",
"Array2.cxx",
"ArrayZ.cxx"],
include_dirs = [numpy_include],
)
# Farray extension module
_Farray = Extension("_Farray",
["Farray_wrap.cxx",
"Farray.cxx"],
include_dirs = [numpy_include],
)
# _Vector extension module
_Vector = Extension("_Vector",
["Vector_wrap.cxx",
"Vector.cxx"],
include_dirs = [numpy_include],
)
# _Matrix extension module
_Matrix = Extension("_Matrix",
["Matrix_wrap.cxx",
"Matrix.cxx"],
include_dirs = [numpy_include],
)
# _Tensor extension module
_Tensor = Extension("_Tensor",
["Tensor_wrap.cxx",
"Tensor.cxx"],
include_dirs = [numpy_include],
)
_Fortran = Extension("_Fortran",
["Fortran_wrap.cxx",
"Fortran.cxx"],
include_dirs = [numpy_include],
)
_Flat = Extension("_Flat",
["Flat_wrap.cxx",
"Flat.cxx"],
include_dirs = [numpy_include],
)
# NumpyTypemapTests setup
setup(name = "NumpyTypemapTests",
description = "Functions that work on arrays",
author = "Bill Spotz",
py_modules = ["Array", "Farray", "Vector", "Matrix", "Tensor",
"Fortran", "Flat"],
ext_modules = [_Array, _Farray, _Vector, _Matrix, _Tensor,
_Fortran, _Flat]
)
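# Rough usage sketch (assumptions: the SWIG interface files sit alongside the
# listed .cxx sources; the exact SWIG flags are not defined in this script):
#   swig -c++ -python Array.i            # would regenerate Array_wrap.cxx and Array.py
#   python setup.py build_ext --inplace  # builds _Array, _Farray, ... next to the sources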
|
| yglazko/socorro | refs/heads/master | socorro/unittest/external/filesystem/test_filesystem.py | 11 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import shutil
import types
from nose.tools import *
import socorro.external.filesystem.filesystem as f
from socorro.unittest.testbase import TestCase
# Describes the directory/file structure we will look at:
# key is a name
# if value is a dictionary, the named item is a directory
# if value is None, the named item is an empty file
# otherwise, the named item is a file holding str(value)
testDir = {'TestDir':
{'f0': 'file TestDir/f0',
'g0': 'file TestDir/g0',
'0': {'f0a': 'file TestDir/0/f0a', 'f0b': 'file TestDir/0/f0b' },
'1': {'f1a': None,'f1b': None,
'10': {},
'11': {},
},
'2': {'f2a': None,'f2b':None,
'20':
{'200':
{'2000':
{'d0': 'file TestDir/2/20/200/2000/d0',
'd1': 'file TestDir/2/20/200/2000/d1',
},
},
},
},
'4': {'f4': None,
'40':
{'f40':None,
'400':
{'f400':None,
'4000':
{'f4000':None
},
},
},
},
},
}
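# For example, the entry '1': {'f1a': None, '10': {}} above yields an empty file
# TestDir/1/f1a and an empty directory TestDir/1/10 once createTestbed() has run,
# while 'f0': 'file TestDir/f0' yields a one-line file containing that string.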
def acceptDirOnly(t):
return os.path.isdir(t[2])
def acceptFileOnly(t):
return os.path.isfile(t[2])
def accept2Dirs(t):
return t[1].startswith('2')
def revcmp(d0,d1):
return cmp(d1,d0)
class TestFilesystem(TestCase):
def createTestbed(self):
self.deleteTestbed() # just in case
self.createTestDir('.',testDir)
def createTestDir(self,root,dict):
for k in dict.keys():
v = dict[k]
if type(v) == types.DictionaryType:
newroot = os.path.join(root,k)
os.mkdir(newroot)
self.createTestDir(newroot,dict.get(k))
elif type(v) == types.NoneType:
open(os.path.join(root,k),'w').close()
else:
f = open(os.path.join(root,k),'w')
f.write("%s\n" %(v))
f.close()
def deleteTestbed(self):
for topLevelDir in testDir.keys():
if(os.path.exists(os.path.join('.',topLevelDir))):
shutil.rmtree(os.path.join('.',topLevelDir))
def setUp(self):
self.createTestbed()
assert 1 == len(testDir.keys()), 'Only one top-level test directory'
self.tdir = testDir.keys()[0]
def tearDown(self):
self.deleteTestbed()
def testLevel0(self):
for depth in [ -12,-1,0]:
tst = f.findFileGenerator(self.tdir,maxDepth = depth)
items = [x for x in tst]
assert not items, 'Expect nothing for 0 or negative. For %d, got %s' %(depth,items)
def testLevel1(self):
# Look for all top level items regardless of type.
for depth in [1] :
tst = f.findFileGenerator(self.tdir,maxDepth = depth)
items = []
expected = [ x for x in testDir[self.tdir].keys() ]
for (x,o,p) in tst:
items.append(o)
assert o in expected ,'Item %s must be expected: %s' %(o,expected)
for k in expected:
assert k in items, 'Expected item %s must be found in %s' %(k,items)
# look for only top level files
items = []
expected = ['f0','g0']
t = f.findFileGenerator(self.tdir,acceptanceFunction = acceptFileOnly, maxDepth = depth)
for (x,o,p) in t:
items.append(o)
assert o in expected, 'depth=%d, expect a top level file, got %s not in %s' % (depth, o, expected)
for x in expected:
assert x in items, 'depth=%d,expect both top level files' % depth
# look for only top level directories
items = []
expected = ['0','1','2','4']
t = f.findFileGenerator(testDir.keys()[0],acceptanceFunction = acceptDirOnly, maxDepth = depth)
for (x,o,p) in t:
items.append(o)
assert o in expected, 'depth=%d,expect a top level directory' % depth
for x in expected:
assert x in items, 'depth=%d,expect all top level directories' % depth
def testLevels(self):
tst = f.findFileGenerator(self.tdir,maxDepth = 2)
items = []
expected = ['f0a', 'f0b', '0', '10', '11', 'f1a', 'f1b', '1', '20', 'f2a', 'f2b', '2', '40', 'f4', '4', 'f0', 'g0']
for (x,o,p) in tst:
items.append(o)
assert o in expected
for o in expected:
assert o in items
tst = f.findFileGenerator(self.tdir,maxDepth = 3)
items = []
expected = ['f0a', 'f0b', '0', '10', '11', 'f1a', 'f1b', '1', '200', '20', 'f2a', 'f2b', '2', '400', 'f40', '40', 'f4', '4', 'f0', 'g0']
for (x,o,p) in tst:
items.append(o)
assert o in expected
for o in expected:
assert o in items
tst = f.findFileGenerator(self.tdir,maxDepth = 4)
items = []
expected = ['f0a', 'f0b', '0', '10', '11', 'f1a', 'f1b', '1', '2000', '200', '20', 'f2a', 'f2b', '2', '4000', 'f400', '400', 'f40', '40', 'f4', '4', 'f0', 'g0']
for (x,o,p) in tst:
items.append(o)
assert o in expected
for o in expected:
assert o in items
tst = f.findFileGenerator(self.tdir,maxDepth = 100)
items = []
expected = ['f0a', 'f0b', '0', '10', '11', 'f1a', 'f1b', '1', 'd0', 'd1', '2000', '200', '20', 'f2a', 'f2b', '2', 'f4000', '4000', 'f400', '400', 'f40', '40', 'f4', '4', 'f0', 'g0']
for (x,o,p) in tst:
items.append(o)
assert o in expected
for o in expected:
assert o in items
def testCompare(self):
#This test won't work for depth > 1 since the directories are visited individually
tst = f.findFileGenerator(self.tdir,maxDepth = 1)
items = []
for (x,o,p) in tst:
items.append(o)
tst = f.findFileGenerator(self.tdir,maxDepth = 1,directorySortFunction=revcmp)
ritems = []
for (x,o,p) in tst:
ritems.append(o)
ritems.reverse()
assert(items == ritems)
def testDirAcceptance(self):
tst = f.findFileGenerator(self.tdir,maxDepth = 100,directoryAcceptanceFunction=accept2Dirs)
items = []
expected = ['0', '1', 'd0', 'd1', '2000', '200', '20', 'f2a', 'f2b', '2', '4', 'f0', 'g0']
for (x,o,p) in tst:
items.append(o)
assert o in expected
for o in expected:
assert o in items
def testFailMakedirsOnFileInPath(self):
path = 'TestDir/1/2/3/4'
tpath = path
while True:
head,tail = os.path.split(tpath)
if tail == 'TestDir': break
try:
shutil.rmtree('TestDir')
except:
pass
f.makedirs(head)
t = open(tpath,'w')
t.write('nothing\n')
t.close()
try:
f.makedirs(path)
assert False, 'We should have had an OSError, but makedirs succeeded for %s, which is a file' % tpath
except OSError:
pass
except Exception,x:
assert False, 'We should have had an OSError, got %s: %s'%(type(x),x)
tpath = head
def testCleanEmptySubdirectories(self):
f.makedirs('TestDir/A/B/C/D')
f.makedirs('TestDir/AA/BB/C')
f.makedirs('TestDir/AA/BB/CC/DD')
fi = open('TestDir/A/a','w')
fi.write('file a\n')
fi.close()
# Test short-circuit path, full stopper
assert os.path.isdir('TestDir/A/B/C/D')
f.cleanEmptySubdirectories('TestDir/A/B/C/D','TestDir/A/B/C/D')
assert os.path.isdir('TestDir/A/B/C/D')
# Test short-circuit path, name stopper
f.cleanEmptySubdirectories('D','TestDir/A/B/C/D')
assert os.path.isdir('TestDir/A/B/C/D')
# Test some empties, name stopper
f.cleanEmptySubdirectories('C','TestDir/A/B/C/D')
assert not os.path.exists('TestDir/A/B/C/D')
assert os.path.isdir('TestDir/A/B/C')
# Test some empties, path stopper
f.cleanEmptySubdirectories('TestDir/A/B','TestDir/A/B/C')
assert not os.path.exists('TestDir/A/B/C')
assert os.path.isdir('TestDir/A/B')
#Test stopping on a file in a subdir
f.cleanEmptySubdirectories('TestDir','TestDir/A/B')
assert not os.path.exists('TestDir/A/B')
assert os.path.isdir('TestDir/A')
#Test stopping on another subdir
f.cleanEmptySubdirectories('TestDir/AA','TestDir/AA/BB/CC/DD')
assert not os.path.exists('TestDir/AA/BB/CC')
assert os.path.isdir('TestDir/AA/BB')
#Test for stopper not in path
assert_raises(OSError,f.cleanEmptySubdirectories,'Woo','TestDir/AA/BB')
#Test for non-existent leaf
assert_raises(OSError,f.cleanEmptySubdirectories,'TestDir','TestDir/AA/BB/CC/DD')
def testVisitPath(self):
f.makedirs('TestDir/a/b/c/d/e/f')
fi = open('TestDir/a/b/c/d/D0','w')
fi.write("hi\n")
fi.close()
seen = set()
def collector(x):
seen.add(x)
top = 'TestDir/a'
last = 'TestDir/a/b/c/d'
absTop = os.path.normpath(top)
expected = set([absTop])
for i in [['b'],['b','c'],['b','c','d']]:
expected.add(os.path.join(absTop,os.sep.join(i)))
f.visitPath(top,last,collector)
assert expected == seen, 'but x-s=%s and s-x=%s'%(expected-seen,seen-expected)
seen.clear()
top = 'TestDir/a/b'
last = 'TestDir/a/b/c/d/D0'
normTop = os.path.normpath(top)
expected = set([normTop])
for i in [['c'],['c','d']]:
expected.add(os.path.join(normTop,os.sep.join(i)))
f.visitPath(top,last,collector)
assert expected == seen, 'but x-s=%s and s-x=%s'%(expected-seen,seen-expected)
#Test for non-existent leaf
assert_raises(OSError,f.visitPath,'TestDir','TestDir/A/BB',collector)
#Test for rootDir not above fullPath
assert_raises(OSError,f.visitPath,'TestDir/A/B','TestDir/A',collector)
if __name__ == "__main__":
unittest.main()
|
| geotagx/pybossa | refs/heads/master | pybossa/oauth_providers.py | 1 |
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2016 SciFabric LTD.
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from flask_oauthlib.client import OAuth
class Twitter(object):
"""Class Twitter to enable Twitter signin."""
def __init__(self, app=None):
"""Init method."""
self.app = app
if app is not None: # pragma: no cover
self.init_app(app)
def init_app(self, app):
"""Init app using factories."""
self.oauth = OAuth().remote_app(
'twitter',
base_url='https://api.twitter.com/1/',
request_token_url='https://api.twitter.com/oauth/request_token',
access_token_url='https://api.twitter.com/oauth/access_token',
authorize_url='https://api.twitter.com/oauth/authenticate',
consumer_key=app.config['TWITTER_CONSUMER_KEY'],
consumer_secret=app.config['TWITTER_CONSUMER_SECRET'])
class Facebook(object):
"""Class Facebook to enable Facebook signin."""
def __init__(self, app=None):
"""Init method."""
self.app = app
if app is not None: # pragma: no cover
self.init_app(app)
def init_app(self, app):
"""Init app using factories pattern."""
self.oauth = OAuth().remote_app(
'facebook',
base_url='https://graph.facebook.com/',
request_token_url=None,
access_token_url='/oauth/access_token',
authorize_url='https://www.facebook.com/dialog/oauth',
consumer_key=app.config['FACEBOOK_APP_ID'],
consumer_secret=app.config['FACEBOOK_APP_SECRET'],
request_token_params={'scope': 'email'})
class Google(object):
"""Class Google to enable Google signin."""
def __init__(self, app=None):
"""Init method."""
self.app = app
if app is not None: # pragma: no cover
self.init_app(app)
def init_app(self, app):
"""Init app using factories pattern."""
self.oauth = OAuth().remote_app(
'google',
base_url='https://www.googleapis.com/oauth2/v1/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
request_token_url=None,
request_token_params={'scope': 'profile email'},
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_method='POST',
consumer_key=app.config['GOOGLE_CLIENT_ID'],
consumer_secret=app.config['GOOGLE_CLIENT_SECRET'])
class Flickr(object):
def __init__(self, app=None):
self.app = app
if app is not None: # pragma: no cover
self.init_app(app)
def init_app(self, app): # pragma: no cover
from flask import session
self.app = app
self.oauth = OAuth().remote_app(
'flickr',
request_token_url='https://www.flickr.com/services/oauth/request_token',
access_token_url='https://www.flickr.com/services/oauth/access_token',
authorize_url='https://www.flickr.com/services/oauth/authorize',
consumer_key=app.config['FLICKR_API_KEY'],
consumer_secret=app.config['FLICKR_SHARED_SECRET'],
access_token_method='GET')
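# Minimal wiring sketch (the Flask application factory below is an assumption;
# only the config keys match what this module reads):
#
# from flask import Flask
# app = Flask(__name__)
# app.config.update(TWITTER_CONSUMER_KEY='...', TWITTER_CONSUMER_SECRET='...')
# twitter = Twitter()
# twitter.init_app(app)  # twitter.oauth is now a flask-oauthlib remote app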
|
| Solinea/horizon | refs/heads/master | openstack_dashboard/dashboards/project/stacks/resource_types/views.py | 51 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import yaml
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import tables
from horizon import tabs
from openstack_dashboard import api
import openstack_dashboard.dashboards.project.stacks.resource_types.tables \
as project_tables
import openstack_dashboard.dashboards.project.stacks.resource_types.tabs \
as project_tabs
class ResourceTypesView(tables.DataTableView):
table_class = project_tables.ResourceTypesTable
template_name = 'project/stacks.resource_types/index.html'
page_title = _("Resource Types")
def get_data(self):
try:
r_types = sorted(api.heat.resource_types_list(self.request),
key=lambda resource: resource.resource_type)
except Exception:
r_types = []
msg = _('Unable to retrieve stack resource types.')
exceptions.handle(self.request, msg)
return r_types
class DetailView(tabs.TabView):
tab_group_class = project_tabs.ResourceTypeDetailsTabs
template_name = 'project/stacks.resource_types/details.html'
page_title = _("Resource Type Details")
def get_resource_type(self, request, **kwargs):
try:
resource_type_overview = api.heat.resource_type_get(
request,
kwargs['resource_type'])
return resource_type_overview
except Exception:
msg = _('Unable to retrieve resource type details.')
exceptions.handle(request, msg, redirect=self.get_redirect_url())
def get_tabs(self, request, **kwargs):
resource_type_overview = self.get_resource_type(request, **kwargs)
r_type = resource_type_overview['resource_type']
r_type_attributes = resource_type_overview['attributes']
r_type_properties = resource_type_overview['properties']
return self.tab_group_class(
request,
rt=r_type,
rt_attributes=yaml.safe_dump(r_type_attributes, indent=2),
rt_properties=yaml.safe_dump(r_type_properties, indent=2),
**kwargs)
@staticmethod
def get_redirect_url():
return reverse('horizon:project:stacks.resources:index')
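# Illustrative URL wiring for these views (a sketch only; the dashboard's actual
# urls.py is not shown here, so the patterns and names are assumptions):
#
# from django.conf.urls import url
# urlpatterns = [
#     url(r'^$', ResourceTypesView.as_view(), name='index'),
#     url(r'^(?P<resource_type>[^/]+)/$', DetailView.as_view(), name='details'),
# ]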
|
| jiajiax/crosswalk-test-suite | refs/heads/master | webapi/webapi-input-html5-tests/inst.wgt.py | 372 |
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
# No need to handle timeouts in this short script; let the calling tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user)
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex + 1].strip("[]")
break
return test_pkg_id
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".wgt"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t wgt -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
if root.endswith("mediasrc"):
continue
for file in files:
if file.endswith(".wgt"):
if not doRemoteCopy(
os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
# Do some special copy/delete... steps
'''
(return_code, output) = doRemoteCMD(
"mkdir -p %s/tests" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
if not doRemoteCopy("specname/tests", "%s/tests" % PKG_SRC_DIR):
action_status = False
'''
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception as e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
global SRC_DIR, PKG_SRC_DIR
SRC_DIR = "/home/%s/content" % PARAMETERS.user
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0:
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket" % str(
userid)
else:
print "[Error] cmd commands error : %s" % str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
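# Typical invocations, derived from the options defined in main() above
# (the device address is only an example):
#   ./inst.wgt.py -i                         # install the .wgt packages found beside this script
#   ./inst.wgt.py -u                         # uninstall them again
#   ./inst.wgt.py -m SSH -s 192.168.1.10 -i  # the same install, but over SSH to the given device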
|
| Manojkumar91/odoo_inresto | refs/heads/master | openerp/addons/base/tests/test_search.py | 290 |
import unittest2
import openerp.tests.common as common
class test_search(common.TransactionCase):
def test_00_search_order(self):
registry, cr, uid = self.registry, self.cr, self.uid
# Create 6 partners with a given name, and a given creation order to
# ensure the order of their ID. Some are set as inactive to verify they
# are by default excluded from the searches and to provide a second
# `order` argument.
partners = registry('res.partner')
c = partners.create(cr, uid, {'name': 'test_search_order_C'})
d = partners.create(cr, uid, {'name': 'test_search_order_D', 'active': False})
a = partners.create(cr, uid, {'name': 'test_search_order_A'})
b = partners.create(cr, uid, {'name': 'test_search_order_B'})
ab = partners.create(cr, uid, {'name': 'test_search_order_AB'})
e = partners.create(cr, uid, {'name': 'test_search_order_E', 'active': False})
# The tests.
# The basic searches should exclude records that have active = False.
# The order of the returned ids should be given by the `order`
# parameter of search().
name_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%')], order="name asc")
self.assertEqual([a, ab, b, c], name_asc, "Search with 'NAME ASC' order failed.")
name_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%')], order="name desc")
self.assertEqual([c, b, ab, a], name_desc, "Search with 'NAME DESC' order failed.")
id_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%')], order="id asc")
self.assertEqual([c, a, b, ab], id_asc, "Search with 'ID ASC' order failed.")
id_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%')], order="id desc")
self.assertEqual([ab, b, a, c], id_desc, "Search with 'ID DESC' order failed.")
# The inactive records shouldn't be excluded as soon as a condition on
# that field is present in the domain. The `order` parameter of
# search() should support any legal comma-separated values.
active_asc_id_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active asc, id asc")
self.assertEqual([d, e, c, a, b, ab], active_asc_id_asc, "Search with 'ACTIVE ASC, ID ASC' order failed.")
active_desc_id_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active desc, id asc")
self.assertEqual([c, a, b, ab, d, e], active_desc_id_asc, "Search with 'ACTIVE DESC, ID ASC' order failed.")
active_asc_id_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active asc, id desc")
self.assertEqual([e, d, ab, b, a, c], active_asc_id_desc, "Search with 'ACTIVE ASC, ID DESC' order failed.")
active_desc_id_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="active desc, id desc")
self.assertEqual([ab, b, a, c, e, d], active_desc_id_desc, "Search with 'ACTIVE DESC, ID DESC' order failed.")
id_asc_active_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id asc, active asc")
self.assertEqual([c, d, a, b, ab, e], id_asc_active_asc, "Search with 'ID ASC, ACTIVE ASC' order failed.")
id_asc_active_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id asc, active desc")
self.assertEqual([c, d, a, b, ab, e], id_asc_active_desc, "Search with 'ID ASC, ACTIVE DESC' order failed.")
id_desc_active_asc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id desc, active asc")
self.assertEqual([e, ab, b, a, d, c], id_desc_active_asc, "Search with 'ID DESC, ACTIVE ASC' order failed.")
id_desc_active_desc = partners.search(cr, uid, [('name', 'like', 'test_search_order%'), '|', ('active', '=', True), ('active', '=', False)], order="id desc, active desc")
self.assertEqual([e, ab, b, a, d, c], id_desc_active_desc, "Search with 'ID DESC, ACTIVE DESC' order failed.")
def test_10_inherits_m2order(self):
registry, cr, uid = self.registry, self.cr, self.uid
users_obj = registry('res.users')
# Find Employee group
group_employee_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_user')
group_employee_id = group_employee_ref and group_employee_ref[1] or False
# Get country/state data
country_us_id = registry('res.country').search(cr, uid, [('code', 'like', 'US')])[0]
state_ids = registry('res.country.state').search(cr, uid, [('country_id', '=', country_us_id)], limit=2)
country_be_id = registry('res.country').search(cr, uid, [('code', 'like', 'BE')])[0]
# Create test users
search_user = users_obj.create(cr, uid, {'name': '__search', 'login': '__search', 'groups_id': [(6, 0, [group_employee_id])]})
a = users_obj.create(cr, uid, {'name': '__test_A', 'login': '__test_A', 'country_id': country_be_id, 'state_id': country_be_id})
b = users_obj.create(cr, uid, {'name': '__test_B', 'login': '__a_test_B', 'country_id': country_us_id, 'state_id': state_ids[1]})
c = users_obj.create(cr, uid, {'name': '__test_B', 'login': '__z_test_B', 'country_id': country_us_id, 'state_id': state_ids[0]})
# Do: search on res.users, order on a field on res.partner to try inherits'd fields, then res.users
user_ids = users_obj.search(cr, search_user, [], order='name asc, login desc')
expected_ids = [search_user, a, c, b]
test_user_ids = filter(lambda x: x in expected_ids, user_ids)
self.assertEqual(test_user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
# Do: order on many2one and inherits'd fields
user_ids = users_obj.search(cr, search_user, [], order='state_id asc, country_id desc, name asc, login desc')
expected_ids = [c, b, a, search_user]
test_user_ids = filter(lambda x: x in expected_ids, user_ids)
self.assertEqual(test_user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
# Do: order on many2one and inherits'd fields
user_ids = users_obj.search(cr, search_user, [], order='country_id desc, state_id desc, name asc, login desc')
expected_ids = [search_user, b, c, a]
test_user_ids = filter(lambda x: x in expected_ids, user_ids)
self.assertEqual(test_user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
# Do: order on many2one, but not by specifying in order parameter of search, but by overriding _order of res_users
old_order = users_obj._order
users_obj._order = 'country_id desc, name asc, login desc'
user_ids = users_obj.search(cr, search_user, [])
expected_ids = [search_user, c, b, a]
test_user_ids = filter(lambda x: x in expected_ids, user_ids)
self.assertEqual(test_user_ids, expected_ids, 'search on res_users did not provide expected ids or expected order')
users_obj._order = old_order
if __name__ == '__main__':
unittest2.main()
|
| piquadrat/django | refs/heads/master | django/contrib/postgres/validators.py | 19 |
import copy
from django.core.exceptions import ValidationError
from django.core.validators import (
MaxLengthValidator, MaxValueValidator, MinLengthValidator,
MinValueValidator,
)
from django.utils.deconstruct import deconstructible
from django.utils.translation import gettext_lazy as _, ngettext_lazy
class ArrayMaxLengthValidator(MaxLengthValidator):
message = ngettext_lazy(
'List contains %(show_value)d item, it should contain no more than %(limit_value)d.',
'List contains %(show_value)d items, it should contain no more than %(limit_value)d.',
'limit_value')
class ArrayMinLengthValidator(MinLengthValidator):
message = ngettext_lazy(
'List contains %(show_value)d item, it should contain no fewer than %(limit_value)d.',
'List contains %(show_value)d items, it should contain no fewer than %(limit_value)d.',
'limit_value')
@deconstructible
class KeysValidator:
"""A validator designed for HStore to require/restrict keys."""
messages = {
'missing_keys': _('Some keys were missing: %(keys)s'),
'extra_keys': _('Some unknown keys were provided: %(keys)s'),
}
strict = False
def __init__(self, keys, strict=False, messages=None):
self.keys = set(keys)
self.strict = strict
if messages is not None:
self.messages = copy.copy(self.messages)
self.messages.update(messages)
def __call__(self, value):
keys = set(value)
missing_keys = self.keys - keys
if missing_keys:
raise ValidationError(
self.messages['missing_keys'],
code='missing_keys',
params={'keys': ', '.join(missing_keys)},
)
if self.strict:
extra_keys = keys - self.keys
if extra_keys:
raise ValidationError(
self.messages['extra_keys'],
code='extra_keys',
params={'keys': ', '.join(extra_keys)},
)
def __eq__(self, other):
return (
isinstance(other, self.__class__) and
self.keys == other.keys and
self.messages == other.messages and
self.strict == other.strict
)
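# Usage sketch for KeysValidator (the model and field below are hypothetical;
# the validator itself behaves exactly as defined above):
#
# from django.contrib.postgres.fields import HStoreField
# from django.db import models
#
# class Dog(models.Model):
#     data = HStoreField(validators=[KeysValidator(keys=['breed'], strict=True)])
#
# {'breed': 'collie'} passes; {'breed': 'collie', 'age': '3'} raises a
# ValidationError with code 'extra_keys' because strict=True.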
class RangeMaxValueValidator(MaxValueValidator):
def compare(self, a, b):
return a.upper > b
message = _('Ensure that this range is completely less than or equal to %(limit_value)s.')
class RangeMinValueValidator(MinValueValidator):
def compare(self, a, b):
return a.lower < b
message = _('Ensure that this range is completely greater than or equal to %(limit_value)s.')
|
| infobloxopen/infoblox-netmri | refs/heads/master | infoblox_netmri/api/broker/v3_1_0/spm_end_hosts_vlan_changes_grid_broker.py | 14 |
from ..broker import Broker
class SpmEndHostsVlanChangesGridBroker(Broker):
controller = "spm_end_hosts_vlan_changes_grids"
def index(self, **kwargs):
"""Lists the available spm end hosts vlan changes grids. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` today
:param starttime: The data returned will represent the spm end hosts vlan changes grids with this date and time as lower boundary. If omitted, the result will indicate the most recently collected data.
:type starttime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` tomorrow
:param endtime: The data returned will represent the spm end hosts vlan changes grids with this date and time as upper boundary. If omitted, the result will indicate the most recently collected data.
:type endtime: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, VirtualNetworkID, NeighborDeviceID, NeighborType, NeighborIPDotted, NeighborIPNumeric, Network, NeighborName, NeighborMAC, NeighborIfIndex, OrgUniqueId, NetBIOSName, FirstSeen, LastSeen, DeviceID, DeviceType, DeviceName, DeviceIPDotted, DeviceIPNumeric, InterfaceID, Interface, VirtualNetworkMemberName, ifIndex, ifDescr, ifAlias, ifMAC, ifAdminStatus, ifOperStatus, ifSpeed, ifAdminDuplex, ifDuplex, VlanIndex, VlanName, VlanID, TenantDn, BridgeDomainDn, EPGDn, ApName, ApIpAddress, ApSsid, VTPDomain, Packets, Errors, ErrorPercentage.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each SpmEndHostsVlanChangesGrid. Valid values are id, VirtualNetworkID, NeighborDeviceID, NeighborType, NeighborIPDotted, NeighborIPNumeric, Network, NeighborName, NeighborMAC, NeighborIfIndex, OrgUniqueId, NetBIOSName, FirstSeen, LastSeen, DeviceID, DeviceType, DeviceName, DeviceIPDotted, DeviceIPNumeric, InterfaceID, Interface, VirtualNetworkMemberName, ifIndex, ifDescr, ifAlias, ifMAC, ifAdminStatus, ifOperStatus, ifSpeed, ifAdminDuplex, ifDuplex, VlanIndex, VlanName, VlanID, TenantDn, BridgeDomainDn, EPGDn, ApName, ApIpAddress, ApSsid, VTPDomain, Packets, Errors, ErrorPercentage. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param refresh_ind: If true, the grid will be regenerated, rather than using any available cached grid data.
:type refresh_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param async_ind: If true and if grid data is not yet available, it will return immediately with 202 status. User should retry again later.
:type async_ind: Boolean
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return spm_end_hosts_vlan_changes_grids: An array of the SpmEndHostsVlanChangesGrid objects that match the specified input criteria.
:rtype spm_end_hosts_vlan_changes_grids: Array of SpmEndHostsVlanChangesGrid
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return summary: A summary of calculation of selected columns, when applicable.
:rtype summary: Hash
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def data_partitions(self, **kwargs):
"""Returns data partitions with their statuses for specified grid. 0 - data not available for that date, 1 - data available but must be prepared, 2 - data prepared and immediately available
**Inputs**
**Outputs**
"""
return self.api_request(self._get_method_fullname("data_partitions"), kwargs)
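# Usage sketch (the client construction below follows the usual infoblox-netmri
# conventions and is an assumption, not something defined in this module):
#
# from infoblox_netmri.client import InfobloxNetMRI
# client = InfobloxNetMRI(host="netmri.example.com", username="admin", password="...")
# broker = client.get_broker("SpmEndHostsVlanChangesGrid")
# grids = broker.index(start=0, limit=100, sort=["FirstSeen"], dir=["desc"])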
|
| fastturtle/torch-randomkit | refs/heads/master | luasrc/generateDocsAndTests.py | 2 |
""" This script tries to generate docs and call tests for randomkit.* """
import scipy
import re
docFile = "doc/randomkit.html"
testFile = "tests/testCalls.lua"
exclude = ['ffi', '_check1DParams']
randomkitFuncsPath = '/Users/daniel.horgan/randomkit_funcs'
def funcTest(name, sig, doc):
match = re.match(r"(.*)\((.*)\)", sig)
func = match.group(1)
args = [x for x in match.group(2).split(",") if x.strip()]
numArgs = len(args)
yield """function myTests.test_%s()""" % (name,)
# Call with scalar args, and no result tensor
testArgs = ["0.5"] * (numArgs - 1)
if name == 'zipf':
testArgs = ["1.5"] * (numArgs - 1)
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with scalar args and a result tensor
testArgs = ["torch.Tensor(10)"] + testArgs
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with 1D tensor args and no result tensor
testArgs = ["torch.Tensor(10):fill(0.5)"] * (numArgs - 1)
if name == 'zipf':
testArgs = ["torch.Tensor(10):fill(1.5)"] * (numArgs - 1)
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with 1D tensor args and a 1D result tensor
testArgs = ["torch.Tensor(10)"] + testArgs
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with 2D tensor args and no result tensor
testArgs = ["torch.Tensor(3, 4):fill(0.5)"] * (numArgs - 1)
if name == 'zipf':
testArgs = ["torch.Tensor(3, 4):fill(1.5)"] * (numArgs - 1)
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with 2D tensor args and a 2D result tensor
testArgs = ["torch.Tensor(2, 6)"] + testArgs
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with one numeric arg and the rest 2D tensor args, and no result tensor
testArgs = ["torch.Tensor(3, 4):fill(0.5)"] * (numArgs - 1)
if len(testArgs) > 1:
testArgs[0] = "0.5"
if name == 'zipf':
testArgs = ["torch.Tensor(3, 4):fill(1.5)"] * (numArgs - 1)
testArgs[0] = "1.5"
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with one arg number and the rest tensors, and a 2D result tensor
testArgs = ["torch.Tensor(2, 6)"] + testArgs
yield """ tester:assert(%s(%s))""" % (func, ", ".join(testArgs))
# Call with one too many params - should break
testArgs = ["0.5"] * numArgs
yield """ tester:assertError(function() %s(%s) end)""" % (func, ", ".join(testArgs))
yield """end"""
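# For illustration, a one-parameter signature such as "randomkit.poisson([output], lam)"
# makes the generator above yield (a sketch of the first few lines, not verbatim output):
#     'function myTests.test_poisson()'
#     '    tester:assert(randomkit.poisson(0.5))'
#     '    tester:assert(randomkit.poisson(torch.Tensor(10), 0.5))'
# followed by the tensor-argument variants and a closing 'end'.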
def funcDoc(name, sig, doc):
yield "<hr /><a id='%s'>" % (name,)
yield "<h2>%s</h2>" % (sig,)
yield "<pre>"
yield doc
yield "</pre>"
def genIndex(funcNames):
index = "<h1>torch-randomkit</h1><ul>"
for funcName in funcNames:
index += "<li><a href='#%s'>%s</a></li>" % (funcName, funcName)
index += "</ul>"
return index
def funcNames():
with open(randomkitFuncsPath, 'r') as f:
for l in f.readlines():
yield l.strip()
def getDocStrings(funcNames):
for funcName in funcNames:
func = getattr(scipy.random, funcName, None)
if not func:
print("Could not find scipy docstring for %s" % (funcName,))
continue
docLines = func.__doc__.strip().split("\n")
funcSig = re.sub("=[^,)]+", "", docLines[0])
funcSig = re.sub(",?\s*size", "", funcSig)
funcSig = re.sub("\(", "([output], ", funcSig)
funcSig = "randomkit." + funcSig
doc = "\n".join(x.strip() for x in docLines[1:])
yield funcName, funcSig, doc
def writeHTMLdoc(funcNames, funcInfo):
with open(docFile, 'w') as f:
f.write("<html>")
index = genIndex(funcNames)
f.write(index)
for name, sig, doc in funcInfo:
for line in funcDoc(name, sig, doc):
f.write(line)
print("Generated doc for " + name)
f.write("</html>")
def writeCallTests(funcNames, funcInfo):
with open(testFile, 'w') as f:
f.write("""
require 'randomkit'
local myTests = {}
local tester = torch.Tester()
""")
for name, sig, doc in funcInfo:
for line in funcTest(name, sig, doc):
f.write(line + "\n")
print("Generated tests for " + name)
f.write("""
tester:add(myTests)
tester:run()
""")
funcNames = sorted(list(set(funcNames()) - set(exclude)))
funcInfo = list(getDocStrings(funcNames))
writeHTMLdoc(funcNames, funcInfo)
writeCallTests(funcNames, funcInfo)
|
| klunwebale/odoo | refs/heads/8.0 | addons/stock_account/wizard/__init__.py | 351 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import stock_change_standard_price
import stock_invoice_onshipping
import stock_valuation_history
import stock_return_picking
|
| legalsylvain/OpenUpgrade | refs/heads/master | addons/email_template/email_template.py | 16 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 Sharoon Thomas
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import base64
import datetime
import dateutil.relativedelta as relativedelta
import logging
import lxml
import urlparse
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp import tools
from openerp.tools.translate import _
from urllib import urlencode, quote as quote
_logger = logging.getLogger(__name__)
try:
# We use a jinja2 sandboxed environment to render mako templates.
# Note that the rendering does not cover all the mako syntax, in particular
# arbitrary Python statements are not accepted, and not all expressions are
# allowed: only "public" attributes (not starting with '_') of objects may
# be accessed.
# This is done on purpose: it prevents incidental or malicious execution of
# Python code that may break the security of the server.
from jinja2.sandbox import SandboxedEnvironment
mako_template_env = SandboxedEnvironment(
block_start_string="<%",
block_end_string="%>",
variable_start_string="${",
variable_end_string="}",
comment_start_string="<%doc>",
comment_end_string="</%doc>",
line_statement_prefix="%",
line_comment_prefix="##",
trim_blocks=True, # do not output newline after blocks
autoescape=True, # XML/HTML automatic escaping
)
mako_template_env.globals.update({
'str': str,
'quote': quote,
'urlencode': urlencode,
'datetime': datetime,
'len': len,
'abs': abs,
'min': min,
'max': max,
'sum': sum,
'filter': filter,
'reduce': reduce,
'map': map,
'round': round,
# dateutil.relativedelta is an old-style class and cannot be directly
# instantiated within a jinja2 expression, so a lambda "proxy" is needed.
'relativedelta': lambda *a, **kw : relativedelta.relativedelta(*a, **kw),
})
except ImportError:
_logger.warning("jinja2 not available, templating features will not work!")
class email_template(osv.osv):
"Templates for sending email"
_name = "email.template"
_description = 'Email Templates'
_order = 'name'
def default_get(self, cr, uid, fields, context=None):
res = super(email_template, self).default_get(cr, uid, fields, context)
if res.get('model'):
res['model_id'] = self.pool['ir.model'].search(cr, uid, [('model', '=', res.pop('model'))], context=context)[0]
return res
def _replace_local_links(self, cr, uid, html, context=None):
""" Post-processing of html content to replace local links to absolute
links, using web.base.url as base url. """
if not html:
return html
# form a tree
root = lxml.html.fromstring(html)
if not len(root) and root.text is None and root.tail is None:
html = '<div>%s</div>' % html
root = lxml.html.fromstring(html)
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
(base_scheme, base_netloc, bpath, bparams, bquery, bfragment) = urlparse.urlparse(base_url)
def _process_link(url):
new_url = url
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
if not scheme and not netloc:
new_url = urlparse.urlunparse((base_scheme, base_netloc, path, params, query, fragment))
return new_url
# check all nodes, replace :
# - img src -> check URL
# - a href -> check URL
for node in root.iter():
if node.tag == 'a':
node.set('href', _process_link(node.get('href')))
elif node.tag == 'img' and not node.get('src', 'data').startswith('data'):
node.set('src', _process_link(node.get('src')))
html = lxml.html.tostring(root, pretty_print=False, method='html')
# this is ugly, but lxml/etree tostring want to put everything in a 'div' that breaks the editor -> remove that
if html.startswith('<div>') and html.endswith('</div>'):
html = html[5:-6]
return html
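# For example, with the 'web.base.url' parameter set to http://example.com:8069
# (a placeholder value), '<a href="/web#id=1">doc</a>' becomes
# '<a href="http://example.com:8069/web#id=1">doc</a>'; links that already carry
# a scheme and host are left untouched.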
def render_post_process(self, cr, uid, html, context=None):
html = self._replace_local_links(cr, uid, html, context=context)
return html
def render_template_batch(self, cr, uid, template, model, res_ids, context=None, post_process=False):
"""Render the given template text, replace mako expressions ``${expr}``
with the result of evaluating these expressions with
an evaluation context containing:
* ``user``: browse_record of the current user
* ``object``: browse_record of the document record this mail is
related to
* ``context``: the context passed to the mail composition wizard
:param str template: the template text to render
:param str model: model name of the document record this mail is related to.
:param list res_ids: ids of the document records these mails are related to.
"""
if context is None:
context = {}
results = dict.fromkeys(res_ids, u"")
# try to load the template
try:
template = mako_template_env.from_string(tools.ustr(template))
except Exception:
_logger.exception("Failed to load template %r", template)
return results
# prepare template variables
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
records = self.pool[model].browse(cr, uid, res_ids, context=context) or [None]
variables = {
'user': user,
'ctx': context, # context kw would clash with mako internals
}
for record in records:
res_id = record.id if record else None
variables['object'] = record
try:
render_result = template.render(variables)
except Exception:
_logger.exception("Failed to render template %r using values %r" % (template, variables))
render_result = u""
if render_result == u"False":
render_result = u""
results[res_id] = render_result
if post_process:
for res_id, result in results.iteritems():
results[res_id] = self.render_post_process(cr, uid, result, context=context)
return results
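# For illustration, a template text such as (hypothetical values):
#     "Dear ${object.name}, this mail was sent by ${user.name}."
# rendered with res_ids=[7] returns a dict like
#     {7: u"Dear Agrolait, this mail was sent by Administrator."}
# where the record and user names are invented for the example only.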
def get_email_template_batch(self, cr, uid, template_id=False, res_ids=None, context=None):
if context is None:
context = {}
if res_ids is None:
res_ids = [None]
results = dict.fromkeys(res_ids, False)
if not template_id:
return results
template = self.browse(cr, uid, template_id, context)
langs = self.render_template_batch(cr, uid, template.lang, template.model, res_ids, context)
for res_id, lang in langs.iteritems():
if lang:
# Use translated template if necessary
ctx = context.copy()
ctx['lang'] = lang
template = self.browse(cr, uid, template.id, ctx)
else:
template = self.browse(cr, uid, int(template_id), context)
results[res_id] = template
return results
def onchange_model_id(self, cr, uid, ids, model_id, context=None):
mod_name = False
if model_id:
mod_name = self.pool.get('ir.model').browse(cr, uid, model_id, context).model
return {'value': {'model': mod_name}}
_columns = {
'name': fields.char('Name'),
'model_id': fields.many2one('ir.model', 'Applies to', help="The kind of document with which this template can be used"),
'model': fields.related('model_id', 'model', type='char', string='Related Document Model',
size=128, select=True, store=True, readonly=True),
'lang': fields.char('Language',
help="Optional translation language (ISO code) to select when sending out an email. "
"If not set, the english version will be used. "
"This should usually be a placeholder expression "
"that provides the appropriate language code, e.g. "
"${object.partner_id.lang.code}.",
placeholder="${object.partner_id.lang.code}"),
'user_signature': fields.boolean('Add Signature',
help="If checked, the user's signature will be appended to the text version "
"of the message"),
'subject': fields.char('Subject', translate=True, help="Subject (placeholders may be used here)",),
'email_from': fields.char('From',
help="Sender address (placeholders may be used here). If not set, the default "
"value will be the author's email alias if configured, or email address."),
'use_default_to': fields.boolean(
'Default recipients',
help="Default recipients of the record:\n"
"- partner (using id on a partner or the partner_id field) OR\n"
"- email (using email_from or email field)"),
'email_to': fields.char('To (Emails)', help="Comma-separated recipient addresses (placeholders may be used here)"),
'partner_to': fields.char('To (Partners)',
help="Comma-separated ids of recipient partners (placeholders may be used here)",
oldname='email_recipients'),
'email_cc': fields.char('Cc', help="Carbon copy recipients (placeholders may be used here)"),
'reply_to': fields.char('Reply-To', help="Preferred response address (placeholders may be used here)"),
'mail_server_id': fields.many2one('ir.mail_server', 'Outgoing Mail Server', readonly=False,
help="Optional preferred server for outgoing mails. If not set, the highest "
"priority one will be used."),
'body_html': fields.html('Body', translate=True, sanitize=False, help="Rich-text/HTML version of the message (placeholders may be used here)"),
'report_name': fields.char('Report Filename', translate=True,
help="Name to use for the generated report file (may contain placeholders)\n"
"The extension can be omitted and will then come from the report type."),
'report_template': fields.many2one('ir.actions.report.xml', 'Optional report to print and attach'),
'ref_ir_act_window': fields.many2one('ir.actions.act_window', 'Sidebar action', readonly=True,
help="Sidebar action to make this template available on records "
"of the related document model"),
'ref_ir_value': fields.many2one('ir.values', 'Sidebar Button', readonly=True,
help="Sidebar button to open the sidebar action"),
'attachment_ids': fields.many2many('ir.attachment', 'email_template_attachment_rel', 'email_template_id',
'attachment_id', 'Attachments',
help="You may attach files to this template, to be added to all "
"emails created from this template"),
'auto_delete': fields.boolean('Auto Delete', help="Permanently delete this email after sending it, to save space"),
# Fake fields used to implement the placeholder assistant
'model_object_field': fields.many2one('ir.model.fields', string="Field",
help="Select target field from the related document model.\n"
"If it is a relationship field you will be able to select "
"a target field at the destination of the relationship."),
'sub_object': fields.many2one('ir.model', 'Sub-model', readonly=True,
help="When a relationship field is selected as first field, "
"this field shows the document model the relationship goes to."),
'sub_model_object_field': fields.many2one('ir.model.fields', 'Sub-field',
help="When a relationship field is selected as first field, "
"this field lets you select the target field within the "
"destination document model (sub-model)."),
'null_value': fields.char('Default Value', help="Optional value to use if the target field is empty"),
'copyvalue': fields.char('Placeholder Expression', help="Final placeholder expression, to be copy-pasted in the desired template field."),
}
_defaults = {
'auto_delete': True,
}
def create_action(self, cr, uid, ids, context=None):
action_obj = self.pool.get('ir.actions.act_window')
data_obj = self.pool.get('ir.model.data')
for template in self.browse(cr, uid, ids, context=context):
src_obj = template.model_id.model
model_data_id = data_obj._get_id(cr, uid, 'mail', 'email_compose_message_wizard_form')
res_id = data_obj.browse(cr, uid, model_data_id, context=context).res_id
button_name = _('Send Mail (%s)') % template.name
act_id = action_obj.create(cr, SUPERUSER_ID, {
'name': button_name,
'type': 'ir.actions.act_window',
'res_model': 'mail.compose.message',
'src_model': src_obj,
'view_type': 'form',
'context': "{'default_composition_mode': 'mass_mail', 'default_template_id' : %d, 'default_use_template': True}" % (template.id),
'view_mode':'form,tree',
'view_id': res_id,
'target': 'new',
'auto_refresh':1
}, context)
ir_values_id = self.pool.get('ir.values').create(cr, SUPERUSER_ID, {
'name': button_name,
'model': src_obj,
'key2': 'client_action_multi',
'value': "ir.actions.act_window,%s" % act_id,
'object': True,
}, context)
template.write({
'ref_ir_act_window': act_id,
'ref_ir_value': ir_values_id,
})
return True
def unlink_action(self, cr, uid, ids, context=None):
for template in self.browse(cr, uid, ids, context=context):
try:
if template.ref_ir_act_window:
self.pool.get('ir.actions.act_window').unlink(cr, SUPERUSER_ID, template.ref_ir_act_window.id, context)
if template.ref_ir_value:
ir_values_obj = self.pool.get('ir.values')
ir_values_obj.unlink(cr, SUPERUSER_ID, template.ref_ir_value.id, context)
except Exception:
raise osv.except_osv(_("Warning"), _("Deletion of the action record failed."))
return True
def unlink(self, cr, uid, ids, context=None):
self.unlink_action(cr, uid, ids, context=context)
return super(email_template, self).unlink(cr, uid, ids, context=context)
def copy(self, cr, uid, id, default=None, context=None):
template = self.browse(cr, uid, id, context=context)
if default is None:
default = {}
default = default.copy()
default.update(
name=_("%s (copy)") % (template.name),
ref_ir_act_window=False,
ref_ir_value=False)
return super(email_template, self).copy(cr, uid, id, default, context)
def build_expression(self, field_name, sub_field_name, null_value):
"""Returns a placeholder expression for use in a template field,
based on the values provided in the placeholder assistant.
:param field_name: main field name
:param sub_field_name: sub field name (M2O)
:param null_value: default value if the target value is empty
:return: final placeholder expression
"""
expression = ''
if field_name:
expression = "${object." + field_name
if sub_field_name:
expression += "." + sub_field_name
if null_value:
expression += " or '''%s'''" % null_value
expression += "}"
return expression
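# For example, build_expression('partner_id', 'name', 'Anonymous') returns the
# placeholder "${object.partner_id.name or '''Anonymous'''}".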
def onchange_sub_model_object_value_field(self, cr, uid, ids, model_object_field, sub_model_object_field=False, null_value=None, context=None):
result = {
'sub_object': False,
'copyvalue': False,
'sub_model_object_field': False,
'null_value': False
}
if model_object_field:
fields_obj = self.pool.get('ir.model.fields')
field_value = fields_obj.browse(cr, uid, model_object_field, context)
if field_value.ttype in ['many2one', 'one2many', 'many2many']:
res_ids = self.pool.get('ir.model').search(cr, uid, [('model', '=', field_value.relation)], context=context)
sub_field_value = False
if sub_model_object_field:
sub_field_value = fields_obj.browse(cr, uid, sub_model_object_field, context)
if res_ids:
result.update({
'sub_object': res_ids[0],
'copyvalue': self.build_expression(field_value.name, sub_field_value and sub_field_value.name or False, null_value or False),
'sub_model_object_field': sub_model_object_field or False,
'null_value': null_value or False
})
else:
result.update({
'copyvalue': self.build_expression(field_value.name, False, null_value or False),
'null_value': null_value or False
})
return {'value': result}
def generate_recipients_batch(self, cr, uid, results, template_id, res_ids, context=None):
"""Generates the recipients of the template. Default values can ben generated
instead of the template values if requested by template or context.
Emails (email_to, email_cc) can be transformed into partners if requested
in the context. """
if context is None:
context = {}
template = self.browse(cr, uid, template_id, context=context)
if template.use_default_to or context.get('tpl_force_default_to'):
ctx = dict(context, thread_model=template.model)
default_recipients = self.pool['mail.thread'].message_get_default_recipients(cr, uid, res_ids, context=ctx)
for res_id, recipients in default_recipients.iteritems():
results[res_id].pop('partner_to', None)
results[res_id].update(recipients)
for res_id, values in results.iteritems():
partner_ids = values.get('partner_ids', list())
if context and context.get('tpl_partners_only'):
mails = tools.email_split(values.pop('email_to', '')) + tools.email_split(values.pop('email_cc', ''))
for mail in mails:
partner_id = self.pool.get('res.partner').find_or_create(cr, uid, mail, context=context)
partner_ids.append(partner_id)
partner_to = values.pop('partner_to', '')
if partner_to:
# placeholders could generate '', 3, 2 due to some empty field values
tpl_partner_ids = [int(pid) for pid in partner_to.split(',') if pid]
partner_ids += self.pool['res.partner'].exists(cr, SUPERUSER_ID, tpl_partner_ids, context=context)
results[res_id]['partner_ids'] = partner_ids
return results
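    # Illustrative sketch (values hypothetical): with 'tpl_partners_only' in the
    # context, a generated entry such as {'email_to': 'a@b.com', 'partner_to': '7,8'}
    # ends up as {'partner_ids': [<id found or created for a@b.com>, 7, 8]}, so that
    # recipients are stored as partners instead of raw e-mail addresses.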
def generate_email_batch(self, cr, uid, template_id, res_ids, context=None, fields=None):
"""Generates an email from the template for given the given model based on
records given by res_ids.
:param template_id: id of the template to render.
:param res_id: id of the record to use for rendering the template (model
is taken from template definition)
:returns: a dict containing all relevant fields for creating a new
mail.mail entry, with one extra key ``attachments``, in the
format [(report_name, data)] where data is base64 encoded.
"""
if context is None:
context = {}
if fields is None:
fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to']
report_xml_pool = self.pool.get('ir.actions.report.xml')
res_ids_to_templates = self.get_email_template_batch(cr, uid, template_id, res_ids, context)
# templates: res_id -> template; template -> res_ids
templates_to_res_ids = {}
for res_id, template in res_ids_to_templates.iteritems():
templates_to_res_ids.setdefault(template, []).append(res_id)
results = dict()
for template, template_res_ids in templates_to_res_ids.iteritems():
# generate fields value for all res_ids linked to the current template
for field in fields:
generated_field_values = self.render_template_batch(
cr, uid, getattr(template, field), template.model, template_res_ids,
post_process=(field == 'body_html'),
context=context)
for res_id, field_value in generated_field_values.iteritems():
results.setdefault(res_id, dict())[field] = field_value
# compute recipients
results = self.generate_recipients_batch(cr, uid, results, template.id, template_res_ids, context=context)
# update values for all res_ids
for res_id in template_res_ids:
values = results[res_id]
# body: add user signature, sanitize
if 'body_html' in fields and template.user_signature:
signature = self.pool.get('res.users').browse(cr, uid, uid, context).signature
values['body_html'] = tools.append_content_to_html(values['body_html'], signature)
if values.get('body_html'):
values['body'] = tools.html_sanitize(values['body_html'])
# technical settings
values.update(
mail_server_id=template.mail_server_id.id or False,
auto_delete=template.auto_delete,
model=template.model,
res_id=res_id or False,
attachment_ids=[attach.id for attach in template.attachment_ids],
)
# Add report in attachments: generate once for all template_res_ids
if template.report_template:
for res_id in template_res_ids:
attachments = []
report_name = self.render_template(cr, uid, template.report_name, template.model, res_id, context=context)
report = report_xml_pool.browse(cr, uid, template.report_template.id, context)
report_service = report.report_name
# Ensure report is rendered using template's language
ctx = context.copy()
if template.lang:
ctx['lang'] = self.render_template_batch(cr, uid, template.lang, template.model, [res_id], context)[res_id] # take 0 ?
if report.report_type in ['qweb-html', 'qweb-pdf']:
result, format = self.pool['report'].get_pdf(cr, uid, [res_id], report_service, context=ctx), 'pdf'
else:
result, format = openerp.report.render_report(cr, uid, [res_id], report_service, {'model': template.model}, ctx)
# TODO in trunk, change return format to binary to match message_post expected format
result = base64.b64encode(result)
if not report_name:
report_name = 'report.' + report_service
ext = "." + format
if not report_name.endswith(ext):
report_name += ext
attachments.append((report_name, result))
results[res_id]['attachments'] = attachments
return results
def send_mail(self, cr, uid, template_id, res_id, force_send=False, raise_exception=False, context=None):
"""Generates a new mail message for the given template and record,
and schedules it for delivery through the ``mail`` module's scheduler.
:param int template_id: id of the template to render
:param int res_id: id of the record to render the template with
(model is taken from the template)
:param bool force_send: if True, the generated mail.message is
immediately sent after being created, as if the scheduler
was executed for this message only.
:returns: id of the mail.message that was created
"""
if context is None:
context = {}
mail_mail = self.pool.get('mail.mail')
ir_attachment = self.pool.get('ir.attachment')
# create a mail_mail based on values, without attachments
values = self.generate_email(cr, uid, template_id, res_id, context=context)
if not values.get('email_from'):
raise osv.except_osv(_('Warning!'), _("Sender email is missing or empty after template rendering. Specify one to deliver your message"))
values['recipient_ids'] = [(4, pid) for pid in values.get('partner_ids', list())]
attachment_ids = values.pop('attachment_ids', [])
attachments = values.pop('attachments', [])
msg_id = mail_mail.create(cr, uid, values, context=context)
mail = mail_mail.browse(cr, uid, msg_id, context=context)
# manage attachments
for attachment in attachments:
attachment_data = {
'name': attachment[0],
'datas_fname': attachment[0],
'datas': attachment[1],
'res_model': 'mail.message',
'res_id': mail.mail_message_id.id,
}
context.pop('default_type', None)
attachment_ids.append(ir_attachment.create(cr, uid, attachment_data, context=context))
if attachment_ids:
values['attachment_ids'] = [(6, 0, attachment_ids)]
mail_mail.write(cr, uid, msg_id, {'attachment_ids': [(6, 0, attachment_ids)]}, context=context)
if force_send:
mail_mail.send(cr, uid, [msg_id], raise_exception=raise_exception, context=context)
return msg_id
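    # Illustrative usage sketch (ids hypothetical): from another model's method,
    #   template_obj = self.pool.get('email.template')
    #   msg_id = template_obj.send_mail(cr, uid, template_id, record.id, force_send=True)
    # renders the template against the record and immediately sends the resulting mail.mail.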
# Compatibility method
def render_template(self, cr, uid, template, model, res_id, context=None):
return self.render_template_batch(cr, uid, template, model, [res_id], context)[res_id]
def get_email_template(self, cr, uid, template_id=False, record_id=None, context=None):
return self.get_email_template_batch(cr, uid, template_id, [record_id], context)[record_id]
def generate_email(self, cr, uid, template_id, res_id, context=None):
return self.generate_email_batch(cr, uid, template_id, [res_id], context)[res_id]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
frankosan/pypers
|
refs/heads/master
|
pypers/steps/samtools/mpileup.py
|
1
|
from pypers.core.step import CmdLineStep
class MPileup(CmdLineStep):
spec = {
"descr": [
"Generate BCF or pileup for one or multiple BAM files."
],
"version":"0.1.19",
"args":
{
"inputs": [
{
"name" : "input_bams",
"type" : "file",
"iterable" : True,
"descr" : "one or more bam files",
},
{
"name" : "ref_path",
"type" : "ref_genome",
"tool" : "bwa",
"descr" : "path to the directory containing the reference genome"
}
],
"outputs": [
{
"name" : "output_files",
"type" : "file",
"descr" : "the output mpileup file",
"value" : "{{input_bams}}.mpileup"
},
],
"params": [
{
"name" : "coefficient",
"type" : "int",
"descr" : "Mapping quality adjustment",
"value" : 50
},
{
"name" : "minq",
"type" : "int",
"descr" : "Minimum mapping quality for an alignment to be used",
"value" : 50
},
{
"name" : "minQ",
"type" : "int",
"descr" : "Minimum base quality for a base to be considered",
"value" : 35
},
{
"name" : "maxreads",
"type" : "int",
"descr" : "Maximum number of reads to be read from BAM at a position",
"value" : 2000000
},
]
},
"cmd": [
"/software/pypers/samtools/samtools-0.1.19/bin/samtools mpileup",
" -C {{coefficient}} -q {{minq}} -Q {{minQ}} -d {{maxreads}}",
" -f {{ref_path}} {{input_bams}} > {{output_files}}",
]
}
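    # Illustrative sketch (paths hypothetical): with the default parameters above and
    # an input "sample.bam", the rendered command is expected to look roughly like
    #   .../samtools mpileup -C 50 -q 50 -Q 35 -d 2000000 -f <ref_path> sample.bam > sample.bam.mpileup
    # (exact expansion of the {{...}} placeholders is handled by CmdLineStep).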
|
TribeMedia/synapse
|
refs/heads/master
|
tests/rest/client/v1/utils.py
|
2
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# twisted imports
from twisted.internet import defer
# trial imports
from tests import unittest
from synapse.api.constants import Membership
import json
import time
class RestTestCase(unittest.TestCase):
"""Contains extra helper functions to quickly and clearly perform a given
REST action, which isn't the focus of the test.
This subclass assumes there are mock_resource and auth_user_id attributes.
"""
def __init__(self, *args, **kwargs):
super(RestTestCase, self).__init__(*args, **kwargs)
self.mock_resource = None
self.auth_user_id = None
@defer.inlineCallbacks
def create_room_as(self, room_creator, is_public=True, tok=None):
temp_id = self.auth_user_id
self.auth_user_id = room_creator
path = "/createRoom"
content = "{}"
if not is_public:
content = '{"visibility":"private"}'
if tok:
path = path + "?access_token=%s" % tok
(code, response) = yield self.mock_resource.trigger("POST", path, content)
self.assertEquals(200, code, msg=str(response))
self.auth_user_id = temp_id
defer.returnValue(response["room_id"])
@defer.inlineCallbacks
def invite(self, room=None, src=None, targ=None, expect_code=200, tok=None):
yield self.change_membership(room=room, src=src, targ=targ, tok=tok,
membership=Membership.INVITE,
expect_code=expect_code)
@defer.inlineCallbacks
def join(self, room=None, user=None, expect_code=200, tok=None):
yield self.change_membership(room=room, src=user, targ=user, tok=tok,
membership=Membership.JOIN,
expect_code=expect_code)
@defer.inlineCallbacks
def leave(self, room=None, user=None, expect_code=200, tok=None):
yield self.change_membership(room=room, src=user, targ=user, tok=tok,
membership=Membership.LEAVE,
expect_code=expect_code)
@defer.inlineCallbacks
def change_membership(self, room, src, targ, membership, tok=None,
expect_code=200):
temp_id = self.auth_user_id
self.auth_user_id = src
path = "/rooms/%s/state/m.room.member/%s" % (room, targ)
if tok:
path = path + "?access_token=%s" % tok
data = {
"membership": membership
}
(code, response) = yield self.mock_resource.trigger(
"PUT", path, json.dumps(data)
)
self.assertEquals(
expect_code, code,
msg="Expected: %d, got: %d, resp: %r" % (expect_code, code, response)
)
self.auth_user_id = temp_id
@defer.inlineCallbacks
def register(self, user_id):
(code, response) = yield self.mock_resource.trigger(
"POST",
"/register",
json.dumps({
"user": user_id,
"password": "test",
"type": "m.login.password"
}))
self.assertEquals(200, code)
defer.returnValue(response)
@defer.inlineCallbacks
def send(self, room_id, body=None, txn_id=None, tok=None,
expect_code=200):
if txn_id is None:
txn_id = "m%s" % (str(time.time()))
if body is None:
body = "body_text_here"
path = "/rooms/%s/send/m.room.message/%s" % (room_id, txn_id)
content = '{"msgtype":"m.text","body":"%s"}' % body
if tok:
path = path + "?access_token=%s" % tok
(code, response) = yield self.mock_resource.trigger("PUT", path, content)
self.assertEquals(expect_code, code, msg=str(response))
def assert_dict(self, required, actual):
"""Does a partial assert of a dict.
Args:
required (dict): The keys and value which MUST be in 'actual'.
actual (dict): The test result. Extra keys will not be checked.
"""
for key in required:
self.assertEquals(required[key], actual[key],
msg="%s mismatch. %s" % (key, actual))
|
codeinthehole/purl
|
refs/heads/master
|
purl/__init__.py
|
1
|
from .url import URL # noqa
from .template import expand, Template # noqa
__version__ = '1.6'
__all__ = ['URL', 'expand', 'Template']
|
jimberlage/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/tools/third_party/certifi/certifi/__init__.py
|
59
|
from .core import where, old_where
__version__ = "2018.04.16"
|
openwebinars-django/newspaper
|
refs/heads/master
|
newspaper/newspaper/news/management/commands/count_news.py
|
3
|
from django.core.management.base import BaseCommand
from newspaper.news.models import News
class Command(BaseCommand):
help = 'Count news.'
def handle(self, *args, **options):
print(News.objects.count())
|
NetDBNCKU/GAE-Conference-Web-App
|
refs/heads/master
|
django/contrib/localflavor/ch/__init__.py
|
12133432
| |
shitolepriya/Saloon_erp
|
refs/heads/master
|
erpnext/hub_node/page/hub/__init__.py
|
12133432
| |
jimmymunoz/jeuxdemots
|
refs/heads/master
|
public/package/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja_test.py
|
1843
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import gyp.generator.ninja as ninja
import unittest
import StringIO
import sys
import TestCommon
class TestPrefixesAndSuffixes(unittest.TestCase):
def test_BinaryNamesWindows(self):
# These cannot run on non-Windows as they require a VS installation to
# correctly handle variable expansion.
if sys.platform.startswith('win'):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'win')
spec = { 'target_name': 'wee' }
self.assertTrue(writer.ComputeOutputFileName(spec, 'executable').
endswith('.exe'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.dll'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.lib'))
def test_BinaryNamesLinux(self):
writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
'build.ninja', 'linux')
spec = { 'target_name': 'wee' }
self.assertTrue('.' not in writer.ComputeOutputFileName(spec,
'executable'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
startswith('lib'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
endswith('.so'))
self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
endswith('.a'))
if __name__ == '__main__':
unittest.main()
|
p0psicles/SickRage
|
refs/heads/master
|
lib/bs4/tests/__init__.py
|
668
|
"The beautifulsoup tests."
|
andresriancho/PyGithub
|
refs/heads/master
|
github/tests/Enterprise.py
|
2
|
# -*- coding: utf-8 -*-
# Copyright 2012 Vincent Jacques
# vincent@vincent-jacques.net
# This file is part of PyGithub. http://vincent-jacques.net/PyGithub
# PyGithub is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License along with PyGithub. If not, see <http://www.gnu.org/licenses/>.
import github
import Framework
# Replay data for this test case is forged, because I don't have access to a real Github Enterprise install
class Enterprise(Framework.BasicTestCase):
def testHttps(self):
g = github.Github(self.login, self.password, base_url="https://my.enterprise.com")
self.assertListKeyEqual(g.get_user().get_repos(), lambda r: r.name, ["TestPyGithub", "django", "PyGithub", "developer.github.com", "acme-public-website", "C4Planner", "Hacking", "vincent-jacques.net", "Contests", "Candidates", "Tests", "DrawTurksHead", "DrawSyntax", "QuadProgMm", "Boost.HierarchicalEnum", "ViDE"])
def testHttp(self):
g = github.Github(self.login, self.password, base_url="http://my.enterprise.com")
self.assertListKeyEqual(g.get_user().get_repos(), lambda r: r.name, ["TestPyGithub", "django", "PyGithub", "developer.github.com", "acme-public-website", "C4Planner", "Hacking", "vincent-jacques.net", "Contests", "Candidates", "Tests", "DrawTurksHead", "DrawSyntax", "QuadProgMm", "Boost.HierarchicalEnum", "ViDE"])
def testLongUrl(self):
g = github.Github(self.login, self.password, base_url="http://my.enterprise.com/path/to/github")
repos = g.get_user().get_repos()
self.assertListKeyEqual(repos, lambda r: r.name, ["TestPyGithub", "django", "PyGithub", "developer.github.com", "acme-public-website", "C4Planner", "Hacking", "vincent-jacques.net", "Contests", "Candidates", "Tests", "DrawTurksHead", "DrawSyntax", "QuadProgMm", "Boost.HierarchicalEnum", "ViDE"])
self.assertEqual(repos[0].owner.name, "Vincent Jacques")
def testSpecificPort(self):
g = github.Github(self.login, self.password, base_url="http://my.enterprise.com:8080")
self.assertListKeyEqual(g.get_user().get_repos(), lambda r: r.name, ["TestPyGithub", "django", "PyGithub", "developer.github.com", "acme-public-website", "C4Planner", "Hacking", "vincent-jacques.net", "Contests", "Candidates", "Tests", "DrawTurksHead", "DrawSyntax", "QuadProgMm", "Boost.HierarchicalEnum", "ViDE"])
|
MasonMcGill/fauxton
|
refs/heads/master
|
setup.py
|
1
|
from distutils.core import setup
setup(name='fauxton',
packages=['fauxton'],
version='0.1.2',
description=('Computer graphics technology for computer vision'
' applications.'),
author='Mason McGill',
author_email='mmcgill@caltech.edu',
url='https://github.com/MasonMcGill/fauxton',
download_url='https://github.com/MasonMcGill/fauxton/tarball/0.1',
keywords=['vision', 'graphics', 'blender'],
install_requires=['numpy'],
classifiers=[])
|
IronLanguages/ironpython3
|
refs/heads/master
|
Src/StdLib/Lib/email/_parseaddr.py
|
125
|
# Copyright (C) 2002-2007 Python Software Foundation
# Contact: email-sig@python.org
"""Email address parsing code.
Lifted directly from rfc822.py. This should eventually be rewritten.
"""
__all__ = [
'mktime_tz',
'parsedate',
'parsedate_tz',
'quote',
]
import time, calendar
SPACE = ' '
EMPTYSTRING = ''
COMMASPACE = ', '
# Parse a date field
_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
'aug', 'sep', 'oct', 'nov', 'dec',
'january', 'february', 'march', 'april', 'may', 'june', 'july',
'august', 'september', 'october', 'november', 'december']
_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# The timezone table does not include the military time zones defined
# in RFC822, other than Z. According to RFC1123, the description in
# RFC822 gets the signs wrong, so we can't rely on any such time
# zones. RFC1123 recommends that numeric timezone indicators be used
# instead of timezone names.
_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
'AST': -400, 'ADT': -300, # Atlantic (used in Canada)
'EST': -500, 'EDT': -400, # Eastern
'CST': -600, 'CDT': -500, # Central
'MST': -700, 'MDT': -600, # Mountain
'PST': -800, 'PDT': -700 # Pacific
}
def parsedate_tz(data):
"""Convert a date string to a time tuple.
Accounts for military timezones.
"""
res = _parsedate_tz(data)
if not res:
return
if res[9] is None:
res[9] = 0
return tuple(res)
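# Illustrative example (date value hypothetical):
#   parsedate_tz('Mon, 20 Nov 1995 19:12:08 -0500')
#   -> (1995, 11, 20, 19, 12, 8, 0, 1, -1, -18000)
# i.e. a 10-tuple whose last item is the UTC offset in seconds.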
def _parsedate_tz(data):
"""Convert date to extended time tuple.
The last (additional) element is the time zone offset in seconds, except if
the timezone was specified as -0000. In that case the last element is
None. This indicates a UTC timestamp that explicitly declaims knowledge of
the source timezone, as opposed to a +0000 timestamp that indicates the
source timezone really was UTC.
"""
if not data:
return
data = data.split()
# The FWS after the comma after the day-of-week is optional, so search and
# adjust for this.
if data[0].endswith(',') or data[0].lower() in _daynames:
# There's a dayname here. Skip it
del data[0]
else:
i = data[0].rfind(',')
if i >= 0:
data[0] = data[0][i+1:]
if len(data) == 3: # RFC 850 date, deprecated
stuff = data[0].split('-')
if len(stuff) == 3:
data = stuff + data[1:]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i == -1:
i = s.find('-')
if i > 0:
data[3:] = [s[:i], s[i:]]
else:
data.append('') # Dummy tz
if len(data) < 5:
return None
data = data[:5]
[dd, mm, yy, tm, tz] = data
mm = mm.lower()
if mm not in _monthnames:
dd, mm = mm, dd.lower()
if mm not in _monthnames:
return None
mm = _monthnames.index(mm) + 1
if mm > 12:
mm -= 12
if dd[-1] == ',':
dd = dd[:-1]
i = yy.find(':')
if i > 0:
yy, tm = tm, yy
if yy[-1] == ',':
yy = yy[:-1]
if not yy[0].isdigit():
yy, tz = tz, yy
if tm[-1] == ',':
tm = tm[:-1]
tm = tm.split(':')
if len(tm) == 2:
[thh, tmm] = tm
tss = '0'
elif len(tm) == 3:
[thh, tmm, tss] = tm
elif len(tm) == 1 and '.' in tm[0]:
# Some non-compliant MUAs use '.' to separate time elements.
tm = tm[0].split('.')
if len(tm) == 2:
[thh, tmm] = tm
tss = 0
elif len(tm) == 3:
[thh, tmm, tss] = tm
else:
return None
try:
yy = int(yy)
dd = int(dd)
thh = int(thh)
tmm = int(tmm)
tss = int(tss)
except ValueError:
return None
# Check for a yy specified in two-digit format, then convert it to the
# appropriate four-digit format, according to the POSIX standard. RFC 822
# calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
# mandates a 4-digit yy. For more information, see the documentation for
# the time module.
if yy < 100:
# The year is between 1969 and 1999 (inclusive).
if yy > 68:
yy += 1900
# The year is between 2000 and 2068 (inclusive).
else:
yy += 2000
tzoffset = None
tz = tz.upper()
if tz in _timezones:
tzoffset = _timezones[tz]
else:
try:
tzoffset = int(tz)
except ValueError:
pass
if tzoffset==0 and tz.startswith('-'):
tzoffset = None
# Convert a timezone offset into seconds ; -0500 -> -18000
if tzoffset:
if tzoffset < 0:
tzsign = -1
tzoffset = -tzoffset
else:
tzsign = 1
tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
# Daylight Saving Time flag is set to -1, since DST is unknown.
return [yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset]
def parsedate(data):
"""Convert a time string to a time tuple."""
t = parsedate_tz(data)
if isinstance(t, tuple):
return t[:9]
else:
return t
def mktime_tz(data):
"""Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp."""
if data[9] is None:
# No zone info, so localtime is better assumption than GMT
return time.mktime(data[:8] + (-1,))
else:
t = calendar.timegm(data)
return t - data[9]
def quote(str):
"""Prepare string to be used in a quoted string.
Turns backslash and double quote characters into quoted pairs. These
are the only characters that need to be quoted inside a quoted string.
Does not add the surrounding double quotes.
"""
return str.replace('\\', '\\\\').replace('"', '\\"')
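# Illustrative example: quote('say "hi"') == 'say \\"hi\\"', i.e. each '"' (and any
# backslash) gets a preceding backslash so the text can sit inside a quoted string.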
class AddrlistClass:
"""Address parser class by Ben Escoto.
To understand what this class does, it helps to have a copy of RFC 2822 in
front of you.
Note: this class interface is deprecated and may be removed in the future.
Use email.utils.AddressList instead.
"""
def __init__(self, field):
"""Initialize a new instance.
`field' is an unparsed address header field, containing
one or more addresses.
"""
self.specials = '()<>@,:;.\"[]'
self.pos = 0
self.LWS = ' \t'
self.CR = '\r\n'
self.FWS = self.LWS + self.CR
self.atomends = self.specials + self.LWS + self.CR
# Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
# is obsolete syntax. RFC 2822 requires that we recognize obsolete
# syntax, so allow dots in phrases.
self.phraseends = self.atomends.replace('.', '')
self.field = field
self.commentlist = []
def gotonext(self):
"""Skip white space and extract comments."""
wslist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS + '\n\r':
if self.field[self.pos] not in '\n\r':
wslist.append(self.field[self.pos])
self.pos += 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
else:
break
return EMPTYSTRING.join(wslist)
def getaddrlist(self):
"""Parse all addresses.
Returns a list containing all of the addresses.
"""
result = []
while self.pos < len(self.field):
ad = self.getaddress()
if ad:
result += ad
else:
result.append(('', ''))
return result
def getaddress(self):
"""Parse the next address."""
self.commentlist = []
self.gotonext()
oldpos = self.pos
oldcl = self.commentlist
plist = self.getphraselist()
self.gotonext()
returnlist = []
if self.pos >= len(self.field):
# Bad email address technically, no domain.
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in '.@':
# email address is just an addrspec
# this isn't very efficient since we start over
self.pos = oldpos
self.commentlist = oldcl
addrspec = self.getaddrspec()
returnlist = [(SPACE.join(self.commentlist), addrspec)]
elif self.field[self.pos] == ':':
# address is a group
returnlist = []
fieldlen = len(self.field)
self.pos += 1
while self.pos < len(self.field):
self.gotonext()
if self.pos < fieldlen and self.field[self.pos] == ';':
self.pos += 1
break
returnlist = returnlist + self.getaddress()
elif self.field[self.pos] == '<':
# Address is a phrase then a route addr
routeaddr = self.getrouteaddr()
if self.commentlist:
returnlist = [(SPACE.join(plist) + ' (' +
' '.join(self.commentlist) + ')', routeaddr)]
else:
returnlist = [(SPACE.join(plist), routeaddr)]
else:
if plist:
returnlist = [(SPACE.join(self.commentlist), plist[0])]
elif self.field[self.pos] in self.specials:
self.pos += 1
self.gotonext()
if self.pos < len(self.field) and self.field[self.pos] == ',':
self.pos += 1
return returnlist
def getrouteaddr(self):
"""Parse a route address (Return-path value).
This method just skips all the route stuff and returns the addrspec.
"""
if self.field[self.pos] != '<':
return
expectroute = False
self.pos += 1
self.gotonext()
adlist = ''
while self.pos < len(self.field):
if expectroute:
self.getdomain()
expectroute = False
elif self.field[self.pos] == '>':
self.pos += 1
break
elif self.field[self.pos] == '@':
self.pos += 1
expectroute = True
elif self.field[self.pos] == ':':
self.pos += 1
else:
adlist = self.getaddrspec()
self.pos += 1
break
self.gotonext()
return adlist
def getaddrspec(self):
"""Parse an RFC 2822 addr-spec."""
aslist = []
self.gotonext()
while self.pos < len(self.field):
preserve_ws = True
if self.field[self.pos] == '.':
if aslist and not aslist[-1].strip():
aslist.pop()
aslist.append('.')
self.pos += 1
preserve_ws = False
elif self.field[self.pos] == '"':
aslist.append('"%s"' % quote(self.getquote()))
elif self.field[self.pos] in self.atomends:
if aslist and not aslist[-1].strip():
aslist.pop()
break
else:
aslist.append(self.getatom())
ws = self.gotonext()
if preserve_ws and ws:
aslist.append(ws)
if self.pos >= len(self.field) or self.field[self.pos] != '@':
return EMPTYSTRING.join(aslist)
aslist.append('@')
self.pos += 1
self.gotonext()
return EMPTYSTRING.join(aslist) + self.getdomain()
def getdomain(self):
"""Get the complete domain name from an address."""
sdlist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.LWS:
self.pos += 1
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] == '[':
sdlist.append(self.getdomainliteral())
elif self.field[self.pos] == '.':
self.pos += 1
sdlist.append('.')
elif self.field[self.pos] in self.atomends:
break
else:
sdlist.append(self.getatom())
return EMPTYSTRING.join(sdlist)
def getdelimited(self, beginchar, endchars, allowcomments=True):
"""Parse a header fragment delimited by special characters.
`beginchar' is the start character for the fragment.
If self is not looking at an instance of `beginchar' then
getdelimited returns the empty string.
`endchars' is a sequence of allowable end-delimiting characters.
Parsing stops when one of these is encountered.
If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
within the parsed fragment.
"""
if self.field[self.pos] != beginchar:
return ''
slist = ['']
quote = False
self.pos += 1
while self.pos < len(self.field):
if quote:
slist.append(self.field[self.pos])
quote = False
elif self.field[self.pos] in endchars:
self.pos += 1
break
elif allowcomments and self.field[self.pos] == '(':
slist.append(self.getcomment())
continue # have already advanced pos from getcomment
elif self.field[self.pos] == '\\':
quote = True
else:
slist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(slist)
def getquote(self):
"""Get a quote-delimited fragment from self's field."""
return self.getdelimited('"', '"\r', False)
def getcomment(self):
"""Get a parenthesis-delimited fragment from self's field."""
return self.getdelimited('(', ')\r', True)
def getdomainliteral(self):
"""Parse an RFC 2822 domain-literal."""
return '[%s]' % self.getdelimited('[', ']\r', False)
def getatom(self, atomends=None):
"""Parse an RFC 2822 atom.
Optional atomends specifies a different set of end token delimiters
(the default is to use self.atomends). This is used e.g. in
getphraselist() since phrase endings must not include the `.' (which
is legal in phrases)."""
atomlist = ['']
if atomends is None:
atomends = self.atomends
while self.pos < len(self.field):
if self.field[self.pos] in atomends:
break
else:
atomlist.append(self.field[self.pos])
self.pos += 1
return EMPTYSTRING.join(atomlist)
def getphraselist(self):
"""Parse a sequence of RFC 2822 phrases.
A phrase is a sequence of words, which are in turn either RFC 2822
atoms or quoted-strings. Phrases are canonicalized by squeezing all
runs of continuous whitespace into one space.
"""
plist = []
while self.pos < len(self.field):
if self.field[self.pos] in self.FWS:
self.pos += 1
elif self.field[self.pos] == '"':
plist.append(self.getquote())
elif self.field[self.pos] == '(':
self.commentlist.append(self.getcomment())
elif self.field[self.pos] in self.phraseends:
break
else:
plist.append(self.getatom(self.phraseends))
return plist
class AddressList(AddrlistClass):
"""An AddressList encapsulates a list of parsed RFC 2822 addresses."""
def __init__(self, field):
AddrlistClass.__init__(self, field)
if field:
self.addresslist = self.getaddrlist()
else:
self.addresslist = []
def __len__(self):
return len(self.addresslist)
def __add__(self, other):
# Set union
newaddr = AddressList(None)
newaddr.addresslist = self.addresslist[:]
for x in other.addresslist:
if not x in self.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __iadd__(self, other):
# Set union, in-place
for x in other.addresslist:
if not x in self.addresslist:
self.addresslist.append(x)
return self
def __sub__(self, other):
# Set difference
newaddr = AddressList(None)
for x in self.addresslist:
if not x in other.addresslist:
newaddr.addresslist.append(x)
return newaddr
def __isub__(self, other):
# Set difference, in-place
for x in other.addresslist:
if x in self.addresslist:
self.addresslist.remove(x)
return self
def __getitem__(self, index):
# Make indexing, slices, and 'in' work
return self.addresslist[index]
|
pmghalvorsen/gramps_branch
|
refs/heads/master
|
gramps/gen/filters/rules/media/_hassourcecount.py
|
2
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Jerome Rapinat
# Copyright (C) 2008 Benny Malengier
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# GRAMPS modules
#
#-------------------------------------------------------------------------
from .._hassourcecountbase import HasSourceCountBase
#-------------------------------------------------------------------------
# "People having sources"
#-------------------------------------------------------------------------
class HasSourceCount(HasSourceCountBase):
"""Media with sources"""
name = _('Media with <count> sources')
description = _("Matches media with a certain number of sources connected to it")
|
deepsrijit1105/edx-platform
|
refs/heads/master
|
common/lib/chem/chem/chemtools.py
|
250
|
"""This module originally includes functions for grading Vsepr problems.
Also, may be this module is the place for other chemistry-related grade functions. TODO: discuss it.
"""
import json
import unittest
import itertools
def vsepr_parse_user_answer(user_input):
"""
    user_input is JSON generated by vsepr.js from a dictionary.
    There must be only two keys in the original user_input dictionary: "geometry" and "atoms".
    Format: u'{"geometry": "AX3E0","atoms":{"c0": "B","p0": "F","p1": "B","p2": "F"}}'
    Order of elements inside the "atoms" subdict does not matter.
    Return a dict parsed from the json.
    The "atoms" subdict stores positions of atoms in the molecule.
General types of positions:
c0 - central atom
p0..pN - peripheral atoms
a0..aN - axial atoms
e0..eN - equatorial atoms
    Each position is a dictionary key, i.e. user_input["atoms"]["c0"] is the central atom, user_input["atoms"]["a0"] is one of the axial atoms.
    Special positions exist only for the AX6 (octahedral) geometry:
    e10, e11 - atom pair opposite the central atom,
    e20, e21 - atom pair opposite the central atom,
    the e1 and e2 pairs lying crosswise in the equatorial plane.
    In user_input["atoms"] only 3 sets of keys may appear:
(c0,p0..pN),
(c0, a0..aN, e0..eN),
(c0, a0, a1, e10,e11,e20,e21) - if geometry is AX6.
"""
return json.loads(user_input)
def vsepr_build_correct_answer(geometry, atoms):
"""
    geometry is a string.
    atoms is a dict of atoms with their proper positions.
Example:
correct_answer = vsepr_build_correct_answer(geometry="AX4E0", atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})
returns a dictionary composed from input values:
{'geometry': geometry, 'atoms': atoms}
"""
return {'geometry': geometry, 'atoms': atoms}
def vsepr_grade(user_input, correct_answer, convert_to_peripheral=False):
"""
This function does comparison between user_input and correct_answer.
Comparison is successful if all steps are successful:
1) geometries are equal
2) central atoms (index in dictionary 'c0') are equal
3):
    In the next steps the corresponding subsets of atom positions are compared: equatorial (e0..eN), axial (a0..aN) or peripheral (p0..pN).
If convert_to_peripheral is True, then axial and equatorial positions are converted to peripheral.
This means that user_input from:
"atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "(ep)","e20": "H","e21": "(ep)"}}' after parsing to json
is converted to:
{"c0": "Br", "p0": "(ep)", "p1": "test", "p2": "H", "p3": "H", "p4": "(ep)", "p6": "(ep)"}
i.e. aX and eX -> pX
    So if converted, the p subsets are compared;
    if not, the a and e subsets are compared.
If all subsets are equal, grade succeeds.
There is also one special case for AX6 geometry.
In this case user_input["atoms"] contains special 3 symbol keys: e10, e12, e20, and e21.
Correct answer for this geometry can be of 3 types:
1) c0 and peripheral
2) c0 and axial and equatorial
3) c0 and axial and equatorial-subset-1 (e1X) and equatorial-subset-2 (e2X)
If correct answer is type 1 or 2, then user_input is converted from type 3 to type 2 (or to type 1 if convert_to_peripheral is True)
    If correct_answer is type 3, then we do a special-case comparison. We have 3 sets of atom positions both in user_input and correct_answer: axial, eq-1 and eq-2.
    The answer will be correct if these sets are equal for one of the permutations. For example, if:
user_axial = correct_eq-1
user_eq-1 = correct-axial
user_eq-2 = correct-eq-2
"""
if user_input['geometry'] != correct_answer['geometry']:
return False
if user_input['atoms']['c0'] != correct_answer['atoms']['c0']:
return False
if convert_to_peripheral:
# convert user_input from (a,e,e1,e2) to (p)
# correct_answer must be set in (p) using this flag
c0 = user_input['atoms'].pop('c0')
user_input['atoms'] = {'p' + str(i): v for i, v in enumerate(user_input['atoms'].values())}
user_input['atoms']['c0'] = c0
# special case for AX6
if 'e10' in correct_answer['atoms']: # need check e1x, e2x symmetry for AX6..
a_user = {}
a_correct = {}
for ea_position in ['a', 'e1', 'e2']: # collecting positions:
a_user[ea_position] = [v for k, v in user_input['atoms'].items() if k.startswith(ea_position)]
a_correct[ea_position] = [v for k, v in correct_answer['atoms'].items() if k.startswith(ea_position)]
correct = [sorted(a_correct['a'])] + [sorted(a_correct['e1'])] + [sorted(a_correct['e2'])]
for permutation in itertools.permutations(['a', 'e1', 'e2']):
if correct == [sorted(a_user[permutation[0]])] + [sorted(a_user[permutation[1]])] + [sorted(a_user[permutation[2]])]:
return True
return False
else: # no need to check e1x,e2x symmetry - convert them to ex
if 'e10' in user_input['atoms']: # e1x exists, it is AX6.. case
e_index = 0
for k, v in user_input['atoms'].items():
if len(k) == 3: # e1x
del user_input['atoms'][k]
user_input['atoms']['e' + str(e_index)] = v
e_index += 1
# common case
for ea_position in ['p', 'a', 'e']:
# collecting atoms:
a_user = [v for k, v in user_input['atoms'].items() if k.startswith(ea_position)]
a_correct = [v for k, v in correct_answer['atoms'].items() if k.startswith(ea_position)]
# print a_user, a_correct
if len(a_user) != len(a_correct):
return False
if sorted(a_user) != sorted(a_correct):
return False
return True
class Test_Grade(unittest.TestCase):
''' test grade function '''
def test_incorrect_geometry(self):
correct_answer = vsepr_build_correct_answer(geometry="AX4E0", atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX3E0","atoms":{"c0": "B","p0": "F","p1": "B","p2": "F"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_p(self):
correct_answer = vsepr_build_correct_answer(geometry="AX4E0", atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX4E0","atoms":{"c0": "N","p0": "H","p1": "(ep)","p2": "H", "p3": "H"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_ae(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "test", "a1": "(ep)", "e0": "H", "e1": "H", "e2": "(ep)", "e3": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "H","e20": "(ep)","e21": "(ep)"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_ae_convert_to_p_but_input_not_in_p(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "test", "e0": "H", "e1": "H", "e2": "(ep)", "e3": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "(ep)","e20": "H","e21": "(ep)"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer, convert_to_peripheral=True))
def test_correct_answer_ae_convert_to_p(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "p0": "(ep)", "p1": "test", "p2": "H", "p3": "H", "p4": "(ep)", "p6": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "(ep)","e20": "H","e21": "(ep)"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer, convert_to_peripheral=True))
def test_correct_answer_e1e2_in_a(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "(ep)","a1": "(ep)","e10": "H","e11": "H","e20": "H","e21": "H"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_e1e2_in_e1(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "H","a1": "H","e10": "(ep)","e11": "(ep)","e20": "H","e21": "H"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_e1e2_in_e2(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "H","a1": "H","e10": "H","e11": "H","e20": "(ep)","e21": "(ep)"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_incorrect_answer_e1e2(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "H","a1": "H","e10": "(ep)","e11": "H","e20": "H","e21": "(ep)"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer))
def test_incorrect_c0(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "test", "e0": "H", "e1": "H", "e2": "H", "e3": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "H","a0": "test","a1": "(ep)","e0": "H","e1": "H","e2": "(ep)","e3": "H"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer))
def suite():
testcases = [Test_Grade]
suites = []
for testcase in testcases:
suites.append(unittest.TestLoader().loadTestsFromTestCase(testcase))
return unittest.TestSuite(suites)
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=2).run(suite())
|
kmatzen/ansible
|
refs/heads/devel
|
lib/ansible/plugins/callback/oneline.py
|
30
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.callback import CallbackBase
from ansible import constants as C
class CallbackModule(CallbackBase):
'''
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
'''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'oneline'
def _command_generic_msg(self, hostname, result, caption):
stdout = result.get('stdout','').replace('\n', '\\n')
if 'stderr' in result and result['stderr']:
stderr = result.get('stderr','').replace('\n', '\\n')
return "%s | %s | rc=%s | (stdout) %s (stderr) %s" % (hostname, caption, result.get('rc',0), stdout, stderr)
else:
return "%s | %s | rc=%s | (stdout) %s" % (hostname, caption, result.get('rc',0), stdout)
def v2_runner_on_failed(self, result, ignore_errors=False):
if 'exception' in result._result:
if self._display.verbosity < 3:
# extract just the actual error message from the exception text
error = result._result['exception'].strip().split('\n')[-1]
msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error
else:
msg = "An exception occurred during task execution. The full traceback is:\n" + result._result['exception'].replace('\n','')
if result._task.action in C.MODULE_NO_JSON:
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'FAILED'), color=C.COLOR_ERROR)
else:
self._display.display(msg, color=C.COLOR_ERROR)
# finally, remove the exception from the result so it's not shown every time
del result._result['exception']
self._display.display("%s | FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_ERROR)
def v2_runner_on_ok(self, result):
if result._task.action in C.MODULE_NO_JSON:
self._display.display(self._command_generic_msg(result._host.get_name(), result._result,'SUCCESS'), color=C.COLOR_OK)
else:
self._display.display("%s | SUCCESS => %s" % (result._host.get_name(), self._dump_results(result._result, indent=0).replace('\n','')), color=C.COLOR_OK)
def v2_runner_on_unreachable(self, result):
self._display.display("%s | UNREACHABLE!" % result._host.get_name(), color=C.COLOR_UNREACHABLE)
def v2_runner_on_skipped(self, result):
self._display.display("%s | SKIPPED" % (result._host.get_name()), color=C.COLOR_SKIP)
|
caphrim007/ansible
|
refs/heads/devel
|
test/units/module_utils/facts/system/__init__.py
|
12133432
| |
iandees/postboxes
|
refs/heads/master
|
usps/__init__.py
|
12133432
| |
MikeAmy/django
|
refs/heads/master
|
tests/field_deconstruction/__init__.py
|
12133432
| |
susansalkeld/discsongs
|
refs/heads/master
|
discsongs/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/__init__.py
|
12133432
| |
khushboo9293/postorius
|
refs/heads/master
|
src/postorius/templatetags/__init__.py
|
12133432
| |
CSSIP-AIR/UMETRICS
|
refs/heads/master
|
ThirdParty/unidecode/x0cb.py
|
253
|
data = (
'jjwaels', # 0x00
'jjwaelt', # 0x01
'jjwaelp', # 0x02
'jjwaelh', # 0x03
'jjwaem', # 0x04
'jjwaeb', # 0x05
'jjwaebs', # 0x06
'jjwaes', # 0x07
'jjwaess', # 0x08
'jjwaeng', # 0x09
'jjwaej', # 0x0a
'jjwaec', # 0x0b
'jjwaek', # 0x0c
'jjwaet', # 0x0d
'jjwaep', # 0x0e
'jjwaeh', # 0x0f
'jjoe', # 0x10
'jjoeg', # 0x11
'jjoegg', # 0x12
'jjoegs', # 0x13
'jjoen', # 0x14
'jjoenj', # 0x15
'jjoenh', # 0x16
'jjoed', # 0x17
'jjoel', # 0x18
'jjoelg', # 0x19
'jjoelm', # 0x1a
'jjoelb', # 0x1b
'jjoels', # 0x1c
'jjoelt', # 0x1d
'jjoelp', # 0x1e
'jjoelh', # 0x1f
'jjoem', # 0x20
'jjoeb', # 0x21
'jjoebs', # 0x22
'jjoes', # 0x23
'jjoess', # 0x24
'jjoeng', # 0x25
'jjoej', # 0x26
'jjoec', # 0x27
'jjoek', # 0x28
'jjoet', # 0x29
'jjoep', # 0x2a
'jjoeh', # 0x2b
'jjyo', # 0x2c
'jjyog', # 0x2d
'jjyogg', # 0x2e
'jjyogs', # 0x2f
'jjyon', # 0x30
'jjyonj', # 0x31
'jjyonh', # 0x32
'jjyod', # 0x33
'jjyol', # 0x34
'jjyolg', # 0x35
'jjyolm', # 0x36
'jjyolb', # 0x37
'jjyols', # 0x38
'jjyolt', # 0x39
'jjyolp', # 0x3a
'jjyolh', # 0x3b
'jjyom', # 0x3c
'jjyob', # 0x3d
'jjyobs', # 0x3e
'jjyos', # 0x3f
'jjyoss', # 0x40
'jjyong', # 0x41
'jjyoj', # 0x42
'jjyoc', # 0x43
'jjyok', # 0x44
'jjyot', # 0x45
'jjyop', # 0x46
'jjyoh', # 0x47
'jju', # 0x48
'jjug', # 0x49
'jjugg', # 0x4a
'jjugs', # 0x4b
'jjun', # 0x4c
'jjunj', # 0x4d
'jjunh', # 0x4e
'jjud', # 0x4f
'jjul', # 0x50
'jjulg', # 0x51
'jjulm', # 0x52
'jjulb', # 0x53
'jjuls', # 0x54
'jjult', # 0x55
'jjulp', # 0x56
'jjulh', # 0x57
'jjum', # 0x58
'jjub', # 0x59
'jjubs', # 0x5a
'jjus', # 0x5b
'jjuss', # 0x5c
'jjung', # 0x5d
'jjuj', # 0x5e
'jjuc', # 0x5f
'jjuk', # 0x60
'jjut', # 0x61
'jjup', # 0x62
'jjuh', # 0x63
'jjweo', # 0x64
'jjweog', # 0x65
'jjweogg', # 0x66
'jjweogs', # 0x67
'jjweon', # 0x68
'jjweonj', # 0x69
'jjweonh', # 0x6a
'jjweod', # 0x6b
'jjweol', # 0x6c
'jjweolg', # 0x6d
'jjweolm', # 0x6e
'jjweolb', # 0x6f
'jjweols', # 0x70
'jjweolt', # 0x71
'jjweolp', # 0x72
'jjweolh', # 0x73
'jjweom', # 0x74
'jjweob', # 0x75
'jjweobs', # 0x76
'jjweos', # 0x77
'jjweoss', # 0x78
'jjweong', # 0x79
'jjweoj', # 0x7a
'jjweoc', # 0x7b
'jjweok', # 0x7c
'jjweot', # 0x7d
'jjweop', # 0x7e
'jjweoh', # 0x7f
'jjwe', # 0x80
'jjweg', # 0x81
'jjwegg', # 0x82
'jjwegs', # 0x83
'jjwen', # 0x84
'jjwenj', # 0x85
'jjwenh', # 0x86
'jjwed', # 0x87
'jjwel', # 0x88
'jjwelg', # 0x89
'jjwelm', # 0x8a
'jjwelb', # 0x8b
'jjwels', # 0x8c
'jjwelt', # 0x8d
'jjwelp', # 0x8e
'jjwelh', # 0x8f
'jjwem', # 0x90
'jjweb', # 0x91
'jjwebs', # 0x92
'jjwes', # 0x93
'jjwess', # 0x94
'jjweng', # 0x95
'jjwej', # 0x96
'jjwec', # 0x97
'jjwek', # 0x98
'jjwet', # 0x99
'jjwep', # 0x9a
'jjweh', # 0x9b
'jjwi', # 0x9c
'jjwig', # 0x9d
'jjwigg', # 0x9e
'jjwigs', # 0x9f
'jjwin', # 0xa0
'jjwinj', # 0xa1
'jjwinh', # 0xa2
'jjwid', # 0xa3
'jjwil', # 0xa4
'jjwilg', # 0xa5
'jjwilm', # 0xa6
'jjwilb', # 0xa7
'jjwils', # 0xa8
'jjwilt', # 0xa9
'jjwilp', # 0xaa
'jjwilh', # 0xab
'jjwim', # 0xac
'jjwib', # 0xad
'jjwibs', # 0xae
'jjwis', # 0xaf
'jjwiss', # 0xb0
'jjwing', # 0xb1
'jjwij', # 0xb2
'jjwic', # 0xb3
'jjwik', # 0xb4
'jjwit', # 0xb5
'jjwip', # 0xb6
'jjwih', # 0xb7
'jjyu', # 0xb8
'jjyug', # 0xb9
'jjyugg', # 0xba
'jjyugs', # 0xbb
'jjyun', # 0xbc
'jjyunj', # 0xbd
'jjyunh', # 0xbe
'jjyud', # 0xbf
'jjyul', # 0xc0
'jjyulg', # 0xc1
'jjyulm', # 0xc2
'jjyulb', # 0xc3
'jjyuls', # 0xc4
'jjyult', # 0xc5
'jjyulp', # 0xc6
'jjyulh', # 0xc7
'jjyum', # 0xc8
'jjyub', # 0xc9
'jjyubs', # 0xca
'jjyus', # 0xcb
'jjyuss', # 0xcc
'jjyung', # 0xcd
'jjyuj', # 0xce
'jjyuc', # 0xcf
'jjyuk', # 0xd0
'jjyut', # 0xd1
'jjyup', # 0xd2
'jjyuh', # 0xd3
'jjeu', # 0xd4
'jjeug', # 0xd5
'jjeugg', # 0xd6
'jjeugs', # 0xd7
'jjeun', # 0xd8
'jjeunj', # 0xd9
'jjeunh', # 0xda
'jjeud', # 0xdb
'jjeul', # 0xdc
'jjeulg', # 0xdd
'jjeulm', # 0xde
'jjeulb', # 0xdf
'jjeuls', # 0xe0
'jjeult', # 0xe1
'jjeulp', # 0xe2
'jjeulh', # 0xe3
'jjeum', # 0xe4
'jjeub', # 0xe5
'jjeubs', # 0xe6
'jjeus', # 0xe7
'jjeuss', # 0xe8
'jjeung', # 0xe9
'jjeuj', # 0xea
'jjeuc', # 0xeb
'jjeuk', # 0xec
'jjeut', # 0xed
'jjeup', # 0xee
'jjeuh', # 0xef
'jjyi', # 0xf0
'jjyig', # 0xf1
'jjyigg', # 0xf2
'jjyigs', # 0xf3
'jjyin', # 0xf4
'jjyinj', # 0xf5
'jjyinh', # 0xf6
'jjyid', # 0xf7
'jjyil', # 0xf8
'jjyilg', # 0xf9
'jjyilm', # 0xfa
'jjyilb', # 0xfb
'jjyils', # 0xfc
'jjyilt', # 0xfd
'jjyilp', # 0xfe
'jjyilh', # 0xff
)
|
RockySteveJobs/python-for-android
|
refs/heads/master
|
python-build/python-libs/gdata/samples/oauth/oauth_on_appengine/appengine_utilities/event.py
|
138
|
"""
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import __main__
class Event(object):
"""
    Event is a simple publish/subscribe based event dispatcher.
    An instance of it is attached to __main__; in order to use it,
    you must import both this module and __main__.
"""
def __init__(self):
self.events = []
def subscribe(self, event, callback, args = None):
"""
This method will subscribe a callback function to an event name.
"""
if not {"event": event, "callback": callback, "args": args, } \
in self.events:
self.events.append({"event": event, "callback": callback, \
"args": args, })
def unsubscribe(self, event, callback, args = None):
"""
This method will unsubscribe a callback from an event.
"""
if {"event": event, "callback": callback, "args": args, }\
in self.events:
self.events.remove({"event": event, "callback": callback,\
"args": args, })
def fire_event(self, event = None):
"""
This method is what a method uses to fire an event,
initiating all registered callbacks
"""
for e in self.events:
if e["event"] == event:
if type(e["args"]) == type([]):
e["callback"](*e["args"])
elif type(e["args"]) == type({}):
e["callback"](**e["args"])
elif e["args"] == None:
e["callback"]()
else:
e["callback"](e["args"])
"""
Assign an Event instance to __main__
"""
__main__.AEU_Events = Event()
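# Minimal usage sketch (event name and callback are hypothetical); it exercises the
# subscribe/fire_event flow documented above when this module is run directly.
if __name__ == "__main__":
    fired = []
    __main__.AEU_Events.subscribe("greet", fired.append, ["hello"])
    __main__.AEU_Events.fire_event("greet")  # invokes fired.append("hello")
    assert fired == ["hello"]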
|
gwpy/gwpy.github.io
|
refs/heads/master
|
docs/1.0.0/examples/timeseries/pycbc-snr-5.py
|
7
|
from pycbc.filter import matched_filter
snr = matched_filter(hp, zoom.to_pycbc(), psd=psd.to_pycbc(),
low_frequency_cutoff=15)
snrts = TimeSeries.from_pycbc(snr).abs()
|
guorendong/iridium-browser-ubuntu
|
refs/heads/ubuntu/precise
|
third_party/webdriver/pylib/test/selenium/webdriver/common/executing_async_javascript_tests.py
|
19
|
#!/usr/bin/python
# Copyright 2008-2010 WebDriver committers
# Copyright 2008-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from selenium.webdriver.common.by import By
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.remote.webelement import WebElement
class ExecutingAsyncJavaScriptTests(unittest.TestCase):
def testShouldNotTimeoutIfCallbackInvokedImmediately(self):
self._loadPage("ajaxy_page")
result = self.driver.execute_async_script("arguments[arguments.length - 1](123);")
self.assertTrue(type(result) == int)
self.assertEqual(123, result)
def testShouldBeAbleToReturnJavascriptPrimitivesFromAsyncScripts_NeitherNoneNorUndefined(self):
self._loadPage("ajaxy_page")
self.assertEqual(123, self.driver.execute_async_script(
"arguments[arguments.length - 1](123);"))
self.assertEqual("abc", self.driver.execute_async_script("arguments[arguments.length - 1]('abc');"))
self.assertFalse(bool(self.driver.execute_async_script("arguments[arguments.length - 1](false);")))
self.assertTrue(bool(self.driver.execute_async_script("arguments[arguments.length - 1](true);")))
#@Ignore(value = SELENESE, reason = "SeleniumRC cannot return null values.")
def testShouldBeAbleToReturnJavascriptPrimitivesFromAsyncScripts_NullAndUndefined(self):
self._loadPage("ajaxy_page")
self.assertTrue(self.driver.execute_async_script("arguments[arguments.length - 1](null)") is None)
self.assertTrue(self.driver.execute_async_script("arguments[arguments.length - 1]()") is None)
#@Ignore(value = SELENESE, reason = "Selenium cannot return arrays")
def testShouldBeAbleToReturnAnArrayLiteralFromAnAsyncScript(self):
self._loadPage("ajaxy_page")
result = self.driver.execute_async_script("arguments[arguments.length - 1]([]);")
self.assertTrue("Expected not to be null!", result is not None)
self.assertTrue(type(result) == list)
self.assertTrue(len(result) == 0)
#@Ignore(value = SELENESE, reason = "Selenium cannot return arrays")
def testShouldBeAbleToReturnAnArrayObjectFromAnAsyncScript(self):
self._loadPage("ajaxy_page")
result = self.driver.execute_async_script("arguments[arguments.length - 1](new Array());")
self.assertTrue("Expected not to be null!", result is not None)
self.assertTrue(type(result) == list)
self.assertTrue(len(result) == 0)
#@Ignore(value = ANDROID, SELENESE,
# reason = "Android does not properly handle arrays; Selenium cannot return arrays")
def testShouldBeAbleToReturnArraysOfPrimitivesFromAsyncScripts(self):
self._loadPage("ajaxy_page")
result = self.driver.execute_async_script(
"arguments[arguments.length - 1]([null, 123, 'abc', true, false]);")
self.assertTrue(result is not None)
self.assertTrue(type(result) == list)
self.assertFalse(bool(result.pop()))
self.assertTrue(bool(result.pop()))
self.assertEqual("abc", result.pop())
self.assertEqual(123, result.pop())
self.assertTrue(result.pop() is None)
self.assertTrue(len(result) == 0)
#@Ignore(value = SELENESE, reason = "Selenium cannot return elements from scripts")
def testShouldBeAbleToReturnWebElementsFromAsyncScripts(self):
self._loadPage("ajaxy_page")
result = self.driver.execute_async_script("arguments[arguments.length - 1](document.body);")
self.assertTrue(type(result) == WebElement)
self.assertEqual("body", result.tag_name.lower())
#@Ignore(value = ANDROID, SELENESE,
# reason = "Android does not properly handle arrays; Selenium cannot return elements")
def testShouldBeAbleToReturnArraysOfWebElementsFromAsyncScripts(self):
self._loadPage("ajaxy_page")
result = self.driver.execute_async_script(
"arguments[arguments.length - 1]([document.body, document.body]);")
self.assertTrue(result is not None)
self.assertTrue(type(result) == list)
        list_ = result
self.assertEqual(2, len(list_))
self.assertTrue(type(list_[0]) == WebElement)
self.assertTrue(type(list_[1]) == WebElement)
self.assertEqual("body", list_[0].tag_name)
#self.assertEqual(list_[0], list_[1])
def testShouldTimeoutIfScriptDoesNotInvokeCallback(self):
self._loadPage("ajaxy_page")
try:
#Script is expected to be async and explicitly callback, so this should timeout.
self.driver.execute_async_script("return 1 + 2;")
self.fail("Should have thrown a TimeOutException!")
        except TimeoutException, e:
pass
def testShouldTimeoutIfScriptDoesNotInvokeCallbackWithAZeroTimeout(self):
self._loadPage("ajaxy_page")
try:
self.driver.execute_async_script("window.setTimeout(function() {}, 0);")
fail("Should have thrown a TimeOutException!")
except TimeoutException, e:
pass
def testShouldNotTimeoutIfScriptCallsbackInsideAZeroTimeout(self):
self._loadPage("ajaxy_page")
self.driver.execute_async_script(
"""var callback = arguments[arguments.length - 1];
window.setTimeout(function() { callback(123); }, 0)""")
def testShouldTimeoutIfScriptDoesNotInvokeCallbackWithLongTimeout(self):
self.driver.set_script_timeout(0.5)
self._loadPage("ajaxy_page")
try:
self.driver.execute_async_script(
"""var callback = arguments[arguments.length - 1];
window.setTimeout(callback, 1500);""")
self.fail("Should have thrown a TimeOutException!")
except TimeoutException,e:
pass
def testShouldDetectPageLoadsWhileWaitingOnAnAsyncScriptAndReturnAnError(self):
self._loadPage("ajaxy_page")
self.driver.set_script_timeout(0.1)
try:
self.driver.execute_async_script("window.location = '" + self._pageURL("dynamic") + "';")
self.fail('Should have throw a WebDriverException')
except WebDriverException,expected:
pass
def testShouldCatchErrorsWhenExecutingInitialScript(self):
self._loadPage("ajaxy_page")
try:
self.driver.execute_async_script("throw Error('you should catch this!');")
self.fail("Should have thrown a WebDriverException")
except WebDriverException, expected:
pass
#@Ignore(value = ANDROID, CHROME,
# reason = "Android: Emulator is too slow and latency causes test to fall out of sync with app;"
# + "Chrome: Click is not working")
def testShouldBeAbleToExecuteAsynchronousScripts(self):
self._loadPage("ajaxy_page")
typer = self.driver.find_element(by=By.NAME, value="typer")
typer.send_keys("bob")
self.assertEqual("bob", typer.get_attribute("value"))
self.driver.find_element(by=By.ID, value="red").click()
self.driver.find_element(by=By.NAME, value="submit").click()
self.assertEqual(1, len(self.driver.find_elements(by=By.TAG_NAME, value='div')),
"There should only be 1 DIV at this point, which is used for the butter message")
self.driver.set_script_timeout(10)
text = self.driver.execute_async_script(
"""var callback = arguments[arguments.length - 1];
window.registerListener(arguments[arguments.length - 1]);""")
self.assertEqual("bob", text)
self.assertEqual("", typer.get_attribute("value"))
self.assertEqual(2, len(self.driver.find_elements(by=By.TAG_NAME, value='div')),
"There should be 1 DIV (for the butter message) + 1 DIV (for the new label)")
def testShouldBeAbleToPassMultipleArgumentsToAsyncScripts(self):
self._loadPage("ajaxy_page")
result = self.driver.execute_async_script("""
arguments[arguments.length - 1](arguments[0] + arguments[1]);""", 1, 2)
self.assertEqual(3, result)
#TODO DavidBurns Disabled till Java WebServer is used
#def testShouldBeAbleToMakeXMLHttpRequestsAndWaitForTheResponse(self):
# script = """
# var url = arguments[0];
# var callback = arguments[arguments.length - 1];
# // Adapted from http://www.quirksmode.org/js/xmlhttp.html
# var XMLHttpFactories = [
# function () return new XMLHttpRequest(),
# function () return new ActiveXObject('Msxml2.XMLHTTP'),
# function () return new ActiveXObject('Msxml3.XMLHTTP'),
# function () return new ActiveXObject('Microsoft.XMLHTTP')
# ];
# var xhr = false;
# while (!xhr && XMLHttpFactories.length)
# try{
# xhr = XMLHttpFactories.shift().call();
# }catch (e)
#
# if (!xhr) throw Error('unable to create XHR object');
# xhr.open('GET', url, true);
# xhr.onreadystatechange = function()
# if (xhr.readyState == 4) callback(xhr.responseText);
#
# xhr.send('');""" # empty string to stop firefox 3 from choking
#
# self._loadPage("ajaxy_page")
# self.driver.set_script_timeout(3)
# response = self.driver.execute_async_script(script, pages.sleepingPage + "?time=2")
# htm = "<html><head><title>Done</title></head><body>Slept for 2s</body></html>"
# self.assertTrue(response.strip() == htm)
def _pageURL(self, name):
return "http://localhost:%d/%s.html" % (self.webserver.port, name)
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
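# Illustrative sketch (not part of the test suite): the async-script
# convention exercised above -- WebDriver appends a callback as the last
# script argument and the script must invoke it to return a value. The
# local Firefox driver below is a hypothetical setup; in this suite the
# driver and web server are injected by shared test fixtures.
def _example_async_script():
    from selenium import webdriver
    driver = webdriver.Firefox()
    driver.set_script_timeout(5)
    try:
        # The injected callback is always arguments[arguments.length - 1].
        value = driver.execute_async_script(
            "var callback = arguments[arguments.length - 1];"
            "window.setTimeout(function() { callback(42); }, 100);")
        assert value == 42
    finally:
        driver.quit()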
|
helldorado/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/vultr/vultr_account_facts.py
|
28
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_account_facts
short_description: Gather facts about the Vultr account.
description:
- Gather facts about account balance, charges and payments.
version_added: "2.5"
author: "René Moser (@resmo)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Gather Vultr account facts
local_action:
module: vultr_account_facts
- name: Print the gathered facts
debug:
var: ansible_facts.vultr_account_facts
'''
RETURN = r'''
---
vultr_api:
  description: Response from Vultr API with a few additions/modifications
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
      description: Maximum number of retries for the API requests
returned: success
type: int
sample: 5
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_account_facts:
description: Response from Vultr API
returned: success
type: complex
contains:
balance:
description: Your account balance.
returned: success
type: float
sample: -214.69
pending_charges:
description: Charges pending.
returned: success
type: float
sample: 57.03
last_payment_date:
description: Date of the last payment.
returned: success
type: str
sample: "2017-08-26 12:47:48"
last_payment_amount:
description: The amount of the last payment transaction.
returned: success
type: float
sample: -250.0
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrAccountFacts(Vultr):
def __init__(self, module):
super(AnsibleVultrAccountFacts, self).__init__(module, "vultr_account_facts")
self.returns = {
'balance': dict(convert_to='float'),
'pending_charges': dict(convert_to='float'),
'last_payment_date': dict(),
'last_payment_amount': dict(convert_to='float'),
}
def get_account_info(self):
return self.api_query(path="/v1/account/info")
def main():
argument_spec = vultr_argument_spec()
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
account_facts = AnsibleVultrAccountFacts(module)
result = account_facts.get_result(account_facts.get_account_info())
ansible_facts = {
'vultr_account_facts': result['vultr_account_facts']
}
module.exit_json(ansible_facts=ansible_facts, **result)
if __name__ == '__main__':
main()
|
foreni-packages/golismero
|
refs/heads/master
|
tools/theHarvester/discovery/dogpilesearch.py
|
8
|
import httplib
import myparser
import time
from search_results import *
import sys
class search_dogpile:
def __init__(self,word,options):
self.word=word
self.total_results=u""
self.server="www.dogpile.com"
self.hostname="www.dogpile.com"
self.userAgent="(Mozilla/5.0 (Windows; U; Windows NT 6.0;en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6"
self.limit=options.limit
self.counter=options.start
self.quantity = 10
def do_search(self):
h = httplib.HTTP(self.server)
#Dogpile is hardcoded to return 10 results
h.putrequest('GET', "/search/web?qsi=" + str(self.counter) + "&q=\"%40" + self.word + "\"")
h.putheader('Host', self.hostname)
h.putheader('User-agent', self.userAgent)
h.endheaders()
returncode, returnmsg, response_headers = h.getreply()
encoding=response_headers['content-type'].split('charset=')[-1]
self.total_results+=unicode(h.getfile().read(), encoding)
def process(self):
print "[-] Searching DogPile:"
while self.counter < self.limit and self.counter <= 1000:
self.do_search()
time.sleep(1)
self.counter+=self.quantity
print "\r\tProcessed "+ str(self.counter) + " results..."
def get_results(self):
raw_results=myparser.parser(self.total_results,self.word)
results = search_results()
results.emails = raw_results.emails()
results.hostnames = raw_results.hostnames()
return results
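# Illustrative sketch (not part of the original module): how theHarvester's
# driver code typically exercises this class. The Options container below is
# hypothetical -- the real caller passes an optparse-style object exposing
# `limit` and `start`.
def _example_dogpile_search():
    class Options(object):
        limit = 100
        start = 0

    engine = search_dogpile("example.com", Options())
    engine.process()                # queries Dogpile in batches of 10 results
    results = engine.get_results()  # parsed into emails and hostnames
    return results.emails, results.hostnames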
|
RealImpactAnalytics/airflow
|
refs/heads/master
|
tests/models.py
|
1
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import os
import pendulum
import unittest
import time
import six
import re
import urllib
import textwrap
import inspect
from airflow import configuration, models, settings, AirflowException
from airflow.exceptions import AirflowDagCycleException, AirflowSkipException
from airflow.jobs import BackfillJob
from airflow.models import DAG, TaskInstance as TI
from airflow.models import State as ST
from airflow.models import DagModel, DagStat
from airflow.models import clear_task_instances
from airflow.models import XCom
from airflow.models import Connection
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import ShortCircuitOperator
from airflow.ti_deps.deps.trigger_rule_dep import TriggerRuleDep
from airflow.utils import timezone
from airflow.utils.weight_rule import WeightRule
from airflow.utils.state import State
from airflow.utils.trigger_rule import TriggerRule
from mock import patch
from parameterized import parameterized
from tempfile import NamedTemporaryFile
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
class DagTest(unittest.TestCase):
    def test_params_not_passed_is_empty_dict(self):
"""
Test that when 'params' is _not_ passed to a new Dag, that the params
attribute is set to an empty dictionary.
"""
dag = models.DAG('test-dag')
self.assertEqual(dict, type(dag.params))
self.assertEqual(0, len(dag.params))
def test_params_passed_and_params_in_default_args_no_override(self):
"""
Test that when 'params' exists as a key passed to the default_args dict
in addition to params being passed explicitly as an argument to the
dag, that the 'params' key of the default_args dict is merged with the
dict of the params argument.
"""
params1 = {'parameter1': 1}
params2 = {'parameter2': 2}
dag = models.DAG('test-dag',
default_args={'params': params1},
params=params2)
params_combined = params1.copy()
params_combined.update(params2)
self.assertEqual(params_combined, dag.params)
def test_dag_as_context_manager(self):
"""
Test DAG as a context manager.
When used as a context manager, Operators are automatically added to
        the DAG (unless they specify a different DAG)
"""
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
dag2 = DAG(
'dag2',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner2'})
with dag:
op1 = DummyOperator(task_id='op1')
op2 = DummyOperator(task_id='op2', dag=dag2)
self.assertIs(op1.dag, dag)
self.assertEqual(op1.owner, 'owner1')
self.assertIs(op2.dag, dag2)
self.assertEqual(op2.owner, 'owner2')
with dag2:
op3 = DummyOperator(task_id='op3')
self.assertIs(op3.dag, dag2)
self.assertEqual(op3.owner, 'owner2')
with dag:
with dag2:
op4 = DummyOperator(task_id='op4')
op5 = DummyOperator(task_id='op5')
self.assertIs(op4.dag, dag2)
self.assertIs(op5.dag, dag)
self.assertEqual(op4.owner, 'owner2')
self.assertEqual(op5.owner, 'owner1')
with DAG('creating_dag_in_cm', start_date=DEFAULT_DATE) as dag:
DummyOperator(task_id='op6')
self.assertEqual(dag.dag_id, 'creating_dag_in_cm')
self.assertEqual(dag.tasks[0].task_id, 'op6')
def test_dag_topological_sort(self):
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B
# A -> C -> D
# ordered: B, D, C, A or D, B, C, A or D, C, B, A
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op1.set_upstream([op2, op3])
op3.set_upstream(op4)
topological_list = dag.topological_sort()
logging.info(topological_list)
tasks = [op2, op3, op4]
self.assertTrue(topological_list[0] in tasks)
tasks.remove(topological_list[0])
self.assertTrue(topological_list[1] in tasks)
tasks.remove(topological_list[1])
self.assertTrue(topological_list[2] in tasks)
tasks.remove(topological_list[2])
self.assertTrue(topological_list[3] == op1)
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# C -> (A u B) -> D
# C -> E
# ordered: E | D, A | B, C
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op5 = DummyOperator(task_id='E')
op1.set_downstream(op3)
op2.set_downstream(op3)
op1.set_upstream(op4)
op2.set_upstream(op4)
op5.set_downstream(op3)
topological_list = dag.topological_sort()
logging.info(topological_list)
set1 = [op4, op5]
self.assertTrue(topological_list[0] in set1)
set1.remove(topological_list[0])
set2 = [op1, op2]
set2.extend(set1)
self.assertTrue(topological_list[1] in set2)
set2.remove(topological_list[1])
self.assertTrue(topological_list[2] in set2)
set2.remove(topological_list[2])
self.assertTrue(topological_list[3] in set2)
self.assertTrue(topological_list[4] == op3)
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
self.assertEquals(tuple(), dag.topological_sort())
def test_dag_none_default_args_start_date(self):
"""
Tests if a start_date of None in default_args
works.
"""
dag = DAG('DAG', default_args={'start_date': None})
self.assertEqual(dag.timezone, settings.TIMEZONE)
def test_dag_task_priority_weight_total(self):
width = 5
depth = 5
weight = 5
pattern = re.compile('stage(\\d*).(\\d*)')
# Fully connected parallel tasks. i.e. every task at each parallel
# stage is dependent on every task in the previous stage.
# Default weight should be calculated using downstream descendants
with DAG('dag', start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'}) as dag:
pipeline = [
[DummyOperator(
task_id='stage{}.{}'.format(i, j), priority_weight=weight)
for j in range(0, width)] for i in range(0, depth)
]
for d, stage in enumerate(pipeline):
if d == 0:
continue
for current_task in stage:
for prev_task in pipeline[d - 1]:
current_task.set_upstream(prev_task)
for task in six.itervalues(dag.task_dict):
match = pattern.match(task.task_id)
task_depth = int(match.group(1))
# the sum of each stages after this task + itself
correct_weight = ((depth - (task_depth + 1)) * width + 1) * weight
calculated_weight = task.priority_weight_total
self.assertEquals(calculated_weight, correct_weight)
# Same test as above except use 'upstream' for weight calculation
weight = 3
with DAG('dag', start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'}) as dag:
pipeline = [
[DummyOperator(
task_id='stage{}.{}'.format(i, j), priority_weight=weight,
weight_rule=WeightRule.UPSTREAM)
for j in range(0, width)] for i in range(0, depth)
]
for d, stage in enumerate(pipeline):
if d == 0:
continue
for current_task in stage:
for prev_task in pipeline[d - 1]:
current_task.set_upstream(prev_task)
for task in six.itervalues(dag.task_dict):
match = pattern.match(task.task_id)
task_depth = int(match.group(1))
# the sum of each stages after this task + itself
correct_weight = ((task_depth) * width + 1) * weight
calculated_weight = task.priority_weight_total
self.assertEquals(calculated_weight, correct_weight)
# Same test as above except use 'absolute' for weight calculation
weight = 10
with DAG('dag', start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'}) as dag:
pipeline = [
[DummyOperator(
task_id='stage{}.{}'.format(i, j), priority_weight=weight,
weight_rule=WeightRule.ABSOLUTE)
for j in range(0, width)] for i in range(0, depth)
]
for d, stage in enumerate(pipeline):
if d == 0:
continue
for current_task in stage:
for prev_task in pipeline[d - 1]:
current_task.set_upstream(prev_task)
for task in six.itervalues(dag.task_dict):
match = pattern.match(task.task_id)
task_depth = int(match.group(1))
# the sum of each stages after this task + itself
correct_weight = weight
calculated_weight = task.priority_weight_total
self.assertEquals(calculated_weight, correct_weight)
# Test if we enter an invalid weight rule
with DAG('dag', start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'}) as dag:
with self.assertRaises(AirflowException):
DummyOperator(task_id='should_fail', weight_rule='no rule')
def test_get_num_task_instances(self):
test_dag_id = 'test_get_num_task_instances_dag'
test_task_id = 'task_1'
test_dag = DAG(dag_id=test_dag_id, start_date=DEFAULT_DATE)
test_task = DummyOperator(task_id=test_task_id, dag=test_dag)
ti1 = TI(task=test_task, execution_date=DEFAULT_DATE)
ti1.state = None
ti2 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti2.state = State.RUNNING
ti3 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=2))
ti3.state = State.QUEUED
ti4 = TI(task=test_task, execution_date=DEFAULT_DATE + datetime.timedelta(days=3))
ti4.state = State.RUNNING
session = settings.Session()
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.merge(ti4)
session.commit()
self.assertEqual(0, DAG.get_num_task_instances(test_dag_id, ['fakename'],
session=session))
self.assertEqual(4, DAG.get_num_task_instances(test_dag_id, [test_task_id],
session=session))
self.assertEqual(4, DAG.get_num_task_instances(test_dag_id,
['fakename', test_task_id], session=session))
self.assertEqual(1, DAG.get_num_task_instances(test_dag_id, [test_task_id],
states=[None], session=session))
self.assertEqual(2, DAG.get_num_task_instances(test_dag_id, [test_task_id],
states=[State.RUNNING], session=session))
self.assertEqual(3, DAG.get_num_task_instances(test_dag_id, [test_task_id],
states=[None, State.RUNNING], session=session))
self.assertEqual(4, DAG.get_num_task_instances(test_dag_id, [test_task_id],
states=[None, State.QUEUED, State.RUNNING], session=session))
session.close()
def test_render_template_field(self):
"""Tests if render_template from a field works"""
dag = DAG('test-dag',
start_date=DEFAULT_DATE)
with dag:
task = DummyOperator(task_id='op1')
result = task.render_template('', '{{ foo }}', dict(foo='bar'))
self.assertEqual(result, 'bar')
def test_render_template_field_macro(self):
""" Tests if render_template from a field works,
if a custom filter was defined"""
dag = DAG('test-dag',
start_date=DEFAULT_DATE,
user_defined_macros = dict(foo='bar'))
with dag:
task = DummyOperator(task_id='op1')
result = task.render_template('', '{{ foo }}', dict())
self.assertEqual(result, 'bar')
def test_user_defined_filters(self):
def jinja_udf(name):
return 'Hello %s' %name
dag = models.DAG('test-dag',
start_date=DEFAULT_DATE,
user_defined_filters=dict(hello=jinja_udf))
jinja_env = dag.get_template_env()
self.assertIn('hello', jinja_env.filters)
self.assertEqual(jinja_env.filters['hello'], jinja_udf)
def test_render_template_field_filter(self):
""" Tests if render_template from a field works,
if a custom filter was defined"""
def jinja_udf(name):
return 'Hello %s' %name
dag = DAG('test-dag',
start_date=DEFAULT_DATE,
user_defined_filters = dict(hello=jinja_udf))
with dag:
task = DummyOperator(task_id='op1')
result = task.render_template('', "{{ 'world' | hello}}", dict())
self.assertEqual(result, 'Hello world')
def test_cycle(self):
# test empty
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
self.assertFalse(dag.test_cycle())
# test single task
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
opA = DummyOperator(task_id='A')
self.assertFalse(dag.test_cycle())
# test no cycle
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B -> C
# B -> D
# E -> F
with dag:
opA = DummyOperator(task_id='A')
opB = DummyOperator(task_id='B')
opC = DummyOperator(task_id='C')
opD = DummyOperator(task_id='D')
opE = DummyOperator(task_id='E')
opF = DummyOperator(task_id='F')
opA.set_downstream(opB)
opB.set_downstream(opC)
opB.set_downstream(opD)
opE.set_downstream(opF)
self.assertFalse(dag.test_cycle())
# test self loop
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> A
with dag:
opA = DummyOperator(task_id='A')
opA.set_downstream(opA)
with self.assertRaises(AirflowDagCycleException):
dag.test_cycle()
# test downstream self loop
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B -> C -> D -> E -> E
with dag:
opA = DummyOperator(task_id='A')
opB = DummyOperator(task_id='B')
opC = DummyOperator(task_id='C')
opD = DummyOperator(task_id='D')
opE = DummyOperator(task_id='E')
opA.set_downstream(opB)
opB.set_downstream(opC)
opC.set_downstream(opD)
opD.set_downstream(opE)
opE.set_downstream(opE)
with self.assertRaises(AirflowDagCycleException):
dag.test_cycle()
# large loop
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B -> C -> D -> E -> A
with dag:
opA = DummyOperator(task_id='A')
opB = DummyOperator(task_id='B')
opC = DummyOperator(task_id='C')
opD = DummyOperator(task_id='D')
opE = DummyOperator(task_id='E')
opA.set_downstream(opB)
opB.set_downstream(opC)
opC.set_downstream(opD)
opD.set_downstream(opE)
opE.set_downstream(opA)
with self.assertRaises(AirflowDagCycleException):
dag.test_cycle()
# test arbitrary loop
dag = DAG(
'dag',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# E-> A -> B -> F -> A
# -> C -> F
with dag:
opA = DummyOperator(task_id='A')
opB = DummyOperator(task_id='B')
opC = DummyOperator(task_id='C')
opD = DummyOperator(task_id='D')
opE = DummyOperator(task_id='E')
opF = DummyOperator(task_id='F')
opA.set_downstream(opB)
opA.set_downstream(opC)
opE.set_downstream(opA)
opC.set_downstream(opF)
opB.set_downstream(opF)
opF.set_downstream(opA)
with self.assertRaises(AirflowDagCycleException):
dag.test_cycle()
class DagStatTest(unittest.TestCase):
def test_dagstats_crud(self):
DagStat.create(dag_id='test_dagstats_crud')
session = settings.Session()
qry = session.query(DagStat).filter(DagStat.dag_id == 'test_dagstats_crud')
self.assertEqual(len(qry.all()), len(State.dag_states))
DagStat.set_dirty(dag_id='test_dagstats_crud')
res = qry.all()
for stat in res:
self.assertTrue(stat.dirty)
# create missing
DagStat.set_dirty(dag_id='test_dagstats_crud_2')
qry2 = session.query(DagStat).filter(DagStat.dag_id == 'test_dagstats_crud_2')
self.assertEqual(len(qry2.all()), len(State.dag_states))
dag = DAG(
'test_dagstats_crud',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='A')
now = timezone.utcnow()
dr = dag.create_dagrun(
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.FAILED,
external_trigger=False,
)
DagStat.update(dag_ids=['test_dagstats_crud'])
res = qry.all()
for stat in res:
if stat.state == State.FAILED:
self.assertEqual(stat.count, 1)
else:
self.assertEqual(stat.count, 0)
DagStat.update()
res = qry2.all()
for stat in res:
self.assertFalse(stat.dirty)
class DagRunTest(unittest.TestCase):
def create_dag_run(self, dag, state=State.RUNNING, task_states=None, execution_date=None):
now = timezone.utcnow()
if execution_date is None:
execution_date = now
dag_run = dag.create_dagrun(
run_id='manual__' + now.isoformat(),
execution_date=execution_date,
start_date=now,
state=state,
external_trigger=False,
)
if task_states is not None:
session = settings.Session()
for task_id, state in task_states.items():
ti = dag_run.get_task_instance(task_id)
ti.set_state(state, session)
session.close()
return dag_run
def test_id_for_date(self):
run_id = models.DagRun.id_for_date(
timezone.datetime(2015, 1, 2, 3, 4, 5, 6))
self.assertEqual(
'scheduled__2015-01-02T03:04:05', run_id,
'Generated run_id did not match expectations: {0}'.format(run_id))
def test_dagrun_find(self):
session = settings.Session()
now = timezone.utcnow()
dag_id1 = "test_dagrun_find_externally_triggered"
dag_run = models.DagRun(
dag_id=dag_id1,
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=True,
)
session.add(dag_run)
dag_id2 = "test_dagrun_find_not_externally_triggered"
dag_run = models.DagRun(
dag_id=dag_id2,
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=False,
)
session.add(dag_run)
session.commit()
self.assertEqual(1, len(models.DagRun.find(dag_id=dag_id1, external_trigger=True)))
self.assertEqual(0, len(models.DagRun.find(dag_id=dag_id1, external_trigger=False)))
self.assertEqual(0, len(models.DagRun.find(dag_id=dag_id2, external_trigger=True)))
self.assertEqual(1, len(models.DagRun.find(dag_id=dag_id2, external_trigger=False)))
def test_dagrun_success_when_all_skipped(self):
"""
Tests that a DAG run succeeds when all tasks are skipped
"""
dag = DAG(
dag_id='test_dagrun_success_when_all_skipped',
start_date=timezone.datetime(2017, 1, 1)
)
dag_task1 = ShortCircuitOperator(
task_id='test_short_circuit_false',
dag=dag,
python_callable=lambda: False)
dag_task2 = DummyOperator(
task_id='test_state_skipped1',
dag=dag)
dag_task3 = DummyOperator(
task_id='test_state_skipped2',
dag=dag)
dag_task1.set_downstream(dag_task2)
dag_task2.set_downstream(dag_task3)
initial_task_states = {
'test_short_circuit_false': State.SUCCESS,
'test_state_skipped1': State.SKIPPED,
'test_state_skipped2': State.SKIPPED,
}
dag_run = self.create_dag_run(dag=dag,
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.SUCCESS, updated_dag_state)
def test_dagrun_success_conditions(self):
session = settings.Session()
dag = DAG(
'test_dagrun_success_conditions',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
# A -> B
# A -> C -> D
# ordered: B, D, C, A or D, B, C, A or D, C, B, A
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op3 = DummyOperator(task_id='C')
op4 = DummyOperator(task_id='D')
op1.set_upstream([op2, op3])
op3.set_upstream(op4)
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(run_id='test_dagrun_success_conditions',
state=State.RUNNING,
execution_date=now,
start_date=now)
# op1 = root
ti_op1 = dr.get_task_instance(task_id=op1.task_id)
ti_op1.set_state(state=State.SUCCESS, session=session)
ti_op2 = dr.get_task_instance(task_id=op2.task_id)
ti_op3 = dr.get_task_instance(task_id=op3.task_id)
ti_op4 = dr.get_task_instance(task_id=op4.task_id)
# root is successful, but unfinished tasks
state = dr.update_state()
self.assertEqual(State.RUNNING, state)
# one has failed, but root is successful
ti_op2.set_state(state=State.FAILED, session=session)
ti_op3.set_state(state=State.SUCCESS, session=session)
ti_op4.set_state(state=State.SUCCESS, session=session)
state = dr.update_state()
self.assertEqual(State.SUCCESS, state)
def test_dagrun_deadlock(self):
session = settings.Session()
dag = DAG(
            'test_dagrun_deadlock',
start_date=DEFAULT_DATE,
default_args={'owner': 'owner1'})
with dag:
op1 = DummyOperator(task_id='A')
op2 = DummyOperator(task_id='B')
op2.trigger_rule = TriggerRule.ONE_FAILED
op2.set_upstream(op1)
dag.clear()
now = timezone.utcnow()
dr = dag.create_dagrun(run_id='test_dagrun_deadlock',
state=State.RUNNING,
execution_date=now,
start_date=now)
ti_op1 = dr.get_task_instance(task_id=op1.task_id)
ti_op1.set_state(state=State.SUCCESS, session=session)
ti_op2 = dr.get_task_instance(task_id=op2.task_id)
ti_op2.set_state(state=State.NONE, session=session)
dr.update_state()
self.assertEqual(dr.state, State.RUNNING)
ti_op2.set_state(state=State.NONE, session=session)
op2.trigger_rule = 'invalid'
dr.update_state()
self.assertEqual(dr.state, State.FAILED)
def test_dagrun_no_deadlock(self):
session = settings.Session()
dag = DAG('test_dagrun_no_deadlock',
start_date=DEFAULT_DATE)
with dag:
op1 = DummyOperator(task_id='dop', depends_on_past=True)
op2 = DummyOperator(task_id='tc', task_concurrency=1)
dag.clear()
dr = dag.create_dagrun(run_id='test_dagrun_no_deadlock_1',
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE)
dr2 = dag.create_dagrun(run_id='test_dagrun_no_deadlock_2',
state=State.RUNNING,
execution_date=DEFAULT_DATE + datetime.timedelta(days=1),
start_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti1_op1 = dr.get_task_instance(task_id='dop')
ti2_op1 = dr2.get_task_instance(task_id='dop')
ti2_op1 = dr.get_task_instance(task_id='tc')
ti2_op2 = dr.get_task_instance(task_id='tc')
ti1_op1.set_state(state=State.RUNNING, session=session)
dr.update_state()
dr2.update_state()
self.assertEqual(dr.state, State.RUNNING)
self.assertEqual(dr2.state, State.RUNNING)
ti2_op1.set_state(state=State.RUNNING, session=session)
dr.update_state()
dr2.update_state()
self.assertEqual(dr.state, State.RUNNING)
self.assertEqual(dr2.state, State.RUNNING)
def test_dagrun_success_callback(self):
def on_success_callable(context):
self.assertEqual(
context['dag_run'].dag_id,
'test_dagrun_success_callback'
)
dag = DAG(
dag_id='test_dagrun_success_callback',
start_date=datetime.datetime(2017, 1, 1),
on_success_callback=on_success_callable,
)
dag_task1 = DummyOperator(
task_id='test_state_succeeded1',
dag=dag)
dag_task2 = DummyOperator(
task_id='test_state_succeeded2',
dag=dag)
dag_task1.set_downstream(dag_task2)
initial_task_states = {
'test_state_succeeded1': State.SUCCESS,
'test_state_succeeded2': State.SUCCESS,
}
dag_run = self.create_dag_run(dag=dag,
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.SUCCESS, updated_dag_state)
def test_dagrun_failure_callback(self):
def on_failure_callable(context):
self.assertEqual(
context['dag_run'].dag_id,
'test_dagrun_failure_callback'
)
dag = DAG(
dag_id='test_dagrun_failure_callback',
start_date=datetime.datetime(2017, 1, 1),
on_failure_callback=on_failure_callable,
)
dag_task1 = DummyOperator(
task_id='test_state_succeeded1',
dag=dag)
dag_task2 = DummyOperator(
task_id='test_state_failed2',
dag=dag)
initial_task_states = {
'test_state_succeeded1': State.SUCCESS,
'test_state_failed2': State.FAILED,
}
dag_task1.set_downstream(dag_task2)
dag_run = self.create_dag_run(dag=dag,
state=State.RUNNING,
task_states=initial_task_states)
updated_dag_state = dag_run.update_state()
self.assertEqual(State.FAILED, updated_dag_state)
def test_get_task_instance_on_empty_dagrun(self):
"""
Make sure that a proper value is returned when a dagrun has no task instances
"""
dag = DAG(
dag_id='test_get_task_instance_on_empty_dagrun',
start_date=timezone.datetime(2017, 1, 1)
)
dag_task1 = ShortCircuitOperator(
task_id='test_short_circuit_false',
dag=dag,
python_callable=lambda: False)
session = settings.Session()
now = timezone.utcnow()
# Don't use create_dagrun since it will create the task instances too which we
# don't want
dag_run = models.DagRun(
dag_id=dag.dag_id,
run_id='manual__' + now.isoformat(),
execution_date=now,
start_date=now,
state=State.RUNNING,
external_trigger=False,
)
session.add(dag_run)
session.commit()
ti = dag_run.get_task_instance('test_short_circuit_false')
self.assertEqual(None, ti)
def test_get_latest_runs(self):
session = settings.Session()
dag = DAG(
dag_id='test_latest_runs_1',
start_date=DEFAULT_DATE)
dag_1_run_1 = self.create_dag_run(dag,
execution_date=timezone.datetime(2015, 1, 1))
dag_1_run_2 = self.create_dag_run(dag,
execution_date=timezone.datetime(2015, 1, 2))
dagruns = models.DagRun.get_latest_runs(session)
session.close()
for dagrun in dagruns:
if dagrun.dag_id == 'test_latest_runs_1':
self.assertEqual(dagrun.execution_date, timezone.datetime(2015, 1, 2))
def test_is_backfill(self):
dag = DAG(dag_id='test_is_backfill', start_date=DEFAULT_DATE)
dagrun = self.create_dag_run(dag, execution_date=DEFAULT_DATE)
dagrun.run_id = BackfillJob.ID_PREFIX + '_sfddsffds'
dagrun2 = self.create_dag_run(dag, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(dagrun.is_backfill)
self.assertFalse(dagrun2.is_backfill)
def test_removed_task_instances_can_be_restored(self):
def with_all_tasks_removed(dag):
return DAG(dag_id=dag.dag_id, start_date=dag.start_date)
dag = DAG('test_task_restoration', start_date=DEFAULT_DATE)
dag.add_task(DummyOperator(task_id='flaky_task', owner='test'))
dagrun = self.create_dag_run(dag)
flaky_ti = dagrun.get_task_instances()[0]
self.assertEquals('flaky_task', flaky_ti.task_id)
self.assertEquals(State.NONE, flaky_ti.state)
dagrun.dag = with_all_tasks_removed(dag)
dagrun.verify_integrity()
flaky_ti.refresh_from_db()
self.assertEquals(State.REMOVED, flaky_ti.state)
dagrun.dag.add_task(DummyOperator(task_id='flaky_task', owner='test'))
dagrun.verify_integrity()
flaky_ti.refresh_from_db()
self.assertEquals(State.NONE, flaky_ti.state)
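# Illustrative sketch (not used by the tests above): the minimal flow the
# DagRun tests rely on -- build a DAG, create a run for it, then let the run
# derive its state from its task instances via update_state().
def _example_dagrun_flow():
    dag = DAG('example_flow', start_date=DEFAULT_DATE,
              default_args={'owner': 'owner1'})
    with dag:
        DummyOperator(task_id='only_task')
    now = timezone.utcnow()
    dag_run = dag.create_dagrun(
        run_id='manual__' + now.isoformat(),
        execution_date=now,
        start_date=now,
        state=State.RUNNING,
        external_trigger=False,
    )
    # update_state() inspects the run's task instances and returns the new state.
    return dag_run.update_state()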
class DagBagTest(unittest.TestCase):
def test_get_existing_dag(self):
"""
        test that we're able to parse some example DAGs and retrieve them
"""
dagbag = models.DagBag(include_examples=True)
some_expected_dag_ids = ["example_bash_operator",
"example_branch_operator"]
for dag_id in some_expected_dag_ids:
dag = dagbag.get_dag(dag_id)
self.assertIsNotNone(dag)
self.assertEqual(dag_id, dag.dag_id)
self.assertGreaterEqual(dagbag.size(), 7)
def test_get_non_existing_dag(self):
"""
test that retrieving a non existing dag id returns None without crashing
"""
dagbag = models.DagBag(include_examples=True)
non_existing_dag_id = "non_existing_dag_id"
self.assertIsNone(dagbag.get_dag(non_existing_dag_id))
def test_process_file_that_contains_multi_bytes_char(self):
"""
        test that we're able to parse a file that contains a multi-byte char
"""
f = NamedTemporaryFile()
f.write('\u3042'.encode('utf8')) # write multi-byte char (hiragana)
f.flush()
dagbag = models.DagBag(include_examples=True)
self.assertEqual([], dagbag.process_file(f.name))
def test_zip(self):
"""
test the loading of a DAG within a zip file that includes dependencies
"""
dagbag = models.DagBag()
dagbag.process_file(os.path.join(TEST_DAGS_FOLDER, "test_zip.zip"))
self.assertTrue(dagbag.get_dag("test_zip_dag"))
@patch.object(DagModel,'get_current')
def test_get_dag_without_refresh(self, mock_dagmodel):
"""
Test that, once a DAG is loaded, it doesn't get refreshed again if it
hasn't been expired.
"""
dag_id = 'example_bash_operator'
mock_dagmodel.return_value = DagModel()
mock_dagmodel.return_value.last_expired = None
mock_dagmodel.return_value.fileloc = 'foo'
class TestDagBag(models.DagBag):
process_file_calls = 0
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
if 'example_bash_operator.py' == os.path.basename(filepath):
TestDagBag.process_file_calls += 1
super(TestDagBag, self).process_file(filepath, only_if_updated, safe_mode)
dagbag = TestDagBag(include_examples=True)
processed_files = dagbag.process_file_calls
        # Should not call process_file again, since it's already loaded during init.
self.assertEqual(1, dagbag.process_file_calls)
self.assertIsNotNone(dagbag.get_dag(dag_id))
self.assertEqual(1, dagbag.process_file_calls)
def test_get_dag_fileloc(self):
"""
Test that fileloc is correctly set when we load example DAGs,
specifically SubDAGs.
"""
dagbag = models.DagBag(include_examples=True)
expected = {
'example_bash_operator': 'example_bash_operator.py',
'example_subdag_operator': 'example_subdag_operator.py',
'example_subdag_operator.section-1': 'subdags/subdag.py'
}
for dag_id, path in expected.items():
dag = dagbag.get_dag(dag_id)
self.assertTrue(
dag.fileloc.endswith('airflow/example_dags/' + path))
def process_dag(self, create_dag):
"""
Helper method to process a file generated from the input create_dag function.
"""
# write source to file
source = textwrap.dedent(''.join(
inspect.getsource(create_dag).splitlines(True)[1:-1]))
f = NamedTemporaryFile()
f.write(source.encode('utf8'))
f.flush()
dagbag = models.DagBag(include_examples=False)
found_dags = dagbag.process_file(f.name)
return (dagbag, found_dags, f.name)
def validate_dags(self, expected_parent_dag, actual_found_dags, actual_dagbag,
should_be_found=True):
expected_dag_ids = list(map(lambda dag: dag.dag_id, expected_parent_dag.subdags))
expected_dag_ids.append(expected_parent_dag.dag_id)
actual_found_dag_ids = list(map(lambda dag: dag.dag_id, actual_found_dags))
for dag_id in expected_dag_ids:
actual_dagbag.log.info('validating %s' % dag_id)
self.assertEquals(
dag_id in actual_found_dag_ids, should_be_found,
'dag "%s" should %shave been found after processing dag "%s"' %
(dag_id, '' if should_be_found else 'not ', expected_parent_dag.dag_id)
)
self.assertEquals(
dag_id in actual_dagbag.dags, should_be_found,
'dag "%s" should %sbe in dagbag.dags after processing dag "%s"' %
(dag_id, '' if should_be_found else 'not ', expected_parent_dag.dag_id)
)
def test_load_subdags(self):
# Define Dag to load
def standard_subdag():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
import datetime
DAG_NAME = 'master'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# master:
# A -> opSubDag_0
# master.opsubdag_0:
# -> subdag_0.task
# A -> opSubDag_1
# master.opsubdag_1:
# -> subdag_1.task
with dag:
def subdag_0():
subdag_0 = DAG('master.opSubdag_0', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_0.task', dag=subdag_0)
return subdag_0
def subdag_1():
subdag_1 = DAG('master.opSubdag_1', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_1.task', dag=subdag_1)
return subdag_1
opSubdag_0 = SubDagOperator(
task_id='opSubdag_0', dag=dag, subdag=subdag_0())
opSubdag_1 = SubDagOperator(
task_id='opSubdag_1', dag=dag, subdag=subdag_1())
opA = DummyOperator(task_id='A')
opA.set_downstream(opSubdag_0)
opA.set_downstream(opSubdag_1)
return dag
testDag = standard_subdag()
# sanity check to make sure DAG.subdag is still functioning properly
self.assertEqual(len(testDag.subdags), 2)
# Perform processing dag
dagbag, found_dags, _ = self.process_dag(standard_subdag)
# Validate correctness
# all dags from testDag should be listed
self.validate_dags(testDag, found_dags, dagbag)
# Define Dag to load
def nested_subdags():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
import datetime
DAG_NAME = 'master'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# master:
# A -> opSubdag_0
# master.opSubdag_0:
# -> opSubDag_A
# master.opSubdag_0.opSubdag_A:
# -> subdag_A.task
# -> opSubdag_B
# master.opSubdag_0.opSubdag_B:
# -> subdag_B.task
# A -> opSubdag_1
# master.opSubdag_1:
# -> opSubdag_C
# master.opSubdag_1.opSubdag_C:
# -> subdag_C.task
# -> opSubDag_D
# master.opSubdag_1.opSubdag_D:
# -> subdag_D.task
with dag:
def subdag_A():
subdag_A = DAG(
'master.opSubdag_0.opSubdag_A', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_A.task', dag=subdag_A)
return subdag_A
def subdag_B():
subdag_B = DAG(
'master.opSubdag_0.opSubdag_B', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_B.task', dag=subdag_B)
return subdag_B
def subdag_C():
subdag_C = DAG(
'master.opSubdag_1.opSubdag_C', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_C.task', dag=subdag_C)
return subdag_C
def subdag_D():
subdag_D = DAG(
'master.opSubdag_1.opSubdag_D', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_D.task', dag=subdag_D)
return subdag_D
def subdag_0():
subdag_0 = DAG('master.opSubdag_0', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_A', dag=subdag_0, subdag=subdag_A())
SubDagOperator(task_id='opSubdag_B', dag=subdag_0, subdag=subdag_B())
return subdag_0
def subdag_1():
subdag_1 = DAG('master.opSubdag_1', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_C', dag=subdag_1, subdag=subdag_C())
SubDagOperator(task_id='opSubdag_D', dag=subdag_1, subdag=subdag_D())
return subdag_1
opSubdag_0 = SubDagOperator(
task_id='opSubdag_0', dag=dag, subdag=subdag_0())
opSubdag_1 = SubDagOperator(
task_id='opSubdag_1', dag=dag, subdag=subdag_1())
opA = DummyOperator(task_id='A')
opA.set_downstream(opSubdag_0)
opA.set_downstream(opSubdag_1)
return dag
testDag = nested_subdags()
# sanity check to make sure DAG.subdag is still functioning properly
self.assertEqual(len(testDag.subdags), 6)
# Perform processing dag
dagbag, found_dags, _ = self.process_dag(nested_subdags)
# Validate correctness
# all dags from testDag should be listed
self.validate_dags(testDag, found_dags, dagbag)
def test_skip_cycle_dags(self):
"""
Don't crash when loading an invalid (contains a cycle) DAG file.
Don't load the dag into the DagBag either
"""
# Define Dag to load
def basic_cycle():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
import datetime
DAG_NAME = 'cycle_dag'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# A -> A
with dag:
opA = DummyOperator(task_id='A')
opA.set_downstream(opA)
return dag
testDag = basic_cycle()
# sanity check to make sure DAG.subdag is still functioning properly
self.assertEqual(len(testDag.subdags), 0)
# Perform processing dag
dagbag, found_dags, file_path = self.process_dag(basic_cycle)
        # Validate correctness
# None of the dags should be found
self.validate_dags(testDag, found_dags, dagbag, should_be_found=False)
self.assertIn(file_path, dagbag.import_errors)
# Define Dag to load
def nested_subdag_cycle():
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.subdag_operator import SubDagOperator
import datetime
DAG_NAME = 'nested_cycle'
DEFAULT_ARGS = {
'owner': 'owner1',
'start_date': datetime.datetime(2016, 1, 1)
}
dag = DAG(
DAG_NAME,
default_args=DEFAULT_ARGS)
# cycle:
# A -> opSubdag_0
# cycle.opSubdag_0:
# -> opSubDag_A
# cycle.opSubdag_0.opSubdag_A:
# -> subdag_A.task
# -> opSubdag_B
# cycle.opSubdag_0.opSubdag_B:
# -> subdag_B.task
# A -> opSubdag_1
# cycle.opSubdag_1:
# -> opSubdag_C
# cycle.opSubdag_1.opSubdag_C:
# -> subdag_C.task -> subdag_C.task >Invalid Loop<
# -> opSubDag_D
# cycle.opSubdag_1.opSubdag_D:
# -> subdag_D.task
with dag:
def subdag_A():
subdag_A = DAG(
'nested_cycle.opSubdag_0.opSubdag_A', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_A.task', dag=subdag_A)
return subdag_A
def subdag_B():
subdag_B = DAG(
'nested_cycle.opSubdag_0.opSubdag_B', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_B.task', dag=subdag_B)
return subdag_B
def subdag_C():
subdag_C = DAG(
'nested_cycle.opSubdag_1.opSubdag_C', default_args=DEFAULT_ARGS)
opSubdag_C_task = DummyOperator(
task_id='subdag_C.task', dag=subdag_C)
# introduce a loop in opSubdag_C
opSubdag_C_task.set_downstream(opSubdag_C_task)
return subdag_C
def subdag_D():
subdag_D = DAG(
'nested_cycle.opSubdag_1.opSubdag_D', default_args=DEFAULT_ARGS)
DummyOperator(task_id='subdag_D.task', dag=subdag_D)
return subdag_D
def subdag_0():
subdag_0 = DAG('nested_cycle.opSubdag_0', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_A', dag=subdag_0, subdag=subdag_A())
SubDagOperator(task_id='opSubdag_B', dag=subdag_0, subdag=subdag_B())
return subdag_0
def subdag_1():
subdag_1 = DAG('nested_cycle.opSubdag_1', default_args=DEFAULT_ARGS)
SubDagOperator(task_id='opSubdag_C', dag=subdag_1, subdag=subdag_C())
SubDagOperator(task_id='opSubdag_D', dag=subdag_1, subdag=subdag_D())
return subdag_1
opSubdag_0 = SubDagOperator(
task_id='opSubdag_0', dag=dag, subdag=subdag_0())
opSubdag_1 = SubDagOperator(
task_id='opSubdag_1', dag=dag, subdag=subdag_1())
opA = DummyOperator(task_id='A')
opA.set_downstream(opSubdag_0)
opA.set_downstream(opSubdag_1)
return dag
testDag = nested_subdag_cycle()
# sanity check to make sure DAG.subdag is still functioning properly
self.assertEqual(len(testDag.subdags), 6)
# Perform processing dag
dagbag, found_dags, file_path = self.process_dag(nested_subdag_cycle)
# Validate correctness
# None of the dags should be found
self.validate_dags(testDag, found_dags, dagbag, should_be_found=False)
self.assertIn(file_path, dagbag.import_errors)
def test_process_file_with_none(self):
"""
test that process_file can handle Nones
"""
dagbag = models.DagBag(include_examples=True)
self.assertEqual([], dagbag.process_file(None))
class TaskInstanceTest(unittest.TestCase):
def test_set_task_dates(self):
"""
Test that tasks properly take start/end dates from DAGs
"""
dag = DAG('dag', start_date=DEFAULT_DATE, end_date=DEFAULT_DATE + datetime.timedelta(days=10))
op1 = DummyOperator(task_id='op_1', owner='test')
self.assertTrue(op1.start_date is None and op1.end_date is None)
# dag should assign its dates to op1 because op1 has no dates
dag.add_task(op1)
self.assertTrue(
op1.start_date == dag.start_date and op1.end_date == dag.end_date)
op2 = DummyOperator(
task_id='op_2',
owner='test',
start_date=DEFAULT_DATE - datetime.timedelta(days=1),
end_date=DEFAULT_DATE + datetime.timedelta(days=11))
# dag should assign its dates to op2 because they are more restrictive
dag.add_task(op2)
self.assertTrue(
op2.start_date == dag.start_date and op2.end_date == dag.end_date)
op3 = DummyOperator(
task_id='op_3',
owner='test',
start_date=DEFAULT_DATE + datetime.timedelta(days=1),
end_date=DEFAULT_DATE + datetime.timedelta(days=9))
# op3 should keep its dates because they are more restrictive
dag.add_task(op3)
self.assertTrue(
op3.start_date == DEFAULT_DATE + datetime.timedelta(days=1))
self.assertTrue(
op3.end_date == DEFAULT_DATE + datetime.timedelta(days=9))
def test_timezone_awareness(self):
NAIVE_DATETIME = DEFAULT_DATE.replace(tzinfo=None)
# check ti without dag (just for bw compat)
op_no_dag = DummyOperator(task_id='op_no_dag')
ti = TI(task=op_no_dag, execution_date=NAIVE_DATETIME)
self.assertEquals(ti.execution_date, DEFAULT_DATE)
# check with dag without localized execution_date
dag = DAG('dag', start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id='op_1')
dag.add_task(op1)
ti = TI(task=op1, execution_date=NAIVE_DATETIME)
self.assertEquals(ti.execution_date, DEFAULT_DATE)
# with dag and localized execution_date
tz = pendulum.timezone("Europe/Amsterdam")
execution_date = timezone.datetime(2016, 1, 1, 1, 0, 0, tzinfo=tz)
utc_date = timezone.convert_to_utc(execution_date)
ti = TI(task=op1, execution_date=execution_date)
self.assertEquals(ti.execution_date, utc_date)
def test_set_dag(self):
"""
Test assigning Operators to Dags, including deferred assignment
"""
dag = DAG('dag', start_date=DEFAULT_DATE)
dag2 = DAG('dag2', start_date=DEFAULT_DATE)
op = DummyOperator(task_id='op_1', owner='test')
# no dag assigned
self.assertFalse(op.has_dag())
self.assertRaises(AirflowException, getattr, op, 'dag')
# no improper assignment
with self.assertRaises(TypeError):
op.dag = 1
op.dag = dag
# no reassignment
with self.assertRaises(AirflowException):
op.dag = dag2
# but assigning the same dag is ok
op.dag = dag
self.assertIs(op.dag, dag)
self.assertIn(op, dag.tasks)
def test_infer_dag(self):
dag = DAG('dag', start_date=DEFAULT_DATE)
dag2 = DAG('dag2', start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id='test_op_1', owner='test')
op2 = DummyOperator(task_id='test_op_2', owner='test')
op3 = DummyOperator(task_id='test_op_3', owner='test', dag=dag)
op4 = DummyOperator(task_id='test_op_4', owner='test', dag=dag2)
# double check dags
self.assertEqual(
[i.has_dag() for i in [op1, op2, op3, op4]],
[False, False, True, True])
# can't combine operators with no dags
self.assertRaises(AirflowException, op1.set_downstream, op2)
# op2 should infer dag from op1
op1.dag = dag
op1.set_downstream(op2)
self.assertIs(op2.dag, dag)
# can't assign across multiple DAGs
self.assertRaises(AirflowException, op1.set_downstream, op4)
self.assertRaises(AirflowException, op1.set_downstream, [op3, op4])
def test_bitshift_compose_operators(self):
dag = DAG('dag', start_date=DEFAULT_DATE)
op1 = DummyOperator(task_id='test_op_1', owner='test')
op2 = DummyOperator(task_id='test_op_2', owner='test')
op3 = DummyOperator(task_id='test_op_3', owner='test')
op4 = DummyOperator(task_id='test_op_4', owner='test')
op5 = DummyOperator(task_id='test_op_5', owner='test')
# can't compose operators without dags
with self.assertRaises(AirflowException):
op1 >> op2
dag >> op1 >> op2 << op3
# make sure dag assignment carries through
# using __rrshift__
self.assertIs(op1.dag, dag)
self.assertIs(op2.dag, dag)
self.assertIs(op3.dag, dag)
# op2 should be downstream of both
self.assertIn(op2, op1.downstream_list)
self.assertIn(op2, op3.downstream_list)
# test dag assignment with __rlshift__
dag << op4
self.assertIs(op4.dag, dag)
# dag assignment with __rrshift__
dag >> op5
self.assertIs(op5.dag, dag)
@patch.object(DAG, 'concurrency_reached')
def test_requeue_over_concurrency(self, mock_concurrency_reached):
mock_concurrency_reached.return_value = True
dag = DAG(dag_id='test_requeue_over_concurrency', start_date=DEFAULT_DATE,
max_active_runs=1, concurrency=2)
task = DummyOperator(task_id='test_requeue_over_concurrency_op', dag=dag)
ti = TI(task=task, execution_date=timezone.utcnow())
ti.run()
self.assertEqual(ti.state, models.State.NONE)
@patch.object(TI, 'pool_full')
def test_run_pooling_task(self, mock_pool_full):
"""
        test that running a task in a (mocked) full pool still updates the task state
        (pool limits are no longer checked in ti_deps, so the run ends in SUCCESS)
"""
# Mock the pool out with a full pool because the pool doesn't actually exist
mock_pool_full.return_value = True
dag = models.DAG(dag_id='test_run_pooling_task')
task = DummyOperator(task_id='test_run_pooling_task_op', dag=dag,
pool='test_run_pooling_task_pool', owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=timezone.utcnow())
ti.run()
self.assertEqual(ti.state, models.State.SUCCESS)
@patch.object(TI, 'pool_full')
def test_run_pooling_task_with_mark_success(self, mock_pool_full):
"""
        test that running a task with the mark_success param updates the task state
        to SUCCESS without actually running the task.
"""
# Mock the pool out with a full pool because the pool doesn't actually exist
mock_pool_full.return_value = True
dag = models.DAG(dag_id='test_run_pooling_task_with_mark_success')
task = DummyOperator(
task_id='test_run_pooling_task_with_mark_success_op',
dag=dag,
pool='test_run_pooling_task_with_mark_success_pool',
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=timezone.utcnow())
ti.run(mark_success=True)
self.assertEqual(ti.state, models.State.SUCCESS)
def test_run_pooling_task_with_skip(self):
"""
        test that running a task which raises AirflowSkipException ends
        up in a SKIPPED state.
"""
def raise_skip_exception():
raise AirflowSkipException
dag = models.DAG(dag_id='test_run_pooling_task_with_skip')
task = PythonOperator(
task_id='test_run_pooling_task_with_skip',
dag=dag,
python_callable=raise_skip_exception,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=timezone.utcnow())
ti.run()
self.assertEqual(models.State.SKIPPED, ti.state)
def test_retry_delay(self):
"""
Test that retry delays are respected
"""
dag = models.DAG(dag_id='test_retry_handling')
task = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
retry_delay=datetime.timedelta(seconds=3),
dag=dag,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
def run_with_error(ti):
try:
ti.run()
except AirflowException:
pass
ti = TI(
task=task, execution_date=timezone.utcnow())
self.assertEqual(ti.try_number, 1)
# first run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti.try_number, 2)
# second run -- still up for retry because retry_delay hasn't expired
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
# third run -- failed
time.sleep(3)
run_with_error(ti)
self.assertEqual(ti.state, State.FAILED)
@patch.object(TI, 'pool_full')
def test_retry_handling(self, mock_pool_full):
"""
Test that task retries are handled properly
"""
# Mock the pool with a pool with slots open since the pool doesn't actually exist
mock_pool_full.return_value = False
dag = models.DAG(dag_id='test_retry_handling')
task = BashOperator(
task_id='test_retry_handling_op',
bash_command='exit 1',
retries=1,
retry_delay=datetime.timedelta(seconds=0),
dag=dag,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
def run_with_error(ti):
try:
ti.run()
except AirflowException:
pass
ti = TI(
task=task, execution_date=timezone.utcnow())
self.assertEqual(ti.try_number, 1)
# first run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti._try_number, 1)
self.assertEqual(ti.try_number, 2)
# second run -- fail
run_with_error(ti)
self.assertEqual(ti.state, State.FAILED)
self.assertEqual(ti._try_number, 2)
self.assertEqual(ti.try_number, 3)
# Clear the TI state since you can't run a task with a FAILED state without
# clearing it first
dag.clear()
# third run -- up for retry
run_with_error(ti)
self.assertEqual(ti.state, State.UP_FOR_RETRY)
self.assertEqual(ti._try_number, 3)
self.assertEqual(ti.try_number, 4)
# fourth run -- fail
run_with_error(ti)
ti.refresh_from_db()
self.assertEqual(ti.state, State.FAILED)
self.assertEqual(ti._try_number, 4)
self.assertEqual(ti.try_number, 5)
def test_next_retry_datetime(self):
delay = datetime.timedelta(seconds=30)
max_delay = datetime.timedelta(minutes=60)
dag = models.DAG(dag_id='fail_dag')
task = BashOperator(
task_id='task_with_exp_backoff_and_max_delay',
bash_command='exit 1',
retries=3,
retry_delay=delay,
retry_exponential_backoff=True,
max_retry_delay=max_delay,
dag=dag,
owner='airflow',
start_date=timezone.datetime(2016, 2, 1, 0, 0, 0))
ti = TI(
task=task, execution_date=DEFAULT_DATE)
ti.end_date = pendulum.instance(timezone.utcnow())
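# With retry_exponential_backoff the delay roughly doubles on each retry (with some
# per-instance jitter) and is capped at max_retry_delay; the windows asserted below
# reflect that behaviour.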
dt = ti.next_retry_datetime()
# between 30 * 2^-1 and 30 * 2^0 (15 and 30)
period = ti.end_date.add(seconds=30) - ti.end_date.add(seconds=15)
self.assertTrue(dt in period)
ti.try_number = 3
dt = ti.next_retry_datetime()
# between 30 * 2^2 and 30 * 2^3 (120 and 240)
period = ti.end_date.add(seconds=240) - ti.end_date.add(seconds=120)
self.assertTrue(dt in period)
ti.try_number = 5
dt = ti.next_retry_datetime()
# between 30 * 2^4 and 30 * 2^5 (480 and 960)
period = ti.end_date.add(seconds=960) - ti.end_date.add(seconds=480)
self.assertTrue(dt in period)
ti.try_number = 9
dt = ti.next_retry_datetime()
self.assertEqual(dt, ti.end_date+max_delay)
ti.try_number = 50
dt = ti.next_retry_datetime()
self.assertEqual(dt, ti.end_date+max_delay)
def test_depends_on_past(self):
dagbag = models.DagBag()
dag = dagbag.get_dag('test_depends_on_past')
dag.clear()
task = dag.tasks[0]
run_date = task.start_date + datetime.timedelta(days=5)
ti = TI(task, run_date)
# depends_on_past prevents the run
task.run(start_date=run_date, end_date=run_date)
ti.refresh_from_db()
self.assertIs(ti.state, None)
# ignore first depends_on_past to allow the run
task.run(
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
# Parameterized tests to check for the correct firing
# of the trigger_rule under various circumstances
# Numeric fields are in order:
# successes, skipped, failed, upstream_failed, done
@parameterized.expand([
#
# Tests for all_success
#
['all_success', 5, 0, 0, 0, 0, True, None, True],
['all_success', 2, 0, 0, 0, 0, True, None, False],
['all_success', 2, 0, 1, 0, 0, True, ST.UPSTREAM_FAILED, False],
['all_success', 2, 1, 0, 0, 0, True, ST.SKIPPED, False],
#
# Tests for one_success
#
['one_success', 5, 0, 0, 0, 5, True, None, True],
['one_success', 2, 0, 0, 0, 2, True, None, True],
['one_success', 2, 0, 1, 0, 3, True, None, True],
['one_success', 2, 1, 0, 0, 3, True, None, True],
#
# Tests for all_failed
#
['all_failed', 5, 0, 0, 0, 5, True, ST.SKIPPED, False],
['all_failed', 0, 0, 5, 0, 5, True, None, True],
['all_failed', 2, 0, 0, 0, 2, True, ST.SKIPPED, False],
['all_failed', 2, 0, 1, 0, 3, True, ST.SKIPPED, False],
['all_failed', 2, 1, 0, 0, 3, True, ST.SKIPPED, False],
#
# Tests for one_failed
#
['one_failed', 5, 0, 0, 0, 0, True, None, False],
['one_failed', 2, 0, 0, 0, 0, True, None, False],
['one_failed', 2, 0, 1, 0, 0, True, None, True],
['one_failed', 2, 1, 0, 0, 3, True, None, False],
['one_failed', 2, 3, 0, 0, 5, True, ST.SKIPPED, False],
#
# Tests for done
#
['all_done', 5, 0, 0, 0, 5, True, None, True],
['all_done', 2, 0, 0, 0, 2, True, None, False],
['all_done', 2, 0, 1, 0, 3, True, None, False],
['all_done', 2, 1, 0, 0, 3, True, None, False]
])
def test_check_task_dependencies(self, trigger_rule, successes, skipped,
failed, upstream_failed, done,
flag_upstream_failed,
expect_state, expect_completed):
start_date = timezone.datetime(2016, 2, 1, 0, 0, 0)
dag = models.DAG('test-dag', start_date=start_date)
downstream = DummyOperator(task_id='downstream',
dag=dag, owner='airflow',
trigger_rule=trigger_rule)
for i in range(5):
task = DummyOperator(task_id='runme_{}'.format(i),
dag=dag, owner='airflow')
task.set_downstream(downstream)
run_date = task.start_date + datetime.timedelta(days=5)
ti = TI(downstream, run_date)
dep_results = TriggerRuleDep()._evaluate_trigger_rule(
ti=ti,
successes=successes,
skipped=skipped,
failed=failed,
upstream_failed=upstream_failed,
done=done,
flag_upstream_failed=flag_upstream_failed)
completed = all([dep.passed for dep in dep_results])
self.assertEqual(completed, expect_completed)
self.assertEqual(ti.state, expect_state)
def test_xcom_pull(self):
"""
Test xcom_pull, using different filtering methods.
"""
dag = models.DAG(
dag_id='test_xcom', schedule_interval='@monthly',
start_date=timezone.datetime(2016, 6, 1, 0, 0, 0))
exec_date = timezone.utcnow()
# Push a value
task1 = DummyOperator(task_id='test_xcom_1', dag=dag, owner='airflow')
ti1 = TI(task=task1, execution_date=exec_date)
ti1.xcom_push(key='foo', value='bar')
# Push another value with the same key (but by a different task)
task2 = DummyOperator(task_id='test_xcom_2', dag=dag, owner='airflow')
ti2 = TI(task=task2, execution_date=exec_date)
ti2.xcom_push(key='foo', value='baz')
# Pull with no arguments
result = ti1.xcom_pull()
self.assertEqual(result, None)
# Pull the value pushed most recently by any task.
result = ti1.xcom_pull(key='foo')
self.assertEqual(result, 'baz')
# Pull the value pushed by the first task
result = ti1.xcom_pull(task_ids='test_xcom_1', key='foo')
self.assertEqual(result, 'bar')
# Pull the value pushed by the second task
result = ti1.xcom_pull(task_ids='test_xcom_2', key='foo')
self.assertEqual(result, 'baz')
# Pull the values pushed by both tasks
result = ti1.xcom_pull(
task_ids=['test_xcom_1', 'test_xcom_2'], key='foo')
self.assertEqual(result, ('bar', 'baz'))
def test_xcom_pull_after_success(self):
"""
tests xcom set/clear relative to a task in a 'success' rerun scenario
"""
key = 'xcom_key'
value = 'xcom_value'
dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly')
task = DummyOperator(
task_id='test_xcom',
dag=dag,
pool='test_xcom',
owner='airflow',
start_date=timezone.datetime(2016, 6, 2, 0, 0, 0))
exec_date = timezone.utcnow()
ti = TI(
task=task, execution_date=exec_date)
ti.run(mark_success=True)
ti.xcom_push(key=key, value=value)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
ti.run()
# The second run and assert is to handle AIRFLOW-131 (don't clear on
# prior success)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
# Test AIRFLOW-703: Xcom shouldn't be cleared if the task doesn't
# execute, even if dependencies are ignored
ti.run(ignore_all_deps=True, mark_success=True)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
# Xcom IS finally cleared once task has executed
ti.run(ignore_all_deps=True)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None)
def test_xcom_pull_different_execution_date(self):
"""
tests xcom fetch behavior with different execution dates, using
both xcom_pull with "include_prior_dates" and without
"""
key = 'xcom_key'
value = 'xcom_value'
dag = models.DAG(dag_id='test_xcom', schedule_interval='@monthly')
task = DummyOperator(
task_id='test_xcom',
dag=dag,
pool='test_xcom',
owner='airflow',
start_date=timezone.datetime(2016, 6, 2, 0, 0, 0))
exec_date = timezone.utcnow()
ti = TI(
task=task, execution_date=exec_date)
ti.run(mark_success=True)
ti.xcom_push(key=key, value=value)
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), value)
ti.run()
exec_date += datetime.timedelta(days=1)
ti = TI(
task=task, execution_date=exec_date)
ti.run()
# We have set a new execution date (and did not pass in
# 'include_prior_dates'), which means this task should now have a cleared
# xcom value
self.assertEqual(ti.xcom_pull(task_ids='test_xcom', key=key), None)
# We *should* get a value using 'include_prior_dates'
self.assertEqual(ti.xcom_pull(task_ids='test_xcom',
key=key,
include_prior_dates=True),
value)
def test_post_execute_hook(self):
"""
Test that post_execute hook is called with the Operator's result.
The result ('error') will cause an error to be raised and trapped.
"""
class TestError(Exception):
pass
class TestOperator(PythonOperator):
def post_execute(self, context, result):
if result == 'error':
raise TestError('expected error.')
dag = models.DAG(dag_id='test_post_execute_dag')
task = TestOperator(
task_id='test_operator',
dag=dag,
python_callable=lambda: 'error',
owner='airflow',
start_date=timezone.datetime(2017, 2, 1))
ti = TI(task=task, execution_date=timezone.utcnow())
with self.assertRaises(TestError):
ti.run()
def test_check_and_change_state_before_execution(self):
dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
ti = TI(
task=task, execution_date=timezone.utcnow())
self.assertEqual(ti._try_number, 0)
self.assertTrue(ti._check_and_change_state_before_execution())
# State should be running, and try_number column should be incremented
self.assertEqual(ti.state, State.RUNNING)
self.assertEqual(ti._try_number, 1)
def test_check_and_change_state_before_execution_dep_not_met(self):
dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
task2 = DummyOperator(task_id='task2', dag=dag, start_date=DEFAULT_DATE)
task >> task2
ti = TI(
task=task2, execution_date=timezone.utcnow())
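# task2's upstream task has not run, so its dependencies are unmet and the
# state change must be refused.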
self.assertFalse(ti._check_and_change_state_before_execution())
def test_try_number(self):
"""
Test that the try_number accessor behaves correctly in various running states
"""
dag = models.DAG(dag_id='test_check_and_change_state_before_execution')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
ti = TI(task=task, execution_date=timezone.utcnow())
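# A brand new task instance reports the attempt it is about to make.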
self.assertEqual(1, ti.try_number)
ti.try_number = 2
ti.state = State.RUNNING
self.assertEqual(2, ti.try_number)
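# Once the task is no longer running, the accessor points at the next attempt again.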
ti.state = State.SUCCESS
self.assertEqual(3, ti.try_number)
def test_get_num_running_task_instances(self):
session = settings.Session()
dag = models.DAG(dag_id='test_get_num_running_task_instances')
dag2 = models.DAG(dag_id='test_get_num_running_task_instances_dummy')
task = DummyOperator(task_id='task', dag=dag, start_date=DEFAULT_DATE)
task2 = DummyOperator(task_id='task', dag=dag2, start_date=DEFAULT_DATE)
ti1 = TI(task=task, execution_date=DEFAULT_DATE)
ti2 = TI(task=task, execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti3 = TI(task=task2, execution_date=DEFAULT_DATE)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.RUNNING
session.add(ti1)
session.add(ti2)
session.add(ti3)
session.commit()
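# Only RUNNING instances that share the same dag_id and task_id are counted.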
self.assertEqual(1, ti1.get_num_running_task_instances(session=session))
self.assertEqual(1, ti2.get_num_running_task_instances(session=session))
self.assertEqual(1, ti3.get_num_running_task_instances(session=session))
def test_log_url(self):
now = pendulum.now('Europe/Brussels')
dag = DAG('dag', start_date=DEFAULT_DATE)
task = DummyOperator(task_id='op', dag=dag)
ti = TI(task=task, execution_date=now)
d = urllib.parse.parse_qs(
urllib.parse.urlparse(ti.log_url).query,
keep_blank_values=True, strict_parsing=True)
self.assertEqual(d['dag_id'][0], 'dag')
self.assertEqual(d['task_id'][0], 'op')
self.assertEqual(pendulum.parse(d['execution_date'][0]), now)
def test_mark_success_url(self):
now = pendulum.now('Europe/Brussels')
dag = DAG('dag', start_date=DEFAULT_DATE)
task = DummyOperator(task_id='op', dag=dag)
ti = TI(task=task, execution_date=now)
d = urllib.parse.parse_qs(
urllib.parse.urlparse(ti.mark_success_url).query,
keep_blank_values=True, strict_parsing=True)
self.assertEqual(d['dag_id'][0], 'dag')
self.assertEqual(d['task_id'][0], 'op')
self.assertEqual(pendulum.parse(d['execution_date'][0]), now)
class ClearTasksTest(unittest.TestCase):
def test_clear_task_instances(self):
dag = DAG('test_clear_task_instances', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='0', owner='test', dag=dag)
task1 = DummyOperator(task_id='1', owner='test', dag=dag, retries=2)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
ti0.run()
ti1.run()
session = settings.Session()
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session, dag=dag)
session.commit()
ti0.refresh_from_db()
ti1.refresh_from_db()
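# Clearing with the dag available resets state and bumps max_tries so each task
# gets a fresh run plus its configured retries on top of the attempts already used.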
# Next try to run will be try 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
self.assertEqual(ti1.try_number, 2)
self.assertEqual(ti1.max_tries, 3)
def test_clear_task_instances_without_task(self):
dag = DAG('test_clear_task_instances_without_task', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='task0', owner='test', dag=dag)
task1 = DummyOperator(task_id='task1', owner='test', dag=dag, retries=2)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
ti0.run()
ti1.run()
# Remove the task from dag.
dag.task_dict = {}
self.assertFalse(dag.has_task(task0.task_id))
self.assertFalse(dag.has_task(task1.task_id))
session = settings.Session()
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session)
session.commit()
# When dag is None, max_tries will be maximum of original max_tries or try_number.
ti0.refresh_from_db()
ti1.refresh_from_db()
# Next try to run will be try 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
self.assertEqual(ti1.try_number, 2)
self.assertEqual(ti1.max_tries, 2)
def test_clear_task_instances_without_dag(self):
dag = DAG('test_clear_task_instances_without_dag', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='task_0', owner='test', dag=dag)
task1 = DummyOperator(task_id='task_1', owner='test', dag=dag, retries=2)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
ti0.run()
ti1.run()
session = settings.Session()
qry = session.query(TI).filter(
TI.dag_id == dag.dag_id).all()
clear_task_instances(qry, session)
session.commit()
# When dag is None, max_tries will be maximum of original max_tries or try_number.
ti0.refresh_from_db()
ti1.refresh_from_db()
# Next try to run will be try 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
self.assertEqual(ti1.try_number, 2)
self.assertEqual(ti1.max_tries, 2)
def test_dag_clear(self):
dag = DAG('test_dag_clear', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
task0 = DummyOperator(task_id='test_dag_clear_task_0', owner='test', dag=dag)
ti0 = TI(task=task0, execution_date=DEFAULT_DATE)
# Next try to run will be try 1
self.assertEqual(ti0.try_number, 1)
ti0.run()
self.assertEqual(ti0.try_number, 2)
dag.clear()
ti0.refresh_from_db()
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.state, State.NONE)
self.assertEqual(ti0.max_tries, 1)
task1 = DummyOperator(task_id='test_dag_clear_task_1', owner='test',
dag=dag, retries=2)
ti1 = TI(task=task1, execution_date=DEFAULT_DATE)
self.assertEqual(ti1.max_tries, 2)
ti1.try_number = 1
# Next try will be 2
ti1.run()
self.assertEqual(ti1.try_number, 3)
self.assertEqual(ti1.max_tries, 2)
dag.clear()
ti0.refresh_from_db()
ti1.refresh_from_db()
# after clearing the dag, ti1 should show attempt 3 of 5
self.assertEqual(ti1.max_tries, 4)
self.assertEqual(ti1.try_number, 3)
# after clearing the dag, ti0 should show attempt 2 of 2
self.assertEqual(ti0.try_number, 2)
self.assertEqual(ti0.max_tries, 1)
def test_dags_clear(self):
# setup
session = settings.Session()
dags, tis = [], []
num_of_dags = 5
for i in range(num_of_dags):
dag = DAG('test_dag_clear_' + str(i), start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
ti = TI(task=DummyOperator(task_id='test_task_clear_' + str(i), owner='test', dag=dag),
execution_date=DEFAULT_DATE)
dags.append(dag)
tis.append(ti)
# test clear all dags
for i in range(num_of_dags):
tis[i].run()
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 2)
self.assertEqual(tis[i].max_tries, 0)
DAG.clear_dags(dags)
for i in range(num_of_dags):
tis[i].refresh_from_db()
self.assertEqual(tis[i].state, State.NONE)
self.assertEqual(tis[i].try_number, 2)
self.assertEqual(tis[i].max_tries, 1)
# test dry_run
for i in range(num_of_dags):
tis[i].run()
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 1)
DAG.clear_dags(dags, dry_run=True)
for i in range(num_of_dags):
tis[i].refresh_from_db()
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 1)
# test only_failed
from random import randint
failed_dag_idx = randint(0, len(tis) - 1)
tis[failed_dag_idx].state = State.FAILED
session.merge(tis[failed_dag_idx])
session.commit()
DAG.clear_dags(dags, only_failed=True)
for i in range(num_of_dags):
tis[i].refresh_from_db()
if i != failed_dag_idx:
self.assertEqual(tis[i].state, State.SUCCESS)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 1)
else:
self.assertEqual(tis[i].state, State.NONE)
self.assertEqual(tis[i].try_number, 3)
self.assertEqual(tis[i].max_tries, 2)
def test_operator_clear(self):
dag = DAG('test_operator_clear', start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=10))
t1 = DummyOperator(task_id='bash_op', owner='test', dag=dag)
t2 = DummyOperator(task_id='dummy_op', owner='test', dag=dag, retries=1)
t2.set_upstream(t1)
ti1 = TI(task=t1, execution_date=DEFAULT_DATE)
ti2 = TI(task=t2, execution_date=DEFAULT_DATE)
ti2.run()
# Dependency not met
self.assertEqual(ti2.try_number, 1)
self.assertEqual(ti2.max_tries, 1)
t2.clear(upstream=True)
ti1.run()
ti2.run()
self.assertEqual(ti1.try_number, 2)
# max_tries is 0 because there is no task instance in db for ti1
# so clear won't change the max_tries.
self.assertEqual(ti1.max_tries, 0)
self.assertEqual(ti2.try_number, 2)
# try_number (0) + retries(1)
self.assertEqual(ti2.max_tries, 1)
def test_xcom_disable_pickle_type(self):
json_obj = {"key": "value"}
execution_date = timezone.utcnow()
key = "xcom_test1"
dag_id = "test_dag1"
task_id = "test_task1"
XCom.set(key=key,
value=json_obj,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date,
enable_pickling=False)
ret_value = XCom.get_one(key=key,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date,
enable_pickling=False)
self.assertEqual(ret_value, json_obj)
def test_xcom_enable_pickle_type(self):
json_obj = {"key": "value"}
execution_date = timezone.utcnow()
key = "xcom_test2"
dag_id = "test_dag2"
task_id = "test_task2"
XCom.set(key=key,
value=json_obj,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date,
enable_pickling=True)
ret_value = XCom.get_one(key=key,
dag_id=dag_id,
task_id=task_id,
execution_date=execution_date,
enable_pickling=True)
self.assertEqual(ret_value, json_obj)
def test_xcom_disable_pickle_type_fail_on_non_json(self):
class PickleRce(object):
def __reduce__(self):
return (os.system, ("ls -alt",))
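# __reduce__ would make unpickling execute an arbitrary command; JSON
# serialization cannot encode this object, so XCom.set must raise TypeError.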
self.assertRaises(TypeError, XCom.set,
key="xcom_test3",
value=PickleRce(),
dag_id="test_dag3",
task_id="test_task3",
execution_date=timezone.utcnow(),
enable_pickling=False)
def test_xcom_get_many(self):
json_obj = {"key": "value"}
execution_date = timezone.utcnow()
key = "xcom_test4"
dag_id1 = "test_dag4"
task_id1 = "test_task4"
dag_id2 = "test_dag5"
task_id2 = "test_task5"
XCom.set(key=key,
value=json_obj,
dag_id=dag_id1,
task_id=task_id1,
execution_date=execution_date,
enable_pickling=True)
XCom.set(key=key,
value=json_obj,
dag_id=dag_id2,
task_id=task_id2,
execution_date=execution_date,
enable_pickling=True)
results = XCom.get_many(key=key,
execution_date=execution_date,
enable_pickling=True)
for result in results:
self.assertEqual(result.value, json_obj)
class ConnectionTest(unittest.TestCase):
@patch.object(configuration, 'get')
def test_connection_extra_no_encryption(self, mock_get):
"""
Tests extras on a new connection without encryption. The fernet key
is set to a non-base64-encoded string and the extra is stored without
encryption.
"""
mock_get.return_value = 'cryptography_not_found_storing_passwords_in_plain_text'
test_connection = Connection(extra='testextra')
self.assertEqual(test_connection.extra, 'testextra')
@patch.object(configuration, 'get')
def test_connection_extra_with_encryption(self, mock_get):
"""
Tests extras on a new connection with encryption. The fernet key
is set to a base64 encoded string and the extra is encrypted.
"""
# 'dGVzdA==' is base64 encoded 'test'
mock_get.return_value = 'dGVzdA=='
test_connection = Connection(extra='testextra')
self.assertEqual(test_connection.extra, 'testextra')
|
prakashpp/trytond-shipping-fedex
|
refs/heads/master
|
setup.py
|
2
|
#!/usr/bin/env python
import re
import os
import ConfigParser
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
config = ConfigParser.ConfigParser()
config.readfp(open('tryton.cfg'))
info = dict(config.items('tryton'))
for key in ('depends', 'extras_depend', 'xml'):
if key in info:
info[key] = info[key].strip().splitlines()
major_version, minor_version, _ = info.get('version', '0.0.1').split('.', 2)
major_version = int(major_version)
minor_version = int(minor_version)
requires = [
'pyfedex',
]
MODULE2PREFIX = {
'shipping': 'fio'
}
MODULE = "shipping_fedex"
PREFIX = "fio"
for dep in info.get('depends', []):
if not re.match(r'(ir|res|webdav)(\W|$)', dep):
requires.append(
'%s_%s >= %s.%s, < %s.%s' % (
MODULE2PREFIX.get(dep, 'trytond'), dep,
major_version, minor_version, major_version,
minor_version + 1
)
)
requires.append(
'trytond >= %s.%s, < %s.%s' % (
major_version, minor_version, major_version, minor_version + 1
)
)
setup(
name='%s_%s' % (PREFIX, MODULE),
version=info.get('version', '0.0.1'),
description="Trytond Fedex Integration",
author="Fulfil.IO Inc., Openlabs Technologies and Consulting (P) Ltd.",
author_email='info@fulfil.io',
url='http://www.fulfil.io/',
package_dir={'trytond.modules.%s' % MODULE: '.'},
packages=[
'trytond.modules.%s' % MODULE,
'trytond.modules.%s.tests' % MODULE,
],
package_data={
'trytond.modules.%s' % MODULE: info.get('xml', [])
+ info.get('translation', [])
+ ['tryton.cfg', 'locale/*.po', 'tests/*.rst', 'reports/*.odt']
+ ['view/*.xml'],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Plugins',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Tryton',
'Topic :: Office/Business',
],
long_description=open('README.rst').read(),
license='BSD',
install_requires=requires,
zip_safe=False,
entry_points="""
[trytond.modules]
%s = trytond.modules.%s
""" % (MODULE, MODULE),
test_suite='tests',
test_loader='trytond.test_loader:Loader',
)
|
mesosphere/mesos-cli
|
refs/heads/master
|
mesos/cli/__init__.py
|
1
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = '0.1.5'
|
heiths/allura
|
refs/heads/master
|
ForgeBlog/forgeblog/tests/test_roles.py
|
3
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from pylons import tmpl_context as c, app_globals as g
from alluratest.controller import setup_basic_test, setup_global_objects
from allura import model as M
from allura.lib import security
from allura.lib import helpers as h
def setUp():
setup_basic_test()
setup_global_objects()
h.set_context('test', neighborhood='Projects')
c.project.install_app('blog', 'blog')
g.set_app('blog')
def test_role_assignments():
admin = M.User.by_username('test-admin')
user = M.User.by_username('test-user')
anon = M.User.anonymous()
def check_access(perm):
pred = security.has_access(c.app, perm)
return pred(user=admin), pred(user=user), pred(user=anon)
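# check_access returns the (admin, authenticated user, anonymous) decisions
# for a single permission.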
assert check_access('configure') == (True, False, False)
assert check_access('read') == (True, True, True)
assert check_access('write') == (True, False, False)
assert check_access('unmoderated_post') == (True, True, False)
assert check_access('post') == (True, True, False)
assert check_access('moderate') == (True, False, False)
assert check_access('admin') == (True, False, False)
|
Kazade/NeHe-Website
|
refs/heads/master
|
google_appengine/lib/django-1.2/tests/regressiontests/admin_scripts/tests.py
|
39
|
"""
A series of tests to establish that the command-line management tools work as
advertised - especially with regard to the handling of the DJANGO_SETTINGS_MODULE
and default settings.py files.
"""
import os
import unittest
import shutil
import sys
import re
from django import conf, bin, get_version
from django.conf import settings
class AdminScriptTestCase(unittest.TestCase):
def write_settings(self, filename, apps=None, is_dir=False, sdict=None):
test_dir = os.path.dirname(os.path.dirname(__file__))
if is_dir:
settings_dir = os.path.join(test_dir,filename)
os.mkdir(settings_dir)
settings_file = open(os.path.join(settings_dir,'__init__.py'), 'w')
else:
settings_file = open(os.path.join(test_dir, filename), 'w')
settings_file.write('# Settings file automatically generated by regressiontests.admin_scripts test case\n')
exports = [
'DATABASES',
'ROOT_URLCONF'
]
for s in exports:
if hasattr(settings, s):
o = getattr(settings, s)
if not isinstance(o, dict):
o = "'%s'" % o
settings_file.write("%s = %s\n" % (s, o))
if apps is None:
apps = ['django.contrib.auth', 'django.contrib.contenttypes', 'admin_scripts']
if apps:
settings_file.write("INSTALLED_APPS = %s\n" % apps)
if sdict:
for k, v in sdict.items():
settings_file.write("%s = %s\n" % (k, v))
settings_file.close()
def remove_settings(self, filename, is_dir=False):
test_dir = os.path.dirname(os.path.dirname(__file__))
full_name = os.path.join(test_dir, filename)
if is_dir:
shutil.rmtree(full_name)
else:
os.remove(full_name)
# Also try to remove the compiled file; if it exists, it could
# mess up later tests that depend upon the .py file not existing
try:
if sys.platform.startswith('java'):
# Jython produces module$py.class files
os.remove(re.sub(r'\.py$', '$py.class', full_name))
else:
# CPython produces module.pyc files
os.remove(full_name + 'c')
except OSError:
pass
def _ext_backend_paths(self):
"""
Returns the paths for any external backend packages.
"""
paths = []
first_package_re = re.compile(r'(^[^\.]+)\.')
for backend in settings.DATABASES.values():
result = first_package_re.findall(backend['ENGINE'])
if result and result[0] != 'django':
backend_pkg = __import__(result[0])
backend_dir = os.path.dirname(backend_pkg.__file__)
paths.append(os.path.dirname(backend_dir))
return paths
def run_test(self, script, args, settings_file=None, apps=None):
test_dir = os.path.dirname(os.path.dirname(__file__))
project_dir = os.path.dirname(test_dir)
base_dir = os.path.dirname(project_dir)
ext_backend_base_dirs = self._ext_backend_paths()
# Remember the old environment
old_django_settings_module = os.environ.get('DJANGO_SETTINGS_MODULE', None)
if sys.platform.startswith('java'):
python_path_var_name = 'JYTHONPATH'
else:
python_path_var_name = 'PYTHONPATH'
old_python_path = os.environ.get(python_path_var_name, None)
old_cwd = os.getcwd()
# Set the test environment
if settings_file:
os.environ['DJANGO_SETTINGS_MODULE'] = settings_file
elif 'DJANGO_SETTINGS_MODULE' in os.environ:
del os.environ['DJANGO_SETTINGS_MODULE']
python_path = [test_dir, base_dir]
python_path.extend(ext_backend_base_dirs)
os.environ[python_path_var_name] = os.pathsep.join(python_path)
# Build the command line
executable = sys.executable
arg_string = ' '.join(['%s' % arg for arg in args])
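# If the interpreter path contains spaces, wrap the whole command in an
# extra pair of quotes so the shell parses it correctly.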
if ' ' in executable:
cmd = '""%s" "%s" %s"' % (executable, script, arg_string)
else:
cmd = '%s "%s" %s' % (executable, script, arg_string)
# Move to the test directory and run
os.chdir(test_dir)
try:
from subprocess import Popen, PIPE
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE)
stdin, stdout, stderr = (p.stdin, p.stdout, p.stderr)
p.wait()
except ImportError:
stdin, stdout, stderr = os.popen3(cmd)
out, err = stdout.read(), stderr.read()
# Restore the old environment
if old_django_settings_module:
os.environ['DJANGO_SETTINGS_MODULE'] = old_django_settings_module
if old_python_path:
os.environ[python_path_var_name] = old_python_path
# Move back to the old working directory
os.chdir(old_cwd)
return out, err
def run_django_admin(self, args, settings_file=None):
bin_dir = os.path.abspath(os.path.dirname(bin.__file__))
return self.run_test(os.path.join(bin_dir,'django-admin.py'), args, settings_file)
def run_manage(self, args, settings_file=None):
conf_dir = os.path.dirname(conf.__file__)
template_manage_py = os.path.join(conf_dir, 'project_template', 'manage.py')
test_dir = os.path.dirname(os.path.dirname(__file__))
test_manage_py = os.path.join(test_dir, 'manage.py')
shutil.copyfile(template_manage_py, test_manage_py)
stdout, stderr = self.run_test('./manage.py', args, settings_file)
# Cleanup - remove the generated manage.py script
os.remove(test_manage_py)
return stdout, stderr
def assertNoOutput(self, stream):
"Utility assertion: assert that the given stream is empty"
self.assertEqual(len(stream), 0, "Stream should be empty: actually contains '%s'" % stream)
def assertOutput(self, stream, msg):
"Utility assertion: assert that the given message exists in the output"
self.assertTrue(msg in stream, "'%s' does not match actual output text '%s'" % (msg, stream))
##########################################################################
# DJANGO ADMIN TESTS
# This first series of test classes checks the environment processing
# of the django-admin.py script
##########################################################################
class DjangoAdminNoSettings(AdminScriptTestCase):
"A series of tests for django-admin.py when there is no settings.py file."
def test_builtin_command(self):
"no settings: django-admin builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'environment variable DJANGO_SETTINGS_MODULE is undefined')
def test_builtin_with_bad_settings(self):
"no settings: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"no settings: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
class DjangoAdminDefaultSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings.py file that
contains the test application.
"""
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"default: django-admin builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'environment variable DJANGO_SETTINGS_MODULE is undefined')
def test_builtin_with_settings(self):
"default: django-admin builtin commands succeed if settings are provided as argument"
args = ['sqlall','--settings=settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_environment(self):
"default: django-admin builtin commands succeed if settings are provided in the environment"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'settings')
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_bad_settings(self):
"default: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"default: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_custom_command(self):
"default: django-admin can't execute user commands if it isn't provided settings"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"default: django-admin can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
def test_custom_command_with_environment(self):
"default: django-admin can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args,'settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
class DjangoAdminFullPathDefaultSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings.py file that
contains the test application specified using a full path.
"""
def setUp(self):
self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes', 'regressiontests.admin_scripts'])
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"fulldefault: django-admin builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'environment variable DJANGO_SETTINGS_MODULE is undefined')
def test_builtin_with_settings(self):
"fulldefault: django-admin builtin commands succeed if a settings file is provided"
args = ['sqlall','--settings=settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_environment(self):
"fulldefault: django-admin builtin commands succeed if the environment contains settings"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'settings')
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_bad_settings(self):
"fulldefault: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"fulldefault: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_custom_command(self):
"fulldefault: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"fulldefault: django-admin can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
def test_custom_command_with_environment(self):
"fulldefault: django-admin can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args,'settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
class DjangoAdminMinimalSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings.py file that
doesn't contain the test application.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth','django.contrib.contenttypes'])
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"minimal: django-admin builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'environment variable DJANGO_SETTINGS_MODULE is undefined')
def test_builtin_with_settings(self):
"minimal: django-admin builtin commands fail if settings are provided as argument"
args = ['sqlall','--settings=settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found')
def test_builtin_with_environment(self):
"minimal: django-admin builtin commands fail if settings are provided in the environment"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'settings')
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found')
def test_builtin_with_bad_settings(self):
"minimal: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"minimal: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_custom_command(self):
"minimal: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"minimal: django-admin can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_environment(self):
"minimal: django-admin can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args,'settings')
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
class DjangoAdminAlternateSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when using a settings file
with a name other than 'settings.py'.
"""
def setUp(self):
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"alternate: django-admin builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'environment variable DJANGO_SETTINGS_MODULE is undefined')
def test_builtin_with_settings(self):
"alternate: django-admin builtin commands succeed if settings are provided as argument"
args = ['sqlall','--settings=alternate_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_environment(self):
"alternate: django-admin builtin commands succeed if settings are provided in the environment"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_bad_settings(self):
"alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_custom_command(self):
"alternate: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"alternate: django-admin can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=alternate_settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
def test_custom_command_with_environment(self):
"alternate: django-admin can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args,'alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
class DjangoAdminMultipleSettings(AdminScriptTestCase):
"""A series of tests for django-admin.py when multiple settings files
(including the default 'settings.py') are available. The default settings
file is insufficient for performing the operations described, so the
alternate settings must be used by the running script.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth','django.contrib.contenttypes'])
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('settings.py')
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"alternate: django-admin builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'environment variable DJANGO_SETTINGS_MODULE is undefined')
def test_builtin_with_settings(self):
"alternate: django-admin builtin commands succeed if settings are provided as argument"
args = ['sqlall','--settings=alternate_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_environment(self):
"alternate: django-admin builtin commands succeed if settings are provided in the environment"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_bad_settings(self):
"alternate: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"alternate: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_custom_command(self):
"alternate: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"alternate: django-admin can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=alternate_settings']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
def test_custom_command_with_environment(self):
"alternate: django-admin can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_django_admin(args,'alternate_settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
class DjangoAdminSettingsDirectory(AdminScriptTestCase):
"""
A series of tests for django-admin.py when the settings file is in a
directory. (see #9751).
"""
def setUp(self):
self.write_settings('settings', is_dir=True)
def tearDown(self):
self.remove_settings('settings', is_dir=True)
def test_setup_environ(self):
"directory: startapp creates the correct directory"
test_dir = os.path.dirname(os.path.dirname(__file__))
args = ['startapp','settings_test']
out, err = self.run_django_admin(args,'settings')
self.assertNoOutput(err)
self.assertTrue(os.path.exists(os.path.join(test_dir, 'settings_test')))
shutil.rmtree(os.path.join(test_dir, 'settings_test'))
def test_builtin_command(self):
"directory: django-admin builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, 'environment variable DJANGO_SETTINGS_MODULE is undefined')
def test_builtin_with_bad_settings(self):
"directory: django-admin builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"directory: django-admin builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_custom_command(self):
"directory: django-admin can't execute user commands unless settings are provided"
args = ['noargs_command']
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_builtin_with_settings(self):
"directory: django-admin builtin commands succeed if settings are provided as argument"
args = ['sqlall','--settings=settings', 'admin_scripts']
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_environment(self):
"directory: django-admin builtin commands succeed if settings are provided in the environment"
args = ['sqlall','admin_scripts']
out, err = self.run_django_admin(args,'settings')
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
##########################################################################
# MANAGE.PY TESTS
# This next series of test classes checks the environment processing
# of the generated manage.py script
##########################################################################
class ManageNoSettings(AdminScriptTestCase):
"A series of tests for manage.py when there is no settings.py file."
def test_builtin_command(self):
"no settings: manage.py builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_builtin_with_bad_settings(self):
"no settings: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_builtin_with_bad_environment(self):
"no settings: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
class ManageDefaultSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
contains the test application.
"""
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"default: manage.py builtin commands succeed when default settings are appropriate"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_settings(self):
"default: manage.py builtin commands succeed if settings are provided as argument"
args = ['sqlall','--settings=settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_environment(self):
"default: manage.py builtin commands succeed if settings are provided in the environment"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'settings')
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_bad_settings(self):
"default: manage.py builtin commands succeed if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"default: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'bad_settings')
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_custom_command(self):
"default: manage.py can execute user commands when default settings are appropriate"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
def test_custom_command_with_settings(self):
"default: manage.py can execute user commands when settings are provided as argument"
args = ['noargs_command', '--settings=settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
def test_custom_command_with_environment(self):
"default: manage.py can execute user commands when settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args,'settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
class ManageFullPathDefaultSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
contains the test application specified using a full path.
"""
def setUp(self):
self.write_settings('settings.py', ['django.contrib.auth', 'django.contrib.contenttypes', 'regressiontests.admin_scripts'])
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"fulldefault: manage.py builtin commands succeed when default settings are appropriate"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_settings(self):
"fulldefault: manage.py builtin commands succeed if settings are provided as argument"
args = ['sqlall','--settings=settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_environment(self):
"fulldefault: manage.py builtin commands succeed if settings are provided in the environment"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'settings')
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_bad_settings(self):
"fulldefault: manage.py builtin commands succeed if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"fulldefault: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'bad_settings')
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_custom_command(self):
"fulldefault: manage.py can execute user commands when default settings are appropriate"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
def test_custom_command_with_settings(self):
"fulldefault: manage.py can execute user commands when settings are provided as argument"
args = ['noargs_command', '--settings=settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
def test_custom_command_with_environment(self):
"fulldefault: manage.py can execute user commands when settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args,'settings')
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
class ManageMinimalSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings.py file that
doesn't contain the test application.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth','django.contrib.contenttypes'])
def tearDown(self):
self.remove_settings('settings.py')
def test_builtin_command(self):
"minimal: manage.py builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found')
def test_builtin_with_settings(self):
"minimal: manage.py builtin commands fail if settings are provided as argument"
args = ['sqlall','--settings=settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found')
def test_builtin_with_environment(self):
"minimal: manage.py builtin commands fail if settings are provided in the environment"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'settings')
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found')
def test_builtin_with_bad_settings(self):
"minimal: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"minimal: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found')
def test_custom_command(self):
"minimal: manage.py can't execute user commands without appropriate settings"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"minimal: manage.py can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=settings']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_environment(self):
"minimal: manage.py can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args,'settings')
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
class ManageAlternateSettings(AdminScriptTestCase):
"""A series of tests for manage.py when using a settings file
with a name other than 'settings.py'.
"""
def setUp(self):
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"alternate: manage.py builtin commands fail with an import error when no default settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_builtin_with_settings(self):
"alternate: manage.py builtin commands fail if settings are provided as argument but no defaults"
args = ['sqlall','--settings=alternate_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_builtin_with_environment(self):
"alternate: manage.py builtin commands fail if settings are provided in the environment but no defaults"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'alternate_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_builtin_with_bad_settings(self):
"alternate: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_builtin_with_bad_environment(self):
"alternate: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_custom_command(self):
"alternate: manage.py can't execute user commands"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_custom_command_with_settings(self):
"alternate: manage.py can't execute user commands, even if settings are provided as argument"
args = ['noargs_command', '--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
def test_custom_command_with_environment(self):
"alternate: manage.py can't execute user commands, even if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args,'alternate_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Can't find the file 'settings.py' in the directory containing './manage.py'")
class ManageMultipleSettings(AdminScriptTestCase):
"""A series of tests for manage.py when multiple settings files
(including the default 'settings.py') are available. The default settings
file is insufficient for performing the operations described, so the
alternate settings must be used by the running script.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth','django.contrib.contenttypes'])
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('settings.py')
self.remove_settings('alternate_settings.py')
def test_builtin_command(self):
"multiple: manage.py builtin commands fail with an import error when no settings provided"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found.')
def test_builtin_with_settings(self):
"multiple: manage.py builtin commands succeed if settings are provided as argument"
args = ['sqlall','--settings=alternate_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, 'CREATE TABLE')
def test_builtin_with_environment(self):
"multiple: manage.py builtin commands fail if settings are provided in the environment"
# FIXME: This doesn't seem to be the correct output.
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'alternate_settings')
self.assertNoOutput(out)
self.assertOutput(err, 'App with label admin_scripts could not be found.')
def test_builtin_with_bad_settings(self):
"multiple: manage.py builtin commands fail if settings file (from argument) doesn't exist"
args = ['sqlall','--settings=bad_settings', 'admin_scripts']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Could not import settings 'bad_settings'")
def test_builtin_with_bad_environment(self):
"multiple: manage.py builtin commands fail if settings file (from environment) doesn't exist"
args = ['sqlall','admin_scripts']
out, err = self.run_manage(args,'bad_settings')
self.assertNoOutput(out)
self.assertOutput(err, "App with label admin_scripts could not be found")
def test_custom_command(self):
"multiple: manage.py can't execute user commands using default settings"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"multiple: manage.py can execute user commands if settings are provided as argument"
args = ['noargs_command', '--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand")
def test_custom_command_with_environment(self):
"multiple: manage.py can execute user commands if settings are provided in environment"
args = ['noargs_command']
out, err = self.run_manage(args,'alternate_settings')
self.assertNoOutput(out)
self.assertOutput(err, "Unknown command: 'noargs_command'")
class ManageValidate(AdminScriptTestCase):
def tearDown(self):
self.remove_settings('settings.py')
def test_nonexistent_app(self):
"manage.py validate reports an error on a non-existent app in INSTALLED_APPS"
self.write_settings('settings.py', apps=['admin_scriptz.broken_app'], sdict={'USE_I18N': False})
args = ['validate']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, 'No module named admin_scriptz')
def test_broken_app(self):
"manage.py validate reports an ImportError if an app's models.py raises one on import"
self.write_settings('settings.py', apps=['admin_scripts.broken_app'])
args = ['validate']
out, err = self.run_manage(args)
self.assertNoOutput(out)
self.assertOutput(err, 'ImportError')
def test_complex_app(self):
"manage.py validate does not raise an ImportError validating a complex app with nested calls to load_app"
self.write_settings('settings.py',
apps=['admin_scripts.complex_app', 'admin_scripts.simple_app'],
sdict={'DEBUG': True})
args = ['validate']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, '0 errors found')
def test_app_with_import(self):
"manage.py validate does not raise errors when an app imports a base class that itself has an abstract base"
self.write_settings('settings.py',
apps=['admin_scripts.app_with_import', 'django.contrib.comments'],
sdict={'DEBUG': True})
args = ['validate']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, '0 errors found')
##########################################################################
# COMMAND PROCESSING TESTS
# Check that user-space commands are correctly handled - in particular,
# that arguments to the commands are correctly parsed and processed.
##########################################################################
class CommandTypes(AdminScriptTestCase):
"Tests for the various types of base command types that can be defined."
def setUp(self):
self.write_settings('settings.py')
def tearDown(self):
self.remove_settings('settings.py')
def test_version(self):
"--version is handled as a special case"
args = ['--version']
out, err = self.run_manage(args)
self.assertNoOutput(err)
# Only check the first part of the version number
self.assertOutput(out, get_version().split('-')[0])
def test_help(self):
"--help is handled as a special case"
args = ['--help']
out, err = self.run_manage(args)
if sys.version_info < (2, 5):
self.assertOutput(out, "usage: manage.py subcommand [options] [args]")
else:
self.assertOutput(out, "Usage: manage.py subcommand [options] [args]")
self.assertOutput(err, "Type 'manage.py help <subcommand>' for help on a specific subcommand.")
def test_specific_help(self):
"--help can be used on a specific command"
args = ['sqlall','--help']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "Prints the CREATE TABLE, custom SQL and CREATE INDEX SQL statements for the given model module name(s).")
def test_base_command(self):
"User BaseCommands can execute when a label is provided"
args = ['base_command','testlabel']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', '1'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
def test_base_command_no_label(self):
"User BaseCommands can execute when no labels are provided"
args = ['base_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=(), options=[('option_a', '1'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
def test_base_command_multiple_label(self):
"User BaseCommands can execute when no labels are provided"
args = ['base_command','testlabel','anotherlabel']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel', 'anotherlabel'), options=[('option_a', '1'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
def test_base_command_with_option(self):
"User BaseCommands can execute with options when a label is provided"
args = ['base_command','testlabel','--option_a=x']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
def test_base_command_with_options(self):
"User BaseCommands can execute with multiple options when a label is provided"
args = ['base_command','testlabel','-a','x','--option_b=y']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', 'y'), ('option_c', '3'), ('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
def test_noargs(self):
"NoArg Commands can be executed"
args = ['noargs_command']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:NoArgsCommand options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
def test_noargs_with_args(self):
"NoArg Commands raise an error if an argument is provided"
args = ['noargs_command','argument']
out, err = self.run_manage(args)
self.assertOutput(err, "Error: Command doesn't accept any arguments")
def test_app_command(self):
"User AppCommands can execute when a single app name is provided"
args = ['app_command', 'auth']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:AppCommand app=<module 'django.contrib.auth.models'")
self.assertOutput(out, os.sep.join(['django','contrib','auth','models.py']))
self.assertOutput(out, "'>, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
def test_app_command_no_apps(self):
"User AppCommands raise an error when no app name is provided"
args = ['app_command']
out, err = self.run_manage(args)
self.assertOutput(err, 'Error: Enter at least one appname.')
def test_app_command_multiple_apps(self):
"User AppCommands raise an error when multiple app names are provided"
args = ['app_command','auth','contenttypes']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:AppCommand app=<module 'django.contrib.auth.models'")
self.assertOutput(out, os.sep.join(['django','contrib','auth','models.py']))
self.assertOutput(out, "'>, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
self.assertOutput(out, "EXECUTE:AppCommand app=<module 'django.contrib.contenttypes.models'")
self.assertOutput(out, os.sep.join(['django','contrib','contenttypes','models.py']))
self.assertOutput(out, "'>, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
def test_app_command_invalid_appname(self):
"User AppCommands can execute when a single app name is provided"
args = ['app_command', 'NOT_AN_APP']
out, err = self.run_manage(args)
self.assertOutput(err, "App with label NOT_AN_APP could not be found")
def test_app_command_some_invalid_appnames(self):
"User AppCommands can execute when some of the provided app names are invalid"
args = ['app_command', 'auth', 'NOT_AN_APP']
out, err = self.run_manage(args)
self.assertOutput(err, "App with label NOT_AN_APP could not be found")
def test_label_command(self):
"User LabelCommands can execute when a label is provided"
args = ['label_command','testlabel']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:LabelCommand label=testlabel, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
def test_label_command_no_label(self):
"User LabelCommands raise an error if no label is provided"
args = ['label_command']
out, err = self.run_manage(args)
self.assertOutput(err, 'Enter at least one label')
def test_label_command_multiple_label(self):
"User LabelCommands are executed multiple times if multiple labels are provided"
args = ['label_command','testlabel','anotherlabel']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:LabelCommand label=testlabel, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
self.assertOutput(out, "EXECUTE:LabelCommand label=anotherlabel, options=[('pythonpath', None), ('settings', None), ('traceback', None), ('verbosity', '1')]")
class ArgumentOrder(AdminScriptTestCase):
"""Tests for 2-stage argument parsing scheme.
django-admin command arguments are parsed in 2 parts; the core arguments
(--settings, --traceback and --pythonpath) are parsed using a Lax parser.
This Lax parser ignores any unknown options. Then the full settings are
passed to the command parser, which extracts options of interest to the
individual command.
"""
def setUp(self):
self.write_settings('settings.py', apps=['django.contrib.auth','django.contrib.contenttypes'])
self.write_settings('alternate_settings.py')
def tearDown(self):
self.remove_settings('settings.py')
self.remove_settings('alternate_settings.py')
def test_setting_then_option(self):
"Options passed after settings are correctly handled"
args = ['base_command','testlabel','--settings=alternate_settings','--option_a=x']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', '1')]")
def test_setting_then_short_option(self):
"Short options passed after settings are correctly handled"
args = ['base_command','testlabel','--settings=alternate_settings','--option_a=x']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', '1')]")
def test_option_then_setting(self):
"Options passed before settings are correctly handled"
args = ['base_command','testlabel','--option_a=x','--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', '1')]")
def test_short_option_then_setting(self):
"Short options passed before settings are correctly handled"
args = ['base_command','testlabel','-a','x','--settings=alternate_settings']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', '2'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', '1')]")
def test_option_then_setting_then_option(self):
"Options are correctly handled when they are passed before and after a setting"
args = ['base_command','testlabel','--option_a=x','--settings=alternate_settings','--option_b=y']
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE:BaseCommand labels=('testlabel',), options=[('option_a', 'x'), ('option_b', 'y'), ('option_c', '3'), ('pythonpath', None), ('settings', 'alternate_settings'), ('traceback', None), ('verbosity', '1')]")
|
nemesisdesign/django
|
refs/heads/master
|
django/conf/locale/mn/formats.py
|
619
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'g:i A'
# DATETIME_FORMAT =
# YEAR_MONTH_FORMAT =
# MONTH_DAY_FORMAT =
SHORT_DATE_FORMAT = 'j M Y'
# SHORT_DATETIME_FORMAT =
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
# DECIMAL_SEPARATOR =
# THOUSAND_SEPARATOR =
# NUMBER_GROUPING =
|
SteveXiSong/UW-Madison-ECE757-S15-MulticastSnooping
|
refs/heads/master
|
src/arch/x86/isa/insts/x87/arithmetic/change_sign.py
|
70
|
# Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop FABS {
absfp st(0), st(0), SetStatus=True
};
def macroop FCHS {
chsfp st(0), st(0), SetStatus=True
};
'''
|
tumbl3w33d/ansible
|
refs/heads/devel
|
test/lib/ansible_test/_data/pytest/plugins/ansible_pytest_collections.py
|
25
|
"""Enable unit testing of Ansible collections. PYTEST_DONT_REWRITE"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
# set by ansible-test to a single directory, rather than a list of directories as supported by Ansible itself
ANSIBLE_COLLECTIONS_PATH = os.path.join(os.environ['ANSIBLE_COLLECTIONS_PATHS'], 'ansible_collections')
def collection_pypkgpath(self):
"""Configure the Python package path so that pytest can find our collections."""
for parent in self.parts(reverse=True):
if str(parent) == ANSIBLE_COLLECTIONS_PATH:
return parent
raise Exception('File "%s" not found in collection path "%s".' % (self.strpath, ANSIBLE_COLLECTIONS_PATH))
def pytest_configure():
"""Configure this pytest plugin."""
try:
if pytest_configure.executed:
return
except AttributeError:
pytest_configure.executed = True
from ansible.utils.collection_loader import AnsibleCollectionLoader
# allow unit tests to import code from collections
sys.meta_path.insert(0, AnsibleCollectionLoader())
# noinspection PyProtectedMember
import py._path.local
# force collections unit tests to be loaded with the ansible_collections namespace
# original idea from https://stackoverflow.com/questions/50174130/how-do-i-pytest-a-project-using-pep-420-namespace-packages/50175552#50175552
# noinspection PyProtectedMember
py._path.local.LocalPath.pypkgpath = collection_pypkgpath # pylint: disable=protected-access
pytest_configure()
|
menardorama/ReadyNAS-Add-ons
|
refs/heads/master
|
headphones-1.0.0/files/etc/apps/headphones/lib/unidecode/x092.py
|
252
|
data = (
'Ba ', # 0x00
'Fang ', # 0x01
'Chen ', # 0x02
'Xing ', # 0x03
'Tou ', # 0x04
'Yue ', # 0x05
'Yan ', # 0x06
'Fu ', # 0x07
'Pi ', # 0x08
'Na ', # 0x09
'Xin ', # 0x0a
'E ', # 0x0b
'Jue ', # 0x0c
'Dun ', # 0x0d
'Gou ', # 0x0e
'Yin ', # 0x0f
'Qian ', # 0x10
'Ban ', # 0x11
'Ji ', # 0x12
'Ren ', # 0x13
'Chao ', # 0x14
'Niu ', # 0x15
'Fen ', # 0x16
'Yun ', # 0x17
'Ji ', # 0x18
'Qin ', # 0x19
'Pi ', # 0x1a
'Guo ', # 0x1b
'Hong ', # 0x1c
'Yin ', # 0x1d
'Jun ', # 0x1e
'Shi ', # 0x1f
'Yi ', # 0x20
'Zhong ', # 0x21
'Nie ', # 0x22
'Gai ', # 0x23
'Ri ', # 0x24
'Huo ', # 0x25
'Tai ', # 0x26
'Kang ', # 0x27
'Habaki ', # 0x28
'Irori ', # 0x29
'Ngaak ', # 0x2a
'[?] ', # 0x2b
'Duo ', # 0x2c
'Zi ', # 0x2d
'Ni ', # 0x2e
'Tu ', # 0x2f
'Shi ', # 0x30
'Min ', # 0x31
'Gu ', # 0x32
'E ', # 0x33
'Ling ', # 0x34
'Bing ', # 0x35
'Yi ', # 0x36
'Gu ', # 0x37
'Ba ', # 0x38
'Pi ', # 0x39
'Yu ', # 0x3a
'Si ', # 0x3b
'Zuo ', # 0x3c
'Bu ', # 0x3d
'You ', # 0x3e
'Dian ', # 0x3f
'Jia ', # 0x40
'Zhen ', # 0x41
'Shi ', # 0x42
'Shi ', # 0x43
'Tie ', # 0x44
'Ju ', # 0x45
'Zhan ', # 0x46
'Shi ', # 0x47
'She ', # 0x48
'Xuan ', # 0x49
'Zhao ', # 0x4a
'Bao ', # 0x4b
'He ', # 0x4c
'Bi ', # 0x4d
'Sheng ', # 0x4e
'Chu ', # 0x4f
'Shi ', # 0x50
'Bo ', # 0x51
'Zhu ', # 0x52
'Chi ', # 0x53
'Za ', # 0x54
'Po ', # 0x55
'Tong ', # 0x56
'Qian ', # 0x57
'Fu ', # 0x58
'Zhai ', # 0x59
'Liu ', # 0x5a
'Qian ', # 0x5b
'Fu ', # 0x5c
'Li ', # 0x5d
'Yue ', # 0x5e
'Pi ', # 0x5f
'Yang ', # 0x60
'Ban ', # 0x61
'Bo ', # 0x62
'Jie ', # 0x63
'Gou ', # 0x64
'Shu ', # 0x65
'Zheng ', # 0x66
'Mu ', # 0x67
'Ni ', # 0x68
'Nie ', # 0x69
'Di ', # 0x6a
'Jia ', # 0x6b
'Mu ', # 0x6c
'Dan ', # 0x6d
'Shen ', # 0x6e
'Yi ', # 0x6f
'Si ', # 0x70
'Kuang ', # 0x71
'Ka ', # 0x72
'Bei ', # 0x73
'Jian ', # 0x74
'Tong ', # 0x75
'Xing ', # 0x76
'Hong ', # 0x77
'Jiao ', # 0x78
'Chi ', # 0x79
'Er ', # 0x7a
'Ge ', # 0x7b
'Bing ', # 0x7c
'Shi ', # 0x7d
'Mou ', # 0x7e
'Jia ', # 0x7f
'Yin ', # 0x80
'Jun ', # 0x81
'Zhou ', # 0x82
'Chong ', # 0x83
'Shang ', # 0x84
'Tong ', # 0x85
'Mo ', # 0x86
'Lei ', # 0x87
'Ji ', # 0x88
'Yu ', # 0x89
'Xu ', # 0x8a
'Ren ', # 0x8b
'Zun ', # 0x8c
'Zhi ', # 0x8d
'Qiong ', # 0x8e
'Shan ', # 0x8f
'Chi ', # 0x90
'Xian ', # 0x91
'Xing ', # 0x92
'Quan ', # 0x93
'Pi ', # 0x94
'Tie ', # 0x95
'Zhu ', # 0x96
'Hou ', # 0x97
'Ming ', # 0x98
'Kua ', # 0x99
'Yao ', # 0x9a
'Xian ', # 0x9b
'Xian ', # 0x9c
'Xiu ', # 0x9d
'Jun ', # 0x9e
'Cha ', # 0x9f
'Lao ', # 0xa0
'Ji ', # 0xa1
'Pi ', # 0xa2
'Ru ', # 0xa3
'Mi ', # 0xa4
'Yi ', # 0xa5
'Yin ', # 0xa6
'Guang ', # 0xa7
'An ', # 0xa8
'Diou ', # 0xa9
'You ', # 0xaa
'Se ', # 0xab
'Kao ', # 0xac
'Qian ', # 0xad
'Luan ', # 0xae
'Kasugai ', # 0xaf
'Ai ', # 0xb0
'Diao ', # 0xb1
'Han ', # 0xb2
'Rui ', # 0xb3
'Shi ', # 0xb4
'Keng ', # 0xb5
'Qiu ', # 0xb6
'Xiao ', # 0xb7
'Zhe ', # 0xb8
'Xiu ', # 0xb9
'Zang ', # 0xba
'Ti ', # 0xbb
'Cuo ', # 0xbc
'Gua ', # 0xbd
'Gong ', # 0xbe
'Zhong ', # 0xbf
'Dou ', # 0xc0
'Lu ', # 0xc1
'Mei ', # 0xc2
'Lang ', # 0xc3
'Wan ', # 0xc4
'Xin ', # 0xc5
'Yun ', # 0xc6
'Bei ', # 0xc7
'Wu ', # 0xc8
'Su ', # 0xc9
'Yu ', # 0xca
'Chan ', # 0xcb
'Ting ', # 0xcc
'Bo ', # 0xcd
'Han ', # 0xce
'Jia ', # 0xcf
'Hong ', # 0xd0
'Cuan ', # 0xd1
'Feng ', # 0xd2
'Chan ', # 0xd3
'Wan ', # 0xd4
'Zhi ', # 0xd5
'Si ', # 0xd6
'Xuan ', # 0xd7
'Wu ', # 0xd8
'Wu ', # 0xd9
'Tiao ', # 0xda
'Gong ', # 0xdb
'Zhuo ', # 0xdc
'Lue ', # 0xdd
'Xing ', # 0xde
'Qian ', # 0xdf
'Shen ', # 0xe0
'Han ', # 0xe1
'Lue ', # 0xe2
'Xie ', # 0xe3
'Chu ', # 0xe4
'Zheng ', # 0xe5
'Ju ', # 0xe6
'Xian ', # 0xe7
'Tie ', # 0xe8
'Mang ', # 0xe9
'Pu ', # 0xea
'Li ', # 0xeb
'Pan ', # 0xec
'Rui ', # 0xed
'Cheng ', # 0xee
'Gao ', # 0xef
'Li ', # 0xf0
'Te ', # 0xf1
'Pyeng ', # 0xf2
'Zhu ', # 0xf3
'[?] ', # 0xf4
'Tu ', # 0xf5
'Liu ', # 0xf6
'Zui ', # 0xf7
'Ju ', # 0xf8
'Chang ', # 0xf9
'Yuan ', # 0xfa
'Jian ', # 0xfb
'Gang ', # 0xfc
'Diao ', # 0xfd
'Tao ', # 0xfe
'Chang ', # 0xff
)
|
zed/test-performance-run-in-a-loop-vs-run-standalone
|
refs/heads/master
|
run-standalone.py
|
1
|
from timeit import default_timer as timer # allow Windows friends to play
a = range(500)
sum(a)
for i in range(1000000): # just to create a time interval; this seems to disturb the cpu cache?
pass
st = timer()
sum(a)
print("%.2f us" % ((timer() - st)*1e6,))
|
arazmj/RouteFlow
|
refs/heads/master
|
pox/pox/lib/epoll_select.py
|
26
|
# Copyright 2012 Andreas Wundsam
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
import select
class EpollSelect(object):
""" a class that implements select.select() type behavior on top of epoll.
Necessary, because select() only works on FD_SETSIZE (typically 1024) fd's at a time
"""
def __init__(self):
self.epoll = select.epoll()
self.fd_to_obj = {}
self.registered = {}
self.lastrl = []
self.lastrl_set = set()
self.lastwl = []
self.lastwl_set = set()
def select(self, rl, wl, xl, timeout=0):
""" emulate the select semantics on top of _epoll.
Note this tries to emulate the behavior of select.select()
- you can pass a raw fd, or an object that answers to #fileno().
- will return the object that belongs to the fd
"""
# a map of fd's that need to be modified.
# fd -> flag to be set (0 for unregister fd)
modify={}
def modify_table(current_obj_list, old_fd_set, op):
""" add operations to modify the registered fd's for operation / epoll mask 'op'
Returns the old_fd_set you should pass in next time
Also updates the fd_to_obj map.
Yes, this is ugly. """
current_fd_set = set()
for obj in current_obj_list:
# iterate through current_obj_list, update the fd-to-obj mapping, and create a set of fds
fd = obj.fileno() if hasattr(obj, "fileno") else obj
self.fd_to_obj[fd] = obj
current_fd_set.add(fd)
# new fds to register (for this op)
new = current_fd_set - old_fd_set
for fd in new:
if not fd in modify:
modify[fd] = self.registered[fd] if fd in self.registered else 0
modify[fd] |= op
# fd's to remove (at least for this op)
expired = old_fd_set - current_fd_set
for fd in expired:
if not fd in modify:
modify[fd] = self.registered[fd] if fd in self.registered else 0
modify[fd] &= ~op
return current_fd_set
# optimization assumptions
# rl is large and rarely changes
if rl != self.lastrl:
self.lastrl_set = modify_table(rl, self.lastrl_set, select.EPOLLIN|select.EPOLLPRI)
self.lastrl = rl
if wl != self.lastwl:
self.lastwl_set = modify_table(wl, self.lastwl_set, select.EPOLLOUT)
self.lastwl = wl
# ignore XL. Tough luck, epoll /always/ checks for error conditions
# you should, anyway
# now execute the modify ops on the epoll object
for (fd, mask) in modify.iteritems():
if fd in self.registered:
if mask == 0:
self.epoll.unregister(fd)
del self.registered[fd]
else:
self.epoll.modify(fd, mask)
self.registered[fd] = mask
else:
if mask == 0:
raise AssertionError("This should never happen - a new fd was scheduled for modification but neither for read nor write_")
else:
self.epoll.register(fd, mask)
self.registered[fd] = mask
# now for the real beef
events = self.epoll.poll(timeout)
# convert the events list of (fd, event) tuples to the three lists expected by select users
retrl = []
retwl = []
retxl = []
for (fd, event) in events:
if event & (select.EPOLLIN|select.EPOLLPRI|select.EPOLLRDNORM|select.EPOLLRDBAND):
retrl.append(self.fd_to_obj[fd])
if event & (select.EPOLLOUT|select.EPOLLWRNORM|select.EPOLLWRBAND):
retwl.append(self.fd_to_obj[fd])
if event & (select.EPOLLERR|select.EPOLLHUP):
retxl.append(self.fd_to_obj[fd])
return (retrl, retwl, retxl)
def close(self):
self.epoll.close()
|
valexandersaulys/prudential_insurance_kaggle
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/vcs/git.py
|
280
|
from __future__ import absolute_import
import logging
import tempfile
import os.path
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.utils import display_path, rmtree
from pip.vcs import vcs, VersionControl
urlsplit = urllib_parse.urlsplit
urlunsplit = urllib_parse.urlunsplit
logger = logging.getLogger(__name__)
class Git(VersionControl):
name = 'git'
dirname = '.git'
repo_name = 'clone'
schemes = (
'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file',
)
def __init__(self, url=None, *args, **kwargs):
# Works around an apparent Git bug
# (see http://article.gmane.org/gmane.comp.version-control.git/146500)
if url:
scheme, netloc, path, query, fragment = urlsplit(url)
if scheme.endswith('file'):
initial_slashes = path[:-len(path.lstrip('/'))]
newpath = (
initial_slashes +
urllib_request.url2pathname(path)
.replace('\\', '/').lstrip('/')
)
url = urlunsplit((scheme, netloc, newpath, query, fragment))
after_plus = scheme.find('+') + 1
url = scheme[:after_plus] + urlunsplit(
(scheme[after_plus:], netloc, newpath, query, fragment),
)
super(Git, self).__init__(url, *args, **kwargs)
def export(self, location):
"""Export the Git repository at the url to the destination location"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
try:
if not location.endswith('/'):
location = location + '/'
self.run_command(
['checkout-index', '-a', '-f', '--prefix', location],
show_stdout=False, cwd=temp_dir)
finally:
rmtree(temp_dir)
def check_rev_options(self, rev, dest, rev_options):
"""Check the revision options before checkout to compensate that tags
and branches may need origin/ as a prefix.
Returns the SHA1 of the branch or tag if found.
"""
revisions = self.get_refs(dest)
origin_rev = 'origin/%s' % rev
if origin_rev in revisions:
# remote branch
return [revisions[origin_rev]]
elif rev in revisions:
# a local tag or branch name
return [revisions[rev]]
else:
logger.warning(
"Could not find a tag or branch '%s', assuming commit.", rev,
)
return rev_options
def switch(self, dest, url, rev_options):
self.run_command(['config', 'remote.origin.url', url], cwd=dest)
self.run_command(['checkout', '-q'] + rev_options, cwd=dest)
self.update_submodules(dest)
def update(self, dest, rev_options):
# First fetch changes from the default remote
self.run_command(['fetch', '-q'], cwd=dest)
# Then reset to the wanted revision (maybe even origin/master)
if rev_options:
rev_options = self.check_rev_options(
rev_options[0], dest, rev_options,
)
self.run_command(['reset', '--hard', '-q'] + rev_options, cwd=dest)
#: update submodules
self.update_submodules(dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = [rev]
rev_display = ' (to %s)' % rev
else:
rev_options = ['origin/master']
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Cloning %s%s to %s', url, rev_display, display_path(dest),
)
self.run_command(['clone', '-q', url, dest])
if rev:
rev_options = self.check_rev_options(rev, dest, rev_options)
# Only do a checkout if rev_options differs from HEAD
if not self.get_revision(dest).startswith(rev_options[0]):
self.run_command(
['checkout', '-q'] + rev_options,
cwd=dest,
)
#: repo may contain submodules
self.update_submodules(dest)
def get_url(self, location):
url = self.run_command(
['config', 'remote.origin.url'],
show_stdout=False, cwd=location)
return url.strip()
def get_revision(self, location):
current_rev = self.run_command(
['rev-parse', 'HEAD'], show_stdout=False, cwd=location)
return current_rev.strip()
def get_refs(self, location):
"""Return map of named refs (branches or tags) to commit hashes."""
output = self.run_command(['show-ref'],
show_stdout=False, cwd=location)
rv = {}
for line in output.strip().splitlines():
commit, ref = line.split(' ', 1)
ref = ref.strip()
ref_name = None
if ref.startswith('refs/remotes/'):
ref_name = ref[len('refs/remotes/'):]
elif ref.startswith('refs/heads/'):
ref_name = ref[len('refs/heads/'):]
elif ref.startswith('refs/tags/'):
ref_name = ref[len('refs/tags/'):]
if ref_name is not None:
rv[ref_name] = commit.strip()
return rv
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo.lower().startswith('git:'):
repo = 'git+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
if not repo:
return None
current_rev = self.get_revision(location)
refs = self.get_refs(location)
# refs maps names to commit hashes; we need the inverse
# if multiple names map to a single commit, we pick the first one
# alphabetically
names_by_commit = {}
for ref, commit in sorted(refs.items()):
if commit not in names_by_commit:
names_by_commit[commit] = ref
if current_rev in names_by_commit:
# It's a tag or branch.
name = names_by_commit[current_rev]
full_egg_name = (
'%s-%s' % (egg_project_name, self.translate_egg_surname(name))
)
else:
full_egg_name = '%s-dev' % egg_project_name
return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
def get_url_rev(self):
"""
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
That's required because although they use SSH they sometimes don't
work with an ssh:// scheme (e.g. GitHub). But we need a scheme for
parsing. Hence we remove it again afterwards and return it as a stub.
"""
if '://' not in self.url:
assert 'file:' not in self.url
self.url = self.url.replace('git+', 'git+ssh://')
url, rev = super(Git, self).get_url_rev()
url = url.replace('ssh://', '')
else:
url, rev = super(Git, self).get_url_rev()
return url, rev
def update_submodules(self, location):
if not os.path.exists(os.path.join(location, '.gitmodules')):
return
self.run_command(
['submodule', 'update', '--init', '--recursive', '-q'],
cwd=location,
)
vcs.register(Git)
|
Beauhurst/django
|
refs/heads/master
|
tests/select_related_onetoone/tests.py
|
39
|
from django.core.exceptions import FieldError
from django.test import SimpleTestCase, TestCase
from .models import (
AdvancedUserStat, Child1, Child2, Child3, Child4, Image, LinkedList,
Parent1, Parent2, Product, StatDetails, User, UserProfile, UserStat,
UserStatResult,
)
class ReverseSelectRelatedTestCase(TestCase):
def setUp(self):
user = User.objects.create(username="test")
UserProfile.objects.create(user=user, state="KS", city="Lawrence")
results = UserStatResult.objects.create(results='first results')
userstat = UserStat.objects.create(user=user, posts=150,
results=results)
StatDetails.objects.create(base_stats=userstat, comments=259)
user2 = User.objects.create(username="bob")
results2 = UserStatResult.objects.create(results='moar results')
advstat = AdvancedUserStat.objects.create(user=user2, posts=200, karma=5,
results=results2)
StatDetails.objects.create(base_stats=advstat, comments=250)
p1 = Parent1(name1="Only Parent1")
p1.save()
c1 = Child1(name1="Child1 Parent1", name2="Child1 Parent2", value=1)
c1.save()
p2 = Parent2(name2="Child2 Parent2")
p2.save()
c2 = Child2(name1="Child2 Parent1", parent2=p2, value=2)
c2.save()
def test_basic(self):
with self.assertNumQueries(1):
u = User.objects.select_related("userprofile").get(username="test")
self.assertEqual(u.userprofile.state, "KS")
def test_follow_next_level(self):
with self.assertNumQueries(1):
u = User.objects.select_related("userstat__results").get(username="test")
self.assertEqual(u.userstat.posts, 150)
self.assertEqual(u.userstat.results.results, 'first results')
def test_follow_two(self):
with self.assertNumQueries(1):
u = User.objects.select_related("userprofile", "userstat").get(username="test")
self.assertEqual(u.userprofile.state, "KS")
self.assertEqual(u.userstat.posts, 150)
def test_follow_two_next_level(self):
with self.assertNumQueries(1):
u = User.objects.select_related("userstat__results", "userstat__statdetails").get(username="test")
self.assertEqual(u.userstat.results.results, 'first results')
self.assertEqual(u.userstat.statdetails.comments, 259)
def test_forward_and_back(self):
with self.assertNumQueries(1):
stat = UserStat.objects.select_related("user__userprofile").get(user__username="test")
self.assertEqual(stat.user.userprofile.state, 'KS')
self.assertEqual(stat.user.userstat.posts, 150)
def test_back_and_forward(self):
with self.assertNumQueries(1):
u = User.objects.select_related("userstat").get(username="test")
self.assertEqual(u.userstat.user.username, 'test')
def test_not_followed_by_default(self):
with self.assertNumQueries(2):
u = User.objects.select_related().get(username="test")
self.assertEqual(u.userstat.posts, 150)
def test_follow_from_child_class(self):
with self.assertNumQueries(1):
stat = AdvancedUserStat.objects.select_related('user', 'statdetails').get(posts=200)
self.assertEqual(stat.statdetails.comments, 250)
self.assertEqual(stat.user.username, 'bob')
def test_follow_inheritance(self):
with self.assertNumQueries(1):
stat = UserStat.objects.select_related('user', 'advanceduserstat').get(posts=200)
self.assertEqual(stat.advanceduserstat.posts, 200)
self.assertEqual(stat.user.username, 'bob')
with self.assertNumQueries(1):
self.assertEqual(stat.advanceduserstat.user.username, 'bob')
def test_nullable_relation(self):
im = Image.objects.create(name="imag1")
p1 = Product.objects.create(name="Django Plushie", image=im)
p2 = Product.objects.create(name="Talking Django Plushie")
with self.assertNumQueries(1):
result = sorted(Product.objects.select_related("image"), key=lambda x: x.name)
self.assertEqual([p.name for p in result], ["Django Plushie", "Talking Django Plushie"])
self.assertEqual(p1.image, im)
# Check for ticket #13839
self.assertIsNone(p2.image)
def test_missing_reverse(self):
"""
Ticket #13839: select_related() should NOT cache None
for missing objects on a reverse 1-1 relation.
"""
with self.assertNumQueries(1):
user = User.objects.select_related('userprofile').get(username='bob')
with self.assertRaises(UserProfile.DoesNotExist):
user.userprofile
def test_nullable_missing_reverse(self):
"""
Ticket #13839: select_related() should NOT cache None
for missing objects on a reverse 0-1 relation.
"""
Image.objects.create(name="imag1")
with self.assertNumQueries(1):
image = Image.objects.select_related('product').get()
with self.assertRaises(Product.DoesNotExist):
image.product
def test_parent_only(self):
with self.assertNumQueries(1):
p = Parent1.objects.select_related('child1').get(name1="Only Parent1")
with self.assertNumQueries(0):
with self.assertRaises(Child1.DoesNotExist):
p.child1
def test_multiple_subclass(self):
with self.assertNumQueries(1):
p = Parent1.objects.select_related('child1').get(name1="Child1 Parent1")
self.assertEqual(p.child1.name2, 'Child1 Parent2')
def test_onetoone_with_subclass(self):
with self.assertNumQueries(1):
p = Parent2.objects.select_related('child2').get(name2="Child2 Parent2")
self.assertEqual(p.child2.name1, 'Child2 Parent1')
def test_onetoone_with_two_subclasses(self):
with self.assertNumQueries(1):
p = Parent2.objects.select_related('child2', "child2__child3").get(name2="Child2 Parent2")
self.assertEqual(p.child2.name1, 'Child2 Parent1')
with self.assertRaises(Child3.DoesNotExist):
p.child2.child3
p3 = Parent2(name2="Child3 Parent2")
p3.save()
c2 = Child3(name1="Child3 Parent1", parent2=p3, value=2, value3=3)
c2.save()
with self.assertNumQueries(1):
p = Parent2.objects.select_related('child2', "child2__child3").get(name2="Child3 Parent2")
self.assertEqual(p.child2.name1, 'Child3 Parent1')
self.assertEqual(p.child2.child3.value3, 3)
self.assertEqual(p.child2.child3.value, p.child2.value)
self.assertEqual(p.child2.name1, p.child2.child3.name1)
def test_multiinheritance_two_subclasses(self):
with self.assertNumQueries(1):
p = Parent1.objects.select_related('child1', 'child1__child4').get(name1="Child1 Parent1")
self.assertEqual(p.child1.name2, 'Child1 Parent2')
self.assertEqual(p.child1.name1, p.name1)
with self.assertRaises(Child4.DoesNotExist):
p.child1.child4
Child4(name1='n1', name2='n2', value=1, value4=4).save()
with self.assertNumQueries(1):
p = Parent2.objects.select_related('child1', 'child1__child4').get(name2="n2")
self.assertEqual(p.name2, 'n2')
self.assertEqual(p.child1.name1, 'n1')
self.assertEqual(p.child1.name2, p.name2)
self.assertEqual(p.child1.value, 1)
self.assertEqual(p.child1.child4.name1, p.child1.name1)
self.assertEqual(p.child1.child4.name2, p.child1.name2)
self.assertEqual(p.child1.child4.value, p.child1.value)
self.assertEqual(p.child1.child4.value4, 4)
def test_inheritance_deferred(self):
c = Child4.objects.create(name1='n1', name2='n2', value=1, value4=4)
with self.assertNumQueries(1):
p = Parent2.objects.select_related('child1').only(
'id2', 'child1__value').get(name2="n2")
self.assertEqual(p.id2, c.id2)
self.assertEqual(p.child1.value, 1)
p = Parent2.objects.select_related('child1').only(
'id2', 'child1__value').get(name2="n2")
with self.assertNumQueries(1):
self.assertEqual(p.name2, 'n2')
p = Parent2.objects.select_related('child1').only(
'id2', 'child1__value').get(name2="n2")
with self.assertNumQueries(1):
self.assertEqual(p.child1.name2, 'n2')
def test_inheritance_deferred2(self):
c = Child4.objects.create(name1='n1', name2='n2', value=1, value4=4)
qs = Parent2.objects.select_related('child1', 'child1__child4').only(
'id2', 'child1__value', 'child1__child4__value4')
with self.assertNumQueries(1):
p = qs.get(name2="n2")
self.assertEqual(p.id2, c.id2)
self.assertEqual(p.child1.value, 1)
self.assertEqual(p.child1.child4.value4, 4)
self.assertEqual(p.child1.child4.id2, c.id2)
p = qs.get(name2="n2")
with self.assertNumQueries(1):
self.assertEqual(p.child1.name2, 'n2')
p = qs.get(name2="n2")
with self.assertNumQueries(0):
self.assertEqual(p.child1.name1, 'n1')
self.assertEqual(p.child1.child4.name1, 'n1')
def test_self_relation(self):
item1 = LinkedList.objects.create(name='item1')
LinkedList.objects.create(name='item2', previous_item=item1)
with self.assertNumQueries(1):
item1_db = LinkedList.objects.select_related('next_item').get(name='item1')
self.assertEqual(item1_db.next_item.name, 'item2')
class ReverseSelectRelatedValidationTests(SimpleTestCase):
"""
Reverse related fields should be listed in the validation message when an
invalid field is given in select_related().
"""
non_relational_error = "Non-relational field given in select_related: '%s'. Choices are: %s"
invalid_error = "Invalid field name(s) given in select_related: '%s'. Choices are: %s"
def test_reverse_related_validation(self):
fields = 'userprofile, userstat'
with self.assertRaisesMessage(FieldError, self.invalid_error % ('foobar', fields)):
list(User.objects.select_related('foobar'))
with self.assertRaisesMessage(FieldError, self.non_relational_error % ('username', fields)):
list(User.objects.select_related('username'))
|
Nelayah/blogCMS
|
refs/heads/master
|
blog/__init__.py
|
12133432
| |
openstack/networking-edge-vpn
|
refs/heads/master
|
neutron/neutron/services/mplsvpn/__init__.py
|
12133432
| |
hotpxl/mxnet
|
refs/heads/master
|
tests/nightly/mxnet_keras_integration_tests/model_util.py
|
15
|
import os
from keras import backend as K
from keras.models import Model
from keras.layers import Input, merge
from keras.layers.core import Lambda
# Before running the integration tests, users are expected to set these
# environment variables.
IS_GPU = (os.environ['MXNET_KERAS_TEST_MACHINE'] == 'GPU')
GPU_NUM = int(os.environ['GPU_NUM']) if IS_GPU else 0
KERAS_BACKEND = os.environ['KERAS_BACKEND']
def slice_batch(x, n_gpus, part):
sh = K.shape(x)
L = sh[0] / n_gpus
if part == n_gpus - 1:
return x[part*L:]
return x[part*L:(part+1)*L]
def prepare_gpu_model(model, **kwargs):
gpu_list = []
for i in range(GPU_NUM):
gpu_list.append('gpu(%d)' % i)
if KERAS_BACKEND == 'mxnet':
kwargs['context'] = gpu_list
model.compile(**kwargs)
else:
model.compile(**kwargs)
def prepare_cpu_model(model, **kwargs):
model.compile(**kwargs)
def make_model(model, **kwargs):
"""
Compiles the Keras Model object for the given backend type and machine type.
Use this function to write Keras code once and run it across different machine types.
If the environment variable MXNET_KERAS_TEST_MACHINE is set to CPU, the Keras
Model is compiled for running on CPU.
If the environment variable MXNET_KERAS_TEST_MACHINE is set to GPU, the Keras
Model is compiled for running on GPU, using a number of GPUs equal to the value
of the GPU_NUM environment variable.
Currently supports only MXNet as Keras backend.
"""
if(IS_GPU):
prepare_gpu_model(model, **kwargs)
else:
prepare_cpu_model(model, **kwargs)
return model
|
gandarez/wakatime
|
refs/heads/master
|
wakatime/packages/pygments_py2/pygments/lexers/agile.py
|
77
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.agile
~~~~~~~~~~~~~~~~~~~~~
Just export lexer classes previously contained in this module.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexers.lisp import SchemeLexer
from pygments.lexers.jvm import IokeLexer, ClojureLexer
from pygments.lexers.python import PythonLexer, PythonConsoleLexer, \
PythonTracebackLexer, Python3Lexer, Python3TracebackLexer, DgLexer
from pygments.lexers.ruby import RubyLexer, RubyConsoleLexer, FancyLexer
from pygments.lexers.perl import PerlLexer, Perl6Lexer
from pygments.lexers.d import CrocLexer, MiniDLexer
from pygments.lexers.iolang import IoLexer
from pygments.lexers.tcl import TclLexer
from pygments.lexers.factor import FactorLexer
from pygments.lexers.scripting import LuaLexer, MoonScriptLexer
__all__ = []
|
pixelrebel/st2
|
refs/heads/master
|
st2reactor/st2reactor/rules/config.py
|
6
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
import st2common.config as common_config
from st2common.constants.system import VERSION_STRING
common_config.register_opts()
CONF = cfg.CONF
def parse_args(args=None):
CONF(args=args, version=VERSION_STRING)
def register_opts():
_register_common_opts()
_register_rules_engine_opts()
def get_logging_config_path():
return cfg.CONF.rulesengine.logging
def _register_common_opts():
common_config.register_opts()
def _register_rules_engine_opts():
logging_opts = [
cfg.StrOpt('logging', default='conf/logging.rulesengine.conf',
help='Location of the logging configuration file.')
]
CONF.register_opts(logging_opts, group='rulesengine')
timer_opts = [
cfg.StrOpt('local_timezone', default='America/Los_Angeles',
help='Timezone pertaining to the location where st2 is run.'),
cfg.BoolOpt('enable', default=True, help='Specify to enable Timer.')
]
CONF.register_opts(timer_opts, group='timer')
register_opts()
|
DGA-MI-SSI/YaCo
|
refs/heads/master
|
deps/swig-3.0.7/Examples/test-suite/python/template_typedef_cplx3_runme.py
|
6
|
import string
from template_typedef_cplx3 import *
#
# this is OK
#
s = Sin()
s.get_base_value()
s.get_value()
s.get_arith_value()
my_func_r(s)
make_Multiplies_double_double_double_double(s, s)
z = CSin()
z.get_base_value()
z.get_value()
z.get_arith_value()
my_func_c(z)
make_Multiplies_complex_complex_complex_complex(z, z)
#
# Here we fail
#
d = make_Identity_double()
my_func_r(d)
c = make_Identity_complex()
my_func_c(c)
|
lucidmotifs/auto-aoc
|
refs/heads/master
|
.venv/lib/python3.5/site-packages/pylint/test/unittest_checker_spelling.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2017 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Michal Nowikowski <godfryd@gmail.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016 Derek Gustafson <degustaf@gmail.com>
# Copyright (c) 2017 Pedro Algarvio <pedro@algarvio.me>
# Copyright (c) 2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Unittest for the spelling checker."""
import pytest
import astroid
from pylint.checkers import spelling
from pylint.testutils import CheckerTestCase, Message, set_config, _tokenize_str
# try to create enchant dictionary
try:
import enchant
except ImportError:
enchant = None
spell_dict = None
if enchant is not None:
try:
enchant.Dict("en_US")
spell_dict = "en_US"
except enchant.DictNotFoundError:
pass
class TestSpellingChecker(CheckerTestCase):
CHECKER_CLASS = spelling.SpellingChecker
skip_on_missing_package_or_dict = pytest.mark.skipif(
spell_dict is None,
reason="missing python-enchant package or missing spelling dictionaries")
def _get_msg_suggestions(self, word, count=4):
return "'{0}'".format("' or '".join(self.checker.spelling_dict.suggest(word)[:count]))
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_check_bad_coment(self):
with self.assertAddsMessages(
Message('wrong-spelling-in-comment', line=1,
args=('coment', '# bad coment',
' ^^^^^^',
self._get_msg_suggestions('coment')))):
self.checker.process_tokens(_tokenize_str("# bad coment"))
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
@set_config(max_spelling_suggestions=2)
def test_check_bad_coment_custom_suggestion_count(self):
with self.assertAddsMessages(
Message('wrong-spelling-in-comment', line=1,
args=('coment', '# bad coment',
' ^^^^^^',
self._get_msg_suggestions('coment', count=2)))):
self.checker.process_tokens(_tokenize_str("# bad coment"))
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_check_bad_docstring(self):
stmt = astroid.extract_node(
'def fff():\n """bad coment"""\n pass')
with self.assertAddsMessages(
Message('wrong-spelling-in-docstring', line=2,
args=('coment', 'bad coment',
' ^^^^^^',
self._get_msg_suggestions('coment')))):
self.checker.visit_functiondef(stmt)
stmt = astroid.extract_node(
'class Abc(object):\n """bad coment"""\n pass')
with self.assertAddsMessages(
Message('wrong-spelling-in-docstring', line=2,
args=('coment', 'bad coment',
' ^^^^^^',
self._get_msg_suggestions('coment')))):
self.checker.visit_classdef(stmt)
@pytest.mark.skipif(True, reason='pyenchant\'s tokenizer strips these')
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_invalid_docstring_characters(self):
stmt = astroid.extract_node(
'def fff():\n """test\\x00"""\n pass')
with self.assertAddsMessages(
Message('invalid-characters-in-docstring', line=2,
args=('test\x00',))):
self.checker.visit_functiondef(stmt)
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_shebangs(self):
self.checker.process_tokens(_tokenize_str('#!/usr/bin/env python'))
assert self.linter.release_messages() == []
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_python_coding_comments(self):
self.checker.process_tokens(_tokenize_str(
'# -*- coding: utf-8 -*-'))
assert self.linter.release_messages() == []
self.checker.process_tokens(_tokenize_str(
'# coding=utf-8'))
assert self.linter.release_messages() == []
self.checker.process_tokens(_tokenize_str(
'# vim: set fileencoding=utf-8 :'))
assert self.linter.release_messages() == []
# Now with a shebang first
self.checker.process_tokens(_tokenize_str(
'#!/usr/bin/env python\n# -*- coding: utf-8 -*-'))
assert self.linter.release_messages() == []
self.checker.process_tokens(_tokenize_str(
'#!/usr/bin/env python\n# coding=utf-8'))
assert self.linter.release_messages() == []
self.checker.process_tokens(_tokenize_str(
'#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :'))
assert self.linter.release_messages() == []
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_top_level_pylint_enable_disable_comments(self):
self.checker.process_tokens(_tokenize_str('# Line 1\n Line 2\n# pylint: disable=ungrouped-imports'))
assert self.linter.release_messages() == []
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_words_with_numbers(self):
self.checker.process_tokens(_tokenize_str('\n# 0ne\n# Thr33\n# Sh3ll'))
assert self.linter.release_messages() == []
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_wiki_words(self):
stmt = astroid.extract_node(
'class ComentAbc(object):\n """ComentAbc with a bad coment"""\n pass')
with self.assertAddsMessages(
Message('wrong-spelling-in-docstring', line=2,
args=('coment', 'ComentAbc with a bad coment',
' ^^^^^^',
self._get_msg_suggestions('coment')))):
self.checker.visit_classdef(stmt)
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_camel_cased_words(self):
stmt = astroid.extract_node(
'class ComentAbc(object):\n """comentAbc with a bad coment"""\n pass')
with self.assertAddsMessages(
Message('wrong-spelling-in-docstring', line=2,
args=('coment', 'comentAbc with a bad coment',
' ^^^^^^',
self._get_msg_suggestions('coment')))):
self.checker.visit_classdef(stmt)
# With just a single upper case letter in the end
stmt = astroid.extract_node(
'class ComentAbc(object):\n """argumentN with a bad coment"""\n pass')
with self.assertAddsMessages(
Message('wrong-spelling-in-docstring', line=2,
args=('coment', 'argumentN with a bad coment',
' ^^^^^^',
self._get_msg_suggestions('coment')))):
self.checker.visit_classdef(stmt)
for ccn in ('xmlHttpRequest', 'newCustomer', 'newCustomerId',
'innerStopwatch', 'supportsIpv6OnIos', 'affine3D'):
stmt = astroid.extract_node(
'class TestClass(object):\n """{0} comment"""\n pass'.format(ccn))
self.checker.visit_classdef(stmt)
assert self.linter.release_messages() == []
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_words_with_underscores(self):
stmt = astroid.extract_node(
'def fff(param_name):\n """test param_name"""\n pass')
self.checker.visit_functiondef(stmt)
assert self.linter.release_messages() == []
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_email_address(self):
self.checker.process_tokens(_tokenize_str('# uname@domain.tld'))
assert self.linter.release_messages() == []
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_urls(self):
self.checker.process_tokens(_tokenize_str('# https://github.com/rfk/pyenchant'))
assert self.linter.release_messages() == []
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_sphinx_directives(self):
stmt = astroid.extract_node(
'class ComentAbc(object):\n """This is :class:`ComentAbc` with a bad coment"""\n pass')
with self.assertAddsMessages(
Message('wrong-spelling-in-docstring', line=2,
args=('coment', 'This is :class:`ComentAbc` with a bad coment',
' ^^^^^^',
self._get_msg_suggestions('coment')))):
self.checker.visit_classdef(stmt)
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_handle_words_joined_by_forward_slash(self):
stmt = astroid.extract_node('''
class ComentAbc(object):
"""This is Comment/Abcz with a bad comment"""
pass
''')
with self.assertAddsMessages(
Message('wrong-spelling-in-docstring', line=3,
args=('Abcz', 'This is Comment/Abcz with a bad comment',
' ^^^^',
self._get_msg_suggestions('Abcz')))):
self.checker.visit_classdef(stmt)
|
azureplus/hue
|
refs/heads/master
|
desktop/core/ext-py/django-nose-1.3/testapp/settings_old_style.py
|
43
|
DATABASES = {
'default': {
'NAME': 'django_master',
'ENGINE': 'django.db.backends.sqlite3',
}
}
INSTALLED_APPS = (
'django_nose',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
TEST_RUNNER = 'django_nose.run_tests'
SECRET_KEY = 'sssshhh'
|
zendesk/dd-agent
|
refs/heads/master
|
modules.py
|
50
|
""" Tools for loading Python modules from arbitrary locations.
"""
# stdlib
import imp
import os
import re
import sys
# project
from util import windows_friendly_colon_split
WINDOWS_PATH = re.compile('[A-Z]:.*', re.IGNORECASE)
def imp_type_for_filename(filename):
"""Given the name of a Python module, return a type description suitable to
be passed to imp.load_module()"""
for type_data in imp.get_suffixes():
extension = type_data[0]
if filename.endswith(extension):
return type_data
return None
def load_qualified_module(full_module_name, path=None):
"""Load a module which may be within a package"""
remaining_pieces = full_module_name.split('.')
done_pieces = []
file_obj = None
while remaining_pieces:
try:
done_pieces.append(remaining_pieces.pop(0))
curr_module_name = '.'.join(done_pieces)
(file_obj, filename, description) = imp.find_module(
done_pieces[-1], path)
package_module = imp.load_module(
curr_module_name, file_obj, filename, description)
path = getattr(package_module, '__path__', None) or [filename]
finally:
if file_obj:
file_obj.close()
return package_module
def module_name_for_filename(filename):
"""Given the name of a Python file, find an appropropriate module name.
This involves determining whether the file is within a package, and
determining the name of same."""
all_segments = filename.split(os.sep)
path_elements = all_segments[:-1]
module_elements = [all_segments[-1].rsplit('.', 1)[0]]
while True:
init_path = os.path.join(*(path_elements + ['__init__.py']))
if path_elements[0] == "":
# os.path.join will not put the leading '/'
# it will return a/b/c for os.path.join("","a","b","c")
init_path = '/' + init_path
if os.path.exists(init_path):
module_elements.insert(0, path_elements.pop())
else:
break
modulename = '.'.join(module_elements)
basename = '/'.join(path_elements)
return (basename, modulename)
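# Illustrative example (not part of the original module; paths hypothetical):
# for a file /opt/pkg/sub/mod.py where /opt/pkg/sub/__init__.py exists but
# /opt/pkg/__init__.py does not, the walk above stops at 'pkg', so
# module_name_for_filename('/opt/pkg/sub/mod.py') returns ('/opt/pkg', 'sub.mod').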
def get_module(name):
"""Given either an absolute path to a Python file or a module name, load
and return a Python module.
If the module is already loaded, takes no action."""
if name.startswith('/') or WINDOWS_PATH.match(name):
basename, modulename = module_name_for_filename(name)
path = [basename]
else:
modulename = name
path = None
if modulename in sys.modules:
return sys.modules[modulename]
return load_qualified_module(modulename, path)
def load(config_string, default_name=None):
"""Given a module name and an object expected to be contained within,
return said object.
"""
split = windows_friendly_colon_split(config_string)
if len(split) > 1:
module_name, object_name = ":".join(split[:-1]), split[-1]
else:
module_name, object_name = config_string, default_name
module = get_module(module_name)
if object_name:
return getattr(module, object_name)
else:
return module
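# Usage sketch (illustrative only; the path and names below are hypothetical):
#
#   my_class = load('/checks.d/my_check.py:MyCheck')  # absolute path + attribute
#   module_only = load('some_package.some_module')    # dotted module name
#
# The colon separates the module part from the attribute to fetch;
# windows_friendly_colon_split() keeps drive-letter colons (e.g. 'C:\\check.py')
# from being mistaken for that separator.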
|
GENI-NSF/geni-soil
|
refs/heads/develop
|
src/vendor/geniv3rpc/ext/geni/am/gibaggregate/gib_manager.py
|
9
|
#----------------------------------------------------------------------
# Copyright (c) 2012 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
import logging
import os
import subprocess
import resources
import rspec_handler
import config
# GENI-in-a-box specific createSliver
def createSliver(slice_urn, requestRspec, users) :
"""
Create a sliver on this aggregate.
"""
config.logger.info("createSliver called")
# Parse the request rspec
rspec_handler.parseRequestRspec(slice_urn, requestRspec)
# Provision the sliver, i.e. assign resources as specified in the request rspec
# The sliver isn't created yet. The shell commands used to create
# the sliver are written into the file named in config.py
resources.provisionSliver(users)
# Generate the manifest rspec. The manifest is written to the file named
# in config.py
(rspec_handler.GeniManifest(users, requestRspec)).create()
# Add commands to the bash script that create special files/directories
# in the containers. They contain slice configuration information
# such as manifest rspec, slice name, etc.
resources.specialFiles()
# Execute the shell script that creates the new sliver
pathToFile = config.sliceSpecificScriptsDir + '/' + config.shellScriptFile
command = 'echo \"%s\" | sudo -S %s' % (config.rootPwd, pathToFile)
print command
os.system(command)
def deleteSliver() :
"""
Delete the sliver created on this aggregate.
"""
config.logger.info("createSliver called")
# Invoke the deleteSliver script in the standardScripts directory
pathToFile = config.standardScriptsDir + '/' + config.deleteSliver
command = 'echo \"%s\" | sudo -S %s %s %s' % (config.rootPwd,
pathToFile,
config.homeDirectory,
config.sliceSpecificScriptsDir
)
print command
os.system(command)
# Delete the file containing the manifest rspec
pathToFile = config.sliceSpecificScriptsDir + '/' + config.manifestFile
os.remove(pathToFile)
# Free up internal data structures representing these resources
resources.freeResources()
def sliverStatus(slice_urn) :
"""
Return the status of the resources that belong to this sliver.
"""
config.logger.info("sliverStatus called")
# Get a list of statuses for each of the VM resources
resourceStatusList = resources.getResourceStatus()
# Determine the overall status of the slice at this aggregate
# If any resource is 'shutdown', the sliver is 'shutdown'
# else if any resource is 'failed', the sliver is 'failed'
# else if any resource is 'configuring', the sliver is 'configuring'
# else if all resources are 'ready', the sliver is 'ready'
# else the sliver is 'unknown'
# Make a list that contains the status of all resources
statusList = list()
for i in range(len(resourceStatusList)) :
statusList.append(resourceStatusList[i]['geni_status'])
sliceStatus = 'unknown'
if 'shutdown' in statusList :
sliceStatus = 'shutdown'
elif 'failed' in statusList :
sliceStatus = 'failed'
elif 'configuring' in statusList :
sliceStatus = 'configuring'
elif 'ready' in statusList :
# Count number of resources that are ready. If all resources are
# ready, the slice is ready.
readyCount = 0;
for i in range(len(resourceStatusList)) :
if resourceStatusList[i]['geni_status'] == 'ready' :
readyCount += 1
print '%s resources are ready\n' % readyCount
if readyCount == len(resourceStatusList) :
sliceStatus = 'ready'
return dict(geni_urn = resources.sliceURN, \
geni_status = sliceStatus, \
geni_resources = resourceStatusList)
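# Worked example of the roll-up above (illustrative): a status list of
# ['ready', 'configuring'] yields 'configuring'; ['ready', 'ready'] yields
# 'ready'; any 'shutdown' or 'failed' entry takes precedence, in that order.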
def get_manifest() :
"""
Return the manifest rspec for the current slice. The manifest
is in a file created by rspec_handler.GeniManifest.
"""
pathToFile = config.sliceSpecificScriptsDir + '/' + config.manifestFile
config.logger.info('Reading manifest from %s' % pathToFile)
try:
f = open(pathToFile, 'r')
except IOError:
config.logger.error("Failed to open manifest rspec file %s" %
pathToFile)
return None
manifest = f.read()
f.close()
return manifest
def get_advert() :
"""
Return the advertisement rspec for this aggregate. Get this advertisement
from a pre-created, static file.
"""
pathToFile = config.standardScriptsDir + '/' + config.advertRspecFile
config.logger.info('Reading advert rspec from %s' % pathToFile)
try:
f = open(pathToFile, 'r')
except IOError:
config.logger.error("Failed to open advertisement rspec file %s" %
pathToFile)
return None
advert = f.read()
f.close()
return advert
|
ubc/edx-ora2
|
refs/heads/master
|
urls.py
|
5
|
from django.conf import settings
from django.conf.urls import include, patterns, url
from django.views.i18n import javascript_catalog
from django.contrib import admin
import openassessment.assessment.urls
import openassessment.fileupload.urls
import workbench.urls
admin.autodiscover()
# Packages to include in the JavaScript i18n strings
JS_INFO_DICT = {
'packages': ('openassessment.xblock',),
}
urlpatterns = patterns(
'',
# Django built-in
url(r'^admin/', include(admin.site.urls)),
# Provided by XBlock
url(r'^/?', include(workbench.urls)),
# edx-ora2 apps
url(r'^peer/evaluations/', include(openassessment.assessment.urls)),
# JavaScript i18n
(r'^jsi18n/$', 'django.views.i18n.javascript_catalog', JS_INFO_DICT),
# File upload to local filesystem
url(r'^openassessment/storage', include(openassessment.fileupload.urls)),
)
# We need to do explicit setup of the Django debug toolbar because autodiscovery
# causes problems when you mix debug toolbar >= 1.0 + django < 1.7, and the
# admin uses autodiscovery. See:
# http://django-debug-toolbar.readthedocs.org/en/1.0/installation.html#explicit-setup
if settings.DEBUG:
import debug_toolbar
urlpatterns += patterns('',
url(r'^__debug__/', include(debug_toolbar.urls)),
)
|
tmxdyf/CouchPotatoServer
|
refs/heads/master
|
couchpotato/core/plugins/release/main.py
|
5
|
from couchpotato import get_session, md5
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, addEvent
from couchpotato.core.helpers.encoding import ss, toUnicode
from couchpotato.core.helpers.variable import getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.plugins.base import Plugin
from couchpotato.core.plugins.scanner.main import Scanner
from couchpotato.core.settings.model import File, Release as Relea, Media, \
ReleaseInfo
from couchpotato.environment import Env
from inspect import ismethod, isfunction
from sqlalchemy.exc import InterfaceError
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql.expression import and_, or_
import os
import time
import traceback
log = CPLog(__name__)
class Release(Plugin):
def __init__(self):
addEvent('release.add', self.add)
addApiView('release.manual_download', self.manualDownload, docs = {
'desc': 'Send a release manually to the downloaders',
'params': {
'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
}
})
addApiView('release.delete', self.deleteView, docs = {
'desc': 'Delete releases',
'params': {
'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
}
})
addApiView('release.ignore', self.ignore, docs = {
'desc': 'Toggle ignore, for bad or wrong releases',
'params': {
'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
}
})
addApiView('release.for_movie', self.forMovieView, docs = {
'desc': 'Returns all releases for a movie. Ordered by score(desc)',
'params': {
'id': {'type': 'id', 'desc': 'ID of the movie'}
}
})
addEvent('release.download', self.download)
addEvent('release.try_download_result', self.tryDownloadResult)
addEvent('release.create_from_search', self.createFromSearch)
addEvent('release.for_movie', self.forMovie)
addEvent('release.delete', self.delete)
addEvent('release.clean', self.clean)
addEvent('release.update_status', self.updateStatus)
# Clean releases that didn't have activity in the last week
addEvent('app.load', self.cleanDone)
fireEvent('schedule.interval', 'movie.clean_releases', self.cleanDone, hours = 4)
def cleanDone(self):
log.debug('Removing releases from dashboard')
now = time.time()
week = 262080
done_status, available_status, snatched_status, downloaded_status, ignored_status = \
fireEvent('status.get', ['done', 'available', 'snatched', 'downloaded', 'ignored'], single = True)
db = get_session()
# get movies last_edit more than a week ago
media = db.query(Media) \
.filter(Media.status_id == done_status.get('id'), Media.last_edit < (now - week)) \
.all()
for item in media:
for rel in item.releases:
# Remove all available releases
if rel.status_id in [available_status.get('id')]:
fireEvent('release.delete', id = rel.id, single = True)
# Set all snatched and downloaded releases to ignored to make sure they are ignored when re-adding the movie
elif rel.status_id in [snatched_status.get('id'), downloaded_status.get('id')]:
self.updateStatus(id = rel.id, status = ignored_status)
def add(self, group):
try:
db = get_session()
identifier = '%s.%s.%s' % (group['library']['identifier'], group['meta_data'].get('audio', 'unknown'), group['meta_data']['quality']['identifier'])
done_status, snatched_status = fireEvent('status.get', ['done', 'snatched'], single = True)
# Add movie
media = db.query(Media).filter_by(library_id = group['library'].get('id')).first()
if not media:
media = Media(
library_id = group['library'].get('id'),
profile_id = 0,
status_id = done_status.get('id')
)
db.add(media)
db.commit()
# Add Release
rel = db.query(Relea).filter(
or_(
Relea.identifier == identifier,
and_(Relea.identifier.startswith(group['library']['identifier']), Relea.status_id == snatched_status.get('id'))
)
).first()
if not rel:
rel = Relea(
identifier = identifier,
movie = media,
quality_id = group['meta_data']['quality'].get('id'),
status_id = done_status.get('id')
)
db.add(rel)
db.commit()
# Add each file type
added_files = []
for type in group['files']:
for cur_file in group['files'][type]:
added_file = self.saveFile(cur_file, type = type, include_media_info = type == 'movie')
added_files.append(added_file.get('id'))
# Add the release files in batch
try:
added_files = db.query(File).filter(or_(*[File.id == x for x in added_files])).all()
rel.files.extend(added_files)
db.commit()
except:
log.debug('Failed to attach "%s" to release: %s', (added_files, traceback.format_exc()))
fireEvent('media.restatus', media.id)
return True
except:
log.error('Failed: %s', traceback.format_exc())
db.rollback()
finally:
db.close()
return False
def saveFile(self, filepath, type = 'unknown', include_media_info = False):
properties = {}
# Get media info for files
if include_media_info:
properties = {}
# Check database and update/insert if necessary
return fireEvent('file.add', path = filepath, part = fireEvent('scanner.partnumber', file, single = True), type_tuple = Scanner.file_types.get(type), properties = properties, single = True)
def deleteView(self, id = None, **kwargs):
return {
'success': self.delete(id)
}
def delete(self, id):
try:
db = get_session()
rel = db.query(Relea).filter_by(id = id).first()
if rel:
rel.delete()
db.commit()
return True
except:
log.error('Failed: %s', traceback.format_exc())
db.rollback()
finally:
db.close()
return False
def clean(self, id):
try:
db = get_session()
rel = db.query(Relea).filter_by(id = id).first()
if rel:
for release_file in rel.files:
if not os.path.isfile(ss(release_file.path)):
db.delete(release_file)
db.commit()
if len(rel.files) == 0:
self.delete(id)
return True
except:
log.error('Failed: %s', traceback.format_exc())
db.rollback()
finally:
db.close()
return False
def ignore(self, id = None, **kwargs):
db = get_session()
rel = db.query(Relea).filter_by(id = id).first()
if rel:
ignored_status, failed_status, available_status = fireEvent('status.get', ['ignored', 'failed', 'available'], single = True)
self.updateStatus(id, available_status if rel.status_id in [ignored_status.get('id'), failed_status.get('id')] else ignored_status)
return {
'success': True
}
def manualDownload(self, id = None, **kwargs):
db = get_session()
rel = db.query(Relea).filter_by(id = id).first()
if not rel:
log.error('Couldn\'t find release with id: %s', id)
return {
'success': False
}
item = {}
for info in rel.info:
item[info.identifier] = info.value
fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Snatching "%s"' % item['name'])
# Get matching provider
provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True)
# Backwards compatibility code
if not item.get('protocol'):
item['protocol'] = item['type']
item['type'] = 'movie'
if item.get('protocol') != 'torrent_magnet':
item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download
success = self.download(data = item, media = rel.movie.to_dict({
'profile': {'types': {'quality': {}}},
'releases': {'status': {}, 'quality': {}},
'library': {'titles': {}, 'files': {}},
'files': {}
}), manual = True)
db.expunge_all()
if success:
fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Successfully snatched "%s"' % item['name'])
return {
'success': success == True
}
def download(self, data, media, manual = False):
# Backwards compatibility code
if not data.get('protocol'):
data['protocol'] = data['type']
data['type'] = 'movie'
# Test to see if any downloaders are enabled for this type
downloader_enabled = fireEvent('download.enabled', manual, data, single = True)
if not downloader_enabled:
log.info('Tried to download, but none of the "%s" downloaders are enabled or gave an error', data.get('protocol'))
return False
# Download NZB or torrent file
filedata = None
if data.get('download') and (ismethod(data.get('download')) or isfunction(data.get('download'))):
try:
filedata = data.get('download')(url = data.get('url'), nzb_id = data.get('id'))
except:
log.error('Tried to download, but the "%s" provider gave an error: %s', (data.get('protocol'), traceback.format_exc()))
return False
if filedata == 'try_next':
return filedata
elif not filedata:
return False
# Send NZB or torrent file to downloader
download_result = fireEvent('download', data = data, media = media, manual = manual, filedata = filedata, single = True)
if not download_result:
log.info('Tried to download, but the "%s" downloader gave an error', data.get('protocol'))
return False
log.debug('Downloader result: %s', download_result)
snatched_status, done_status, downloaded_status, active_status = fireEvent('status.get', ['snatched', 'done', 'downloaded', 'active'], single = True)
try:
db = get_session()
rls = db.query(Relea).filter_by(identifier = md5(data['url'])).first()
if not rls:
log.error('No release found to store download information in')
return False
renamer_enabled = Env.setting('enabled', 'renamer')
# Save download-id info if returned
if isinstance(download_result, dict):
for key in download_result:
rls_info = ReleaseInfo(
identifier = 'download_%s' % key,
value = toUnicode(download_result.get(key))
)
rls.info.append(rls_info)
db.commit()
log_movie = '%s (%s) in %s' % (getTitle(media['library']), media['library']['year'], rls.quality.label)
snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
log.info(snatch_message)
fireEvent('%s.snatched' % data['type'], message = snatch_message, data = rls.to_dict())
# Mark release as snatched
if renamer_enabled:
self.updateStatus(rls.id, status = snatched_status)
# If renamer isn't used, mark media done if finished or release downloaded
else:
if media['status_id'] == active_status.get('id'):
finished = next((True for profile_type in media['profile']['types']
if profile_type['quality_id'] == rls.quality.id and profile_type['finish']), False)
if finished:
log.info('Renamer disabled, marking media as finished: %s', log_movie)
# Mark release done
self.updateStatus(rls.id, status = done_status)
# Mark media done
mdia = db.query(Media).filter_by(id = media['id']).first()
mdia.status_id = done_status.get('id')
mdia.last_edit = int(time.time())
db.commit()
return True
# Assume release downloaded
self.updateStatus(rls.id, status = downloaded_status)
except:
log.error('Failed storing download status: %s', traceback.format_exc())
db.rollback()
return False
finally:
db.close()
return True
def tryDownloadResult(self, results, media, quality_type, manual = False):
ignored_status, failed_status = fireEvent('status.get', ['ignored', 'failed'], single = True)
for rel in results:
if not quality_type.get('finish', False) and quality_type.get('wait_for', 0) > 0 and rel.get('age') <= quality_type.get('wait_for', 0):
log.info('Ignored, waiting %s days: %s', (quality_type.get('wait_for'), rel['name']))
continue
if rel['status_id'] in [ignored_status.get('id'), failed_status.get('id')]:
log.info('Ignored: %s', rel['name'])
continue
if rel['score'] <= 0:
log.info('Ignored, score too low: %s', rel['name'])
continue
downloaded = fireEvent('release.download', data = rel, media = media, manual = manual, single = True)
if downloaded is True:
return True
elif downloaded != 'try_next':
break
return False
def createFromSearch(self, search_results, media, quality_type):
available_status = fireEvent('status.get', ['available'], single = True)
try:
db = get_session()
found_releases = []
for rel in search_results:
rel_identifier = md5(rel['url'])
found_releases.append(rel_identifier)
rls = db.query(Relea).filter_by(identifier = rel_identifier).first()
if not rls:
rls = Relea(
identifier = rel_identifier,
movie_id = media.get('id'),
#media_id = media.get('id'),
quality_id = quality_type.get('quality_id'),
status_id = available_status.get('id')
)
db.add(rls)
else:
[db.delete(old_info) for old_info in rls.info]
rls.last_edit = int(time.time())
db.commit()
for info in rel:
try:
if not isinstance(rel[info], (str, unicode, int, long, float)):
continue
rls_info = ReleaseInfo(
identifier = info,
value = toUnicode(rel[info])
)
rls.info.append(rls_info)
except InterfaceError:
log.debug('Couldn\'t add %s to ReleaseInfo: %s', (info, traceback.format_exc()))
db.commit()
rel['status_id'] = rls.status_id
return found_releases
except:
log.error('Failed: %s', traceback.format_exc())
db.rollback()
finally:
db.close()
return []
def forMovie(self, id = None):
db = get_session()
releases_raw = db.query(Relea) \
.options(joinedload_all('info')) \
.options(joinedload_all('files')) \
.filter(Relea.movie_id == id) \
.all()
releases = [r.to_dict({'info': {}, 'files': {}}) for r in releases_raw]
releases = sorted(releases, key = lambda k: k['info'].get('score', 0), reverse = True)
return releases
def forMovieView(self, id = None, **kwargs):
releases = self.forMovie(id)
return {
'releases': releases,
'success': True
}
def updateStatus(self, id, status = None):
if not status: return False
try:
db = get_session()
rel = db.query(Relea).filter_by(id = id).first()
if rel and status and rel.status_id != status.get('id'):
item = {}
for info in rel.info:
item[info.identifier] = info.value
release_name = None
if rel.files:
for file_item in rel.files:
if file_item.type.identifier == 'movie':
release_name = os.path.basename(file_item.path)
break
else:
release_name = item['name']
#update status in Db
log.debug('Marking release %s as %s', (release_name, status.get("label")))
rel.status_id = status.get('id')
rel.last_edit = int(time.time())
db.commit()
#Update all movie info as there is no release update function
fireEvent('notify.frontend', type = 'release.update_status', data = rel.to_dict())
return True
except:
log.error('Failed: %s', traceback.format_exc())
db.rollback()
finally:
db.close()
return False
|
mewtaylor/django
|
refs/heads/master
|
tests/m2m_intermediary/__init__.py
|
12133432
| |
andy8788/hadoop-hdfs
|
refs/heads/master
|
build/contrib/hod/hodlib/GridServices/mapred.py
|
182
|
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
"""define MapReduce as subclass of Service"""
# -*- python -*-
import os, copy, time
from service import *
from hodlib.Hod.nodePool import *
from hodlib.Common.desc import CommandDesc
from hodlib.Common.util import get_exception_string, parseEquals
class MapReduceExternal(MasterSlave):
"""dummy proxy to external MapReduce instance"""
def __init__(self, serviceDesc, workDirs, version):
MasterSlave.__init__(self, serviceDesc, workDirs,None)
self.launchedMaster = True
self.masterInitialized = True
self.version = version
def getMasterRequest(self):
return None
def getMasterCommands(self, serviceDict):
return []
def getAdminCommands(self, serviceDict):
return []
def getWorkerCommands(self, serviceDict):
return []
def getMasterAddrs(self):
attrs = self.serviceDesc.getfinalAttrs()
addr = attrs['mapred.job.tracker']
return [addr]
def needsMore(self):
return 0
def needsLess(self):
return 0
def setMasterParams(self, dict):
self.serviceDesc['final-attrs']['mapred.job.tracker'] = "%s:%s" % (dict['host'],
dict['tracker_port'])
if self.version < 16:
self.serviceDesc.dict['final-attrs']['mapred.job.tracker.info.port'] = \
str(self.serviceDesc.dict['info_port'])
else:
# After Hadoop-2185
self.serviceDesc['final-attrs']['mapred.job.tracker.http.address'] = \
"%s:%s" %(dict['host'], dict['info_port'])
def getInfoAddrs(self):
attrs = self.serviceDesc.getfinalAttrs()
if self.version < 16:
addr = attrs['mapred.job.tracker']
k,v = addr.split( ":")
infoaddr = k + ':' + attrs['mapred.job.tracker.info.port']
else:
# After Hadoop-2185
# Note: earlier, we never respected mapred.job.tracker.http.address
infoaddr = attrs['mapred.job.tracker.http.address']
return [infoaddr]
class MapReduce(MasterSlave):
def __init__(self, serviceDesc, workDirs,required_node, version,
workers_per_ring = 1):
MasterSlave.__init__(self, serviceDesc, workDirs,required_node)
self.masterNode = None
self.masterAddr = None
self.infoAddr = None
self.workers = []
self.required_node = required_node
self.version = version
self.workers_per_ring = workers_per_ring
def isLaunchable(self, serviceDict):
hdfs = serviceDict['hdfs']
if (hdfs.isMasterInitialized()):
return True
return False
def getMasterRequest(self):
req = NodeRequest(1, [], False)
return req
def getMasterCommands(self, serviceDict):
hdfs = serviceDict['hdfs']
cmdDesc = self._getJobTrackerCommand(hdfs)
return [cmdDesc]
def getAdminCommands(self, serviceDict):
return []
def getWorkerCommands(self, serviceDict):
hdfs = serviceDict['hdfs']
workerCmds = []
for id in range(1, self.workers_per_ring + 1):
workerCmds.append(self._getTaskTrackerCommand(str(id), hdfs))
return workerCmds
def setMasterNodes(self, list):
node = list[0]
self.masterNode = node
def getMasterAddrs(self):
return [self.masterAddr]
def getInfoAddrs(self):
return [self.infoAddr]
def getWorkers(self):
return self.workers
def requiredNode(self):
return self.required_node
def setMasterParams(self, list):
dict = self._parseEquals(list)
self.masterAddr = dict['mapred.job.tracker']
k,v = self.masterAddr.split(":")
self.masterNode = k
if self.version < 16:
self.infoAddr = self.masterNode + ':' + dict['mapred.job.tracker.info.port']
else:
# After Hadoop-2185
self.infoAddr = dict['mapred.job.tracker.http.address']
def _parseEquals(self, list):
return parseEquals(list)
def _setWorkDirs(self, workDirs, envs, attrs, parentDirs, subDir):
local = []
system = None
temp = None
hadooptmpdir = None
dfsclient = []
for p in parentDirs:
workDirs.append(p)
workDirs.append(os.path.join(p, subDir))
dir = os.path.join(p, subDir, 'mapred-local')
local.append(dir)
if not system:
system = os.path.join(p, subDir, 'mapred-system')
if not temp:
temp = os.path.join(p, subDir, 'mapred-temp')
if not hadooptmpdir:
# Not used currently, generating hadooptmpdir just in case
hadooptmpdir = os.path.join(p, subDir, 'hadoop-tmp')
dfsclientdir = os.path.join(p, subDir, 'dfs-client')
dfsclient.append(dfsclientdir)
workDirs.append(dfsclientdir)
# FIXME!! use csv
attrs['mapred.local.dir'] = ','.join(local)
attrs['mapred.system.dir'] = 'fillindir'
attrs['mapred.temp.dir'] = temp
attrs['hadoop.tmp.dir'] = hadooptmpdir
envs['HADOOP_ROOT_LOGGER'] = "INFO,DRFA"
def _getJobTrackerCommand(self, hdfs):
sd = self.serviceDesc
parentDirs = self.workDirs
workDirs = []
attrs = sd.getfinalAttrs().copy()
envs = sd.getEnvs().copy()
if 'mapred.job.tracker' not in attrs:
attrs['mapred.job.tracker'] = 'fillinhostport'
if self.version < 16:
if 'mapred.job.tracker.info.port' not in attrs:
attrs['mapred.job.tracker.info.port'] = 'fillinport'
else:
# Addressing Hadoop-2185,
if 'mapred.job.tracker.http.address' not in attrs:
attrs['mapred.job.tracker.http.address'] = 'fillinhostport'
attrs['fs.default.name'] = hdfs.getMasterAddrs()[0]
self._setWorkDirs(workDirs, envs, attrs, parentDirs, 'mapred-jt')
dict = { 'name' : 'jobtracker' }
dict['version'] = self.version
dict['program'] = os.path.join('bin', 'hadoop')
dict['argv'] = ['jobtracker']
dict['envs'] = envs
dict['pkgdirs'] = sd.getPkgDirs()
dict['workdirs'] = workDirs
dict['final-attrs'] = attrs
dict['attrs'] = sd.getAttrs()
cmd = CommandDesc(dict)
return cmd
def _getTaskTrackerCommand(self, id, hdfs):
sd = self.serviceDesc
parentDirs = self.workDirs
workDirs = []
attrs = sd.getfinalAttrs().copy()
envs = sd.getEnvs().copy()
jt = self.masterAddr
if jt == None:
raise ValueError, "Can't get job tracker address"
attrs['mapred.job.tracker'] = jt
attrs['fs.default.name'] = hdfs.getMasterAddrs()[0]
if self.version < 16:
if 'tasktracker.http.port' not in attrs:
attrs['tasktracker.http.port'] = 'fillinport'
# earlier to 16, tasktrackers always took ephemeral port 0 for
# tasktracker.report.bindAddress
else:
# Adding the following. Hadoop-2185
if 'mapred.task.tracker.report.address' not in attrs:
attrs['mapred.task.tracker.report.address'] = 'fillinhostport'
if 'mapred.task.tracker.http.address' not in attrs:
attrs['mapred.task.tracker.http.address'] = 'fillinhostport'
# unique parentDirs in case of multiple tasktrackers per hodring
pd = []
for dir in parentDirs:
dir = dir + "-" + id
pd.append(dir)
parentDirs = pd
# end of unique workdirs
self._setWorkDirs(workDirs, envs, attrs, parentDirs, 'mapred-tt')
dict = { 'name' : 'tasktracker' }
dict['program'] = os.path.join('bin', 'hadoop')
dict['argv'] = ['tasktracker']
dict['envs'] = envs
dict['pkgdirs'] = sd.getPkgDirs()
dict['workdirs'] = workDirs
dict['final-attrs'] = attrs
dict['attrs'] = sd.getAttrs()
cmd = CommandDesc(dict)
return cmd
|
TathagataChakraborti/resource-conflicts
|
refs/heads/master
|
PLANROB-2015/seq-sat-lama/Python-2.5.2/Demo/classes/Complex.py
|
43
|
# Complex numbers
# ---------------
# [Now that Python has a complex data type built-in, this is not very
# useful, but it's still a nice example class]
# This module represents complex numbers as instances of the class Complex.
# A Complex instance z has two data attributes, z.re (the real part) and z.im
# (the imaginary part). In fact, z.re and z.im can have any value -- all
# arithmetic operators work regardless of the type of z.re and z.im (as long
# as they support numerical operations).
#
# The following functions exist (Complex is actually a class):
# Complex([re [,im]]) -> creates a complex number from a real and an imaginary part
# IsComplex(z) -> true iff z is a complex number (== has .re and .im attributes)
# ToComplex(z) -> a complex number equal to z; z itself if IsComplex(z) is true
# if z is a tuple(re, im) it will also be converted
# PolarToComplex([r [,phi [,fullcircle]]]) ->
# the complex number z for which r == z.radius() and phi == z.angle(fullcircle)
# (r and phi default to 0)
# exp(z) -> returns the complex exponential of z. Equivalent to pow(math.e,z).
#
# Complex numbers have the following methods:
# z.abs() -> absolute value of z
# z.radius() == z.abs()
# z.angle([fullcircle]) -> angle from positive X axis; fullcircle gives units
# z.phi([fullcircle]) == z.angle(fullcircle)
#
# These standard functions and unary operators accept complex arguments:
# abs(z)
# -z
# +z
# not z
# repr(z) == `z`
# str(z)
# hash(z) -> a combination of hash(z.re) and hash(z.im) such that if z.im is zero
# the result equals hash(z.re)
# Note that hex(z) and oct(z) are not defined.
#
# These conversions accept complex arguments only if their imaginary part is zero:
# int(z)
# long(z)
# float(z)
#
# The following operators accept two complex numbers, or one complex number
# and one real number (int, long or float):
# z1 + z2
# z1 - z2
# z1 * z2
# z1 / z2
# pow(z1, z2)
# cmp(z1, z2)
# Note that z1 % z2 and divmod(z1, z2) are not defined,
# nor are shift and mask operations.
#
# The standard module math does not support complex numbers.
# The cmath modules should be used instead.
#
# Idea:
# add a class Polar(r, phi) and mixed-mode arithmetic which
# chooses the most appropriate type for the result:
# Complex for +,-,cmp
# Polar for *,/,pow
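# A quick illustrative session (not part of the original demo; the values
# shown are what the definitions below produce):
#   >>> Complex(1, 2) + Complex(3, -1)
#   Complex(4, 1)
#   >>> PolarToComplex(1, 90, 360).im   # r=1, phi=90 degrees of a 360 circle
#   1.0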
import math
import sys
twopi = math.pi*2.0
halfpi = math.pi/2.0
def IsComplex(obj):
return hasattr(obj, 're') and hasattr(obj, 'im')
def ToComplex(obj):
if IsComplex(obj):
return obj
elif isinstance(obj, tuple):
return Complex(*obj)
else:
return Complex(obj)
def PolarToComplex(r = 0, phi = 0, fullcircle = twopi):
phi = phi * (twopi / fullcircle)
return Complex(math.cos(phi)*r, math.sin(phi)*r)
def Re(obj):
if IsComplex(obj):
return obj.re
return obj
def Im(obj):
if IsComplex(obj):
return obj.im
return 0
class Complex:
def __init__(self, re=0, im=0):
_re = 0
_im = 0
if IsComplex(re):
_re = re.re
_im = re.im
else:
_re = re
if IsComplex(im):
_re = _re - im.im
_im = _im + im.re
else:
_im = _im + im
# this class is immutable, so setting self.re directly is
# not possible.
self.__dict__['re'] = _re
self.__dict__['im'] = _im
def __setattr__(self, name, value):
raise TypeError, 'Complex numbers are immutable'
def __hash__(self):
if not self.im:
return hash(self.re)
return hash((self.re, self.im))
def __repr__(self):
if not self.im:
return 'Complex(%r)' % (self.re,)
else:
return 'Complex(%r, %r)' % (self.re, self.im)
def __str__(self):
if not self.im:
return repr(self.re)
else:
return 'Complex(%r, %r)' % (self.re, self.im)
def __neg__(self):
return Complex(-self.re, -self.im)
def __pos__(self):
return self
def __abs__(self):
return math.hypot(self.re, self.im)
def __int__(self):
if self.im:
raise ValueError, "can't convert Complex with nonzero im to int"
return int(self.re)
def __long__(self):
if self.im:
raise ValueError, "can't convert Complex with nonzero im to long"
return long(self.re)
def __float__(self):
if self.im:
raise ValueError, "can't convert Complex with nonzero im to float"
return float(self.re)
def __cmp__(self, other):
other = ToComplex(other)
return cmp((self.re, self.im), (other.re, other.im))
def __rcmp__(self, other):
other = ToComplex(other)
return cmp(other, self)
def __nonzero__(self):
return not (self.re == self.im == 0)
abs = radius = __abs__
def angle(self, fullcircle = twopi):
return (fullcircle/twopi) * ((halfpi - math.atan2(self.re, self.im)) % twopi)
phi = angle
def __add__(self, other):
other = ToComplex(other)
return Complex(self.re + other.re, self.im + other.im)
__radd__ = __add__
def __sub__(self, other):
other = ToComplex(other)
return Complex(self.re - other.re, self.im - other.im)
def __rsub__(self, other):
other = ToComplex(other)
return other - self
def __mul__(self, other):
other = ToComplex(other)
return Complex(self.re*other.re - self.im*other.im,
self.re*other.im + self.im*other.re)
__rmul__ = __mul__
def __div__(self, other):
other = ToComplex(other)
d = float(other.re*other.re + other.im*other.im)
if not d: raise ZeroDivisionError, 'Complex division'
return Complex((self.re*other.re + self.im*other.im) / d,
(self.im*other.re - self.re*other.im) / d)
def __rdiv__(self, other):
other = ToComplex(other)
return other / self
def __pow__(self, n, z=None):
if z is not None:
raise TypeError, 'Complex does not support ternary pow()'
if IsComplex(n):
if n.im:
if self.im: raise TypeError, 'Complex to the Complex power'
else: return exp(math.log(self.re)*n)
n = n.re
r = pow(self.abs(), n)
phi = n*self.angle()
return Complex(math.cos(phi)*r, math.sin(phi)*r)
def __rpow__(self, base):
base = ToComplex(base)
return pow(base, self)
def exp(z):
r = math.exp(z.re)
return Complex(math.cos(z.im)*r,math.sin(z.im)*r)
def checkop(expr, a, b, value, fuzz = 1e-6):
print ' ', a, 'and', b,
try:
result = eval(expr)
except:
result = sys.exc_type
print '->', result
if isinstance(result, str) or isinstance(value, str):
ok = (result == value)
else:
ok = abs(result - value) <= fuzz
if not ok:
print '!!\t!!\t!! should be', value, 'diff', abs(result - value)
def test():
print 'test constructors'
constructor_test = (
# "expect" is an array [re,im] "got" the Complex.
( (0,0), Complex() ),
( (0,0), Complex(0) ),
( (1,0), Complex(1) ),
( (0,1), Complex(0,1) ),
( (1,2), Complex(Complex(1,2)) ),
( (1,3), Complex(Complex(1,2),1) ),
( (0,0), Complex(0,Complex(0,0)) ),
( (3,4), Complex(3,Complex(4)) ),
( (-1,3), Complex(1,Complex(3,2)) ),
( (-7,6), Complex(Complex(1,2),Complex(4,8)) ) )
cnt = [0,0]
for t in constructor_test:
cnt[0] += 1
if ((t[0][0]!=t[1].re)or(t[0][1]!=t[1].im)):
print " expected", t[0], "got", t[1]
cnt[1] += 1
print " ", cnt[1], "of", cnt[0], "tests failed"
# test operators
testsuite = {
'a+b': [
(1, 10, 11),
(1, Complex(0,10), Complex(1,10)),
(Complex(0,10), 1, Complex(1,10)),
(Complex(0,10), Complex(1), Complex(1,10)),
(Complex(1), Complex(0,10), Complex(1,10)),
],
'a-b': [
(1, 10, -9),
(1, Complex(0,10), Complex(1,-10)),
(Complex(0,10), 1, Complex(-1,10)),
(Complex(0,10), Complex(1), Complex(-1,10)),
(Complex(1), Complex(0,10), Complex(1,-10)),
],
'a*b': [
(1, 10, 10),
(1, Complex(0,10), Complex(0, 10)),
(Complex(0,10), 1, Complex(0,10)),
(Complex(0,10), Complex(1), Complex(0,10)),
(Complex(1), Complex(0,10), Complex(0,10)),
],
'a/b': [
(1., 10, 0.1),
(1, Complex(0,10), Complex(0, -0.1)),
(Complex(0, 10), 1, Complex(0, 10)),
(Complex(0, 10), Complex(1), Complex(0, 10)),
(Complex(1), Complex(0,10), Complex(0, -0.1)),
],
'pow(a,b)': [
(1, 10, 1),
(1, Complex(0,10), 1),
(Complex(0,10), 1, Complex(0,10)),
(Complex(0,10), Complex(1), Complex(0,10)),
(Complex(1), Complex(0,10), 1),
(2, Complex(4,0), 16),
],
'cmp(a,b)': [
(1, 10, -1),
(1, Complex(0,10), 1),
(Complex(0,10), 1, -1),
(Complex(0,10), Complex(1), -1),
(Complex(1), Complex(0,10), 1),
],
}
for expr in sorted(testsuite):
print expr + ':'
t = (expr,)
for item in testsuite[expr]:
checkop(*(t+item))
if __name__ == '__main__':
test()
|
nikolas/edx-platform
|
refs/heads/master
|
lms/djangoapps/licenses/models.py
|
150
|
import logging
from django.db import models, transaction
from student.models import User
from xmodule_django.models import CourseKeyField
log = logging.getLogger("edx.licenses")
class CourseSoftware(models.Model):
name = models.CharField(max_length=255)
full_name = models.CharField(max_length=255)
url = models.CharField(max_length=255)
course_id = CourseKeyField(max_length=255)
def __unicode__(self):
return u'{0} for {1}'.format(self.name, self.course_id)
class UserLicense(models.Model):
software = models.ForeignKey(CourseSoftware, db_index=True)
user = models.ForeignKey(User, null=True)
serial = models.CharField(max_length=255)
def get_courses_licenses(user, courses):
course_ids = set(course.id for course in courses)
all_software = CourseSoftware.objects.filter(course_id__in=course_ids)
assigned_licenses = UserLicense.objects.filter(software__in=all_software,
user=user)
licenses = dict.fromkeys(all_software, None)
for license in assigned_licenses:
licenses[license.software] = license
log.info(assigned_licenses)
log.info(licenses)
return licenses
def get_license(user, software):
try:
# TODO: temporary fix for when somehow a user got more than one license.
# The proper fix should use Meta.unique_together in the UserLicense model.
licenses = UserLicense.objects.filter(user=user, software=software)
license = licenses[0] if licenses else None
except UserLicense.DoesNotExist:
license = None
return license
def get_or_create_license(user, software):
license = get_license(user, software)
if license is None:
license = _create_license(user, software)
return license
def _create_license(user, software):
license = None
try:
# find one license that has not been assigned, locking the
# table/rows with select_for_update to prevent race conditions
with transaction.commit_on_success():
selected = UserLicense.objects.select_for_update()
license = selected.filter(user__isnull=True, software=software)[0]
license.user = user
license.save()
except IndexError:
# there are no free licenses
log.error('No serial numbers available for %s', software)
license = None
# TODO [rocha]look if someone has unenrolled from the class
# and already has a serial number
return license
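# Usage sketch (illustrative; ``request`` and ``software`` are assumed to come
# from the calling view):
#
#   license = get_or_create_license(request.user, software)
#   if license is None:
#       ...  # no free serial numbers left; surface an error to the user
#   else:
#       serial = license.serial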
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
refs/heads/master
|
python-packages/mne-python-0.10/mne/stats/regression.py
|
3
|
# Authors: Tal Linzen <linzen@nyu.edu>
# Teon Brooks <teon.brooks@gmail.com>
# Denis A. Engemann <denis.engemann@gmail.com>
# Jona Sassenhagen <jona.sassenhagen@gmail.com>
# Marijn van Vliet <w.m.vanvliet@gmail.com>
#
# License: BSD (3-clause)
from collections import namedtuple
from inspect import isgenerator
import warnings
from ..externals.six import string_types
import numpy as np
from scipy import linalg, sparse
from ..source_estimate import SourceEstimate
from ..epochs import _BaseEpochs
from ..evoked import Evoked, EvokedArray
from ..utils import logger, _reject_data_segments, _get_fast_dot
from ..io.pick import pick_types, pick_info
from ..fixes import in1d
def linear_regression(inst, design_matrix, names=None):
"""Fit Ordinary Least Squares regression (OLS)
Parameters
----------
inst : instance of Epochs | iterable of SourceEstimate
The data to be regressed. Contains all the trials, sensors, and time
points for the regression. For Source Estimates, accepts either a list
or a generator object.
design_matrix : ndarray, shape (n_observations, n_regressors)
The regressors to be used. Must be a 2d array with as many rows as
the first dimension of `data`. The first column of this matrix will
typically consist of ones (intercept column).
names : list-like | None
Optional parameter to name the regressors. If provided, the length must
correspond to the number of columns present in regressors
(including the intercept, if present).
Otherwise the default names are x0, x1, x2...xn for n regressors.
Returns
-------
results : dict of namedtuple
For each regressor (key) a namedtuple is provided with the
following attributes:
beta : regression coefficients
stderr : standard error of regression coefficients
t_val : t statistics (beta / stderr)
p_val : two-sided p-value of t statistic under the t distribution
mlog10_p_val : -log10 transformed p-value.
The tuple members are numpy arrays. The shape of each numpy array is
the shape of the data minus the first dimension; e.g., if the shape of
the original data was (n_observations, n_channels, n_timepoints),
then the shape of each of the arrays will be
(n_channels, n_timepoints).
"""
if names is None:
names = ['x%i' % i for i in range(design_matrix.shape[1])]
if isinstance(inst, _BaseEpochs):
picks = pick_types(inst.info, meg=True, eeg=True, ref_meg=True,
stim=False, eog=False, ecg=False,
emg=False, exclude=['bads'])
if [inst.ch_names[p] for p in picks] != inst.ch_names:
warnings.warn('Fitting linear model to non-data or bad '
'channels. Check picking', UserWarning)
msg = 'Fitting linear model to epochs'
data = inst.get_data()
out = EvokedArray(np.zeros(data.shape[1:]), inst.info, inst.tmin)
elif isgenerator(inst):
msg = 'Fitting linear model to source estimates (generator input)'
out = next(inst)
data = np.array([out.data] + [i.data for i in inst])
elif isinstance(inst, list) and isinstance(inst[0], SourceEstimate):
msg = 'Fitting linear model to source estimates (list input)'
out = inst[0]
data = np.array([i.data for i in inst])
else:
raise ValueError('Input must be epochs or iterable of source '
'estimates')
logger.info(msg + ', (%s targets, %s regressors)' %
(np.product(data.shape[1:]), len(names)))
lm_params = _fit_lm(data, design_matrix, names)
lm = namedtuple('lm', 'beta stderr t_val p_val mlog10_p_val')
lm_fits = {}
for name in names:
parameters = [p[name] for p in lm_params]
for ii, value in enumerate(parameters):
out_ = out.copy()
if isinstance(out_, SourceEstimate):
out_._data[:] = value
elif isinstance(out_, Evoked):
out_.data[:] = value
else:
raise RuntimeError('Invalid container.')
parameters[ii] = out_
lm_fits[name] = lm(*parameters)
logger.info('Done')
return lm_fits
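# Usage sketch (illustrative; ``epochs`` is assumed to be an existing
# mne.Epochs instance and the design matrix below is hypothetical):
#
#   intercept = np.ones((len(epochs.events), 1))
#   design = np.hstack([intercept, epochs.events[:, 2:3].astype(float)])
#   lm = linear_regression(epochs, design, names=['intercept', 'trigger'])
#   lm['trigger'].beta   # an Evoked object; likewise .t_val, .p_val, etc.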
def _fit_lm(data, design_matrix, names):
"""Aux function"""
from scipy import stats
n_samples = len(data)
n_features = np.product(data.shape[1:])
if design_matrix.ndim != 2:
raise ValueError('Design matrix must be a 2d array')
n_rows, n_predictors = design_matrix.shape
if n_samples != n_rows:
raise ValueError('Number of rows in design matrix must be equal '
'to number of observations')
if n_predictors != len(names):
raise ValueError('Number of regressor names must be equal to '
'number of columns in design matrix')
y = np.reshape(data, (n_samples, n_features))
betas, resid_sum_squares, _, _ = linalg.lstsq(a=design_matrix, b=y)
df = n_rows - n_predictors
sqrt_noise_var = np.sqrt(resid_sum_squares / df).reshape(data.shape[1:])
design_invcov = linalg.inv(np.dot(design_matrix.T, design_matrix))
unscaled_stderrs = np.sqrt(np.diag(design_invcov))
beta, stderr, t_val, p_val, mlog10_p_val = (dict() for _ in range(5))
for x, unscaled_stderr, predictor in zip(betas, unscaled_stderrs, names):
beta[predictor] = x.reshape(data.shape[1:])
stderr[predictor] = sqrt_noise_var * unscaled_stderr
t_val[predictor] = beta[predictor] / stderr[predictor]
cdf = stats.t.cdf(np.abs(t_val[predictor]), df)
p_val[predictor] = (1. - cdf) * 2.
mlog10_p_val[predictor] = -np.log10(p_val[predictor])
return beta, stderr, t_val, p_val, mlog10_p_val
def linear_regression_raw(raw, events, event_id=None, tmin=-.1, tmax=1,
covariates=None, reject=None, flat=None, tstep=1.,
decim=1, picks=None, solver='pinv'):
"""Estimate regression-based evoked potentials/fields by linear modelling
This models the full M/EEG time course, including correction for
overlapping potentials and allowing for continuous/scalar predictors.
Internally, this constructs a predictor matrix X of size
n_samples * (n_conds * window length), solving the linear system
``Y = bX`` and returning ``b`` as evoked-like time series split by
condition. See [1]_.
Parameters
----------
raw : instance of Raw
A raw object. Note: be very careful about data that is not
downsampled, as the resulting matrices can be enormous and easily
overload your computer. Typically, 100 Hz sampling rate is
appropriate - or using the decim keyword (see below).
events : ndarray of int, shape (n_events, 3)
An array where the first column corresponds to samples in raw
and the last to integer codes in event_id.
event_id : dict
As in Epochs; a dictionary where the values may be integers or
iterables of integers, corresponding to the 3rd column of
events, and the keys are condition names.
tmin : float | dict
If float, gives the lower limit (in seconds) for the time window for
which all event types' effects are estimated. If a dict, can be used to
specify time windows for specific event types: keys correspond to keys
in event_id and/or covariates; for missing values, the default (-.1) is
used.
tmax : float | dict
If float, gives the upper limit (in seconds) for the time window for
which all event types' effects are estimated. If a dict, can be used to
specify time windows for specific event types: keys correspond to keys
in event_id and/or covariates; for missing values, the default (1.) is
used.
covariates : dict-like | None
If dict-like (e.g., a pandas DataFrame), values have to be array-like
and of the same length as the columns in ```events```. Keys correspond
to additional event types/conditions to be estimated and are matched
with the time points given by the first column of ```events```. If
None, only binary events (from event_id) are used.
reject : None | dict
For cleaning raw data before the regression is performed: set up
rejection parameters based on peak-to-peak amplitude in continuously
selected subepochs. If None, no rejection is done.
If dict, keys are types ('grad' | 'mag' | 'eeg' | 'eog' | 'ecg')
and values are the maximal peak-to-peak values to select rejected
epochs, e.g.::
reject = dict(grad=4000e-12, # T / m (gradiometers)
mag=4e-11, # T (magnetometers)
eeg=40e-5, # uV (EEG channels)
eog=250e-5 # uV (EOG channels))
flat : None | dict
For cleaning raw data before the regression is performed: set up
rejection parameters based on flatness of the signal. If None, no
rejection is done. If a dict, keys are ('grad' | 'mag' |
'eeg' | 'eog' | 'ecg') and values are minimal peak-to-peak values to
select rejected epochs.
tstep : float
Length of windows for peak-to-peak detection for raw data cleaning.
decim : int
Decimate by choosing only a subsample of data points. Highly
recommended for data recorded at high sampling frequencies, as
otherwise huge intermediate matrices have to be created and inverted.
picks : None | list
List of indices of channels to be included. If None, defaults to all
MEG and EEG channels.
solver : str | function
Either a function which takes as its inputs the sparse predictor
matrix X and the observation matrix Y, and returns the coefficient
matrix b; or a string (for now, only 'pinv'), in which case the
solver used is dot(scipy.linalg.pinv(dot(X.T, X)), dot(X.T, Y.T)).T.
Returns
-------
evokeds : dict
A dict where the keys correspond to conditions and the values are
Evoked objects with the ER[F/P]s. These can be used exactly like any
other Evoked object, including e.g. plotting or statistics.
References
----------
.. [1] Smith, N. J., & Kutas, M. (2015). Regression-based estimation of ERP
waveforms: II. Non-linear effects, overlap correction, and practical
considerations. Psychophysiology, 52(2), 169-189.
"""
if isinstance(solver, string_types):
if solver == 'pinv':
fast_dot = _get_fast_dot()
# inv is slightly (~10%) faster, but pinv seemingly more stable
def solver(X, Y):
return fast_dot(linalg.pinv(X.T.dot(X).todense()),
X.T.dot(Y.T)).T
else:
raise ValueError("No such solver: {0}".format(solver))
# prepare raw and events
if picks is None:
picks = pick_types(raw.info, meg=True, eeg=True, ref_meg=True)
info = pick_info(raw.info, picks, copy=True)
decim = int(decim)
info["sfreq"] /= decim
data, times = raw[:]
data = data[picks, ::decim]
times = times[::decim]
events = events.copy()
events[:, 0] -= raw.first_samp
events[:, 0] //= decim
conds = list(event_id)
if covariates is not None:
conds += list(covariates)
# time windows (per event type) are converted to sample points from times
if isinstance(tmin, (float, int)):
tmin_s = dict((cond, int(tmin * info["sfreq"])) for cond in conds)
else:
tmin_s = dict((cond, int(tmin.get(cond, -.1) * info["sfreq"]))
for cond in conds)
if isinstance(tmax, (float, int)):
tmax_s = dict(
(cond, int((tmax * info["sfreq"]) + 1.)) for cond in conds)
else:
tmax_s = dict((cond, int((tmax.get(cond, 1.) * info["sfreq"]) + 1))
for cond in conds)
# Construct predictor matrix
# We do this by creating one array per event type, shape (lags, samples)
# (where lags depends on tmin/tmax and can be different for different
# event types). Columns correspond to predictors, predictors correspond to
# time lags. Thus, each array is mostly sparse, with one diagonal of 1s
# per event (for binary predictors).
cond_length = dict()
xs = []
for cond in conds:
tmin_, tmax_ = tmin_s[cond], tmax_s[cond]
n_lags = int(tmax_ - tmin_) # width of matrix
if cond in event_id: # for binary predictors
ids = ([event_id[cond]]
if isinstance(event_id[cond], int)
else event_id[cond])
onsets = -(events[in1d(events[:, 2], ids), 0] + tmin_)
values = np.ones((len(onsets), n_lags))
else: # for predictors from covariates, e.g. continuous ones
covs = covariates[cond]
if len(covs) != len(events):
error = ("Condition {0} from ```covariates``` is "
"not the same length as ```events```").format(cond)
raise ValueError(error)
onsets = -(events[np.where(covs != 0), 0] + tmin_)[0]
v = np.asarray(covs)[np.nonzero(covs)].astype(float)
values = np.ones((len(onsets), n_lags)) * v[:, np.newaxis]
cond_length[cond] = len(onsets)
xs.append(sparse.dia_matrix((values, onsets),
shape=(data.shape[1], n_lags)))
X = sparse.hstack(xs)
# find only those positions where at least one predictor isn't 0
has_val = np.unique(X.nonzero()[0])
# additionally, reject positions based on extreme steps in the data
if reject is not None:
_, inds = _reject_data_segments(data, reject, flat, decim=None,
info=info, tstep=tstep)
for t0, t1 in inds:
has_val = np.setdiff1d(has_val, range(t0, t1))
# solve linear system
X, data = X.tocsr()[has_val], data[:, has_val]
coefs = solver(X, data)
# construct Evoked objects to be returned from output
evokeds = dict()
cum = 0
for cond in conds:
tmin_, tmax_ = tmin_s[cond], tmax_s[cond]
evokeds[cond] = EvokedArray(coefs[:, cum:cum + tmax_ - tmin_],
info=info, comment=cond,
tmin=tmin_ / float(info["sfreq"]),
nave=cond_length[cond],
kind='mean') # note that nave and kind are
cum += tmax_ - tmin_ # technically not correct
return evokeds
|
Pixomondo/rez
|
refs/heads/master_pxo
|
src/rez/vendor/distlib/version.py
|
132
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2016 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""
Implementation of a flexible versioning scheme providing support for PEP-440,
setuptools-compatible and semantic versioning.
"""
import logging
import re
from .compat import string_types
__all__ = ['NormalizedVersion', 'NormalizedMatcher',
'LegacyVersion', 'LegacyMatcher',
'SemanticVersion', 'SemanticMatcher',
'UnsupportedVersionError', 'get_scheme']
logger = logging.getLogger(__name__)
class UnsupportedVersionError(ValueError):
"""This is an unsupported version."""
pass
class Version(object):
def __init__(self, s):
self._string = s = s.strip()
self._parts = parts = self.parse(s)
assert isinstance(parts, tuple)
assert len(parts) > 0
def parse(self, s):
raise NotImplementedError('please implement in a subclass')
def _check_compatible(self, other):
if type(self) != type(other):
raise TypeError('cannot compare %r and %r' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
self._check_compatible(other)
return self._parts < other._parts
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__lt__(other) or self.__eq__(other)
def __ge__(self, other):
return self.__gt__(other) or self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self._parts)
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
@property
def is_prerelease(self):
raise NotImplementedError('Please implement in subclasses.')
class Matcher(object):
version_class = None
dist_re = re.compile(r"^(\w[\s\w'.-]*)(\((.*)\))?")
comp_re = re.compile(r'^(<=|>=|<|>|!=|={2,3}|~=)?\s*([^\s,]+)$')
num_re = re.compile(r'^\d+(\.\d+)*$')
# value is either a callable or the name of a method
_operators = {
'<': lambda v, c, p: v < c,
'>': lambda v, c, p: v > c,
'<=': lambda v, c, p: v == c or v < c,
'>=': lambda v, c, p: v == c or v > c,
'==': lambda v, c, p: v == c,
'===': lambda v, c, p: v == c,
# by default, compatible => >=.
'~=': lambda v, c, p: v == c or v > c,
'!=': lambda v, c, p: v != c,
}
def __init__(self, s):
if self.version_class is None:
raise ValueError('Please specify a version class')
self._string = s = s.strip()
m = self.dist_re.match(s)
if not m:
raise ValueError('Not valid: %r' % s)
groups = m.groups('')
self.name = groups[0].strip()
self.key = self.name.lower() # for case-insensitive comparisons
clist = []
if groups[2]:
constraints = [c.strip() for c in groups[2].split(',')]
for c in constraints:
m = self.comp_re.match(c)
if not m:
raise ValueError('Invalid %r in %r' % (c, s))
groups = m.groups()
op = groups[0] or '~='
s = groups[1]
if s.endswith('.*'):
if op not in ('==', '!='):
raise ValueError('\'.*\' not allowed for '
'%r constraints' % op)
# Could be a partial version (e.g. for '2.*') which
# won't parse as a version, so keep it as a string
vn, prefix = s[:-2], True
if not self.num_re.match(vn):
# Just to check that vn is a valid version
self.version_class(vn)
else:
# Should parse as a version, so we can create an
# instance for the comparison
vn, prefix = self.version_class(s), False
clist.append((op, vn, prefix))
self._parts = tuple(clist)
def match(self, version):
"""
Check if the provided version matches the constraints.
:param version: The version to match against this instance.
        :type version: String or :class:`Version` instance.
"""
if isinstance(version, string_types):
version = self.version_class(version)
for operator, constraint, prefix in self._parts:
f = self._operators.get(operator)
if isinstance(f, string_types):
f = getattr(self, f)
if not f:
msg = ('%r not implemented '
'for %s' % (operator, self.__class__.__name__))
raise NotImplementedError(msg)
if not f(version, constraint, prefix):
return False
return True
@property
def exact_version(self):
result = None
if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
result = self._parts[0][1]
return result
def _check_compatible(self, other):
if type(self) != type(other) or self.name != other.name:
raise TypeError('cannot compare %s and %s' % (self, other))
def __eq__(self, other):
self._check_compatible(other)
return self.key == other.key and self._parts == other._parts
def __ne__(self, other):
return not self.__eq__(other)
# See http://docs.python.org/reference/datamodel#object.__hash__
def __hash__(self):
return hash(self.key) + hash(self._parts)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self._string)
def __str__(self):
return self._string
PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
r'(\.(post)(\d+))?(\.(dev)(\d+))?'
r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
def _pep_440_key(s):
s = s.strip()
m = PEP440_VERSION_RE.match(s)
if not m:
raise UnsupportedVersionError('Not a valid version: %s' % s)
groups = m.groups()
nums = tuple(int(v) for v in groups[1].split('.'))
while len(nums) > 1 and nums[-1] == 0:
nums = nums[:-1]
if not groups[0]:
epoch = 0
else:
epoch = int(groups[0])
pre = groups[4:6]
post = groups[7:9]
dev = groups[10:12]
local = groups[13]
if pre == (None, None):
pre = ()
else:
pre = pre[0], int(pre[1])
if post == (None, None):
post = ()
else:
post = post[0], int(post[1])
if dev == (None, None):
dev = ()
else:
dev = dev[0], int(dev[1])
if local is None:
local = ()
else:
parts = []
for part in local.split('.'):
# to ensure that numeric compares as > lexicographic, avoid
# comparing them directly, but encode a tuple which ensures
# correct sorting
if part.isdigit():
part = (1, int(part))
else:
part = (0, part)
parts.append(part)
local = tuple(parts)
if not pre:
# either before pre-release, or final release and after
if not post and dev:
# before pre-release
pre = ('a', -1) # to sort before a0
else:
pre = ('z',) # to sort after all pre-releases
# now look at the state of post and dev.
if not post:
post = ('_',) # sort before 'a'
if not dev:
dev = ('final',)
#print('%s -> %s' % (s, m.groups()))
return epoch, nums, pre, post, dev, local
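# For illustration, the sort keys produced by the rules above look roughly like:
#   _pep_440_key('1.0')   -> (0, (1,), ('z',),   ('_',), ('final',), ())
#   _pep_440_key('1.0a1') -> (0, (1,), ('a', 1), ('_',), ('final',), ())
# so the a1 pre-release sorts before the final release because ('a', 1) < ('z',).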
_normalized_key = _pep_440_key
class NormalizedVersion(Version):
"""A rational version.
Good:
1.2 # equivalent to "1.2.0"
1.2.0
1.2a1
1.2.3a2
1.2.3b1
1.2.3c1
1.2.3.4
TODO: fill this out
Bad:
    1 # minimum two numbers
1.2a # release level must have a release serial
1.2.3b
"""
def parse(self, s):
result = _normalized_key(s)
# _normalized_key loses trailing zeroes in the release
# clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
# However, PEP 440 prefix matching needs it: for example,
# (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
m = PEP440_VERSION_RE.match(s) # must succeed
groups = m.groups()
self._release_clause = tuple(int(v) for v in groups[1].split('.'))
return result
PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
@property
def is_prerelease(self):
return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
def _match_prefix(x, y):
x = str(x)
y = str(y)
if x == y:
return True
if not x.startswith(y):
return False
n = len(y)
return x[n] == '.'
class NormalizedMatcher(Matcher):
version_class = NormalizedVersion
# value is either a callable or the name of a method
_operators = {
'~=': '_match_compatible',
'<': '_match_lt',
'>': '_match_gt',
'<=': '_match_le',
'>=': '_match_ge',
'==': '_match_eq',
'===': '_match_arbitrary',
'!=': '_match_ne',
}
def _adjust_local(self, version, constraint, prefix):
if prefix:
strip_local = '+' not in constraint and version._parts[-1]
else:
# both constraint and version are
# NormalizedVersion instances.
# If constraint does not have a local component,
# ensure the version doesn't, either.
strip_local = not constraint._parts[-1] and version._parts[-1]
if strip_local:
s = version._string.split('+', 1)[0]
version = self.version_class(s)
return version, constraint
def _match_lt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version >= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_gt(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version <= constraint:
return False
release_clause = constraint._release_clause
pfx = '.'.join([str(i) for i in release_clause])
return not _match_prefix(version, pfx)
def _match_le(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version <= constraint
def _match_ge(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
return version >= constraint
def _match_eq(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version == constraint)
else:
result = _match_prefix(version, constraint)
return result
def _match_arbitrary(self, version, constraint, prefix):
return str(version) == str(constraint)
def _match_ne(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if not prefix:
result = (version != constraint)
else:
result = not _match_prefix(version, constraint)
return result
def _match_compatible(self, version, constraint, prefix):
version, constraint = self._adjust_local(version, constraint, prefix)
if version == constraint:
return True
if version < constraint:
return False
# if not prefix:
# return True
release_clause = constraint._release_clause
if len(release_clause) > 1:
release_clause = release_clause[:-1]
pfx = '.'.join([str(i) for i in release_clause])
return _match_prefix(version, pfx)
_REPLACEMENTS = (
(re.compile('[.+-]$'), ''), # remove trailing puncts
(re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start
(re.compile('^[.-]'), ''), # remove leading puncts
(re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses
(re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
    (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading r(ev)
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
(re.compile(r'\b(pre-alpha|prealpha)\b'),
'pre.alpha'), # standardise
(re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
)
_SUFFIX_REPLACEMENTS = (
(re.compile('^[:~._+-]+'), ''), # remove leading puncts
(re.compile('[,*")([\]]'), ''), # remove unwanted chars
(re.compile('[~:+_ -]'), '.'), # replace illegal chars
(re.compile('[.]{2,}'), '.'), # multiple runs of '.'
(re.compile(r'\.$'), ''), # trailing '.'
)
_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
def _suggest_semantic_version(s):
"""
Try to suggest a semantic form for a version for which
_suggest_normalized_version couldn't come up with anything.
"""
result = s.strip().lower()
for pat, repl in _REPLACEMENTS:
result = pat.sub(repl, result)
if not result:
result = '0.0.0'
# Now look for numeric prefix, and separate it out from
# the rest.
#import pdb; pdb.set_trace()
m = _NUMERIC_PREFIX.match(result)
if not m:
prefix = '0.0.0'
suffix = result
else:
prefix = m.groups()[0].split('.')
prefix = [int(i) for i in prefix]
while len(prefix) < 3:
prefix.append(0)
if len(prefix) == 3:
suffix = result[m.end():]
else:
suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
prefix = prefix[:3]
prefix = '.'.join([str(i) for i in prefix])
suffix = suffix.strip()
if suffix:
#import pdb; pdb.set_trace()
# massage the suffix.
for pat, repl in _SUFFIX_REPLACEMENTS:
suffix = pat.sub(repl, suffix)
if not suffix:
result = prefix
else:
sep = '-' if 'dev' in suffix else '+'
result = prefix + sep + suffix
if not is_semver(result):
result = None
return result
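# For illustration: with the cleanup rules above, _suggest_semantic_version('1.0')
# pads the numeric prefix to three components and returns '1.0.0'; inputs that
# still fail is_semver() after cleanup yield None.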
def _suggest_normalized_version(s):
"""Suggest a normalized version close to the given version string.
If you have a version string that isn't rational (i.e. NormalizedVersion
doesn't like it) then you might be able to get an equivalent (or close)
rational version from this function.
This does a number of simple normalizations to the given string, based
on observation of versions currently in use on PyPI. Given a dump of
    those versions during PyCon 2009, 4287 of them:
- 2312 (53.93%) match NormalizedVersion without change
with the automatic suggestion
- 3474 (81.04%) match when using this suggestion method
@param s {str} An irrational version string.
@returns A rational version string, or None, if couldn't determine one.
"""
try:
_normalized_key(s)
return s # already rational
except UnsupportedVersionError:
pass
rs = s.lower()
# part of this could use maketrans
for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
('beta', 'b'), ('rc', 'c'), ('-final', ''),
('-pre', 'c'),
('-release', ''), ('.release', ''), ('-stable', ''),
('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
('final', '')):
rs = rs.replace(orig, repl)
# if something ends with dev or pre, we add a 0
rs = re.sub(r"pre$", r"pre0", rs)
rs = re.sub(r"dev$", r"dev0", rs)
# if we have something like "b-2" or "a.2" at the end of the
    # version, that is probably beta, alpha, etc
# let's remove the dash or dot
rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
# 1.0-dev-r371 -> 1.0.dev371
# 0.1-dev-r79 -> 0.1.dev79
rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
# Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
# Clean: v0.3, v1.0
if rs.startswith('v'):
rs = rs[1:]
# Clean leading '0's on numbers.
#TODO: unintended side-effect on, e.g., "2003.05.09"
# PyPI stats: 77 (~2%) better
rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
# Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
# zero.
# PyPI stats: 245 (7.56%) better
rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
# the 'dev-rNNN' tag is a dev tag
rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
# clean the - when used as a pre delimiter
rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
# a terminal "dev" or "devel" can be changed into ".dev0"
rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
# a terminal "dev" can be changed into ".dev0"
rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
# a terminal "final" or "stable" can be removed
rs = re.sub(r"(final|stable)$", "", rs)
# The 'r' and the '-' tags are post release tags
# 0.4a1.r10 -> 0.4a1.post10
# 0.9.33-17222 -> 0.9.33.post17222
# 0.9.33-r17222 -> 0.9.33.post17222
rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
# Clean 'r' instead of 'dev' usage:
# 0.9.33+r17222 -> 0.9.33.dev17222
# 1.0dev123 -> 1.0.dev123
# 1.0.git123 -> 1.0.dev123
# 1.0.bzr123 -> 1.0.dev123
# 0.1a0dev.123 -> 0.1a0.dev123
# PyPI stats: ~150 (~4%) better
rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
# Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
# 0.2.pre1 -> 0.2c1
# 0.2-c1 -> 0.2c1
# 1.0preview123 -> 1.0c123
# PyPI stats: ~21 (0.62%) better
rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
# Tcl/Tk uses "px" for their post release markers
rs = re.sub(r"p(\d+)$", r".post\1", rs)
try:
_normalized_key(rs)
except UnsupportedVersionError:
rs = None
return rs
#
# Legacy version processing (distribute-compatible)
#
_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
_VERSION_REPLACE = {
'pre': 'c',
'preview': 'c',
'-': 'final-',
'rc': 'c',
'dev': '@',
'': None,
'.': None,
}
def _legacy_key(s):
def get_parts(s):
result = []
for p in _VERSION_PART.split(s.lower()):
p = _VERSION_REPLACE.get(p, p)
if p:
if '0' <= p[:1] <= '9':
p = p.zfill(8)
else:
p = '*' + p
result.append(p)
result.append('*final')
return result
result = []
for p in get_parts(s):
if p.startswith('*'):
if p < '*final':
while result and result[-1] == '*final-':
result.pop()
while result and result[-1] == '00000000':
result.pop()
        result.append(p)
return tuple(result)
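# For illustration, with the padding and pop rules above:
#   _legacy_key('1.0')    -> ('00000001', '00000000', '*final')
#   _legacy_key('1.0rc1') -> ('00000001', '*c', '00000001', '*final')
# so '1.0rc1' sorts before '1.0' ('*c' < '00000000'), as a pre-release should.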
class LegacyVersion(Version):
def parse(self, s):
return _legacy_key(s)
@property
def is_prerelease(self):
result = False
for x in self._parts:
if (isinstance(x, string_types) and x.startswith('*') and
x < '*final'):
result = True
break
return result
class LegacyMatcher(Matcher):
version_class = LegacyVersion
_operators = dict(Matcher._operators)
_operators['~='] = '_match_compatible'
    numeric_re = re.compile(r'^(\d+(\.\d+)*)')
def _match_compatible(self, version, constraint, prefix):
if version < constraint:
return False
m = self.numeric_re.match(str(constraint))
if not m:
logger.warning('Cannot compute compatible match for version %s '
' and constraint %s', version, constraint)
return True
s = m.groups()[0]
if '.' in s:
s = s.rsplit('.', 1)[0]
return _match_prefix(version, s)
#
# Semantic versioning
#
_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
def is_semver(s):
return _SEMVER_RE.match(s)
def _semantic_key(s):
def make_tuple(s, absent):
if s is None:
result = (absent,)
else:
parts = s[1:].split('.')
# We can't compare ints and strings on Python 3, so fudge it
            # by zero-filling numeric values to simulate a numeric comparison
result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
return result
m = is_semver(s)
if not m:
raise UnsupportedVersionError(s)
groups = m.groups()
major, minor, patch = [int(i) for i in groups[:3]]
# choose the '|' and '*' so that versions sort correctly
pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
return (major, minor, patch), pre, build
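# For illustration: _semantic_key('1.2.3-alpha.1') -> ((1, 2, 3), ('alpha', '00000001'), ('*',))
# while _semantic_key('1.2.3') -> ((1, 2, 3), ('|',), ('*',)); since 'alpha' < '|',
# the pre-release sorts before the plain release, as intended.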
class SemanticVersion(Version):
def parse(self, s):
return _semantic_key(s)
@property
def is_prerelease(self):
return self._parts[1][0] != '|'
class SemanticMatcher(Matcher):
version_class = SemanticVersion
class VersionScheme(object):
def __init__(self, key, matcher, suggester=None):
self.key = key
self.matcher = matcher
self.suggester = suggester
def is_valid_version(self, s):
try:
self.matcher.version_class(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_matcher(self, s):
try:
self.matcher(s)
result = True
except UnsupportedVersionError:
result = False
return result
def is_valid_constraint_list(self, s):
"""
Used for processing some metadata fields
"""
return self.is_valid_matcher('dummy_name (%s)' % s)
def suggest(self, s):
if self.suggester is None:
result = None
else:
result = self.suggester(s)
return result
_SCHEMES = {
'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
_suggest_normalized_version),
    'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda s: s),
'semantic': VersionScheme(_semantic_key, SemanticMatcher,
_suggest_semantic_version),
}
_SCHEMES['default'] = _SCHEMES['normalized']
def get_scheme(name):
if name not in _SCHEMES:
raise ValueError('unknown scheme name: %r' % name)
return _SCHEMES[name]
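# A minimal usage sketch, appended for illustration only; it exercises the
# public names defined above with made-up package and version strings.
if __name__ == '__main__':  # pragma: no cover
    scheme = get_scheme('normalized')
    assert scheme.is_valid_version('1.0.post1')
    assert scheme.is_valid_matcher('mypkg (>= 1.0, < 2.0)')
    matcher = NormalizedMatcher('mypkg (>= 1.0, < 2.0)')
    assert matcher.match('1.4.2')
    assert not matcher.match(NormalizedVersion('2.0'))
    assert NormalizedVersion('1.0a1').is_prerelease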
|
mikhail-gorobets/chipsec
|
refs/heads/master
|
chipsec/hal/vmm.py
|
2
|
#!/usr/local/bin/python
#CHIPSEC: Platform Security Assessment Framework
#Copyright (c) 2010-2016, Intel Corporation
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; Version 2.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#Contact information:
#chipsec@intel.com
#
# -------------------------------------------------------------------------------
#
# CHIPSEC: Platform Hardware Security Assessment Framework
# (c) 2010-2016 Intel Corporation
#
# -------------------------------------------------------------------------------
"""
VMM specific functionality
1. Hypervisor hypercall interfaces
2. Second-level Address Translation (SLAT)
3. VirtIO devices
4. ...
"""
__version__ = '1.0'
import struct
import sys
import os.path
from chipsec.logger import logger, pretty_print_hex_buffer
import chipsec.hal.pcidb
import chipsec.hal.paging
import chipsec.hal.mmio
class VMMRuntimeError (RuntimeError):
pass
class VMM:
def __init__( self, cs ):
self.cs = cs
self.helper = cs.helper
self.output = ''
(self.membuf0_va, self.membuf0_pa) = (0, 0)
(self.membuf1_va, self.membuf1_pa) = (0, 0)
chipsec.hal.pcidb.VENDORS[VIRTIO_VID] = VIRTIO_VENDOR_NAME
chipsec.hal.pcidb.DEVICES[VIRTIO_VID] = VIRTIO_DEVICES
def __del__(self):
        if self.membuf0_va != 0:
#self.helper.free_physical_mem(self.membuf0_va)
(self.membuf0_va, self.membuf0_pa) = (0, 0)
(self.membuf1_va, self.membuf1_pa) = (0, 0)
def init(self):
(self.membuf0_va, self.membuf0_pa) = self.cs.mem.alloc_physical_mem(0x2000, 0xFFFFFFFFFFFFFFFF)
(self.membuf1_va, self.membuf1_pa) = (self.membuf0_va + 0x1000, self.membuf0_pa + 0x1000)
if self.membuf0_va == 0:
logger().log( "[vmm] Could not allocate memory!")
            raise VMMRuntimeError( "[vmm] could not allocate memory" )
# Generic hypercall interface
def hypercall(self, rax, rbx, rcx, rdx, rdi, rsi, r8=0, r9=0, r10=0, r11=0, xmm_buffer=0):
return self.helper.hypercall(rcx, rdx, r8, r9, r10, r11, rax, rbx, rdi, rsi, xmm_buffer)
# Hypervisor-specific hypercall interfaces
def hypercall64_five_args(self, vector, arg1=0, arg2=0, arg3=0, arg4=0, arg5=0):
return self.helper.hypercall(0, arg3, arg5, 0, arg4, 0, vector, 0, arg1, arg2)
def hypercall64_memory_based(self, hypervisor_input_value, parameters, size = 0):
self.cs.mem.write_physical_mem(self.membuf0_pa, len(parameters[:0x1000]), parameters[:0x1000])
regs = self.helper.hypercall(hypervisor_input_value & ~0x00010000, self.membuf0_pa, self.membuf1_pa)
self.output = self.helper.read_physical_mem(self.membuf1_pa, size) if size > 0 else ''
return regs
def hypercall64_fast(self, hypervisor_input_value, param0 = 0, param1 = 0):
return self.helper.hypercall(hypervisor_input_value | 0x00010000, param0, param1)
def hypercall64_extended_fast(self, hypervisor_input_value, parameter_block):
(param0, param1, xmm_regs) = struct.unpack('<QQ96s', parameter_block)
self.cs.mem.write_physical_mem(self.membuf0_pa, 0x60, xmm_regs)
return self.helper.hypercall(hypervisor_input_value | 0x00010000, param0, param1, 0, 0, 0, 0, 0, 0, 0, self.membuf0_va)
#
# Dump EPT page tables at specified physical base (EPT pointer)
#
def dump_EPT_page_tables( self, eptp, pt_fname=None ):
_orig_logname = logger().LOG_FILE_NAME
paging_ept = chipsec.hal.paging.c_extended_page_tables( self.cs )
if logger().HAL: logger().log( '[vmm] dumping EPT paging hierarchy at EPTP 0x%08X...' % eptp )
if pt_fname is None: pt_fname = ('ept_%08X' % eptp)
logger().set_log_file( pt_fname )
paging_ept.read_pt_and_show_status( pt_fname, 'EPT', eptp )
logger().set_log_file( _orig_logname )
if paging_ept.failure: logger().error( 'could not dump EPT page tables' )
################################################################################
#
# VirtIO functions
#
################################################################################
VIRTIO_VID = 0x1AF4
VIRTIO_VENDOR_NAME = 'Red Hat, Inc.'
VIRTIO_VENDORS = [VIRTIO_VID]
VIRTIO_DEVICES = {
0x1000: 'VirtIO Network',
0x1001: 'VirtIO Block',
    0x1002: 'VirtIO Balloon',
0x1003: 'VirtIO Console',
0x1004: 'VirtIO SCSI',
0x1005: 'VirtIO RNG',
0x1009: 'VirtIO filesystem',
0x1041: 'VirtIO network (1.0)',
0x1042: 'VirtIO block (1.0)',
0x1043: 'VirtIO console (1.0)',
0x1044: 'VirtIO RNG (1.0)',
0x1045: 'VirtIO memory balloon (1.0)',
0x1046: 'VirtIO SCSI (1.0)',
0x1049: 'VirtIO filesystem (1.0)',
0x1050: 'VirtIO GPU (1.0)',
0x1052: 'VirtIO input (1.0)',
0x1110: 'VirtIO Inter-VM shared memory'
}
def get_virtio_devices( devices ):
virtio_devices = []
for (b, d, f, vid, did) in devices:
if vid in VIRTIO_VENDORS:
virtio_devices.append((b, d, f, vid, did))
return virtio_devices
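# Illustrative sketch only: it assumes a chipsec 'cs' object whose PCI HAL
# exposes enumerate_devices() returning (bus, dev, fun, vid, did) tuples;
# adjust to whatever enumeration helper your chipsec version provides.
#
#   virtio = get_virtio_devices(cs.pci.enumerate_devices())
#   for (b, d, f, vid, did) in virtio:
#       VirtIO_Device(cs, b, d, f).dump_device()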
class VirtIO_Device():
def __init__(self, cs, b, d, f):
self.cs = cs
self.bus = b
self.dev = d
self.fun = f
def dump_device(self):
logger().log("\n[vmm] VirtIO device %02x:%02x.%01x" % (self.bus, self.dev, self.fun))
dev_cfg = self.cs.pci.dump_pci_config(self.bus, self.dev, self.fun)
pretty_print_hex_buffer( dev_cfg )
bars = self.cs.pci.get_device_bars(self.bus, self.dev, self.fun)
for (bar, isMMIO, is64bit, bar_off, bar_reg) in bars:
if isMMIO:
chipsec.hal.mmio.dump_MMIO( self.cs, bar, 0x1000 )
else:
self.cs.io.dump_IO( bar, 0x100, 4 )
|
Qalthos/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/apt.py
|
10
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Flowroute LLC
# Written by Matthew Williams <matthew@flowroute.com>
# Based on yum module written by Seth Vidal <skvidal at fedoraproject.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: apt
short_description: Manages apt packages
description:
- Manages I(apt) packages (such as for Debian/Ubuntu).
version_added: "0.0.2"
options:
name:
description:
- A list of package names, like C(foo), or package specifier with version, like C(foo=1.0).
Name wildcards (fnmatch) like C(apt*) and version wildcards like C(foo=1.0*) are also supported.
aliases: [ package, pkg ]
state:
description:
- Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies
        are installed. C(fixed) attempts to correct a system with broken dependencies in place.
default: present
choices: [ absent, build-dep, latest, present, fixed ]
update_cache:
description:
- Run the equivalent of C(apt-get update) before the operation. Can be run as part of the package installation or as a separate step.
type: bool
default: 'no'
cache_valid_time:
description:
      - Update the apt cache if it is older than the I(cache_valid_time). This option is set in seconds.
- As of Ansible 2.4, if explicitly set, this sets I(update_cache=yes).
default: 0
purge:
description:
- Will force purging of configuration files if the module state is set to I(absent).
type: bool
default: 'no'
default_release:
description:
      - Corresponds to the C(-t) option for I(apt) and sets pin priorities.
install_recommends:
description:
- Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install
recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
aliases: ['install-recommends']
type: bool
force:
description:
- 'Corresponds to the C(--force-yes) to I(apt-get) and implies C(allow_unauthenticated: yes)'
- "This option will disable checking both the packages' signatures and the certificates of the
web servers they are downloaded from."
- 'This option *is not* the equivalent of passing the C(-f) flag to I(apt-get) on the command line'
- '**This is a destructive operation with the potential to destroy your system, and it should almost never be used.**
Please also see C(man apt-get) for more information.'
type: bool
default: 'no'
allow_unauthenticated:
description:
- Ignore if packages cannot be authenticated. This is useful for bootstrapping environments that manage their own apt-key setup.
- 'C(allow_unauthenticated) is only supported with state: I(install)/I(present)'
type: bool
default: 'no'
version_added: "2.1"
upgrade:
description:
- If yes or safe, performs an aptitude safe-upgrade.
- If full, performs an aptitude full-upgrade.
- If dist, performs an apt-get dist-upgrade.
- 'Note: This does not upgrade a specific package, use state=latest for that.'
- 'Note: Since 2.4, apt-get is used as a fall-back if aptitude is not present.'
version_added: "1.1"
choices: [ dist, full, 'no', safe, 'yes' ]
default: 'no'
dpkg_options:
description:
- Add dpkg options to apt command. Defaults to '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"'
      - Options should be supplied as a comma-separated list
default: force-confdef,force-confold
deb:
description:
- Path to a .deb package on the remote machine.
      - If C(://) is in the path, ansible will attempt to download the deb before installing. (Version added 2.1)
- Requires the C(xz-utils) package to extract the control file of the deb package to install.
required: false
version_added: "1.6"
autoremove:
description:
- If C(yes), remove unused dependency packages for all module states except I(build-dep). It can also be used as the only option.
      - Prior to version 2.4, autoclean was also an alias for autoremove; now it is its own separate command. See documentation for further information.
type: bool
default: 'no'
version_added: "2.1"
autoclean:
description:
- If C(yes), cleans the local repository of retrieved package files that can no longer be downloaded.
type: bool
default: 'no'
version_added: "2.4"
policy_rc_d:
description:
- Force the exit code of /usr/sbin/policy-rc.d.
- For example, if I(policy_rc_d=101) the installed package will not trigger a service start.
- If /usr/sbin/policy-rc.d already exist, it is backed up and restored after the package installation.
- If C(null), the /usr/sbin/policy-rc.d isn't created/changed.
type: int
default: null
version_added: "2.8"
only_upgrade:
description:
- Only upgrade a package if it is already installed.
type: bool
default: 'no'
version_added: "2.1"
force_apt_get:
description:
- Force usage of apt-get instead of aptitude
type: bool
default: 'no'
version_added: "2.4"
requirements:
- python-apt (python 2)
- python3-apt (python 3)
- aptitude (before 2.4)
author: "Matthew Williams (@mgwilliams)"
notes:
  - Three of the upgrade modes (C(full), C(safe) and its alias C(yes)) required C(aptitude) up to Ansible 2.3; since 2.4, C(apt-get) is used as a fall-back.
- apt starts newly installed services by default, this is what the underlying tooling does,
to avoid this you can set the ``RUNLEVEL`` environment variable to 1.
  - The apt-get commandline supports implicit regex matches here but we do not because it can let typos through more easily
    (if you typo C(foo) as C(fo), apt-get would install packages that have "fo" in their name with a warning and a prompt for the user;
    since we don't have warnings and prompts before installing, we disallow this. Use an explicit fnmatch pattern if you want wildcarding).
  - When used with a `loop:` each package will be processed individually; it is much more efficient to pass the list directly to the `name` option.
'''
EXAMPLES = '''
- name: Update repositories cache and install "foo" package
apt:
name: foo
update_cache: yes
- name: Install apache httpd but avoid starting it immediately (state=present is optional)
apt:
name: apache2
state: present
environment:
RUNLEVEL: 1
- name: Remove "foo" package
apt:
name: foo
state: absent
- name: Install the package "foo"
apt:
name: foo
- name: Install a list of packages
apt:
name: "{{ packages }}"
vars:
packages:
- foo
- foo-tools
- name: Install the version '1.00' of package "foo"
apt:
name: foo=1.00
- name: Update the repository cache and update package "nginx" to latest version using default release squeeze-backport
apt:
name: nginx
state: latest
default_release: squeeze-backports
update_cache: yes
- name: Install latest version of "openjdk-6-jdk" ignoring "install-recommends"
apt:
name: openjdk-6-jdk
state: latest
install_recommends: no
- name: Upgrade all packages to the latest version
apt:
name: "*"
state: latest
- name: Update all packages to the latest version
apt:
upgrade: dist
- name: Run the equivalent of "apt-get update" as a separate step
apt:
update_cache: yes
- name: Only run "update_cache=yes" if the last one is more than 3600 seconds ago
apt:
update_cache: yes
cache_valid_time: 3600
- name: Pass options to dpkg on run
apt:
upgrade: dist
update_cache: yes
dpkg_options: 'force-confold,force-confdef'
- name: Install a .deb package
apt:
deb: /tmp/mypackage.deb
- name: Install the build dependencies for package "foo"
apt:
pkg: foo
state: build-dep
- name: Install a .deb package from the internet.
apt:
deb: https://example.com/python-ppq_0.1-1_all.deb
- name: Remove useless packages from the cache
apt:
autoclean: yes
- name: Remove dependencies that are no longer required
apt:
autoremove: yes
'''
RETURN = '''
cache_updated:
description: if the cache was updated or not
returned: success, in some cases
type: bool
sample: True
cache_update_time:
description: time of the last cache update (0 if unknown)
returned: success, in some cases
type: int
sample: 1425828348000
stdout:
description: output from apt
returned: success, when needed
type: str
sample: "Reading package lists...\nBuilding dependency tree...\nReading state information...\nThe following extra packages will be installed:\n apache2-bin ..."
stderr:
description: error output from apt
returned: success, when needed
type: str
sample: "AH00558: apache2: Could not reliably determine the server's fully qualified domain name, using 127.0.1.1. Set the 'ServerName' directive globally to ..."
''' # NOQA
# added to stave off future warnings about apt api
import warnings
warnings.filterwarnings('ignore', "apt API not stable yet", FutureWarning)
import datetime
import fnmatch
import itertools
import os
import shutil
import re
import sys
import tempfile
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.urls import fetch_file
# APT related constants
APT_ENV_VARS = dict(
DEBIAN_FRONTEND='noninteractive',
DEBIAN_PRIORITY='critical',
# We screenscrape apt-get and aptitude output for information so we need
# to make sure we use the C locale when running commands
LANG='C',
LC_ALL='C',
LC_MESSAGES='C',
LC_CTYPE='C',
)
DPKG_OPTIONS = 'force-confdef,force-confold'
APT_GET_ZERO = "\n0 upgraded, 0 newly installed"
APTITUDE_ZERO = "\n0 packages upgraded, 0 newly installed"
APT_LISTS_PATH = "/var/lib/apt/lists"
APT_UPDATE_SUCCESS_STAMP_PATH = "/var/lib/apt/periodic/update-success-stamp"
APT_MARK_INVALID_OP = 'Invalid operation'
APT_MARK_INVALID_OP_DEB6 = 'Usage: apt-mark [options] {markauto|unmarkauto} packages'
CLEAN_OP_CHANGED_STR = dict(
autoremove='The following packages will be REMOVED',
# "Del python3-q 2.4-1 [24 kB]"
autoclean='Del ',
)
HAS_PYTHON_APT = True
try:
import apt
import apt.debfile
import apt_pkg
except ImportError:
HAS_PYTHON_APT = False
if sys.version_info[0] < 3:
PYTHON_APT = 'python-apt'
else:
PYTHON_APT = 'python3-apt'
class PolicyRcD(object):
"""
This class is a context manager for the /usr/sbin/policy-rc.d file.
It allow the user to prevent dpkg to start the corresponding service when installing
a package.
https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt
"""
def __init__(self, module):
# we need the module for later use (eg. fail_json)
self.m = module
# if policy_rc_d is null then we don't need to modify policy-rc.d
if self.m.params['policy_rc_d'] is None:
return
# if the /usr/sbin/policy-rc.d already exist
# we will back it up during package installation
# then restore it
if os.path.exists('/usr/sbin/policy-rc.d'):
self.backup_dir = tempfile.mkdtemp(prefix="ansible")
else:
self.backup_dir = None
def __enter__(self):
"""
        This method will be called when we enter the context, before we call `apt-get …`
"""
# if policy_rc_d is null then we don't need to modify policy-rc.d
if self.m.params['policy_rc_d'] is None:
return
# if the /usr/sbin/policy-rc.d already exist we back it up
if self.backup_dir:
try:
shutil.move('/usr/sbin/policy-rc.d', self.backup_dir)
except Exception:
self.m.fail_json(msg="Fail to move /usr/sbin/policy-rc.d to %s" % self.backup_dir)
# we write /usr/sbin/policy-rc.d so it always exit with code policy_rc_d
try:
with open('/usr/sbin/policy-rc.d', 'w') as policy_rc_d:
policy_rc_d.write('#!/bin/sh\nexit %d\n' % self.m.params['policy_rc_d'])
os.chmod('/usr/sbin/policy-rc.d', 0o0755)
except Exception:
self.m.fail_json(msg="Failed to create or chmod /usr/sbin/policy-rc.d")
def __exit__(self, type, value, traceback):
"""
        This method will be called when we leave the context, after `apt-get …` has run
"""
# if policy_rc_d is null then we don't need to modify policy-rc.d
if self.m.params['policy_rc_d'] is None:
return
if self.backup_dir:
# if /usr/sbin/policy-rc.d already exists before the call to __enter__
# we restore it (from the backup done in __enter__)
try:
shutil.move(os.path.join(self.backup_dir, 'policy-rc.d'),
'/usr/sbin/policy-rc.d')
                os.rmdir(self.backup_dir)
except Exception:
self.m.fail_json(msg="Fail to move back %s to /usr/sbin/policy-rc.d"
% os.path.join(self.backup_dir, 'policy-rc.d'))
else:
            # if there wasn't any /usr/sbin/policy-rc.d file before the call to __enter__
# we just remove the file
try:
os.remove('/usr/sbin/policy-rc.d')
except Exception:
self.m.fail_json(msg="Fail to remove /usr/sbin/policy-rc.d (after package manipulation)")
def package_split(pkgspec):
parts = pkgspec.split('=', 1)
version = None
if len(parts) > 1:
version = parts[1]
return parts[0], version
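# For illustration: package_split('foo=1.0*') returns ('foo', '1.0*') and
# package_split('foo') returns ('foo', None); the version part may itself be
# an fnmatch pattern, which package_status() below checks against the cache.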
def package_versions(pkgname, pkg, pkg_cache):
try:
versions = set(p.version for p in pkg.versions)
except AttributeError:
# assume older version of python-apt is installed
        # apt.package.Package#versions requires python-apt >= 0.7.9.
pkg_cache_list = (p for p in pkg_cache.Packages if p.Name == pkgname)
pkg_versions = (p.VersionList for p in pkg_cache_list)
versions = set(p.VerStr for p in itertools.chain(*pkg_versions))
return versions
def package_version_compare(version, other_version):
try:
return apt_pkg.version_compare(version, other_version)
except AttributeError:
return apt_pkg.VersionCompare(version, other_version)
def package_status(m, pkgname, version, cache, state):
try:
# get the package from the cache, as well as the
# low-level apt_pkg.Package object which contains
# state fields not directly accessible from the
# higher-level apt.package.Package object.
pkg = cache[pkgname]
ll_pkg = cache._cache[pkgname] # the low-level package object
except KeyError:
if state == 'install':
try:
provided_packages = cache.get_providing_packages(pkgname)
if provided_packages:
is_installed = False
upgradable = False
version_ok = False
                    # when a virtual package provides only one package, look up the status of the target package
if cache.is_virtual_package(pkgname) and len(provided_packages) == 1:
package = provided_packages[0]
installed, version_ok, upgradable, has_files = package_status(m, package.name, version, cache, state='install')
if installed:
is_installed = True
return is_installed, version_ok, upgradable, False
m.fail_json(msg="No package matching '%s' is available" % pkgname)
except AttributeError:
# python-apt version too old to detect virtual packages
# mark as upgradable and let apt-get install deal with it
return False, False, True, False
else:
return False, False, False, False
try:
has_files = len(pkg.installed_files) > 0
except UnicodeDecodeError:
has_files = True
except AttributeError:
has_files = False # older python-apt cannot be used to determine non-purged
try:
package_is_installed = ll_pkg.current_state == apt_pkg.CURSTATE_INSTALLED
except AttributeError: # python-apt 0.7.X has very weak low-level object
try:
# might not be necessary as python-apt post-0.7.X should have current_state property
package_is_installed = pkg.is_installed
except AttributeError:
# assume older version of python-apt is installed
package_is_installed = pkg.isInstalled
version_is_installed = package_is_installed
if version:
versions = package_versions(pkgname, pkg, cache._cache)
avail_upgrades = fnmatch.filter(versions, version)
if package_is_installed:
try:
installed_version = pkg.installed.version
except AttributeError:
installed_version = pkg.installedVersion
# check if the version is matched as well
version_is_installed = fnmatch.fnmatch(installed_version, version)
# Only claim the package is upgradable if a candidate matches the version
package_is_upgradable = False
for candidate in avail_upgrades:
if package_version_compare(candidate, installed_version) > 0:
package_is_upgradable = True
break
else:
package_is_upgradable = bool(avail_upgrades)
else:
try:
package_is_upgradable = pkg.is_upgradable
except AttributeError:
# assume older version of python-apt is installed
package_is_upgradable = pkg.isUpgradable
return package_is_installed, version_is_installed, package_is_upgradable, has_files
def expand_dpkg_options(dpkg_options_compressed):
options_list = dpkg_options_compressed.split(',')
dpkg_options = ""
for dpkg_option in options_list:
dpkg_options = '%s -o "Dpkg::Options::=--%s"' \
% (dpkg_options, dpkg_option)
return dpkg_options.strip()
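# For illustration: expand_dpkg_options('force-confdef,force-confold') yields
# '-o "Dpkg::Options::=--force-confdef" -o "Dpkg::Options::=--force-confold"',
# i.e. the documented dpkg_options default expanded into apt-get arguments.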
def expand_pkgspec_from_fnmatches(m, pkgspec, cache):
# Note: apt-get does implicit regex matching when an exact package name
# match is not found. Something like this:
# matches = [pkg.name for pkg in cache if re.match(pkgspec, pkg.name)]
# (Should also deal with the ':' for multiarch like the fnmatch code below)
#
# We have decided not to do similar implicit regex matching but might take
# a PR to add some sort of explicit regex matching:
# https://github.com/ansible/ansible-modules-core/issues/1258
new_pkgspec = []
if pkgspec:
for pkgspec_pattern in pkgspec:
pkgname_pattern, version = package_split(pkgspec_pattern)
# note that none of these chars is allowed in a (debian) pkgname
if frozenset('*?[]!').intersection(pkgname_pattern):
# handle multiarch pkgnames, the idea is that "apt*" should
# only select native packages. But "apt*:i386" should still work
if ":" not in pkgname_pattern:
# Filter the multiarch packages from the cache only once
try:
pkg_name_cache = _non_multiarch
except NameError:
pkg_name_cache = _non_multiarch = [pkg.name for pkg in cache if ':' not in pkg.name] # noqa: F841
else:
# Create a cache of pkg_names including multiarch only once
try:
pkg_name_cache = _all_pkg_names
except NameError:
pkg_name_cache = _all_pkg_names = [pkg.name for pkg in cache] # noqa: F841
matches = fnmatch.filter(pkg_name_cache, pkgname_pattern)
if not matches:
m.fail_json(msg="No package(s) matching '%s' available" % str(pkgname_pattern))
else:
new_pkgspec.extend(matches)
else:
# No wildcards in name
new_pkgspec.append(pkgspec_pattern)
return new_pkgspec
def parse_diff(output):
diff = to_native(output).splitlines()
try:
# check for start marker from aptitude
diff_start = diff.index('Resolving dependencies...')
except ValueError:
try:
# check for start marker from apt-get
diff_start = diff.index('Reading state information...')
except ValueError:
# show everything
diff_start = -1
try:
# check for end marker line from both apt-get and aptitude
diff_end = next(i for i, item in enumerate(diff) if re.match('[0-9]+ (packages )?upgraded', item))
except StopIteration:
diff_end = len(diff)
diff_start += 1
diff_end += 1
return {'prepared': '\n'.join(diff[diff_start:diff_end])}
def mark_installed_manually(m, packages):
if not packages:
return
apt_mark_cmd_path = m.get_bin_path("apt-mark")
# https://github.com/ansible/ansible/issues/40531
if apt_mark_cmd_path is None:
m.warn("Could not find apt-mark binary, not marking package(s) as manually installed.")
return
cmd = "%s manual %s" % (apt_mark_cmd_path, ' '.join(packages))
rc, out, err = m.run_command(cmd)
if APT_MARK_INVALID_OP in err or APT_MARK_INVALID_OP_DEB6 in err:
cmd = "%s unmarkauto %s" % (apt_mark_cmd_path, ' '.join(packages))
rc, out, err = m.run_command(cmd)
if rc != 0:
m.fail_json(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
def install(m, pkgspec, cache, upgrade=False, default_release=None,
install_recommends=None, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
build_dep=False, fixed=False, autoremove=False, only_upgrade=False,
allow_unauthenticated=False):
pkg_list = []
packages = ""
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
package_names = []
for package in pkgspec:
if build_dep:
# Let apt decide what to install
pkg_list.append("'%s'" % package)
continue
name, version = package_split(package)
package_names.append(name)
installed, installed_version, upgradable, has_files = package_status(m, name, version, cache, state='install')
if (not installed and not only_upgrade) or (installed and not installed_version) or (upgrade and upgradable):
pkg_list.append("'%s'" % package)
if installed_version and upgradable and version:
# This happens when the package is installed, a newer version is
# available, and the version is a wildcard that matches both
#
# We do not apply the upgrade flag because we cannot specify both
# a version and state=latest. (This behaviour mirrors how apt
# treats a version with wildcard in the package)
pkg_list.append("'%s'" % package)
packages = ' '.join(pkg_list)
if packages:
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if only_upgrade:
only_upgrade = '--only-upgrade'
else:
only_upgrade = ''
if fixed:
fixed = '--fix-broken'
else:
fixed = ''
if build_dep:
cmd = "%s -y %s %s %s %s %s build-dep %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, check_arg, packages)
else:
cmd = "%s -y %s %s %s %s %s %s install %s" % (APT_GET_CMD, dpkg_options, only_upgrade, fixed, force_yes, autoremove, check_arg, packages)
if default_release:
cmd += " -t '%s'" % (default_release,)
if install_recommends is False:
cmd += " -o APT::Install-Recommends=no"
elif install_recommends is True:
cmd += " -o APT::Install-Recommends=yes"
# install_recommends is None uses the OS default
if allow_unauthenticated:
cmd += " --allow-unauthenticated"
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
status = True
changed = True
if build_dep:
changed = APT_GET_ZERO not in out
data = dict(changed=changed, stdout=out, stderr=err, diff=diff)
if rc:
status = False
data = dict(msg="'%s' failed: %s" % (cmd, err), stdout=out, stderr=err, rc=rc)
else:
status = True
data = dict(changed=False)
if not build_dep:
mark_installed_manually(m, package_names)
return (status, data)
def get_field_of_deb(m, deb_file, field="Version"):
cmd_dpkg = m.get_bin_path("dpkg", True)
cmd = cmd_dpkg + " --field %s %s" % (deb_file, field)
rc, stdout, stderr = m.run_command(cmd)
if rc != 0:
m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
return to_native(stdout).strip('\n')
def install_deb(m, debs, cache, force, install_recommends, allow_unauthenticated, dpkg_options):
changed = False
deps_to_install = []
pkgs_to_install = []
for deb_file in debs.split(','):
try:
pkg = apt.debfile.DebPackage(deb_file)
pkg_name = get_field_of_deb(m, deb_file, "Package")
pkg_version = get_field_of_deb(m, deb_file, "Version")
if len(apt_pkg.get_architectures()) > 1:
pkg_arch = get_field_of_deb(m, deb_file, "Architecture")
pkg_key = "%s:%s" % (pkg_name, pkg_arch)
else:
pkg_key = pkg_name
try:
installed_pkg = apt.Cache()[pkg_key]
installed_version = installed_pkg.installed.version
if package_version_compare(pkg_version, installed_version) == 0:
# Does not need to down-/upgrade, move on to next package
continue
except Exception:
# Must not be installed, continue with installation
pass
# Check if package is installable
if not pkg.check() and not force:
m.fail_json(msg=pkg._failure_string)
# add any missing deps to the list of deps we need
# to install so they're all done in one shot
deps_to_install.extend(pkg.missing_deps)
except Exception as e:
m.fail_json(msg="Unable to install package: %s" % to_native(e))
# and add this deb to the list of packages to install
pkgs_to_install.append(deb_file)
# install the deps through apt
retvals = {}
if deps_to_install:
(success, retvals) = install(m=m, pkgspec=deps_to_install, cache=cache,
install_recommends=install_recommends,
dpkg_options=expand_dpkg_options(dpkg_options))
if not success:
m.fail_json(**retvals)
changed = retvals.get('changed', False)
if pkgs_to_install:
options = ' '.join(["--%s" % x for x in dpkg_options.split(",")])
if m.check_mode:
options += " --simulate"
if force:
options += " --force-all"
cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if "stdout" in retvals:
stdout = retvals["stdout"] + out
else:
stdout = out
if "diff" in retvals:
diff = retvals["diff"]
if 'prepared' in diff:
diff['prepared'] += '\n\n' + out
else:
diff = parse_diff(out)
if "stderr" in retvals:
stderr = retvals["stderr"] + err
else:
stderr = err
if rc == 0:
m.exit_json(changed=True, stdout=stdout, stderr=stderr, diff=diff)
else:
m.fail_json(msg="%s failed" % cmd, stdout=stdout, stderr=stderr)
else:
m.exit_json(changed=changed, stdout=retvals.get('stdout', ''), stderr=retvals.get('stderr', ''), diff=retvals.get('diff', ''))
def remove(m, pkgspec, cache, purge=False, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False):
pkg_list = []
pkgspec = expand_pkgspec_from_fnmatches(m, pkgspec, cache)
for package in pkgspec:
name, version = package_split(package)
installed, installed_version, upgradable, has_files = package_status(m, name, version, cache, state='remove')
if installed_version or (has_files and purge):
pkg_list.append("'%s'" % package)
packages = ' '.join(pkg_list)
if not packages:
m.exit_json(changed=False)
else:
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if purge:
purge = '--purge'
else:
purge = ''
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
cmd = "%s -q -y %s %s %s %s %s remove %s" % (APT_GET_CMD, dpkg_options, purge, force_yes, autoremove, check_arg, packages)
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
m.fail_json(msg="'apt-get remove %s' failed: %s" % (packages, err), stdout=out, stderr=err, rc=rc)
m.exit_json(changed=True, stdout=out, stderr=err, diff=diff)
def cleanup(m, purge=False, force=False, operation=None,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS)):
if operation not in frozenset(['autoremove', 'autoclean']):
raise AssertionError('Expected "autoremove" or "autoclean" cleanup operation, got %s' % operation)
if force:
force_yes = '--force-yes'
else:
force_yes = ''
if purge:
purge = '--purge'
else:
purge = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
cmd = "%s -y %s %s %s %s %s" % (APT_GET_CMD, dpkg_options, purge, force_yes, operation, check_arg)
with PolicyRcD(m):
rc, out, err = m.run_command(cmd)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
m.fail_json(msg="'apt-get %s' failed: %s" % (operation, err), stdout=out, stderr=err, rc=rc)
changed = CLEAN_OP_CHANGED_STR[operation] in out
m.exit_json(changed=changed, stdout=out, stderr=err, diff=diff)
def upgrade(m, mode="yes", force=False, default_release=None,
use_apt_get=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS), autoremove=False,
allow_unauthenticated=False,
):
if autoremove:
autoremove = '--auto-remove'
else:
autoremove = ''
if m.check_mode:
check_arg = '--simulate'
else:
check_arg = ''
apt_cmd = None
prompt_regex = None
if mode == "dist" or (mode == "full" and use_apt_get):
# apt-get dist-upgrade
apt_cmd = APT_GET_CMD
upgrade_command = "dist-upgrade %s" % (autoremove)
elif mode == "full" and not use_apt_get:
# aptitude full-upgrade
apt_cmd = APTITUDE_CMD
upgrade_command = "full-upgrade"
else:
if use_apt_get:
apt_cmd = APT_GET_CMD
upgrade_command = "upgrade --with-new-pkgs %s" % (autoremove)
else:
# aptitude safe-upgrade # mode=yes # default
apt_cmd = APTITUDE_CMD
upgrade_command = "safe-upgrade"
prompt_regex = r"(^Do you want to ignore this warning and proceed anyway\?|^\*\*\*.*\[default=.*\])"
if force:
if apt_cmd == APT_GET_CMD:
force_yes = '--force-yes'
else:
force_yes = '--assume-yes --allow-untrusted'
else:
force_yes = ''
allow_unauthenticated = '--allow-unauthenticated' if allow_unauthenticated else ''
if apt_cmd is None:
if use_apt_get:
apt_cmd = APT_GET_CMD
else:
m.fail_json(msg="Unable to find APTITUDE in path. Please make sure "
"to have APTITUDE in path or use 'force_apt_get=True'")
apt_cmd_path = m.get_bin_path(apt_cmd, required=True)
cmd = '%s -y %s %s %s %s %s' % (apt_cmd_path, dpkg_options, force_yes, allow_unauthenticated,
check_arg, upgrade_command)
if default_release:
cmd += " -t '%s'" % (default_release,)
with PolicyRcD(m):
rc, out, err = m.run_command(cmd, prompt_regex=prompt_regex)
if m._diff:
diff = parse_diff(out)
else:
diff = {}
if rc:
m.fail_json(msg="'%s %s' failed: %s" % (apt_cmd, upgrade_command, err), stdout=out, rc=rc)
if (apt_cmd == APT_GET_CMD and APT_GET_ZERO in out) or (apt_cmd == APTITUDE_CMD and APTITUDE_ZERO in out):
m.exit_json(changed=False, msg=out, stdout=out, stderr=err)
m.exit_json(changed=True, msg=out, stdout=out, stderr=err, diff=diff)
def get_cache_mtime():
"""Return mtime of a valid apt cache file.
Stat the apt cache file and if no cache file is found return 0
:returns: ``int``
"""
cache_time = 0
if os.path.exists(APT_UPDATE_SUCCESS_STAMP_PATH):
cache_time = os.stat(APT_UPDATE_SUCCESS_STAMP_PATH).st_mtime
elif os.path.exists(APT_LISTS_PATH):
cache_time = os.stat(APT_LISTS_PATH).st_mtime
return cache_time
def get_updated_cache_time():
"""Return the mtime time stamp and the updated cache time.
Always retrieve the mtime of the apt cache or set the `cache_mtime`
variable to 0
:returns: ``tuple``
"""
cache_mtime = get_cache_mtime()
mtimestamp = datetime.datetime.fromtimestamp(cache_mtime)
updated_cache_time = int(time.mktime(mtimestamp.timetuple()))
return mtimestamp, updated_cache_time
# https://github.com/ansible/ansible-modules-core/issues/2951
def get_cache(module):
    '''Attempt to get the cache object and update until it works'''
cache = None
try:
cache = apt.Cache()
except SystemError as e:
if '/var/lib/apt/lists/' in to_native(e).lower():
# update cache until files are fixed or retries exceeded
retries = 0
while retries < 2:
(rc, so, se) = module.run_command(['apt-get', 'update', '-q'])
retries += 1
if rc == 0:
break
if rc != 0:
module.fail_json(msg='Updating the cache to correct corrupt package lists failed:\n%s\n%s' % (to_native(e), so + se), rc=rc)
# try again
cache = apt.Cache()
else:
module.fail_json(msg=to_native(e))
return cache
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['absent', 'build-dep', 'fixed', 'latest', 'present']),
update_cache=dict(type='bool', aliases=['update-cache']),
cache_valid_time=dict(type='int', default=0),
purge=dict(type='bool', default=False),
package=dict(type='list', aliases=['pkg', 'name']),
deb=dict(type='path'),
default_release=dict(type='str', aliases=['default-release']),
install_recommends=dict(type='bool', aliases=['install-recommends']),
force=dict(type='bool', default=False),
upgrade=dict(type='str', choices=['dist', 'full', 'no', 'safe', 'yes']),
dpkg_options=dict(type='str', default=DPKG_OPTIONS),
autoremove=dict(type='bool', default=False),
autoclean=dict(type='bool', default=False),
policy_rc_d=dict(type='int', default=None),
only_upgrade=dict(type='bool', default=False),
force_apt_get=dict(type='bool', default=False),
allow_unauthenticated=dict(type='bool', default=False, aliases=['allow-unauthenticated']),
),
mutually_exclusive=[['deb', 'package', 'upgrade']],
required_one_of=[['autoremove', 'deb', 'package', 'update_cache', 'upgrade']],
supports_check_mode=True,
)
module.run_command_environ_update = APT_ENV_VARS
if not HAS_PYTHON_APT:
if module.check_mode:
module.fail_json(msg="%s must be installed to use check mode. "
"If run normally this module can auto-install it." % PYTHON_APT)
try:
module.warn("Updating cache and auto-installing missing dependency: %s" % PYTHON_APT)
module.run_command(['apt-get', 'update'], check_rc=True)
module.run_command(['apt-get', 'install', '--no-install-recommends', PYTHON_APT, '-y', '-q'], check_rc=True)
global apt, apt_pkg
import apt
import apt.debfile
import apt_pkg
except ImportError:
module.fail_json(msg="Could not import python modules: apt, apt_pkg. "
"Please install %s package." % PYTHON_APT)
global APTITUDE_CMD
APTITUDE_CMD = module.get_bin_path("aptitude", False)
global APT_GET_CMD
APT_GET_CMD = module.get_bin_path("apt-get")
p = module.params
if p['upgrade'] == 'no':
p['upgrade'] = None
use_apt_get = p['force_apt_get']
if not use_apt_get and not APTITUDE_CMD:
module.warn("Could not find aptitude. Using apt-get instead")
use_apt_get = True
updated_cache = False
updated_cache_time = 0
install_recommends = p['install_recommends']
allow_unauthenticated = p['allow_unauthenticated']
dpkg_options = expand_dpkg_options(p['dpkg_options'])
autoremove = p['autoremove']
autoclean = p['autoclean']
# Get the cache object
cache = get_cache(module)
try:
if p['default_release']:
try:
apt_pkg.config['APT::Default-Release'] = p['default_release']
except AttributeError:
apt_pkg.Config['APT::Default-Release'] = p['default_release']
# reopen cache w/ modified config
cache.open(progress=None)
mtimestamp, updated_cache_time = get_updated_cache_time()
# Cache valid time is default 0, which will update the cache if
# needed and `update_cache` was set to true
updated_cache = False
if p['update_cache'] or p['cache_valid_time']:
now = datetime.datetime.now()
tdelta = datetime.timedelta(seconds=p['cache_valid_time'])
if not mtimestamp + tdelta >= now:
# Retry to update the cache up to 3 times
err = ''
for retry in range(3):
try:
cache.update()
break
except apt.cache.FetchFailedException as e:
err = to_native(e)
else:
module.fail_json(msg='Failed to update apt cache: %s' % err)
cache.open(progress=None)
mtimestamp, post_cache_update_time = get_updated_cache_time()
if updated_cache_time != post_cache_update_time:
updated_cache = True
updated_cache_time = post_cache_update_time
# If there is nothing else to do exit. This will set state as
# changed based on if the cache was updated.
if not p['package'] and not p['upgrade'] and not p['deb']:
module.exit_json(
changed=updated_cache,
cache_updated=updated_cache,
cache_update_time=updated_cache_time
)
force_yes = p['force']
if p['upgrade']:
upgrade(module, p['upgrade'], force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, allow_unauthenticated)
if p['deb']:
if p['state'] != 'present':
module.fail_json(msg="deb only supports state=present")
if '://' in p['deb']:
p['deb'] = fetch_file(module, p['deb'])
install_deb(module, p['deb'], cache,
install_recommends=install_recommends,
allow_unauthenticated=allow_unauthenticated,
force=force_yes, dpkg_options=p['dpkg_options'])
unfiltered_packages = p['package'] or ()
packages = [package.strip() for package in unfiltered_packages if package != '*']
all_installed = '*' in unfiltered_packages
latest = p['state'] == 'latest'
if latest and all_installed:
if packages:
module.fail_json(msg='unable to install additional packages when upgrading all installed packages')
upgrade(module, 'yes', force_yes, p['default_release'], use_apt_get, dpkg_options, autoremove, allow_unauthenticated)
if packages:
for package in packages:
if package.count('=') > 1:
module.fail_json(msg="invalid package spec: %s" % package)
if latest and '=' in package:
module.fail_json(msg='version number inconsistent with state=latest: %s' % package)
if not packages:
if autoclean:
cleanup(module, p['purge'], force=force_yes, operation='autoclean', dpkg_options=dpkg_options)
if autoremove:
cleanup(module, p['purge'], force=force_yes, operation='autoremove', dpkg_options=dpkg_options)
if p['state'] in ('latest', 'present', 'build-dep', 'fixed'):
state_upgrade = False
state_builddep = False
state_fixed = False
if p['state'] == 'latest':
state_upgrade = True
if p['state'] == 'build-dep':
state_builddep = True
if p['state'] == 'fixed':
state_fixed = True
success, retvals = install(
module,
packages,
cache,
upgrade=state_upgrade,
default_release=p['default_release'],
install_recommends=install_recommends,
force=force_yes,
dpkg_options=dpkg_options,
build_dep=state_builddep,
fixed=state_fixed,
autoremove=autoremove,
only_upgrade=p['only_upgrade'],
allow_unauthenticated=allow_unauthenticated
)
# Store if the cache has been updated
retvals['cache_updated'] = updated_cache
# Store when the update time was last
retvals['cache_update_time'] = updated_cache_time
if success:
module.exit_json(**retvals)
else:
module.fail_json(**retvals)
elif p['state'] == 'absent':
remove(module, packages, cache, p['purge'], force=force_yes, dpkg_options=dpkg_options, autoremove=autoremove)
except apt.cache.LockFailedException:
module.fail_json(msg="Failed to lock apt for exclusive operation")
except apt.cache.FetchFailedException:
module.fail_json(msg="Could not fetch updated apt files")
if __name__ == "__main__":
main()
|
Stibbons/pyyaml
|
refs/heads/master
|
lib3/yaml/cyaml.py
|
274
|
__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
'CBaseDumper', 'CSafeDumper', 'CDumper']
from _yaml import CParser, CEmitter
from .constructor import *
from .serializer import *
from .representer import *
from .resolver import *
class CBaseLoader(CParser, BaseConstructor, BaseResolver):
def __init__(self, stream):
CParser.__init__(self, stream)
BaseConstructor.__init__(self)
BaseResolver.__init__(self)
class CSafeLoader(CParser, SafeConstructor, Resolver):
def __init__(self, stream):
CParser.__init__(self, stream)
SafeConstructor.__init__(self)
Resolver.__init__(self)
class CLoader(CParser, Constructor, Resolver):
def __init__(self, stream):
CParser.__init__(self, stream)
Constructor.__init__(self)
Resolver.__init__(self)
class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
CEmitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width, encoding=encoding,
allow_unicode=allow_unicode, line_break=line_break,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
CEmitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width, encoding=encoding,
allow_unicode=allow_unicode, line_break=line_break,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
SafeRepresenter.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
class CDumper(CEmitter, Serializer, Representer, Resolver):
def __init__(self, stream,
default_style=None, default_flow_style=None,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
CEmitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width, encoding=encoding,
allow_unicode=allow_unicode, line_break=line_break,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
Representer.__init__(self, default_style=default_style,
default_flow_style=default_flow_style)
Resolver.__init__(self)
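# Illustrative usage sketch (not part of the original module): these classes are
# meant to be passed as the Loader/Dumper arguments of the top-level yaml API when
# the libyaml bindings are available. The helper below is only an example.
def _cyaml_usage_sketch(stream):
    import yaml
    # Parse with the C-accelerated safe loader, then serialize the result back
    # out with the C-accelerated safe dumper.
    data = yaml.load(stream, Loader=CSafeLoader)
    return yaml.dump(data, Dumper=CSafeDumper)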
|
kollibar/spipStatic
|
refs/heads/master
|
spipStatic.py
|
1
|
#! /usr/bin/env python3
import csv,re,os.path
from collections import OrderedDict
from postTraitement import postTraitement,traitementSecurite
from balises import LISTE_BALISES,baliseGenerique
def loadDataCSV(fichier):
    fichierData=open(fichier,'r')
    dialecte=csv.Sniffer().sniff(fichierData.read(1024)) # detect the CSV dialect before parsing
    fichierData.seek(0)
    dataCsv=csv.reader(fichierData,dialect=dialecte)
entete=None
data=[]
for ligne in dataCsv:
if entete==None:
entete=ligne
else:
dictLigne={}
if len(ligne)>len(entete):
raise IOError('ligne d\'entete comprenant moins de données que certaines lignes')
for i in range(len(ligne)):
dictLigne[entete[i]]=ligne[i]
data.append(dictLigne)
fichierData.close()
return data
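# Illustrative sketch of the expected data.csv layout (an assumption inferred from
# the code below, not taken from the original project): a header row naming the
# columns, then one row per page to generate, e.g.
#
#   nom_fichier,titre,invisible
#   index.html,Accueil,
#   brouillon.html,Ebauche,x
#
# A template tag such as #TITRE is replaced by the 'titre' value of the current
# row; 'nom_fichier' gives the output file name and 'invisible' (true/x) skips it.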
FICHIER='./data.csv'
MODELE='./modele.html'
data=loadDataCSV(FICHIER)
fModele=open(MODELE,'r').read()
# Read the template file and extract the list of variables (#VARIABLE format)
listeComplete=re.findall('#[A-Z_][A-Z_0-9]*',fModele)
listeVariable=listeComplete
##for variable in listeComplete:
## if variable not in listeVariable:
## listeVariable.append(variable)
### determine la position de chaque variable
##dictPos=OrderedDict()
##i=0
##for var in listeComplete:
## i=fModele.find(var,i)
## dictPos[var]=i
## i+=1
for ligne in data:
valid=True
if 'invisible' in ligne.keys():
        if ligne['invisible'].lower() in ['true','x']:
valid=False
    # check that the output file name is valid
if 'nom_fichier' not in ligne.keys():
valid=False
else:
if ligne['nom_fichier']=='':
valid=False
    if valid: # if the row is valid, process it
fichier=fModele
i=0
        # replace each variable from the list
varNum=0
while varNum<len(listeVariable):
var=listeVariable[varNum]
n=1
j=fichier.find(var,i)
if fichier[j+len(var)]=='{':
k=fichier.find('}',j+len(var))
param=tuple(fichier[j+len(var)+1:k-1].split(','))
else:
k=j+len(var)
param=None
            tv=0 # tv=0 -> normal processing, tv=1 -> skip postTraitement, tv=2 -> no processing at all (neither security filtering nor postTraitement)
if fichier[j+len(var)]=='*':
if fichier[j+len(var)+1]=='*':
tv=2
else:
tv=1
k+=tv
            # look up the value for this tag
            if var[1:].lower() in ligne.keys(): # variable found in the current row
valeur=ligne[var[1:].lower()]
elif var[1:].lower() in LISTE_BALISES:
if param==None:
valeur=baliseGenerique(var[1:].lower())
else:
                    valeur=baliseGenerique(var[1:].lower(),*param)
else:
                valeur=var # not found: leave the tag unreplaced
            # apply the tag treatments (security filtering and postTraitement)
if tv<2:
valeur=traitementSecurite(valeur)
if tv<1:
valeur=postTraitement(valeur)
            if fichier[j+len(var)]=='|': # filters would be applied here
                pass # TODO
            if fichier[j-1]=='(': # handle conditional blocks of the form [ (#TAG) ]
deb=fichier.rfind('[',0,j)
d=fichier.rfind(']',0,j)
while d>deb:
deb=fichier.rfind('[',0,deb)
d=fichier.rfind(']',0,d)
if deb==-1:
raise IOError('erreur [] [ manquant')
fin=fichier.find(']',k)
f=fichier.find('[',k)
while f<fin and f!=-1:
fin=fichier.find(']',fin)
f=fichier.find('[',f)
if fin==-1:
raise IOError('erreur [] ] manquant')
                if valeur!='': # value not empty: keep the block
                    i=j+len(valeur)-2 # update start position for the next tag search
                    valeur=fichier[deb+1:j-1]+valeur+fichier[k+1:fin]
                else: # value empty: drop the whole block
                    i=deb # update start position for the next tag search
                    # check for tags inside the removed region and bump n to skip over them
if varNum+1<len(listeVariable):
while fichier.find(listeVariable[varNum+n],k,fin)!=-1:
n+=1
if varNum+n==len(listeVariable):
break
j=deb
k=fin+1
            else: # no conditional block around the tag
                i=j+len(valeur) # update start position for the next tag search
            # compute the position shift for the following variables
            decalage=j+len(valeur)-k
            # substitute the variable in the output
fichier=fichier[:j]+valeur+fichier[k:]
varNum+=n
## #actualise la position de toutes les variables
## for elt in dictPos.keys():
## if dictPos[elt]>=k:
## dictPos[elt]+=decalage
        # write out the generated file
print(ligne['nom_fichier'])
fCree=open(ligne['nom_fichier'],'w')
fCree.write(fichier)
fCree.close()
|
lightcn/odoo
|
refs/heads/8.0
|
openerp/addons/test_convert/__openerp__.py
|
437
|
{
'name': 'test_convert',
'description': "Data for xml conversion tests",
'version': '0.0.1',
}
|
eerimoq/pumbaa
|
refs/heads/master
|
examples/ssl_client/client.py
|
6
|
#!/usr/bin/env python3
import sys
import socket
import ssl
import pprint
server_sock = socket.socket()
server_sock.connect((sys.argv[1], 10023))
# Require a certificate from the server. We used a self-signed
# certificate so here ca_certs must be the server certificate itself.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations("server.crt")
ssl_server_sock = context.wrap_socket(server_sock)
print(repr(ssl_server_sock.getpeername()))
print(ssl_server_sock.cipher())
print(pprint.pformat(ssl_server_sock.getpeercert()))
print('write: Hello!')
ssl_server_sock.send(b"Hello!")
print('read:', ssl_server_sock.recv(8))
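# Illustrative sketch (not part of the original example): the matching server side
# for the client above, assuming the same self-signed pair server.crt/server.key
# and port 10023. File names and the reply payload are assumptions.
def _server_side_sketch():
    listener = socket.socket()
    listener.bind(('', 10023))
    listener.listen(1)
    # Present the self-signed certificate that the client loads as its CA.
    server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    server_context.verify_mode = ssl.CERT_NONE
    server_context.load_cert_chain(certfile="server.crt", keyfile="server.key")
    conn, _addr = listener.accept()
    ssl_conn = server_context.wrap_socket(conn, server_side=True)
    print('read:', ssl_conn.recv(8))   # expects b"Hello!" from the client
    ssl_conn.send(b"Goodbye!")
    ssl_conn.close()
    listener.close()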
|
Dave667/service
|
refs/heads/master
|
plugin.video.planeta-online.tv/resources/lib/addon.py
|
2
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2011 XBMC-Russia, HD-lab Team, E-mail: dev@hd-lab.ru
# Writer (c) 2011, Kostynoy S.A., E-mail: seppius2@gmail.com
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/licenses/gpl.html
import sys, xbmc, xbmcgui, xbmcplugin, xbmcaddon
import os, urllib, urllib2, re
import html5lib
from html5lib import treebuilders
__addon__ = xbmcaddon.Addon( id = 'plugin.video.planeta-online.tv' )
__language__ = __addon__.getLocalizedString
addon_id = __addon__.getAddonInfo( 'id' )
#addon_path = __addon__.getAddonInfo( 'path' )
addon_name = __addon__.getAddonInfo( 'name' )
addon_version = __addon__.getAddonInfo( 'version' )
#addon_author = __addon__.getAddonInfo( 'author' )
#icon = xbmc.translatePath( os.path.join( addon_path, 'icon.png' ) )
xbmc.log('[%s] Starting version [%s] "%s"' % (addon_id, addon_version, addon_name), 1)
h = int(sys.argv[1])
def GET(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Opera/9.80 (X11; Linux i686; U; ru) Presto/2.7.62 Version/11.00')
req.add_header('Accept', 'text/html, application/xml, application/xhtml+xml, */*')
req.add_header('Accept-Language', 'ru,en;q=0.9')
response = urllib2.urlopen(req)
link=response.read()
response.close()
return link
def getitems(params):
http = GET('http://www.planeta-online.tv/')
if http != None:
DT = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder('dom')).parse(http)
for div0 in DT.getElementsByTagName('div'):
if div0.getAttribute('id') == 'mainChannelList':
for div1 in div0.getElementsByTagName('a'):
if div1.getAttribute('class') == 'tip_trigger chA':
title = None
img = None
for child in div1.childNodes:
if child.nodeType == child.TEXT_NODE:
title = child.data.encode('utf8')
else:
for imgs in child.getElementsByTagName('img'):
img = 'http://www.planeta-online.tv%s' % imgs.getAttribute('src').encode('utf8')
if title and img:
uri = '%s?%s' % (sys.argv[0], urllib.urlencode({'func':'play', 'href':div1.getAttribute('href')}))
i = xbmcgui.ListItem(title, iconImage = img, thumbnailImage = img)
i.setProperty('IsPlayable', 'true')
xbmcplugin.addDirectoryItem(h, uri, i)
xbmcplugin.endOfDirectory(h)
def play(params):
url = 'http://www.planeta-online.tv%s' % params['href']
response = GET(url)
SWFObject = 'http://www.planeta-online.tv%s' % re.compile('embedSWF\("(.*?)"').findall(response)[0]
flashvars = re.compile('.*?:"(.*?)"').findall(re.compile('var flashvars = \{(.*?)\};', re.DOTALL).findall(response)[0].replace(',',',\n'))
for fval in flashvars:
if fval.startswith('rtmp://'):
xbmcplugin.setResolvedUrl(h, True, xbmcgui.ListItem(path = '%s swfurl=%s pageUrl=%s swfVfy=True Live=True' % (fval, SWFObject, url)))
return True
def get_params(paramstring):
param=[]
if len(paramstring)>=2:
params=paramstring
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
if len(param) > 0:
for cur in param:
param[cur] = urllib.unquote_plus(param[cur])
return param
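# Illustrative alternative (not part of the original addon): the standard library
# can do the same query-string parsing; the helper below is only an example and
# is not wired into addon_main().
def get_params_stdlib(paramstring):
    import urlparse
    # parse_qs returns a list per key; keep the first value to mirror get_params()
    parsed = urlparse.parse_qs(paramstring.lstrip('?'))
    return dict((key, values[0]) for key, values in parsed.items())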
def addon_main():
params = get_params(sys.argv[2])
try:
func = params['func']
except:
func = None
xbmc.log( '[%s]: Primary input' % addon_id, 1 )
getitems(params)
if func != None:
try: pfunc = globals()[func]
except:
pfunc = None
xbmc.log( '[%s]: Function "%s" not found' % (addon_id, func), 4 )
showMessage('Internal addon error', 'Function "%s" not found' % func, 2000)
        if pfunc: pfunc(params)
addon_main()
|
nwinkler/thefuck
|
refs/heads/master
|
tests/rules/test_no_command.py
|
2
|
from mock import patch, Mock
from thefuck.rules.no_command import match, get_new_command
def test_match():
with patch('thefuck.rules.no_command._get_all_callables',
return_value=['vim', 'apt-get']):
assert match(Mock(stderr='vom: not found', script='vom file.py'), None)
assert not match(Mock(stderr='qweqwe: not found', script='qweqwe'), None)
assert not match(Mock(stderr='some text', script='vom file.py'), None)
def test_get_new_command():
with patch('thefuck.rules.no_command._get_all_callables',
return_value=['vim', 'apt-get']):
assert get_new_command(
Mock(stderr='vom: not found',
script='vom file.py'),
None) == 'vim file.py'
|
enjaz/enjaz
|
refs/heads/master
|
studentvoice/admin.py
|
2
|
# -*- coding: utf-8 -*-
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User, Permission, Group
from django.core.exceptions import ObjectDoesNotExist
from studentvoice.models import Voice, Vote, Recipient, Response, View
class ResponseInline(admin.StackedInline):
model = Response
max_num = 1
extra = 0
class VoiceInline(admin.StackedInline):
model = Voice
verbose_name = u"تعليق"
verbose_name_plural = u"التعليقات"
extra = 0
class ResponseFilter(admin.SimpleListFilter):
title = u"الردود"
parameter_name = 'responses'
def lookups(self, request, model_admin):
return (
('1', u'له رد'),
('0', u'ليس له رد'),
)
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(response__isnull=False)
if self.value() == '0':
return queryset.exclude(response__isnull=False)
class PublishedFilter(admin.SimpleListFilter):
title = u"النشر"
parameter_name = 'published'
def lookups(self, request, model_admin):
return (
('P', u'منشور'),
('U', u'لم يراجع بعد'),
('D', u'محذوف'),
)
def queryset(self, request, queryset):
if self.value() == 'P':
return queryset.filter(is_published=True)
elif self.value() == 'U':
return queryset.filter(is_published=None)
elif self.value() == 'D':
return queryset.filter(is_published=False)
class RevisionFilter(admin.SimpleListFilter):
"""For those that were submitted by an anonymous user, and pending
revision."""
title = u"التعليقات"
parameter_name = 'comment'
def lookups(self, request, model_admin):
return (
('1', u'تعليقات'),
('0', u'مواضيع'),
)
def queryset(self, request, queryset):
if self.value() == '1':
return queryset.filter(parent__isnull=False)
if self.value() == '0':
return queryset.filter(parent__isnull=True)
class VoiceAdmin(admin.ModelAdmin):
list_display = ('get_title', 'get_full_name', 'submission_date', 'edit_date',
'is_published', 'is_editable', 'score',
'number_of_views', 'is_reported')
list_filter = [ResponseFilter, PublishedFilter, RevisionFilter]
inlines = [ResponseInline, VoiceInline]
def get_title(self, obj):
if obj.title:
return obj.title
else:
greatest_parent = obj.get_greatest_parent()
return u"تعليق على %s" % greatest_parent.title
get_title.short_description = u"العنوان"
def get_full_name(self, obj):
try:
return obj.submitter.common_profile.get_ar_full_name()
except ObjectDoesNotExist:
return obj.submitter.username
get_full_name.short_description = u"المستخدم"
admin.site.register(Voice, VoiceAdmin)
admin.site.register(Recipient)
admin.site.register(Response)
admin.site.register(Vote)
admin.site.register(View)
|
fitermay/intellij-community
|
refs/heads/master
|
python/testData/refactoring/extractmethod/NameCollisionFile.before.py
|
83
|
def hello():
pass
<selection>print("Hello")</selection>
|
bstando/limbo-android
|
refs/heads/master
|
jni/qemu/roms/seabios/tools/checkstack.py
|
42
|
#!/usr/bin/env python
# Script that tries to find how much stack space each function in an
# object is using.
#
# Copyright (C) 2008 Kevin O'Connor <kevin@koconnor.net>
#
# This file may be distributed under the terms of the GNU GPLv3 license.
# Usage:
# objdump -m i386 -M i8086 -M suffix -d out/rom16.o | tools/checkstack.py
import sys
import re
# Functions that change stacks
STACKHOP = ['__send_disk_op']
# List of functions we can assume are never called.
#IGNORE = ['panic', '__dprintf']
IGNORE = ['panic']
OUTPUTDESC = """
#funcname1[preamble_stack_usage,max_usage_with_callers]:
# insn_addr:called_function [usage_at_call_point+caller_preamble,total_usage]
#
#funcname2[p,m,max_usage_to_yield_point]:
# insn_addr:called_function [u+c,t,usage_to_yield_point]
"""
# Find out maximum stack usage for a function
def calcmaxstack(funcs, funcaddr):
info = funcs[funcaddr]
# Find max of all nested calls.
maxusage = info[1]
maxyieldusage = doesyield = 0
if info[3] is not None:
maxyieldusage = info[3]
doesyield = 1
info[2] = maxusage
info[4] = info[3]
seenbefore = {}
totcalls = 0
for insnaddr, calladdr, usage in info[6]:
callinfo = funcs.get(calladdr)
if callinfo is None:
continue
if callinfo[2] is None:
calcmaxstack(funcs, calladdr)
if callinfo[0] not in seenbefore:
seenbefore[callinfo[0]] = 1
totcalls += 1 + callinfo[5]
funcnameroot = callinfo[0].split('.')[0]
if funcnameroot in IGNORE:
# This called function is ignored - don't contribute it to
# the max stack.
continue
if funcnameroot in STACKHOP:
if usage > maxusage:
maxusage = usage
if callinfo[4] is not None:
doesyield = 1
if usage > maxyieldusage:
maxyieldusage = usage
continue
totusage = usage + callinfo[2]
if totusage > maxusage:
maxusage = totusage
if callinfo[4] is not None:
doesyield = 1
totyieldusage = usage + callinfo[4]
if totyieldusage > maxyieldusage:
maxyieldusage = totyieldusage
info[2] = maxusage
if doesyield:
info[4] = maxyieldusage
info[5] = totcalls
# Try to arrange output so that functions that call each other are
# near each other.
def orderfuncs(funcaddrs, availfuncs):
l = [(availfuncs[funcaddr][5], availfuncs[funcaddr][0], funcaddr)
for funcaddr in funcaddrs if funcaddr in availfuncs]
l.sort()
l.reverse()
out = []
while l:
count, name, funcaddr = l.pop(0)
if funcaddr not in availfuncs:
continue
calladdrs = [calls[1] for calls in availfuncs[funcaddr][6]]
del availfuncs[funcaddr]
out = out + orderfuncs(calladdrs, availfuncs) + [funcaddr]
return out
# Update function info with a found "yield" point.
def noteYield(info, stackusage):
prevyield = info[3]
if prevyield is None or prevyield < stackusage:
info[3] = stackusage
# Update function info with a found "call" point.
def noteCall(info, subfuncs, insnaddr, calladdr, stackusage):
if (calladdr, stackusage) in subfuncs:
# Already noted a nearly identical call - ignore this one.
return
info[6].append((insnaddr, calladdr, stackusage))
subfuncs[(calladdr, stackusage)] = 1
hex_s = r'[0-9a-f]+'
re_func = re.compile(r'^(?P<funcaddr>' + hex_s + r') <(?P<func>.*)>:$')
re_asm = re.compile(
r'^[ ]*(?P<insnaddr>' + hex_s
+ r'):\t.*\t(addr32 )?(?P<insn>.+?)[ ]*((?P<calladdr>' + hex_s
+ r') <(?P<ref>.*)>)?$')
re_usestack = re.compile(
r'^(push[f]?[lw])|(sub.* [$](?P<num>0x' + hex_s + r'),%esp)$')
def calc():
# funcs[funcaddr] = [funcname, basicstackusage, maxstackusage
# , yieldusage, maxyieldusage, totalcalls
# , [(insnaddr, calladdr, stackusage), ...]]
funcs = {-1: ['<indirect>', 0, 0, None, None, 0, []]}
cur = None
atstart = 0
stackusage = 0
# Parse input lines
for line in sys.stdin.readlines():
m = re_func.match(line)
if m is not None:
# Found function
funcaddr = int(m.group('funcaddr'), 16)
funcs[funcaddr] = cur = [m.group('func'), 0, None, None, None, 0, []]
stackusage = 0
atstart = 1
subfuncs = {}
continue
m = re_asm.match(line)
if m is not None:
insn = m.group('insn')
im = re_usestack.match(insn)
if im is not None:
if insn.startswith('pushl') or insn.startswith('pushfl'):
stackusage += 4
continue
elif insn.startswith('pushw') or insn.startswith('pushfw'):
stackusage += 2
continue
stackusage += int(im.group('num'), 16)
if atstart:
if '%esp' in insn or insn.startswith('leal'):
# Still part of initial header
continue
cur[1] = stackusage
atstart = 0
insnaddr = m.group('insnaddr')
calladdr = m.group('calladdr')
if calladdr is None:
if insn.startswith('lcallw'):
noteCall(cur, subfuncs, insnaddr, -1, stackusage + 4)
noteYield(cur, stackusage + 4)
elif insn.startswith('int'):
noteCall(cur, subfuncs, insnaddr, -1, stackusage + 6)
noteYield(cur, stackusage + 6)
elif insn.startswith('sti'):
noteYield(cur, stackusage)
else:
# misc instruction
continue
else:
# Jump or call insn
calladdr = int(calladdr, 16)
ref = m.group('ref')
if '+' in ref:
# Inter-function jump.
pass
elif insn.startswith('j'):
# Tail call
noteCall(cur, subfuncs, insnaddr, calladdr, 0)
elif insn.startswith('calll'):
noteCall(cur, subfuncs, insnaddr, calladdr, stackusage + 4)
else:
print "unknown call", ref
noteCall(cur, subfuncs, insnaddr, calladdr, stackusage)
# Reset stack usage to preamble usage
stackusage = cur[1]
#print "other", repr(line)
# Calculate maxstackusage
for funcaddr, info in funcs.items():
if info[2] is not None:
continue
calcmaxstack(funcs, funcaddr)
# Sort functions for output
funcaddrs = orderfuncs(funcs.keys(), funcs.copy())
# Show all functions
print OUTPUTDESC
for funcaddr in funcaddrs:
name, basicusage, maxusage, yieldusage, maxyieldusage, count, calls = \
funcs[funcaddr]
if maxusage == 0 and maxyieldusage is None:
continue
yieldstr = ""
if maxyieldusage is not None:
yieldstr = ",%d" % maxyieldusage
print "\n%s[%d,%d%s]:" % (name, basicusage, maxusage, yieldstr)
for insnaddr, calladdr, stackusage in calls:
callinfo = funcs.get(calladdr, ("<unknown>", 0, 0, 0, None))
yieldstr = ""
if callinfo[4] is not None:
yieldstr = ",%d" % (stackusage + callinfo[4])
print " %04s:%-40s [%d+%d,%d%s]" % (
insnaddr, callinfo[0], stackusage, callinfo[1]
, stackusage+callinfo[2], yieldstr)
def main():
calc()
if __name__ == '__main__':
main()
|
newemailjdm/scipy
|
refs/heads/master
|
scipy/io/mmio.py
|
63
|
"""
Matrix Market I/O in Python.
See http://math.nist.gov/MatrixMarket/formats.html
for information about the Matrix Market format.
"""
#
# Author: Pearu Peterson <pearu@cens.ioc.ee>
# Created: October, 2004
#
# References:
# http://math.nist.gov/MatrixMarket/
#
from __future__ import division, print_function, absolute_import
import os
import sys
from numpy import (asarray, real, imag, conj, zeros, ndarray, concatenate,
ones, ascontiguousarray, vstack, savetxt, fromfile,
fromstring)
from numpy.compat import asbytes, asstr
from scipy._lib.six import string_types
from scipy.sparse import coo_matrix, isspmatrix
__all__ = ['mminfo', 'mmread', 'mmwrite', 'MMFile']
# -----------------------------------------------------------------------------
def mminfo(source):
"""
Return size and storage parameters from Matrix Market file-like 'source'.
Parameters
----------
source : str or file-like
Matrix Market filename (extension .mtx) or open file-like object
Returns
-------
rows : int
Number of matrix rows.
cols : int
Number of matrix columns.
entries : int
Number of non-zero entries of a sparse matrix
or rows*cols for a dense matrix.
format : str
Either 'coordinate' or 'array'.
field : str
Either 'real', 'complex', 'pattern', or 'integer'.
symmetry : str
Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
"""
return MMFile.info(source)
# -----------------------------------------------------------------------------
def mmread(source):
"""
Reads the contents of a Matrix Market file-like 'source' into a matrix.
Parameters
----------
source : str or file-like
Matrix Market filename (extensions .mtx, .mtz.gz)
or open file-like object.
Returns
-------
a : ndarray or coo_matrix
Dense or sparse matrix depending on the matrix format in the
Matrix Market file.
"""
return MMFile().read(source)
# -----------------------------------------------------------------------------
def mmwrite(target, a, comment='', field=None, precision=None, symmetry=None):
"""
Writes the sparse or dense array `a` to Matrix Market file-like `target`.
Parameters
----------
target : str or file-like
Matrix Market filename (extension .mtx) or open file-like object.
a : array like
Sparse or dense 2D array.
comment : str, optional
Comments to be prepended to the Matrix Market file.
field : None or str, optional
Either 'real', 'complex', 'pattern', or 'integer'.
precision : None or int, optional
Number of digits to display for real or complex values.
symmetry : None or str, optional
Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
If symmetry is None the symmetry type of 'a' is determined by its
values.
"""
MMFile().write(target, a, comment, field, precision, symmetry)
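# Illustrative usage sketch (not part of the original module): exercises the three
# helpers above. The file name 'example' is an arbitrary assumption; mmwrite appends
# the '.mtx' extension when it is missing.
def _mmio_usage_sketch():
    a = coo_matrix([[1.5, 0.0], [0.0, 2.5]])       # coo_matrix is imported above
    mmwrite('example', a)                          # writes 'example.mtx'
    rows, cols, entries, fmt, field, symm = mminfo('example.mtx')
    b = mmread('example.mtx')                      # coo_matrix for coordinate files
    return (rows, cols, entries, fmt, field, symm), b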
###############################################################################
class MMFile (object):
__slots__ = ('_rows',
'_cols',
'_entries',
'_format',
'_field',
'_symmetry')
@property
def rows(self):
return self._rows
@property
def cols(self):
return self._cols
@property
def entries(self):
return self._entries
@property
def format(self):
return self._format
@property
def field(self):
return self._field
@property
def symmetry(self):
return self._symmetry
@property
def has_symmetry(self):
return self._symmetry in (self.SYMMETRY_SYMMETRIC,
self.SYMMETRY_SKEW_SYMMETRIC,
self.SYMMETRY_HERMITIAN)
# format values
FORMAT_COORDINATE = 'coordinate'
FORMAT_ARRAY = 'array'
FORMAT_VALUES = (FORMAT_COORDINATE, FORMAT_ARRAY)
@classmethod
def _validate_format(self, format):
if format not in self.FORMAT_VALUES:
raise ValueError('unknown format type %s, must be one of %s' %
(format, self.FORMAT_VALUES))
# field values
FIELD_INTEGER = 'integer'
FIELD_REAL = 'real'
FIELD_COMPLEX = 'complex'
FIELD_PATTERN = 'pattern'
FIELD_VALUES = (FIELD_INTEGER, FIELD_REAL, FIELD_COMPLEX, FIELD_PATTERN)
@classmethod
def _validate_field(self, field):
if field not in self.FIELD_VALUES:
raise ValueError('unknown field type %s, must be one of %s' %
(field, self.FIELD_VALUES))
# symmetry values
SYMMETRY_GENERAL = 'general'
SYMMETRY_SYMMETRIC = 'symmetric'
SYMMETRY_SKEW_SYMMETRIC = 'skew-symmetric'
SYMMETRY_HERMITIAN = 'hermitian'
SYMMETRY_VALUES = (SYMMETRY_GENERAL, SYMMETRY_SYMMETRIC,
SYMMETRY_SKEW_SYMMETRIC, SYMMETRY_HERMITIAN)
@classmethod
def _validate_symmetry(self, symmetry):
if symmetry not in self.SYMMETRY_VALUES:
raise ValueError('unknown symmetry type %s, must be one of %s' %
(symmetry, self.SYMMETRY_VALUES))
DTYPES_BY_FIELD = {FIELD_INTEGER: 'i',
FIELD_REAL: 'd',
FIELD_COMPLEX: 'D',
FIELD_PATTERN: 'd'}
# -------------------------------------------------------------------------
@staticmethod
def reader():
pass
# -------------------------------------------------------------------------
@staticmethod
def writer():
pass
# -------------------------------------------------------------------------
@classmethod
def info(self, source):
"""
Return size, storage parameters from Matrix Market file-like 'source'.
Parameters
----------
source : str or file-like
Matrix Market filename (extension .mtx) or open file-like object
Returns
-------
rows : int
Number of matrix rows.
cols : int
Number of matrix columns.
entries : int
Number of non-zero entries of a sparse matrix
or rows*cols for a dense matrix.
format : str
Either 'coordinate' or 'array'.
field : str
Either 'real', 'complex', 'pattern', or 'integer'.
symmetry : str
Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
"""
stream, close_it = self._open(source)
try:
# read and validate header line
line = stream.readline()
mmid, matrix, format, field, symmetry = \
[asstr(part.strip()) for part in line.split()]
if not mmid.startswith('%%MatrixMarket'):
raise ValueError('source is not in Matrix Market format')
if not matrix.lower() == 'matrix':
raise ValueError("Problem reading file header: " + line)
# http://math.nist.gov/MatrixMarket/formats.html
if format.lower() == 'array':
format = self.FORMAT_ARRAY
elif format.lower() == 'coordinate':
format = self.FORMAT_COORDINATE
# skip comments
while line.startswith(b'%'):
line = stream.readline()
line = line.split()
if format == self.FORMAT_ARRAY:
if not len(line) == 2:
raise ValueError("Header line not of length 2: " + line)
rows, cols = map(int, line)
entries = rows * cols
else:
if not len(line) == 3:
raise ValueError("Header line not of length 3: " + line)
rows, cols, entries = map(int, line)
return (rows, cols, entries, format, field.lower(),
symmetry.lower())
finally:
if close_it:
stream.close()
# -------------------------------------------------------------------------
@staticmethod
def _open(filespec, mode='rb'):
""" Return an open file stream for reading based on source.
If source is a file name, open it (after trying to find it with mtx and
gzipped mtx extensions). Otherwise, just return source.
Parameters
----------
filespec : str or file-like
String giving file name or file-like object
mode : str, optional
Mode with which to open file, if `filespec` is a file name.
Returns
-------
fobj : file-like
Open file-like object.
close_it : bool
True if the calling function should close this file when done,
false otherwise.
"""
close_it = False
if isinstance(filespec, string_types):
close_it = True
# open for reading
if mode[0] == 'r':
# determine filename plus extension
if not os.path.isfile(filespec):
if os.path.isfile(filespec+'.mtx'):
filespec = filespec + '.mtx'
elif os.path.isfile(filespec+'.mtx.gz'):
filespec = filespec + '.mtx.gz'
elif os.path.isfile(filespec+'.mtx.bz2'):
filespec = filespec + '.mtx.bz2'
# open filename
if filespec.endswith('.gz'):
import gzip
stream = gzip.open(filespec, mode)
elif filespec.endswith('.bz2'):
import bz2
stream = bz2.BZ2File(filespec, 'rb')
else:
stream = open(filespec, mode)
# open for writing
else:
if filespec[-4:] != '.mtx':
filespec = filespec + '.mtx'
stream = open(filespec, mode)
else:
stream = filespec
return stream, close_it
# -------------------------------------------------------------------------
@staticmethod
def _get_symmetry(a):
m, n = a.shape
if m != n:
return MMFile.SYMMETRY_GENERAL
issymm = True
isskew = True
isherm = a.dtype.char in 'FD'
# sparse input
if isspmatrix(a):
# check if number of nonzero entries of lower and upper triangle
# matrix are equal
a = a.tocoo()
(row, col) = a.nonzero()
if (row < col).sum() != (row > col).sum():
return MMFile.SYMMETRY_GENERAL
# define iterator over symmetric pair entries
a = a.todok()
def symm_iterator():
for ((i, j), aij) in a.items():
if i > j:
aji = a[j, i]
yield (aij, aji)
# non-sparse input
else:
# define iterator over symmetric pair entries
def symm_iterator():
for j in range(n):
for i in range(j+1, n):
aij, aji = a[i][j], a[j][i]
yield (aij, aji)
# check for symmetry
for (aij, aji) in symm_iterator():
if issymm and aij != aji:
issymm = False
if isskew and aij != -aji:
isskew = False
if isherm and aij != conj(aji):
isherm = False
if not (issymm or isskew or isherm):
break
# return symmetry value
if issymm:
return MMFile.SYMMETRY_SYMMETRIC
if isskew:
return MMFile.SYMMETRY_SKEW_SYMMETRIC
if isherm:
return MMFile.SYMMETRY_HERMITIAN
return MMFile.SYMMETRY_GENERAL
# -------------------------------------------------------------------------
@staticmethod
def _field_template(field, precision):
return {MMFile.FIELD_REAL: '%%.%ie\n' % precision,
MMFile.FIELD_INTEGER: '%i\n',
MMFile.FIELD_COMPLEX: '%%.%ie %%.%ie\n' %
(precision, precision)
}.get(field, None)
# -------------------------------------------------------------------------
def __init__(self, **kwargs):
self._init_attrs(**kwargs)
# -------------------------------------------------------------------------
def read(self, source):
"""
Reads the contents of a Matrix Market file-like 'source' into a matrix.
Parameters
----------
source : str or file-like
Matrix Market filename (extensions .mtx, .mtz.gz)
or open file object.
Returns
-------
a : ndarray or coo_matrix
Dense or sparse matrix depending on the matrix format in the
Matrix Market file.
"""
stream, close_it = self._open(source)
try:
self._parse_header(stream)
return self._parse_body(stream)
finally:
if close_it:
stream.close()
# -------------------------------------------------------------------------
def write(self, target, a, comment='', field=None, precision=None,
symmetry=None):
"""
Writes sparse or dense array `a` to Matrix Market file-like `target`.
Parameters
----------
target : str or file-like
Matrix Market filename (extension .mtx) or open file-like object.
a : array like
Sparse or dense 2D array.
comment : str, optional
Comments to be prepended to the Matrix Market file.
field : None or str, optional
Either 'real', 'complex', 'pattern', or 'integer'.
precision : None or int, optional
Number of digits to display for real or complex values.
symmetry : None or str, optional
Either 'general', 'symmetric', 'skew-symmetric', or 'hermitian'.
If symmetry is None the symmetry type of 'a' is determined by its
values.
"""
stream, close_it = self._open(target, 'wb')
try:
self._write(stream, a, comment, field, precision, symmetry)
finally:
if close_it:
stream.close()
else:
stream.flush()
# -------------------------------------------------------------------------
def _init_attrs(self, **kwargs):
"""
Initialize each attributes with the corresponding keyword arg value
or a default of None
"""
attrs = self.__class__.__slots__
public_attrs = [attr[1:] for attr in attrs]
invalid_keys = set(kwargs.keys()) - set(public_attrs)
if invalid_keys:
raise ValueError('''found %s invalid keyword arguments, please only
use %s''' % (tuple(invalid_keys),
public_attrs))
for attr in attrs:
setattr(self, attr, kwargs.get(attr[1:], None))
# -------------------------------------------------------------------------
def _parse_header(self, stream):
rows, cols, entries, format, field, symmetry = \
self.__class__.info(stream)
self._init_attrs(rows=rows, cols=cols, entries=entries, format=format,
field=field, symmetry=symmetry)
# -------------------------------------------------------------------------
def _parse_body(self, stream):
rows, cols, entries, format, field, symm = (self.rows, self.cols,
self.entries, self.format,
self.field, self.symmetry)
try:
from scipy.sparse import coo_matrix
except ImportError:
coo_matrix = None
dtype = self.DTYPES_BY_FIELD.get(field, None)
has_symmetry = self.has_symmetry
is_complex = field == self.FIELD_COMPLEX
is_skew = symm == self.SYMMETRY_SKEW_SYMMETRIC
is_herm = symm == self.SYMMETRY_HERMITIAN
is_pattern = field == self.FIELD_PATTERN
if format == self.FORMAT_ARRAY:
a = zeros((rows, cols), dtype=dtype)
line = 1
i, j = 0, 0
while line:
line = stream.readline()
if not line or line.startswith(b'%'):
continue
if is_complex:
aij = complex(*map(float, line.split()))
else:
aij = float(line)
a[i, j] = aij
if has_symmetry and i != j:
if is_skew:
a[j, i] = -aij
elif is_herm:
a[j, i] = conj(aij)
else:
a[j, i] = aij
if i < rows-1:
i = i + 1
else:
j = j + 1
if not has_symmetry:
i = 0
else:
i = j
if not (i in [0, j] and j == cols):
raise ValueError("Parse error, did not read all lines.")
elif format == self.FORMAT_COORDINATE and coo_matrix is None:
# Read sparse matrix to dense when coo_matrix is not available.
a = zeros((rows, cols), dtype=dtype)
line = 1
k = 0
while line:
line = stream.readline()
if not line or line.startswith(b'%'):
continue
l = line.split()
i, j = map(int, l[:2])
i, j = i-1, j-1
if is_complex:
aij = complex(*map(float, l[2:]))
else:
aij = float(l[2])
a[i, j] = aij
if has_symmetry and i != j:
if is_skew:
a[j, i] = -aij
elif is_herm:
a[j, i] = conj(aij)
else:
a[j, i] = aij
k = k + 1
if not k == entries:
ValueError("Did not read all entries")
elif format == self.FORMAT_COORDINATE:
# Read sparse COOrdinate format
if entries == 0:
# empty matrix
return coo_matrix((rows, cols), dtype=dtype)
try:
if not _is_fromfile_compatible(stream):
flat_data = fromstring(stream.read(), sep=' ')
else:
# fromfile works for normal files
flat_data = fromfile(stream, sep=' ')
except Exception:
# fallback - fromfile fails for some file-like objects
flat_data = fromstring(stream.read(), sep=' ')
# TODO use iterator (e.g. xreadlines) to avoid reading
# the whole file into memory
if is_pattern:
flat_data = flat_data.reshape(-1, 2)
I = ascontiguousarray(flat_data[:, 0], dtype='intc')
J = ascontiguousarray(flat_data[:, 1], dtype='intc')
V = ones(len(I), dtype='int8') # filler
elif is_complex:
flat_data = flat_data.reshape(-1, 4)
I = ascontiguousarray(flat_data[:, 0], dtype='intc')
J = ascontiguousarray(flat_data[:, 1], dtype='intc')
V = ascontiguousarray(flat_data[:, 2], dtype='complex')
V.imag = flat_data[:, 3]
else:
flat_data = flat_data.reshape(-1, 3)
I = ascontiguousarray(flat_data[:, 0], dtype='intc')
J = ascontiguousarray(flat_data[:, 1], dtype='intc')
V = ascontiguousarray(flat_data[:, 2], dtype='float')
I -= 1 # adjust indices (base 1 -> base 0)
J -= 1
if has_symmetry:
mask = (I != J) # off diagonal mask
od_I = I[mask]
od_J = J[mask]
od_V = V[mask]
I = concatenate((I, od_J))
J = concatenate((J, od_I))
if is_skew:
od_V *= -1
elif is_herm:
od_V = od_V.conjugate()
V = concatenate((V, od_V))
a = coo_matrix((V, (I, J)), shape=(rows, cols), dtype=dtype)
else:
raise NotImplementedError(format)
return a
# ------------------------------------------------------------------------
def _write(self, stream, a, comment='', field=None, precision=None,
symmetry=None):
if isinstance(a, list) or isinstance(a, ndarray) or \
isinstance(a, tuple) or hasattr(a, '__array__'):
rep = self.FORMAT_ARRAY
a = asarray(a)
if len(a.shape) != 2:
raise ValueError('Expected 2 dimensional array')
rows, cols = a.shape
if field is not None:
if field == self.FIELD_INTEGER:
a = a.astype('i')
elif field == self.FIELD_REAL:
if a.dtype.char not in 'fd':
a = a.astype('d')
elif field == self.FIELD_COMPLEX:
if a.dtype.char not in 'FD':
a = a.astype('D')
else:
if not isspmatrix(a):
raise ValueError('unknown matrix type: %s' % type(a))
rep = 'coordinate'
rows, cols = a.shape
typecode = a.dtype.char
if precision is None:
if typecode in 'fF':
precision = 8
else:
precision = 16
if field is None:
kind = a.dtype.kind
if kind == 'i':
field = 'integer'
elif kind == 'f':
field = 'real'
elif kind == 'c':
field = 'complex'
else:
raise TypeError('unexpected dtype kind ' + kind)
if symmetry is None:
symmetry = self._get_symmetry(a)
# validate rep, field, and symmetry
self.__class__._validate_format(rep)
self.__class__._validate_field(field)
self.__class__._validate_symmetry(symmetry)
# write initial header line
stream.write(asbytes('%%MatrixMarket matrix {0} {1} {2}\n'.format(rep,
field, symmetry)))
# write comments
for line in comment.split('\n'):
stream.write(asbytes('%%%s\n' % (line)))
template = self._field_template(field, precision)
# write dense format
if rep == self.FORMAT_ARRAY:
# write shape spec
stream.write(asbytes('%i %i\n' % (rows, cols)))
if field in (self.FIELD_INTEGER, self.FIELD_REAL):
if symmetry == self.SYMMETRY_GENERAL:
for j in range(cols):
for i in range(rows):
stream.write(asbytes(template % a[i, j]))
else:
for j in range(cols):
for i in range(j, rows):
stream.write(asbytes(template % a[i, j]))
elif field == self.FIELD_COMPLEX:
if symmetry == self.SYMMETRY_GENERAL:
for j in range(cols):
for i in range(rows):
aij = a[i, j]
stream.write(asbytes(template % (real(aij),
imag(aij))))
else:
for j in range(cols):
for i in range(j, rows):
aij = a[i, j]
stream.write(asbytes(template % (real(aij),
imag(aij))))
elif field == self.FIELD_PATTERN:
                raise ValueError('pattern type inconsistent with dense format')
else:
raise TypeError('Unknown field type %s' % field)
# write sparse format
else:
coo = a.tocoo() # convert to COOrdinate format
# if symmetry format used, remove values above main diagonal
if symmetry != self.SYMMETRY_GENERAL:
lower_triangle_mask = coo.row >= coo.col
coo = coo_matrix((coo.data[lower_triangle_mask],
(coo.row[lower_triangle_mask],
coo.col[lower_triangle_mask])),
shape=coo.shape)
# write shape spec
stream.write(asbytes('%i %i %i\n' % (rows, cols, coo.nnz)))
# make indices and data array
if field == self.FIELD_PATTERN:
IJV = vstack((coo.row, coo.col)).T
elif field in [self.FIELD_INTEGER, self.FIELD_REAL]:
IJV = vstack((coo.row, coo.col, coo.data)).T
elif field == self.FIELD_COMPLEX:
IJV = vstack((coo.row, coo.col, coo.data.real,
coo.data.imag)).T
else:
raise TypeError('Unknown field type %s' % field)
IJV[:, :2] += 1 # change base 0 -> base 1
# formats for row indices, col indices and data columns
fmt = ('%i', '%i') + ('%%.%dg' % precision,) * (IJV.shape[1]-2)
# save to file
savetxt(stream, IJV, fmt=fmt)
def _is_fromfile_compatible(stream):
"""
Check whether `stream` is compatible with numpy.fromfile.
Passing a gzipped file object to ``fromfile/fromstring`` doesn't work with
Python3.
"""
if sys.version_info[0] < 3:
return True
bad_cls = []
try:
import gzip
bad_cls.append(gzip.GzipFile)
except ImportError:
pass
try:
import bz2
bad_cls.append(bz2.BZ2File)
except ImportError:
pass
bad_cls = tuple(bad_cls)
return not isinstance(stream, bad_cls)
# -----------------------------------------------------------------------------
if __name__ == '__main__':
import time
for filename in sys.argv[1:]:
print('Reading', filename, '...', end=' ')
sys.stdout.flush()
t = time.time()
mmread(filename)
print('took %s seconds' % (time.time() - t))
|
LegendaryPoro/FTL13
|
refs/heads/master
|
tools/mapmerge/map_conflict_fixer.py
|
61
|
import map_helpers
import sys
import os
import time
def main(relative_root):
git_version = map_helpers.run_shell_command("git version")
if not git_version:
print("ERROR: Failed to run git. Make sure it is installed and in your PATH.")
return False
print("--- DISCLAIMER ---")
print("This script is in a testing phase. Verify all the results yourself to make sure you got what you expected. Make sure to read the readme to learn how to use this.")
input("Press Enter to GO\n")
file_conflicts = map_helpers.run_shell_command("git diff --name-only --diff-filter=U").split("\n")
map_conflicts = [path for path in file_conflicts if path[len(path)-3::] == "dmm"]
for i in range(0, len(map_conflicts)):
print("[{}]: {}".format(i, map_conflicts[i]))
selection = input("Choose maps you want to fix (example: 1,3-5,12):\n")
selection = selection.replace(" ", "")
selection = selection.split(",")
#shamelessly copied from mapmerger cli
valid_indices = list()
for m in selection:
index_range = m.split("-")
if len(index_range) == 1:
index = map_helpers.string_to_num(index_range[0])
if index >= 0 and index < len(map_conflicts):
valid_indices.append(index)
elif len(index_range) == 2:
index0 = map_helpers.string_to_num(index_range[0])
index1 = map_helpers.string_to_num(index_range[1])
if index0 >= 0 and index0 <= index1 and index1 < len(map_conflicts):
valid_indices.extend(range(index0, index1 + 1))
if not len(valid_indices):
print("No map selected, exiting.")
sys.exit()
print("Attempting to fix the following maps:")
for i in valid_indices:
print(map_conflicts[i])
marker = None
priority = 0
print("\nFixing modes:")
print("[{}]: Dictionary conflict fixing mode".format(map_helpers.MAP_FIX_DICTIONARY))
print("[{}]: Full map conflict fixing mode".format(map_helpers.MAP_FIX_FULL))
mode = map_helpers.string_to_num(input("Select fixing mode [Dictionary]: "))
if mode != map_helpers.MAP_FIX_FULL:
mode = map_helpers.MAP_FIX_DICTIONARY
print("DICTIONARY mode selected.")
else:
marker = input("FULL mode selected. Input a marker [/obj/effect/debugging/marker]: ")
if not marker:
marker = "/obj/effect/debugging/marker"
print("Marker selected: {}".format(marker))
print("\nVersion priorities:")
print("[{}]: Your version".format(map_helpers.MAP_FIX_PRIORITY_OURS))
print("[{}]: Their version".format(map_helpers.MAP_FIX_PRIORITY_THEIRS))
priority = map_helpers.string_to_num(input("Select priority [Yours]: "))
if priority != map_helpers.MAP_FIX_PRIORITY_THEIRS:
priority = map_helpers.MAP_FIX_PRIORITY_OURS
print("Your version will be prioritized.")
else:
print("Their version will be prioritized.")
ed = "FIXED" if mode == map_helpers.MAP_FIX_DICTIONARY else "MARKED"
ing = "FIXING" if mode == map_helpers.MAP_FIX_DICTIONARY else "MARKING"
print("\nMaps will be converted to TGM.")
print("Writing maps to 'file_path/file_name.fixed.dmm'. Please verify the results before commiting.")
if mode == map_helpers.MAP_FIX_FULL:
print("After editing the marked maps, run them through the map merger!")
input("Press Enter to start.")
print(".")
time.sleep(0.3)
print(".")
for i in valid_indices:
path = map_conflicts[i]
print("{}: {}".format(ing, path))
ours_map_raw_text = map_helpers.run_shell_command("git show HEAD:{}".format(path))
theirs_map_raw_text = map_helpers.run_shell_command("git show MERGE_HEAD:{}".format(path))
common_ancestor_hash = map_helpers.run_shell_command("git merge-base HEAD MERGE_HEAD").strip()
base_map_raw_text = map_helpers.run_shell_command("git show {}:{}".format(common_ancestor_hash, path))
ours_map = map_helpers.parse_map(ours_map_raw_text)
theirs_map = map_helpers.parse_map(theirs_map_raw_text)
base_map = map_helpers.parse_map(base_map_raw_text)
if map_helpers.fix_map_git_conflicts(base_map, ours_map, theirs_map, mode, marker, priority, relative_root+path):
print("{}: {}".format(ed, path))
print(".")
main(sys.argv[1])
|
vest-thermostat/server
|
refs/heads/master
|
vest/home/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
dunkenj/smpy
|
refs/heads/master
|
scripts/fitting.py
|
1
|
import numpy as np
import array
import os, sys
import re
import time
import multiprocessing, logging
# mpl = multiprocessing.log_to_stderr()
# mpl.setLevel(logging.INFO)
import h5py
import logging
from astropy.table import Table, Column
from astropy import units as u
from scipy.interpolate import griddata
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-p","--params", type=str,
help = "Parameter file")
parser.add_argument("-q", "--quiet", help = "Suppress extra outputs",
action = "store_true")
args = parser.parse_args()
quiet = args.quiet
params_root = re.split(".py", args.params)[0]
if os.path.isfile(params_root+".pyc"):
os.remove(params_root+".pyc")
import importlib
try:
params = importlib.import_module(params_root)
print('Successfully loaded "{}" as params'.format(args.params))
#reload(params)
except:
print('Failed to load "{}" as params'.format(args.params))
raise
if quiet:
quietprint = lambda *a: None
else:
def quietprint(*args):
for arg in args:
print (arg)
# Fitting function definition for later use by Processess
def galaxyFit(inputQueue, printQueue, printlock):
for gal in iter(inputQueue.get, 'STOP'):
j = np.argmin(np.abs(z-zobs[gal])) # Find closest model redshift
flux_obs = obs[gal,:]
flux_err = obs_err[gal,:]
#flux_obs[fo <= 0.] = 0. # Set negative fluxes to zero
        I = np.where(flux_err > 0.)[0] # indices of bands that do have observations (positive errors)
if len(I) == 0:
if include_rest:
M_scaled = np.ones(len(fo)) * -99.
restframe_output = ' '.join(M_scaled.astype('str'))
output_string = '{} {} {} {} {} {} {} {}' \
' {} {} {} {} {} {} {} {} {}'.format(gal+1,ID[gal],zobs[gal],-99,-99,-99,-99,-99,-99, -99, -99,-99,len(I),-99,z[j],restframe_output,'\n')
else:
output_string = '{} {} {} {} {} {} {} {} {} {} {} {} {} {} {}'.format(gal+1,ID[gal],zobs[gal],-99,-99,-99,-99,-99,-99,-99, -99,-99,len(I),-99,'\n')
printQueue.put(output_string)
continue
        flux_obs = flux_obs[I] # keep only the observed bands in the fit
flux_err = flux_err[I]
flux_models = f[j,I,:]
tot_err = np.sqrt(flux_err**2 + (0.1*flux_obs)**2)
top = 0.
bottom = 0.
for i in range(len(flux_obs)):
top += (flux_models[i,:]*flux_obs[i])/(tot_err[i]**2)
bottom += (flux_models[i,:]**2)/(tot_err[i]**2)
scale = top/bottom
scale = np.reshape(scale, (n_metal, n_tg, n_tau, n_tauv, n_fesc))
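        # Note added for clarity: 'scale' is the analytic least-squares normalisation
        # of each template, s = sum(F_mod*F_obs/sig^2) / sum(F_mod^2/sig^2), i.e. the
        # per-template mass scaling that minimises chi^2 for a fixed template shape.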
chisq = 0.
for i in range(len(flux_obs)):
chisq += ((np.abs(scale*flux_models[i,:]-flux_obs[i])**2)/(flux_err[i])**2)
chimin, minind = np.nanmin(chisq), np.nanargmin(chisq)
if np.isinf(chimin) or np.isnan(minind):
if include_rest:
M_scaled = np.ones(len(flux_obs)) * -99.
restframe_output = ' '.join(M_scaled.astype('str'))
output_string = '{} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}'.format(gal+1,ID[gal],zobs[gal],-99,-99,-99,-99,-99,-99, -99, -99,-99,len(I),-99,z[j],restframe_output,'\n')
else:
output_string = '{} {} {} {} {} {} {} {} {} {} {} {} {} {} {}'.format(gal+1,ID[gal],zobs[gal],-99,-99,-99,-99,-99,-99,-99, -99,-99,len(I),-99,'\n')
printQueue.put(output_string)
continue
#Find the coordinate of the model with the bestfit mass
mi, tgi, ti, tvi, fi = np.unravel_index(minind,
(n_metal, n_tg,
n_tau, n_tauv, n_fesc))
Bestfit_Mass = np.log10(scale[mi, tgi, ti, tvi, fi]*flux_corr)
Bestfit_SFR = (scale[mi, tgi, ti, tvi, fi] *
SFR[mi, tgi, ti, tvi, fi]*flux_corr)
#Bestfit_Beta = beta[tgi,tvi,ti,mi]
Bestfit_Beta = -99.
#Scale the observed tot_mag band of the template to be the same as the observed tot_mag band of the galaxy
#Convert the templates so they are no longer units of per stellar mass
F_rest = f[0,:]*scale[mi, tgi, ti, tvi, fi]*flux_corr
restframeMags = 23.9 - 2.5*np.log10(F_rest)
#UV_rest = UV_flux[0]*scale[tgi,tvi,ti,mi]*flux_corr
#restframeMUV = 23.9 - 2.5*np.log10(UV_rest)
M_scaled = restframeMags[:, mi, tgi, ti, tvi, fi]
#MUV_scaled = restframeMUV[tgi,tvi,ti,mi]
MUV_scaled = -99.
if np.isnan(Bestfit_Mass) or np.isinf(chimin):
Bestfit_Mass = -99
#M_scaled[:] = -99
tgs = -99
tvs = -99
taus = -99
mis = -99
escape_fraction = -99
else:
tgs = tg[tgi]/1e9
tvs = tv[tvi]
taus = tau[ti]
mis = metallicities[mi]
escape_fraction = fesc[fi]
printlock.acquire()
print('{:6d} {:8d} {:>5.2f} {:>7.2f} {:>8.1f} {:>8.3f} {:>5.1f} {:>8.2f} {:>4.2f} {:>5.2f}'.format(gal+1,ID[gal], zobs[gal],Bestfit_Mass,chimin,tgs,tvs,taus,mis,np.log10(Bestfit_SFR)))
if include_rest:
restframe_output = ' '.join(M_scaled.astype('str'))
output_string = '{} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {} {}'.format(gal+1,ID[gal],zobs[gal],Bestfit_Mass,chimin,tgs,tvs,taus,mis, MUV_scaled, minind,Bestfit_SFR,len(I),Bestfit_Beta,z[j],restframe_output,'\n')
else:
output_string = '{} {} {} {} {} {} {} {} {} {} {} {} {} {} {}'.format(gal+1,ID[gal],zobs[gal],Bestfit_Mass,chimin,tgs,tvs,taus,mis, MUV_scaled, minind,Bestfit_SFR,len(I),Bestfit_Beta,'\n')
printlock.release()
printQueue.put(output_string)
def galaxyFit2(inputQueue, printQueue, printlock):
for gal in iter(inputQueue.get, 'STOP'):
output_string = '{0[0]} {0[1]} {0[2]} {0[3]} {0[4]} {0[5]} ' + \
'{0[6]} {0[7]} {0[8]} {0[9]} {0[10]} {0[11]} ' + \
'{0[12]} {0[13]} {0[14]}'
j = np.argmin(np.abs(z-zobs[gal])) # Find closest model redshift
log_mass_min, log_mass_max = 7, 13
log_sfr_min, log_sfr_max = -3, 4
flux_obs = obs[gal,:]
flux_err = obs_err[gal,:]
#flux_obs[fo <= 0.] = 0. # Set negative fluxes to zero
#I = np.where(flux_err > 0.)[0] # Find bands with no observation
I = (flux_err > 0.) * ((filt_lambda / (1+z[j])) < 3e5)
flux_obs = flux_obs[I] * zp_offsets[I] # and exclude from fit
flux_err = flux_err[I] * zp_offsets[I]
flux_models = f[j,I,:,j,:]
if params.temp_err != None:
terr = griddata(terr_wl, terr_sigma, filt_lambda[I] / (1+z[j]))
tot_err = np.sqrt(flux_err**2 + (terr*flux_obs)**2 + (params.flux_err*flux_obs)**2)
else:
tot_err = np.sqrt(flux_err**2 + (params.flux_err*flux_obs)**2)
top = 0.
bottom = 0.
for i in range(len(flux_obs)):
top += (flux_models[i,:]*flux_obs[i])/(tot_err[i]**2)
bottom += (flux_models[i,:]**2)/(tot_err[i]**2)
scale = top/bottom
scale = np.reshape(scale, (n_metal, n_tau, n_tauv, n_fesc))
chisq = 0.
for i in range(len(flux_obs)):
chisq += ((np.abs(scale*flux_models[i,:]-flux_obs[i])**2)/(tot_err[i])**2)
chimin, minind = np.nanmin(chisq), np.nanargmin(chisq)
likelihood = np.reshape(np.exp(-0.5*chisq),
(n_metal, n_tau, n_tauv, n_fesc))
likelihood[np.isnan(likelihood)] = 0.
likelihood = np.abs(likelihood/likelihood.sum())
if np.isinf(chimin) or np.isnan(minind):
output_string = '{n} {id} {zobs} {ztemp} {mass_best} {sfr_best} '+ \
'{chi_best} {tvs} {taus} {mis} {fesc} '+ \
'{mass_med} {mass_l68} {mass_u68} ' + \
'{sfr_med} {sfr_l68} {sfr_u68} ' + \
'{nfilts} '
output_values = {'n': gal+1,
'id': ID[gal],
'zobs': zobs[gal], 'ztemp':z[j],
'mass_best': -99.,
'sfr_best': -99,
'chi_best': -99,
'tvs': -99, 'taus': -99,
'mis': -99, 'fesc': -99,
'mass_med': -99, 'mass_l68': -99, 'mass_u68': -99,
'sfr_med': -99, 'sfr_l68': -99, 'sfr_u68': -99,
'nfilts': np.sum(I)}
            # best-fit quantities are undefined when the fit failed; use sentinel values
            output_array = [gal+1, ID[gal], zobs[gal],
                            -99., chimin, -99., -99., -99.,
                            -99., minind, -99., np.sum(I), -99., '\n']
output = output_string.format(**output_values)
printlock.acquire()
print_string = "{0[0]:6d} {0[1]:8d} {0[2]:>5.2f} " + \
"{0[3]:>7.2f} {0[4]:>8.3f} " + \
"{0[5]:>5.1f} {0[6]:>8.2f} {0[7]:>4.2f} " + \
"{0[8]:>5.2f}"
print_array = [gal+1, ID[gal], zobs[gal],
-99, -99,
-99, -99, -99,
-99]
print(print_string.format(print_array))
printlock.release()
else:
#Find the coordinate of the model with the bestfit mass
mi, ti, tvi, fi = np.unravel_index(minind,
(n_metal,
n_tau, n_tauv, n_fesc))
Masses = np.abs(np.log10(scale*flux_corr))
SFRs = np.log10(scale * SFR[:,j,:] * flux_corr)
mass_hist = np.histogram(Masses.flatten(),
range = (log_mass_min, log_mass_max),
bins = 120,
weights = likelihood.flatten(),
density = True)
sfr_hist = np.histogram(SFRs.flatten(),
range = (log_sfr_min, log_sfr_max),
bins = 140,
weights = likelihood.flatten(),
density = True)
Bestfit_Mass = np.abs(np.log10(scale[mi, ti, tvi, fi]*flux_corr))
Bestfit_SFR = np.abs(np.log10(scale[mi, ti, tvi, fi] *
SFR[mi, j, ti, tvi, fi]*flux_corr))
Bestfit_fluxes = (scale[mi, ti, tvi, fi] *
f[j,:, mi, j, ti, tvi, fi] *
flux_corr)
tgs = tg[j]/1e9
tvs = tv[tvi]
taus = tau[ti]
mis = metallicities[mi]
escape_fraction = fesc[fi]
m16, m50, m84 = weighted_quantile(Masses.flatten(),
[0.16, 0.5, 0.84],
sample_weight=likelihood.flatten(),
values_sorted=False)
s16, s50, s84 = weighted_quantile(SFRs.flatten(),
[0.16, 0.5, 0.84],
sample_weight=likelihood.flatten(),
values_sorted=False)
printlock.acquire()
MUV_scaled = -99.
Bestfit_Beta = -99.
print_string = "{0[0]:6d} {0[1]:8d} {0[2]:>5.2f} " + \
"{0[3]:>7.2f} {0[4]:>8.3f} " + \
"{0[5]:>5.1f} {0[6]:>8.2f} {0[7]:>4.2f} " + \
"{0[8]:>5.2f}"
print_array = [gal+1, ID[gal], zobs[gal],
Bestfit_Mass, chimin,
tvs, taus, mis,
Bestfit_SFR]
print(print_string.format(print_array))
printlock.release()
output_string = '{n} {id} {zobs} {ztemp} {mass_best} {sfr_best} '+ \
'{chi_best} {tvs} {taus} {mis} {fesc} '+ \
'{mass_med} {mass_l68} {mass_u68} ' + \
'{sfr_med} {sfr_l68} {sfr_u68} ' + \
'{nfilts} '
output_values = {'n': gal+1,
'id': ID[gal],
'zobs': zobs[gal], 'ztemp':z[j],
'mass_best': Bestfit_Mass,
'sfr_best': Bestfit_SFR,
'chi_best': chimin,
'tvs': tvs, 'taus': taus,
'mis': mis, 'fesc': escape_fraction,
'mass_med': m50, 'mass_l68': m16, 'mass_u68': m84,
'sfr_med': s50, 'sfr_l68': s16, 'sfr_u68': s84,
'nfilts': np.sum(I)}
output_array = [gal+1, ID[gal], zobs[gal],
Bestfit_Mass, chimin, tvs, taus, mis,
MUV_scaled, minind, Bestfit_SFR, np.sum(I), -99., '\n']
output = output_string.format(**output_values)
if include_rest:
if np.isinf(chimin) or np.isnan(minind):
M_scaled = np.ones(len(flux_obs)) * -99.
restframe_output = ' '.join(M_scaled.astype('str'))
output = output + restframe_output + ' \n'
else:
F_rest = np.array(f[0, :, mi, j, ti, tvi, fi] *
scale[mi, ti, tvi, fi] * flux_corr)
restframeMags = 23.9 - 2.5*np.log10(F_rest)
restframe_output = ' '.join(restframeMags.astype('str'))
output = output + restframe_output + ' \n'
else:
output = output + ' \n'
printQueue.put([gal, output, mass_hist, sfr_hist, Bestfit_fluxes,
[obs[gal, :], obs_err[gal, :]]])
def galaxyFitMz(inputQueue, printQueue, printlock):
for gal in iter(inputQueue.get, 'STOP'):
output_string = '{0[0]} {0[1]} {0[2]} {0[3]} {0[4]} {0[5]} ' + \
'{0[6]} {0[7]} {0[8]} {0[9]} {0[10]} {0[11]} ' + \
'{0[12]} {0[13]} {0[14]}'
log_mass_min, log_mass_max = 7, 13
log_sfr_min, log_sfr_max = -3, 4
# Set up output arrays
chi_z_best = np.zeros(len(z))
m_z_best = np.zeros(len(z))
m_z_median = np.zeros(len(z))
m_z_u68 = np.zeros(len(z))
m_z_l68 = np.zeros(len(z))
sfr_z_best = np.zeros(len(z))
sfr_z_median = np.zeros(len(z))
sfr_z_u68 = np.zeros(len(z))
sfr_z_l68 = np.zeros(len(z))
#m_z_hist = np.zeros((len(z), 120))
#sfr_z_hist = np.zeros((len(z), 140))
flux_obs = obs[gal,:]
flux_err = obs_err[gal,:]
#flux_obs[fo <= 0.] = 0. # Set negative fluxes to zero
I = np.where(flux_err > 0.)[0] # Find bands with no observation
if len(I) == 0:
            j = np.argmin(np.abs(z-zobs[gal]))  # Closest model redshift, for reporting only
            output_array = [gal+1, ID[gal], zobs[gal], z[j],
-99, -99, -99, -99, -99, -99, -99,
-99,-99,len(I),-99,'\n']
output = output_string.format(output_array)
if include_rest:
M_scaled = np.ones(len(flux_obs)) * -99.
restframe_output = ' '.join(M_scaled.astype('str'))
output = output + restframe_output + ' \n'
else:
output = output + ' \n'
            printQueue.put([gal, output, [m_z_best, sfr_z_best, chi_z_best], [m_z_l68, m_z_median, m_z_u68], [sfr_z_l68, sfr_z_median, sfr_z_u68]])
continue
flux_obs = flux_obs[I] # and exclude from fit
flux_err = flux_err[I]
tot_err = np.sqrt(flux_err**2 + (params.flux_err*flux_obs)**2)
for j, jz in enumerate(z):
#j = np.argmin(np.abs(z-zobs[gal])) # Find closest model redshift
flux_models = f[j,I,:,j]
top = 0.
bottom = 0.
for i in range(len(flux_obs)):
top += (flux_models[i,:]*flux_obs[i])/(tot_err[i]**2)
bottom += (flux_models[i,:]**2)/(tot_err[i]**2)
scale = top/bottom
scale = np.reshape(scale, (n_metal, n_tau, n_tauv, n_fesc))
chisq = 0.
for i in range(len(flux_obs)):
chisq += ((np.abs(scale*flux_models[i,:]-flux_obs[i])**2)/(tot_err[i])**2)
chimin, minind = np.nanmin(chisq), np.nanargmin(chisq)
likelihood = np.reshape(np.exp(-0.5*chisq),
(n_metal, n_tau, n_tauv, n_fesc))
likelihood[np.isnan(likelihood)] = 0.
likelihood = np.abs(likelihood/likelihood.sum())
if np.isinf(chimin) or np.isnan(minind):
output_array = [gal+1, ID[gal], zobs[gal], z[j],
-99, -99, -99, -99, -99, -99,
-99,-99,len(I),-99,'\n']
output = output_string.format(output_array)
else:
#Find the coordinate of the model with the bestfit mass
mi, ti, tvi, fi = np.unravel_index(minind,
(n_metal,
n_tau, n_tauv, n_fesc))
Masses = np.log10(np.abs(scale * flux_corr))
SFRs = np.log10(np.abs(scale * SFR[:,j] * flux_corr))
"""
mass_hist = np.histogram(Masses.flatten(),
range = (log_mass_min, log_mass_max),
bins = 120,
weights = likelihood.flatten(),
density = True)
sfr_hist = np.histogram(SFRs.flatten(),
range = (log_sfr_min, log_sfr_max),
bins = 140,
weights = likelihood.flatten(),
density = True)
"""
Bestfit_Mass = np.log10(np.abs(scale[mi, ti, tvi, fi]*flux_corr))
Bestfit_SFR = np.log10(np.abs(scale[mi, ti, tvi, fi]) *
SFR[mi, j, ti, tvi, fi]*flux_corr)
if np.isnan(Bestfit_Mass) or np.isinf(chimin):
Bestfit_Mass = -99
#M_scaled[:] = -99
tvs = -99
taus = -99
mis = -99
escape_fraction = -99
else:
tvs = tv[tvi]
taus = tau[ti]
mis = metallicities[mi]
escape_fraction = fesc[fi]
m16, m50, m84 = weighted_quantile(Masses.flatten(),
[0.16, 0.5, 0.84],
sample_weight=likelihood.flatten(),
values_sorted=False)
s16, s50, s84 = weighted_quantile(SFRs.flatten(),
[0.16, 0.5, 0.84],
sample_weight=likelihood.flatten(),
values_sorted=False)
chi_z_best[j] = chimin
m_z_best[j] = Bestfit_Mass
sfr_z_best[j] = Bestfit_SFR
m_z_l68[j], m_z_median[j], m_z_u68[j] = m16, m50, m84
sfr_z_l68[j], sfr_z_median[j], sfr_z_u68[j] = s16, s50, s84
MUV_scaled = -99.
Bestfit_Beta = -99.
printlock.acquire()
j = np.argmin(np.abs(z-zobs[gal])) # Find closest model redshift
print_string = "{0[0]:6d} {0[1]:8d} {0[2]:>5.2f} " + \
"{0[3]:>7.2f} {0[4]:>8.1f} {0[5]:>8.3f}"
print_array = [gal+1, ID[gal], zobs[gal],
m_z_best[j], chi_z_best[j],
sfr_z_best[j]]
print(print_string.format(print_array))
output_string = '{n} {id} {zobs} {ztemp} {mass_best} {sfr_best} '+ \
'{chi_best} ' + \
'{mass_med} {mass_l68} {mass_u68} ' + \
'{sfr_med} {sfr_l68} {sfr_u68} ' + \
'{nfilts} '
output_values = {'n': gal+1,
'id': ID[gal],
'zobs': zobs[gal], 'ztemp':z[j],
'mass_best': Bestfit_Mass,
'sfr_best': Bestfit_SFR,
'chi_best': chimin,
'mass_med': m50, 'mass_l68': m16, 'mass_u68': m84,
'sfr_med': s50, 'sfr_l68': s16, 'sfr_u68': s84,
'nfilts': len(I)}
output = output_string.format(**output_values) + ' \n'
printlock.release()
printQueue.put([gal, output,
[m_z_best, sfr_z_best, chi_z_best],
                        [m_z_l68, m_z_median, m_z_u68],
                        [sfr_z_l68, sfr_z_median, sfr_z_u68]])
def getObservations(inputpath):
input_data = Table.read(inputpath,format=input_format).filled(-99.)
column_names = list(input_data.columns.keys())
ID = input_data[ID_col]
zobs = input_data[z_col]
filter_names = []
k,l = 0,0
for ii in range(len(column_names)):
if column_names[ii].lower().endswith(flux_col_end.lower()):
if k == 0:
fluxes = input_data[column_names[ii]]
else:
fluxes = np.column_stack((fluxes,input_data[column_names[ii]]))
k+=1
filter_names.append(column_names[ii])
if column_names[ii].lower().endswith(fluxerr_col_end.lower()):
if l == 0:
fluxerrs = input_data[column_names[ii]]
else:
fluxerrs = np.column_stack((fluxerrs,input_data[column_names[ii]]))
l+=1
"""
if filts_used != None:
try:
fluxes = fluxes[:,filts_used]
fluxerrs = fluxerrs[:,filts_used]
        except:
print('Filter mismatch 1')
# Array slicing fail
"""
return ID, zobs, fluxes, fluxerrs, k, filter_names
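# Note on the expected catalogue layout (illustrative): flux and error columns are
# identified purely by suffix, e.g. hypothetical columns 'f160w_flux' and
# 'f160w_fluxerr' when flux_col_end = '_flux' and fluxerr_col_end = '_fluxerr';
# the actual suffixes are whatever the parameter file supplies.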
class _function_wrapper:
"""
This is a hack to make the likelihood function pickleable when ``args``
or ``kwargs`` are also included.
Stolen from emcee
"""
def __init__(self, f, args, kwargs):
self.f = f
self.args = args
self.kwargs = kwargs
def __call__(self, x):
try:
return self.f(x, *self.args, **self.kwargs)
except:
import traceback
print("emcee: Exception while calling your likelihood function:")
print(" params:", x)
print(" args:", self.args)
print(" kwargs:", self.kwargs)
print(" exception:")
traceback.print_exc()
raise
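# Illustrative usage of _function_wrapper (not part of the original pipeline;
# 'my_lnlike' and 'theta' are hypothetical names used only for this sketch):
#   lnprob = _function_wrapper(my_lnlike, args=(obs, obs_err), kwargs={})
#   value = lnprob(theta)   # equivalent to my_lnlike(theta, obs, obs_err)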
def weighted_quantile(values, quantiles, sample_weight=None, values_sorted=False, old_style=False):
""" Very close to np.percentile, but supports weights.
NOTE: quantiles should be in [0, 1]!
:param values: np.array with data
    :param quantiles: array-like of quantiles to compute
    :param sample_weight: array-like of weights, the same length as `values`
    :param values_sorted: bool, if True, skip sorting of the initial array
:param old_style: if True, will correct output to be consistent with np.percentile.
:return: np.array with computed quantiles.
"""
values = np.array(values)
quantiles = np.array(quantiles)
if sample_weight is None:
sample_weight = np.ones(len(values))
sample_weight = np.array(sample_weight)
assert np.all(quantiles >= 0) and np.all(quantiles <= 1), 'quantiles should be in [0, 1]'
if not values_sorted:
sorter = np.argsort(values)
values = values[sorter]
sample_weight = sample_weight[sorter]
weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight
if old_style:
# To be convenient with np.percentile
weighted_quantiles -= weighted_quantiles[0]
weighted_quantiles /= weighted_quantiles[-1]
else:
weighted_quantiles /= np.sum(sample_weight)
return np.interp(quantiles, weighted_quantiles, values)
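# Quick illustrative check of weighted_quantile (comment only, not executed):
#   weighted_quantile([1, 2, 3, 4], [0.5], sample_weight=[1, 1, 1, 1])
# returns array([ 2.5]); with uniform weights the weighted median falls halfway
# between the two central values, matching np.percentile([1, 2, 3, 4], 50).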
if __name__ == '__main__':
logfile = open("error.log", "w")
original_stderr = sys.stderr
sys.stderr = logfile
start = time.time()
"""
SECTION 1
"""
model_path = params.model_path
input_catalog = params.input_catalog
input_format = params.input_format
z_col = params.z_col
ID_col = params.ID_col
flux_col_end = params.flux_col_end
fluxerr_col_end = params.fluxerr_col_end
ncpus = params.ncpus
filts_used = params.filts_used
include_rest = params.include_rest
output_path = params.output_catalog_path
output_format = params.output_format
output_hdf_path = params.output_hdf_path
calc_mode = params.fitting_mode
flux_corr = params.flux_corr
ID, zobs, obs, obs_err, filters_found, filter_names = getObservations(input_catalog)
"""
    SECTION 2
"""
print("Loading synthetic mags and mass array:")
models = h5py.File(model_path, 'r')
tg = models['ages'][()]
tv = models['dust'][()]
tau = models['sfh'][()]
metallicities = models['metallicities'][()]
fesc = models['fesc'][()]
Mshape = models['fluxes'].shape
z = models['z']
nfilts = Mshape[1]
n_metal = Mshape[2]
n_tg = Mshape[3]
n_tau = Mshape[4]
n_tauv = Mshape[5]
n_fesc = Mshape[6]
filt_lambda = models['wl'][()]
#UV_flux = synmags['UV_flux']
SFR = models['SFR']
Ms = models['Ms']
    if params.zp_offsets is not None:
        zp_offsets = Table.read(params.zp_offsets, format='ascii.no_header')['col1']
    else:
        # Assumed default when no zero-point file is given: unity offsets, i.e. no correction
        zp_offsets = np.ones(obs.shape[1])
    if params.temp_err is not None:
        terr_wl, terr_sigma = np.loadtxt(params.temp_err).T
    if (nfilts == filters_found) and (filts_used is None):
        f = models['fluxes']
    elif (nfilts != filters_found) and (filts_used is None):
        raise Exception('Mis-match between model and observed filter numbers')
    elif filts_used is not None:
try:
f = models['fluxes'][:,filts_used]
obs = obs[:,filts_used]
obs_err = obs_err[:,filts_used]
filter_names = np.array(filter_names)[filts_used]
except:
print('Mis-match between model and observed filter numbers')
raise
# Slice fail
print ("Done.")
"""
SECTION 3
"""
if os.path.isfile(output_path+".temp_output.txt"):
os.remove(output_path+".temp_output.txt")
temp_file = open(output_path+".temp_output.txt","w")
"""
SECTION 4
Chi-sq calculation
"""
out_string = '{0:6s} {1:8s} {2:>5s} {3:>7s} {4:>8s}' + \
'{5:>5s} {6:>8s} {7:>4s} {8:>5s}'
print(out_string.format('N','ID','zobs','Best', 'chimin',
'tauv','tau','met', 'sfr'))
loop_start = time.time()
ncpus = np.clip(ncpus, 1, multiprocessing.cpu_count())
inputQueue = multiprocessing.Queue()
printQueue = multiprocessing.Queue()
printlock = multiprocessing.Lock()
if calc_mode == 'hist':
output_hdf = h5py.File(output_hdf_path, 'w')
output_hdf.create_dataset("mass_pdf", (len(ID), 120), dtype="f")
output_hdf.create_dataset("sfr_pdf", (len(ID), 140), dtype="f")
output_hdf.create_dataset("fit_flux", (len(ID), f.shape[1]), dtype="f")
output_hdf.create_dataset("obs_flux", (len(ID), f.shape[1]), dtype="f")
output_hdf.create_dataset("obs_fluxerr", (len(ID), f.shape[1]),
dtype="f")
output_hdf.create_dataset("lambda_filt", data = models["wl"])
output_hdf.create_dataset("fwhm_filt", data = models["fwhm"])
fitFunction = galaxyFit2
elif calc_mode == 'Mz':
output_hdf = h5py.File(output_hdf_path, 'w')
output_hdf.create_dataset("m_z_best", (len(ID), len(z)), dtype="f")
output_hdf.create_dataset("sfr_z_best", (len(ID), len(z)), dtype="f")
output_hdf.create_dataset("chi_z_best", (len(ID), len(z)), dtype="f")
output_hdf.create_dataset("m_z_median", (len(ID), len(z)), dtype="f")
output_hdf.create_dataset("m_z_l68", (len(ID), len(z)), dtype="f")
output_hdf.create_dataset("m_z_u68", (len(ID), len(z)), dtype="f")
output_hdf.create_dataset("sfr_z_median", (len(ID), len(z)), dtype="f")
output_hdf.create_dataset("sfr_z_u68", (len(ID), len(z)), dtype="f")
output_hdf.create_dataset("sfr_z_l68", (len(ID), len(z)), dtype="f")
output_hdf.create_dataset("z", data=z)
fitFunction = galaxyFitMz
else:
fitFunction = galaxyFit
for i in range( ncpus ):
multiprocessing.Process(target = fitFunction,
args = (inputQueue, printQueue,
printlock)).start()
# Put elements in the send queue for processing
for gal in range( len(ID) ):
inputQueue.put( gal )
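    # Each worker consumes galaxy indices from inputQueue until it reads the 'STOP'
    # sentinel pushed further below; results arrive on printQueue in completion order,
    # not input order (the hist/Mz results therefore carry their own galaxy index).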
if calc_mode == 'hist':
for i, gal in enumerate(ID):
j, out, mass_hist, sfr_hist, fit_flux, obs_flux = printQueue.get()
if i == 0:
mass_centers = 0.5*(mass_hist[1][1:] + mass_hist[1][:-1])
sfr_centers = 0.5*(sfr_hist[1][1:] + sfr_hist[1][:-1])
output_hdf.create_dataset("mass_bins", data = mass_centers)
output_hdf.create_dataset("sfr_bins", data = sfr_centers)
output_hdf["mass_pdf"][j] = mass_hist[0]
output_hdf["sfr_pdf"][j] = sfr_hist[0]
output_hdf["fit_flux"][j] = fit_flux
output_hdf["obs_flux"][j] = obs_flux[0]
output_hdf["obs_fluxerr"][j] = obs_flux[1]
temp_file.write( out )
elif calc_mode == 'Mz':
for i, gal in enumerate(ID):
j, out, pz_best, mz_median, sfrz_median = printQueue.get()
output_hdf["m_z_best"][j,:] = pz_best[0]
output_hdf["sfr_z_best"][j,:] = pz_best[1]
output_hdf["chi_z_best"][j,:] = pz_best[2]
output_hdf["m_z_l68"][j,:] = mz_median[0]
output_hdf["m_z_median"][j,:] = mz_median[1]
output_hdf["m_z_u68"][j,:] = mz_median[2]
output_hdf["sfr_z_l68"][j,:] = sfrz_median[0]
output_hdf["sfr_z_median"][j,:] = sfrz_median[1]
output_hdf["sfr_z_u68"][j,:] = sfrz_median[2]
temp_file.write( out )
else:
for i, gal in enumerate(ID):
printout = printQueue.get()
temp_file.write( printout )
#print len(mass_array), len(muv_array), len(beta_array)
# Stop all the running processes
for i in range( ncpus ):
inputQueue.put( 'STOP' )
# Close both send and receive queues
inputQueue.close()
printQueue.close()
temp_file.close()
models.close()
    if calc_mode in ('hist', 'Mz'):
        output_hdf.close()
print("Fitting time taken: {:.2f} {}".format(time.time()-loop_start,
'\n'))
"""
    SECTION 5
Reload, format and save output table
"""
    while not temp_file.closed:
        time.sleep(0.1)
data = np.loadtxt(output_path+".temp_output.txt")
    try:
        rows, cols = data.shape
    except ValueError:
        # A single-object catalogue loads as a 1-D array; make it 2-D so the
        # column slicing below still works
        data = data.reshape(1, -1)
        rows, cols = data.shape
output = Table()
names = ['N', 'ID', 'z', 'zmodel',
'Mass_best', 'SFR_best', 'chi_best',
'Dust_best', 'SFH_best',
'Metallicity_best', 'fesc_best',
'Mass_median', 'Mass_l68', 'Mass_u68',
'SFR_median', 'SFR_l68', 'SFR_u68',
'Nfilts']
units = [None, None, None, None,
u.Msun, u.Msun/u.yr, None,
None, None,
None, None,
u.Msun, u.Msun, u.Msun,
u.Msun/u.yr, u.Msun/u.yr, u.Msun/u.yr,
None]
types = ['i4', 'i4', 'f4', 'f4',
'f4', 'f4', 'f4',
'f4', 'f4',
'f4', 'f4',
'f4', 'f4', 'f4',
'f4', 'f4', 'f4',
'i4']
if include_rest:
for name in filter_names:
names.append(name[:-len(flux_col_end)]+'_rest')
units.append(u.mag)
types.append('f4')
for col in range(cols):
column = Column( data[:,col], name = names[col], unit=units[col], dtype=types[col])
output.add_column(column)
table_format = params.output_format
output.sort('ID')
if os.path.isfile(output_path):
os.remove(output_path)
output.write(output_path,format=table_format, overwrite=True)
print('Catalog saved')
os.remove(temp_file.name)
print('\n')
print("Total time taken: "+str(time.time()-start))
sys.stderr = original_stderr
logfile.close()
|
sheins/saraheins
|
refs/heads/master
|
node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/input_test.py
|
604
|
#!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the input.py file."""
import gyp.input
import unittest
import sys
class TestFindCycles(unittest.TestCase):
def setUp(self):
self.nodes = {}
for x in ('a', 'b', 'c', 'd', 'e'):
self.nodes[x] = gyp.input.DependencyGraphNode(x)
def _create_dependency(self, dependent, dependency):
dependent.dependencies.append(dependency)
dependency.dependents.append(dependent)
def test_no_cycle_empty_graph(self):
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_line(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_dag(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['a'], self.nodes['c'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_cycle_self_reference(self):
self._create_dependency(self.nodes['a'], self.nodes['a'])
self.assertEquals([(self.nodes['a'], self.nodes['a'])],
self.nodes['a'].FindCycles())
def test_cycle_two_nodes(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self.assertEquals([(self.nodes['a'], self.nodes['b'], self.nodes['a'])],
self.nodes['a'].FindCycles())
self.assertEquals([(self.nodes['b'], self.nodes['a'], self.nodes['b'])],
self.nodes['b'].FindCycles())
def test_two_cycles(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['b'])
cycles = self.nodes['a'].FindCycles()
self.assertTrue(
(self.nodes['a'], self.nodes['b'], self.nodes['a']) in cycles)
self.assertTrue(
(self.nodes['b'], self.nodes['c'], self.nodes['b']) in cycles)
self.assertEquals(2, len(cycles))
def test_big_cycle(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
self._create_dependency(self.nodes['d'], self.nodes['e'])
self._create_dependency(self.nodes['e'], self.nodes['a'])
self.assertEquals([(self.nodes['a'],
self.nodes['b'],
self.nodes['c'],
self.nodes['d'],
self.nodes['e'],
self.nodes['a'])],
self.nodes['a'].FindCycles())
if __name__ == '__main__':
unittest.main()
|
zdary/intellij-community
|
refs/heads/master
|
python/testData/inspections/StatementEffect_after.py
|
83
|
class klass:
def foo(self):
pass
var = klass()
var.foo()
|
meanmee/keras
|
refs/heads/master
|
tests/auto/test_embeddings.py
|
74
|
import unittest
import numpy as np
from keras.models import Sequential
from keras.layers.core import Merge, Dense, Activation, Flatten
from keras.layers.embeddings import Embedding
from theano import function
from keras.constraints import unitnorm
class TestEmbedding(unittest.TestCase):
def setUp(self):
self.X1 = np.array([[1], [2]], dtype='int32')
self.W1 = np.array([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]], dtype='float32')
def test_unitnorm_constraint(self):
lookup = Sequential()
lookup.add(Embedding(3, 2, weights=[self.W1], W_constraint=unitnorm()))
lookup.add(Flatten())
lookup.add(Dense(2, 1))
lookup.add(Activation('sigmoid'))
lookup.compile(loss='binary_crossentropy', optimizer='sgd', class_mode='binary')
lookup.train_on_batch(self.X1, np.array([[1], [0]], dtype='int32'))
norm = np.linalg.norm(lookup.params[0].get_value(), axis=1)
self.assertTrue(np.allclose(norm, np.ones_like(norm).astype('float32')))
if __name__ == '__main__':
unittest.main()
|
paloda/android_kernel_bq_vegetalte
|
refs/heads/cm-12.1
|
tools/perf/tests/attr.py
|
3174
|
#! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
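    # Illustrative behaviour of compare_data (comment only):
    #   compare_data('0|1', '1')  -> True  (one '|'-separated alternative matches)
    #   compare_data('*', '123')  -> True  (a wildcard on either side matches)
    #   compare_data('2', '3')    -> False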
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
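# A minimal, hypothetical example of such a description file (section names and
# field values below are illustrative only, not taken from the perf test suite):
#
#   [config]
#   command = record
#   args = kill >/dev/null 2>&1
#   ret = 1
#
#   [event:base-record]
#   sample_period = 4000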
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
        # The event record section header contains the word 'event',
        # optionally followed by ':', allowing a 'parent event' to be
        # loaded first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
|
vitiral/micropython
|
refs/heads/master
|
tests/basics/builtin_allany.py
|
113
|
# test builtin "all" and "any"
tests = (
(),
[],
[False],
[True],
[False, True],
[True, False],
[False, False],
[True, True],
range(10),
)
for test in tests:
print(all(test))
for test in tests:
print(any(test))
|
nicoboss/Floatmotion
|
refs/heads/master
|
OpenGL/GLES1/OES/packed_depth_stencil.py
|
8
|
'''OpenGL extension OES.packed_depth_stencil
This module customises the behaviour of the
OpenGL.raw.GLES1.OES.packed_depth_stencil to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/packed_depth_stencil.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.OES.packed_depth_stencil import *
from OpenGL.raw.GLES1.OES.packed_depth_stencil import _EXTENSION_NAME
def glInitPackedDepthStencilOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
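# Illustrative usage (assumption -- only meaningful with an active GLES1 context):
#   from OpenGL.GLES1.OES.packed_depth_stencil import glInitPackedDepthStencilOES
#   if glInitPackedDepthStencilOES():
#       pass  # the packed depth/stencil enums from this extension may be used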
|
tudorvio/tempest
|
refs/heads/master
|
tempest/hacking/checks.py
|
18
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import pep8
PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
'trove', 'ironic', 'savanna', 'heat', 'ceilometer',
'zaqar', 'sahara']
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
TEST_DEFINITION = re.compile(r'^\s*def test.*')
SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class')
SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
RAND_NAME_HYPHEN_RE = re.compile(r".*rand_name\(.+[\-\_][\"\']\)")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
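# Illustrative lines the regexes above are intended to catch (comment only):
#   RAND_NAME_HYPHEN_RE  matches e.g.  name = rand_name('testserver-')   (T108)
#   mutable_default_args matches e.g.  def foo(self, bar={}):            (N322)
#   SCENARIO_DECORATOR   matches e.g.  @test.services('compute', 'network')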
def import_no_clients_in_api_and_scenario_tests(physical_line, filename):
"""Check for client imports from tempest/api & tempest/scenario tests
T102: Cannot import OpenStack python clients
"""
if "tempest/api" in filename or "tempest/scenario" in filename:
res = PYTHON_CLIENT_RE.match(physical_line)
if res:
return (physical_line.find(res.group(1)),
("T102: python clients import not allowed"
" in tempest/api/* or tempest/scenario/* tests"))
def scenario_tests_need_service_tags(physical_line, filename,
previous_logical):
"""Check that scenario tests have service tags
T104: Scenario tests require a services decorator
"""
if 'tempest/scenario/' in filename and '/test_' in filename:
if TEST_DEFINITION.match(physical_line):
if not SCENARIO_DECORATOR.match(previous_logical):
return (physical_line.find('def'),
"T104: Scenario tests require a service decorator")
def no_setup_teardown_class_for_tests(physical_line, filename):
if pep8.noqa(physical_line):
return
if 'tempest/test.py' not in filename:
if SETUP_TEARDOWN_CLASS_DEFINITION.match(physical_line):
return (physical_line.find('def'),
"T105: (setUp|tearDown)Class can not be used in tests")
def no_vi_headers(physical_line, line_number, lines):
"""Check for vi editor configuration in source files.
By default vi modelines can only appear in the first or
last 5 lines of a source file.
T106
"""
# NOTE(gilliard): line_number is 1-indexed
if line_number <= 5 or line_number > len(lines) - 5:
if VI_HEADER_RE.match(physical_line):
return 0, "T106: Don't put vi configuration in source files"
def service_tags_not_in_module_path(physical_line, filename):
"""Check that a service tag isn't in the module path
A service tag should only be added if the service name isn't already in
the module path.
T107
"""
# NOTE(mtreinish) Scenario tests always need service tags, but subdirs are
# created for services like heat which would cause false negatives for
# those tests, so just exclude the scenario tests.
if 'tempest/scenario' not in filename:
matches = SCENARIO_DECORATOR.match(physical_line)
if matches:
services = matches.group(1).split(',')
for service in services:
service_name = service.strip().strip("'")
modulepath = os.path.split(filename)[0]
if service_name in modulepath:
return (physical_line.find(service_name),
"T107: service tag should not be in path")
def no_hyphen_at_end_of_rand_name(logical_line, filename):
"""Check no hyphen at the end of rand_name() argument
T108
"""
if './tempest/api/network/' in filename:
# Network API tests are migrating from Tempest to Neutron repo now.
# So here should avoid network API tests checks.
return
msg = "T108: hyphen should not be specified at the end of rand_name()"
if RAND_NAME_HYPHEN_RE.match(logical_line):
return 0, msg
def no_mutable_default_args(logical_line):
"""Check that mutable object isn't used as default argument
N322: Method's default argument shouldn't be mutable
"""
msg = "N322: Method's default argument shouldn't be mutable!"
if mutable_default_args.match(logical_line):
yield (0, msg)
def factory(register):
register(import_no_clients_in_api_and_scenario_tests)
register(scenario_tests_need_service_tags)
register(no_setup_teardown_class_for_tests)
register(no_vi_headers)
register(service_tags_not_in_module_path)
register(no_hyphen_at_end_of_rand_name)
register(no_mutable_default_args)
|
mjsottile/GZSatelliteFinder
|
refs/heads/master
|
old_code/trainer.py
|
1
|
"""
trainer.py
given a set of labeled images, train a classifier to then evaluate against
a test set.
training data is specified as a set of pairs that map GZ identifiers
to labels. labels are integers whose interpretation is left up to the user.
mjsottile@gmail.com // dec. 2013
"""
import mysdss
import config as cfg
import FeatureExtractor as features
import numpy as np
from sklearn import cluster
# read configuration
params = cfg.read_gztf_config("trailfinder.cfg")
sdss_database = params["sdss_database"]
images_root = params["images_root"]
# read training set
db = mysdss.read_sdss_trainingset("training_set.csv")
# convert labels to ints
for gz_id in db:
db[gz_id] = int(db[gz_id])
max_category = max(db.values())
min_category = min(db.values())
totrows = 0
data = []
print "Computing features."
i = 0
labels = []
for gz_id in db:
imagefile = params["images_root"]+gz_id+".jpg"
x = features.line_signature_wrapper(imagefile, params)
labels.extend([gz_id for j in range(len(x))])
data.append(x)
print str(int(100 * (float(i) / float(len(db))))) + "% done."
i = i+1
d = np.concatenate(data)
print "Clustering."
lookup = {}
for gz_id in db:
lookup[gz_id] = []
km = cluster.KMeans(n_clusters=params["kmeans_num_clusters"])
km.fit(d)
n = len(km.labels_)
for l in range(n):
lookup[labels[l]].append(km.labels_[l])
# remove duplicates
for gz_id in db:
lookup[gz_id] = list(set(lookup[gz_id]))
# perform binning
bins = np.zeros((params["kmeans_num_clusters"], (max_category-min_category)+1))
# create matrix of kmeans category vs training set label to determine which
# kmeans category most frequently matches a given training label
for gz_id in db:
for km_cat in lookup[gz_id]:
bins[km_cat-1, db[gz_id]-1] = bins[km_cat-1, db[gz_id]-1]+1
print bins
|
foospidy/CACConsole
|
refs/heads/master
|
modules/CloudAtCostConsole.py
|
1
|
# CloudAtCostConsole Copyright (C) 2015 foospidy
# https://github.com/foospidy/CACConsole
# See LICENSE for details
# Cloud At Cost Console
import os
import time
from twisted.internet import stdio, reactor
from twisted.protocols import basic
from twisted.python import log
import sqlite3 as lite
import cacpy as CACPy
class CloudAtCostConsole(basic.LineReceiver):
from os import linesep as delimiter
def __init__(self, dbfile):
self.dbfile = dbfile
self.db = lite.connect(self.dbfile)
self.cursor = self.db.cursor()
self.using = []
self.cac = None
self.cursor.execute('CREATE TABLE IF NOT EXISTS accounts ( account VARCHAR(200) NOT NULL, apikey VARCHAR(200) NOT NULL );')
def connectionMade(self):
self.do_banner()
self.sendLine('For help type \'help\'.')
self.transport.write('CaC>')
def lineReceived(self, line):
if not line:
self.transport.write('CaC>')
return
# Parse command
commandParts = line.split()
command = commandParts[0].lower()
args = commandParts[1:]
try:
method = getattr(self, 'do_' + command)
except AttributeError, e:
self.sendLine('Error: no such command.')
else:
try:
method(*args)
except Exception, e:
self.sendLine('Error: ' + str(e))
self.sendLine('Make sure you have the latest CACPY, run: sudo pip install --upgrade cacpy')
self.transport.write('CaC>')
def do_help(self, command=None):
"""help [command]: List commands, or show help on the given command"""
if command:
self.sendLine(getattr(self, 'do_' + command).__doc__)
else:
commands = [cmd[3:] for cmd in dir(self) if cmd.startswith('do_')]
self.sendLine("Valid commands:\n\t" +"\n\t".join(commands))
self.sendLine('Type "help [command]" for more info.')
### utilities ####################
def do_ping(self, serverid):
"""ping: Ping a server. Usage: ping [<serverid>|all] """
if not self.using:
self.sendLine('No account selected! Type: help use')
return
servers = self.cac.get_server_info()
        for i in range(0, len(servers['data'])):
            server_data = servers['data'][i]
sid = server_data['sid'].encode('UTF-8')
ip = server_data['ip'].encode('UTF-8')
if 'all' == serverid:
response = os.system('ping -c 3 ' + ip)
elif serverid == sid:
response = os.system('ping -c 3 ' + ip)
def do_usage(self):
"""usage: Show server(s) utilization"""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
servers = self.cac.get_server_info()
if 'error' == servers['status']:
self.sendLine('Error: ' + servers['error_description'].encode('UTF-8'))
return
self.sendLine('---------------------------------------------------------------------------------------------------------------------')
self.sendLine('{0:11} {1:32} {2:15} {3:4} {4:18} {5:18} {6:10}'.format('SID', 'Hostname', 'Label', 'CPUs', 'RAM', 'Storage', 'Status'))
self.sendLine('---------------------------------------------------------------------------------------------------------------------')
for i in range(0, len(servers['data'])):
server_data = servers['data'][i]
try:
sid = server_data['sid'].encode('UTF-8')
if server_data['label'] is not None:
label = server_data['label'].encode('UTF-8')
else:
label = ''
hostname = server_data['hostname'].encode('UTF-8')
template = server_data['template'].encode('UTF-8')
cpu = server_data['cpu'].encode('UTF-8')
cpuusage = server_data['cpuusage'].encode('UTF-8')
ram = server_data['ram'].encode('UTF-8')
ramusage = round(float(server_data['ramusage']) / int(server_data['ram']) * 100, 2)
storage = server_data['storage'].encode('UTF-8')
hdusage = round(float(server_data['hdusage']) / int(server_data['storage']) * 100, 2)
status = server_data['status'].encode('UTF-8')
self.sendLine('{0:11} {1:32} {2:15} {3:4} {4:18} {5:18} {6:10}'.format(sid, hostname, label, cpu, str(ramusage) + '% of ' + ram, str(hdusage) + '% of ' + storage, status))
except Exception as e:
self.sendLine('Error reading host information, perhaps server is re-imaging or powered off?')
def do_bash(self):
"""bash: Drop to bash shell. Type 'exit' to return to CACConsole"""
response = os.system('/bin/bash')
### power ####################
def do_power(self, power='none', serverid='none'):
"""power: Change power state of server(s). Usage: power [on|off|reset] [serverid|all]"""
if 'on' == power:
self._power_on(serverid)
elif 'off' == power:
self._power_off(serverid)
elif 'reset' == power:
self._power_reset(serverid)
else:
self.sendLine('Invalid arguments! Usage:')
self.do_help('power')
def _power_on(self, serverid='none'):
"""_power_on: Power on a server or all servers. Usage: power on [<serverid>|all]"""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
if 'none' == serverid:
self.sendLine('Invalid arguments! Usage:')
self.do_help('power')
elif 'all' == serverid:
self._power_on_all()
else:
power = self.cac.power_on_server(serverid)
status = power['status'].encode('UTF-8')
if 'ok' == status:
action = power['action'].encode('UTF-8')
taskid = power['taskid']
result = power['result'].encode('UTF-8')
log.msg('Server power on. sid ' + serverid)
self.sendLine(action + ': ' + result + '(taskid: ' + str(taskid) + ')')
else:
error_description = power['error_description'].encode('UTF-8')
self.sendLine(status + ': ' + error_description)
def _power_on_all(self):
"""_power_on_all: Power on all servers."""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
servers = self.cac.get_server_info()
        for i in range(0, len(servers['data'])):
            server_data = servers['data'][i]
serverid = server_data['sid'].encode('UTF-8')
power = self.cac.power_on_server(serverid)
status = power['status'].encode('UTF-8')
if 'ok' == status:
action = power['action'].encode('UTF-8')
taskid = power['taskid']
result = power['result'].encode('UTF-8')
log.msg('Server poweron. sid ' + serverid)
self.sendLine(action + ': ' + result + '(taskid: ' + str(taskid) + ')')
time.sleep(1) # give CaC API a break before continuing
else:
error_description = power['error_description'].encode('UTF-8')
self.sendLine(status + ': ' + error_description)
def _power_off(self, serverid='none'):
"""_power_off: Power off a server or all servers. Usage: power off [<serverid>|all]"""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
if 'all' == serverid:
self._power_off_all()
else:
power = self.cac.power_off_server(serverid)
status = power['status'].encode('UTF-8')
if 'ok' == status:
action = power['action'].encode('UTF-8')
taskid = power['taskid']
result = power['result'].encode('UTF-8')
log.msg('Server poweroff. sid ' + serverid)
self.sendLine(action + ': ' + result + '(taskid: ' + str(taskid) + ')')
else:
error_description = power['error_description'].encode('UTF-8')
self.sendLine(status + ': ' + error_description)
def _power_off_all(self):
"""_poweroff_all: Power off all servers."""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
servers = self.cac.get_server_info()
        for i in range(0, len(servers['data'])):
            server_data = servers['data'][i]
serverid = server_data['sid'].encode('UTF-8')
power = self.cac.power_off_server(serverid)
status = power['status'].encode('UTF-8')
if 'ok' == status:
action = power['action'].encode('UTF-8')
taskid = power['taskid']
result = power['result'].encode('UTF-8')
log.msg('Server poweroff. sid ' + serverid)
self.sendLine(action + ': ' + result + '(taskid: ' + str(taskid) + ')')
time.sleep(1) # give CaC API a break before continuing
else:
error_description = power['error_description'].encode('UTF-8')
self.sendLine(status + ': ' + error_description)
def _power_reset(self, serverid='none'):
"""reset: Restart a server or all servers. Usage: power reset [<serverid>|all]"""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
if 'all' == serverid:
self._power_reset_all()
else:
power = self.cac.reset_server(serverid)
status = power['status'].encode('UTF-8')
if 'ok' == status:
action = power['action'].encode('UTF-8')
taskid = power['taskid']
result = power['result'].encode('UTF-8')
log.msg('Server reset. sid ' + serverid)
self.sendLine(action + ': ' + result + '(taskid: ' + str(taskid) + ')')
else:
error_description = power['error_description'].encode('UTF-8')
self.sendLine(status + ': ' + error_description)
def _power_reset_all(self):
"""_reset_all: Restart all servers."""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
servers = self.cac.get_server_info()
        for i in range(0, len(servers['data'])):
            server_data = servers['data'][i]
serverid = server_data['sid'].encode('UTF-8')
power = self.cac.reset_server(serverid)
status = power['status'].encode('UTF-8')
if 'ok' == status:
action = power['action'].encode('UTF-8')
taskid = power['taskid']
result = power['result'].encode('UTF-8')
log.msg('Server reset. sid ' + serverid)
self.sendLine(action + ': ' + result + '(taskid: ' + str(taskid) + ')')
time.sleep(1) # give CaC API a break before continuing
else:
error_description = power['error_description'].encode('UTF-8')
self.sendLine(status + ': ' + error_description)
### List ####################
def do_list(self, list='servers'):
"""list: List information. Usage: list [accounts|servers|tasks|templates|resources]"""
if 'accounts' == list:
self._list_accounts()
elif 'tasks' == list:
self._list_tasks()
elif 'templates' == list:
self._list_templates()
elif 'resources' == list:
self._list_resources()
else:
self._list_servers()
def _list_resources(self):
""" _list_resources: List CloudPRO resources"""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
resources = self.cac.get_resources()
if 'error' == resources['status']:
self.sendLine('Error: ' + resources['error_description'].encode('UTF-8'))
return
total = resources['data']['total']
used = resources['data']['used']
self.sendLine('CPU: ' + str(used['cpu_used']) + ' of ' + str(total['cpu_total']))
self.sendLine('RAM: ' + str(used['ram_used']) + ' of ' + str(total['ram_total']))
self.sendLine('Storage: ' + str(used['storage_used']) + ' of ' + str(total['storage_total']))
def _list_tasks(self):
"""_list_tasks: List all tasks in operation"""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
tasks = self.cac.get_task_info()
if 'error' == tasks['status']:
self.sendLine('Error: ' + tasks['error_description'].encode('UTF-8'))
return
if 0 == len(tasks['data']):
self.sendLine('No current tasks')
for i in range(0, len(tasks['data'])):
task_data = tasks['data'][i]
serverid = str(task_data['serverid']).encode('UTF-8')
action = str(task_data['action']).encode('UTF-8')
status = str(task_data['status']).encode('UTF-8')
self.sendLine(serverid + '\t' + action + '\t' + status)
def _list_templates(self):
"""_list_templates: List all templates available"""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
templates = self.cac.get_template_info()
if 'error' == templates['status']:
self.sendLine('Error: ' + templates['error_description'].encode('UTF-8'))
return
for i in range(0, len(templates['data'])):
template_data = templates['data'][i]
id = template_data['id'].encode('UTF-8')
detail = template_data['detail'].encode('UTF-8')
self.sendLine(id + '\t' + detail)
def _list_servers(self):
"""_list_servers: List all servers on the account"""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
servers = self.cac.get_server_info()
if 'error' == servers['status']:
self.sendLine('Error: ' + servers['error_description'].encode('UTF-8'))
return
self.sendLine('---------------------------------------------------------------------------------------------------------------------')
self.sendLine('{0:11} {1:32} {2:15} {3:15} {4:5} {5:6} {6:7} {7:10}'.format('SID', 'Hostname', 'Label', 'ip', 'CPUs', 'RAM', 'Storage', 'Status'))
self.sendLine('---------------------------------------------------------------------------------------------------------------------')
for i in range(0, len(servers['data'])):
server_data = servers['data'][i]
try:
sid = server_data['sid'].encode('UTF-8')
hostname = server_data['hostname'].encode('UTF-8')
if server_data['label'] is not None:
label = server_data['label'].encode('UTF-8')
else:
label = ''
ip = server_data['ip'].encode('UTF-8')
template = server_data['template'].encode('UTF-8')
cpu = server_data['cpu'].encode('UTF-8')
ram = server_data['ram'].encode('UTF-8')
storage = server_data['storage'].encode('UTF-8')
status = server_data['status'].encode('UTF-8')
self.sendLine('{0:11} {1:32} {2:15} {3:15} {4:5} {5:6} {6:7} {7:10}'.format(sid, hostname, label, ip, cpu, ram, storage, status))
except Exception as e:
self.sendLine('Error reading host information, perhaps server is re-imaging?')
### server management ####################
def do_server(self, cmd='none', param1='none', param2='none', param3='none', param4='none'):
"""server: Server management.
Usage:
server [runmode|rename|reversedns|console|build|delete]
server runmode <serverid> [normal|safe]
server rename <serverid> <name>
server reversedns <serverid> <hostname>
server console
server build <cpu> <ram> <storage> <os>
- run 'list templates' to get the os ID.
server delete <serverid>
"""
if 'runmode' == cmd:
self._server_runmode(param1, param2)
elif 'rename' == cmd:
self._server_rename(param1, param2)
elif 'reversedns' == cmd:
self._server_reversedns(param1, param2)
elif 'console' == cmd:
self._server_console(param1)
elif 'build' == cmd:
self._server_build(param1, param2, param3, param4)
elif 'delete' == cmd:
self._server_delete(param1)
else:
self.do_help('server')
def _server_runmode(self, serverid, new_mode):
"""_server_runmode: Change the run mode for a server. Usage: runmode <serverid> [normal|safe]"""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
mode = self.cac.set_run_mode(serverid, new_mode)
status = mode['status'].encode('UTF-8')
if 'ok' == status:
msg = 'Server with sid ' + str(serverid) + ' changed runmode to ' + new_mode
log.msg(msg)
self.sendLine(msg)
else:
error_description = mode['error_description'].encode('UTF-8')
msg = status + ': ' + error_description + ': ' + serverid
log.msg('Runmode: ' + msg)
self.sendLine(msg)
def _server_rename(self, serverid, name):
"""rename: Change the label for a server. Usage: rename <serverid> <name>"""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
rename = self.cac.rename_server(serverid, name)
status = rename['status'].encode('UTF-8')
if 'ok' == status:
msg = 'Server with sid ' + serverid + ' renamed to ' + name
log.msg(msg)
self.sendLine(msg)
else:
error_description = rename['error_description'].encode('UTF-8')
msg = status + ': ' + error_description + ': ' + serverid
log.msg('Rename server: ' + msg)
self.sendLine(msg)
def _server_reversedns(self, serverid, hostname):
"""_server_reversedns: Modify the reverse DNS & hostname of the VPS. Usage: server reversedns <serverid> <hostname>"""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
rdns = self.cac.change_hostname(serverid, hostname)
status = rdns['status'].encode('UTF-8')
if 'ok' == status:
msg = 'Server with sid ' + serverid + ' modified reverse DNS and hostname to ' + hostname
log.msg(msg)
self.sendLine(msg)
else:
error_description = rdns['error_description'].encode('UTF-8')
msg = status + ': ' + error_description + ': ' + serverid
log.msg('Modify reverse DNS: ' + msg)
self.sendLine(msg)
def _server_console(self, serverid):
if not self.using:
self.sendLine('No account selected! Type: help use')
return
console = self.cac.get_console_url(serverid)
self.sendLine(console.encode('UTF-8'))
def _server_build(self, cpu='none', ram='none', storage='none', os='none'):
"""_server_build: Build a server. Usage: server build <cpu> <ram> <storage> <os>"""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
server = self.cac.server_build(cpu, ram, storage, os)
status = server['status'].encode('UTF-8')
if 'ok' == status:
taskid = server['taskid']
msg = 'Server created! Task ID (' + str(taskid) + ')'
log.msg(msg)
self.sendLine(msg)
else:
error_description = server['error_description'].encode('UTF-8')
msg = status + ': ' + error_description
log.msg('Server build: ' + msg)
self.sendLine(msg)
self.do_help('server')
def _server_delete(self, serverid):
"""_server_delete: Delete a server and free resources. Usage: server delete <serverid>"""
if not self.using:
self.sendLine('No account selected! Type: help use')
return
delete = self.cac.server_delete(serverid)
status = delete['status'].encode('UTF-8')
if 'ok' == status:
msg = 'Server with sid ' + str(serverid) + ' deleted! '
log.msg(msg)
self.sendLine(msg)
else:
error_description = delete['error_description'].encode('UTF-8')
msg = status + ': ' + error_description + ': ' + serverid
log.msg('Server delete: ' + msg)
self.sendLine(msg)
### account ####################
def do_whoami(self):
"""whoami: Display current account being used for queries"""
if self.using:
self.sendLine('You are currently ' + self.using[0].encode('UTF-8'))
else:
self.sendLine('You are currently nobody. Cheer up, you\'ll be somebody someday.')
def do_use(self, email):
"""use: Select an account to use for API calls. Usage: use <account>"""
params = [email]
self.cursor.execute("SELECT account, apikey FROM accounts WHERE account=?", params)
rows = self.cursor.fetchall()
if 0 == len(rows):
self.sendLine('No account found for ' + email)
else:
for row in rows:
self.using = [row[0], row[1]]
self.cac = CACPy.CACPy(self.using[0], self.using[1])
log.msg('Changed account ' + email)
self.sendLine('Now using ' + email)
def do_account(self, cmd='none', email='none', apikey='none'):
"""account: Perform account management functions. Usage: account [add|delete]"""
if 'add' == cmd:
self._account_add(email, apikey)
elif 'delete' == cmd:
self._account_delete(email)
else:
self.do_help('account')
def _account_delete(self, email):
"""del_account: delete an account. Example: del_account example@example.com"""
# todo: check if account exist first
params = [email]
self.cursor.execute("DELETE FROM accounts WHERE account=?", params)
self.db.commit()
if self.using:
if email == self.using[0]:
self.using = []
log.msg('Deleted account for ' + email)
self.sendLine('Deleted! I hope you were sure because this cannot be undone.')
def _account_add(self, email, apikey):
"""add_account: """
self.sendLine('Adding entry for ' + email + ' with apikey ' + apikey)
params = [email, apikey]
self.cursor.execute("INSERT INTO accounts VALUES (?, ?)", params)
self.db.commit()
log.msg('Added account for ' + email)
self.sendLine('Done!')
def _list_accounts(self):
"""list_accounts: List all configured services"""
self.cursor.execute("SELECT account FROM accounts;")
rows = self.cursor.fetchall()
i = 0
for row in rows:
i = i + 1
self.sendLine(str(i) + '.\t' + row[0].encode('utf-8'))
def do_banner(self):
"""banner: Display CloudAtCostConsole banner"""
banner = 'ICAgX19fX18gXyAgICAgICAgICAgICAgICAgXyAgICAgICAgIF8gICAgICBfX19fXyAgICAgICAgICBfICAgCiAgLyBfX19ffCB8ICAgICAgICAgICAgICAgfCB8ICAgICAgIHwgfCAgICAvIF9fX198ICAgICAgICB8IHwgIAogfCB8ICAgIHwgfCBfX18gIF8gICBfICBfX3wgfCAgIF9fIF98IHxfICB8IHwgICAgIF9fXyAgX19ffCB8XyAKIHwgfCAgICB8IHwvIF8gXHwgfCB8IHwvIF9gIHwgIC8gX2AgfCBfX3wgfCB8ICAgIC8gXyBcLyBfX3wgX198CiB8IHxfX19ffCB8IChfKSB8IHxffCB8IChffCB8IHwgKF98IHwgfF8gIHwgfF9fX3wgKF8pIFxfXyBcIHxfIAogIFxfX19fX3xffFxfX18vIFxfXyxffFxfXyxffCAgXF9fLF98XF9ffCAgXF9fX19fXF9fXy98X19fL1xfX3wKICAgX19fX18gICAgX19fXyAgICBfICAgXyAgICBfX19fXyAgICBfX19fICAgIF8gICAgICAgIF9fX19fXyAgCiAgLyBfX19ffCAgLyBfXyBcICB8IFwgfCB8ICAvIF9fX198ICAvIF9fIFwgIHwgfCAgICAgIHwgIF9fX198IAogfCB8ICAgICAgfCB8ICB8IHwgfCAgXHwgfCB8IChfX18gICB8IHwgIHwgfCB8IHwgICAgICB8IHxfXyAgICAKIHwgfCAgICAgIHwgfCAgfCB8IHwgLiBgIHwgIFxfX18gXCAgfCB8ICB8IHwgfCB8ICAgICAgfCAgX198ICAgCiB8IHxfX19fICB8IHxfX3wgfCB8IHxcICB8ICBfX19fKSB8IHwgfF9ffCB8IHwgfF9fX18gIHwgfF9fX18gIAogIFxfX19fX3wgIFxfX19fLyAgfF98IFxffCB8X19fX18vICAgXF9fX18vICB8X19fX19ffCB8X19fX19ffCAKICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgCiAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgIA=='
self.sendLine(banner.decode("base64"))
        self.sendLine('CloudAtCostConsole Copyright (c) 2015 foospidy')
self.sendLine('Don\'t forget to whitelist your IP address in your CaC panel settings.\n')
def do_quit(self):
"""quit: Quit CloudAtCostConsole"""
        log.msg('Quitters gonna quit!')
self.sendLine('Goodbye.')
self.transport.loseConnection()
def connectionLost(self, reason):
# stop the reactor
log.msg('Connection lost! Shutting down reactor.')
reactor.stop()
|
dahlstrom-g/intellij-community
|
refs/heads/master
|
python/testData/refactoring/move/usageFromFunction/before/src/a.py
|
83
|
def f():
pass
def use_f():
f()
|
gkoelln/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/fczenit.py
|
30
|
# coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
)
class FczenitIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?fc-zenit\.ru/video/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://fc-zenit.ru/video/41044/',
'md5': '0e3fab421b455e970fa1aa3891e57df0',
'info_dict': {
'id': '41044',
'ext': 'mp4',
'title': 'Так пишется история: казанский разгром ЦСКА на «Зенит-ТВ»',
'timestamp': 1462283735,
'upload_date': '20160503',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
msi_id = self._search_regex(
r"(?s)config\s*=\s*{.+?video_id\s*:\s*'([^']+)'", webpage, 'msi id')
msi_data = self._download_json(
'http://player.fc-zenit.ru/msi/video', msi_id, query={
'video': msi_id,
})['data']
title = msi_data['name']
formats = [{
'format_id': q.get('label'),
'url': q['url'],
'height': int_or_none(q.get('label')),
} for q in msi_data['qualities'] if q.get('url')]
self._sort_formats(formats)
tags = [tag['label'] for tag in msi_data.get('tags', []) if tag.get('label')]
return {
'id': video_id,
'title': title,
'thumbnail': msi_data.get('preview'),
'formats': formats,
'duration': float_or_none(msi_data.get('duration')),
'timestamp': int_or_none(msi_data.get('date')),
'tags': tags,
}
|
BT-jmichaud/e-commerce
|
refs/heads/8.0
|
sale_payment_method/__openerp__.py
|
15
|
# -*- coding: utf-8 -*-
##############################################################################
#
# sale_payment_method for OpenERP
# Copyright (C) 2011 Akretion Sébastien BEAU <sebastien.beau@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Sale Payment Method',
'version': '0.2.1',
'category': 'Generic Modules/Others',
'license': 'AGPL-3',
'author': "Akretion,Odoo Community Association (OCA)",
'website': 'http://www.akretion.com/',
'depends': ['sale',
],
'data': ['sale_view.xml',
'payment_method_view.xml',
'security/ir.model.access.csv',
'security/rules.xml',
],
'demo': [],
'installable': True,
}
|
sbg2133/miscellaneous_projects
|
refs/heads/master
|
lic/streamlines_scratch.py
|
1
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.ndimage as ndimage
from magnetic_dipole import dipole
plt.ion()
fig, ax = plt.subplots(figsize = (8, 5))
def pix_idx(P, ys, xs):
"""Returns grid indices corresponding to point P"""
x_idx = (np.abs(xs[0] - P[0])).argmin()
y_idx = (np.abs(ys[:,0] - P[1])).argmin()
return x_idx, y_idx
def get_vector(P, vectors, ys, xs):
"""Returns the vector located at pixel coordinate P"""
x_idx, y_idx = pix_idx(P, ys, xs)
vx, vy = vectors[0][y_idx, x_idx], vectors[1][y_idx, x_idx]
angle = np.degrees(np.arctan2(vy, vx))
return vx, vy, angle
def plot_pot():
"""plots the scalar field"""
ax = plt.gca()
im = ax.imshow(pot, origin = "lower", extent = [xs[0].min(), xs[0].max(),\
ys[:,0].min(), ys[:,0].max()])
return
def plot_vectors(vectors, ys, xs, nskip = 1, pot = False):
"""Creates an arrow plot of the vector field"""
skip = (slice(None, None, nskip), slice(None, None, nskip))
ax = plt.gca()
if pot:
plot_pot()
mag = np.sqrt(vectors[0]**2 + vectors[1]**2)
ax.quiver(xs[skip], ys[skip], (vectors[0]/mag)[skip], (vectors[1]/mag)[skip],\
angles = 'xy', units = 'xy', scale_units = 'xy', headwidth = 2,\
headaxislength = 2, headlength = 2, scale = 1)
#ax.set(xticks = X, yticks = Y, aspect=1, title='Scratch', xlabel = 'x', ylabel = 'y')
return
def new_P(idx, start, temp_pos, ys, xs, back = False):
"""Uses Euler's method to advect the streamline to the next position.
@param idx: integers corresponding to start pixel position
@param start: the starting coordinates (center of streamline)
@param temp_pos: buffer for new advected position
returns: temp_pos
seg: distance to the previous temp_pos"""
if (idx == 1):
vx, vy = get_vector(start, vectors, ys, xs)[:2]
else:
vx, vy, angle = get_vector(temp_pos[idx - 1], vectors, ys, xs)
if (np.isnan(vx) or np.isnan(vy)):
pass
#else:
#vx2, vy2, angle2 = get_vector(temp_pos[idx - 2], vectors, ys, xs)
#if (np.abs(angle2 - angle) >= 155.):
# return temp_pos, None
if back:
vx *= -1.
vy *= -1.
Px = np.int(temp_pos[idx - 1][0])
Py = np.int(temp_pos[idx - 1][1])
mag = np.sqrt(vx**2 + vy**2)
    # parametric distances to the four edges of the current pixel
    # (index 0 is the x coordinate, index 1 is the y coordinate)
    s_top = ((Py + 1) - temp_pos[idx - 1][1])*(mag/vy)
    s_bot = (Py - temp_pos[idx - 1][1])*(mag/vy)
    s_right = ((Px + 1) - temp_pos[idx - 1][0])*(mag/vx)
    s_left = (Px - temp_pos[idx - 1][0])*(mag/vx)
slist = np.array([s_top, s_bot, s_left, s_right])
for s in slist:
if (np.isnan(s)):
return temp_pos, None
if (slist < 0).all():
s = np.min(np.abs(slist))
else:
s = np.min(slist[slist >= 0.])
# add small amount to s to ensure that P is new pixel
s += 0.08
new_Px = temp_pos[idx - 1][0] + ((vx/mag)*s)
new_Py = temp_pos[idx - 1][1] + ((vy/mag)*s)
if (np.abs(new_Px - temp_pos[idx - 1][0]) > 2.):
return temp_pos, None
if (np.abs(new_Py - temp_pos[idx - 1][1]) > 2.):
return temp_pos, None
#if (new_Px > xs[0].max() or new_Px < xs[0].min()):
# return temp_pos, None
#if (new_Py > ys[:,0].max() or new_Py < ys[:,0].min()):
# return temp_pos, None
temp_pos.append((new_Px, new_Py))
return temp_pos, s
def sl(start, vectors, ys, xs, plot = False):
"""Calculates a streamline centered on start"""
# forward advection
forward_pos = []
forward_seg = []
forward_pos.append(start)
for i in range(1, Psteps):
forward_pos, seg = new_P(i, start, forward_pos, ys, xs)
if seg is not None:
forward_seg.append(seg)
else:
break
# backward advection
back_pos = []
back_seg = []
back_pos.append(start)
for i in range(1, Psteps):
back_pos, seg = new_P(i, start, back_pos, ys, xs, back = True)
if seg is not None:
back_seg.append(seg)
else:
break
streamline = list(reversed(forward_pos[1:]))
streamline.extend(back_pos)
# clean streamline of NaNs
remove_idx = []
count = 0
for P in streamline:
__, __, angle = get_vector(P, vectors, ys, xs)
if (np.isnan(angle)):
remove_idx.append(count)
count += 1
if remove_idx:
for idx in sorted(remove_idx, reverse=True):
del streamline[idx]
#temp = clean_streamline(temp)
streamline = np.array(streamline)
if (plot):
ax = plt.gca()
for P in streamline:
dx, dy, __ = get_vector(P, vectors, ys, xs)
mag = np.sqrt(dx**2 + dy**2)
ax.quiver(P[0], P[1], (dx/mag), (dy/mag), angles = 'xy',\
units = 'xy', scale_units = 'xy', headwidth = 2, headaxislength = 2,\
headlength = 2, scale = 1)
if (plot):
ax.scatter(streamline[:,0], streamline[:,1])
ax.plot(streamline[:,0], streamline[:,1])
return forward_seg, forward_pos, back_seg, back_pos, streamline
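# A minimal sketch of launching a single streamline (assumes the module-level
# `vectors`, X and Y defined near the bottom of this file):
#
#     xs, ys = np.meshgrid(X, Y)
#     fseg, fpos, bseg, bpos, line = sl([100., 100.], vectors, ys, xs, plot = True)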
def plot_streams(vectors, xs, ys, nskip = 1, vec = False, pot = False):
"""plots all streamlines. Launches a streamline from every
grid point, modulo nskip.
@param nskip: skip every nskip pixel
@param vec: show arrow vectors
@param pot: show potentials"""
if vec:
plot_vectors(vectors, ys, xs, nskip)
if pot:
plot_pot()
ax = plt.gca()
for i in xs[0][::nskip]:
for j in ys[:,0][::nskip]:
__, __, __, __, streamline = sl([i, j], vectors, ys, xs)
if not streamline.size:
continue
#if len(s.streamline[:,0]) < 5:
# continue
ax.plot(streamline[:,0], streamline[:,1])
#ax.set(xticks = X, yticks = Y, aspect=1, title='Scratch', xlabel = 'x', ylabel = 'y')
plt.tight_layout()
return
def kern(k, s, L, hanning = False):
"""the convolution kernel"""
# boxcar filter
if not hanning:
return k + s
else:
return k + (np.cos((s * np.pi) / L) + 1.)/2.
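# A quick illustrative sketch of the two kernel choices above, evaluated for a
# single arc length s = 1.0 with window length L = 31 (values are approximate):
#
#     >>> kern(0., 1.0, 31.)                           # boxcar: k + s
#     1.0
#     >>> round(kern(0., 1.0, 31., hanning = True), 3)
#     0.997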
def partial_integral(forward_seg, forward_pos, back_seg, back_pos,\
streamline, texture, ys, xs, hanning = False, back = False):
"""computes the line integral convolution in the forward
or backward direction along a streamline.
returns: Fsum - the LIC in one direction, for a single streamline
hsum - a normalization factor"""
if back:
segs = back_seg
else:
segs = forward_seg
    # prepend a zero segment length (np.insert returns a new array, so keep the result)
    segs = np.insert(segs, 0, 0.)
L = len(segs)
klen = 31
Fsum = 0.
hsum = 0.
s = 0.
k0, k1 = 0., 0.
for l in range(L):
for i in range(1, len(segs) - 2):
s += segs[i - 1]
s_plus = s + segs[i + 1]
if not hanning:
k1 += s_plus
k0 += s
else:
k1 += (np.cos((s_plus * np.pi) / klen) + 1.)/2.
k0 += (np.cos((s * np.pi) / klen) + 1.)/2.
h = k1 - k0
hsum += h
if back:
tex_val = texture[pix_idx(back_pos[l], ys, xs)]
else:
tex_val = texture[pix_idx(forward_pos[l], ys, xs)]
Fsum += tex_val * h
return Fsum, hsum
def lic(forward_seg, forward_pos, back_seg, back_pos, streamline, texture, ys, xs):
"""performs a line integral convolution for a single streamline."""
    # compute forward integral
F_forward, h_forward = partial_integral(forward_seg, forward_pos,\
back_seg, back_pos, streamline, texture, ys, xs, hanning = True)
F_back, h_back = partial_integral(forward_seg, forward_pos,\
back_seg, back_pos, streamline, texture, ys, xs, back = True, hanning = True)
print F_forward, h_forward
raw_input()
    # avoid a divide-by-zero when the kernel weights sum to zero
    if ((h_forward + h_back) == 0) or ((F_forward + F_back) == 0):
        temp_lic = 0.
    else:
        temp_lic = (F_forward + F_back) / (h_forward + h_back)
return temp_lic
def plot_lic(shape, vectors, texture):
"""plots the LIC"""
xs, ys = np.meshgrid(X,Y)
image = np.zeros((shape[0], shape[1]))
ax = plt.gca()
lics = []
idx = 0
for i in xs[0]:
for j in ys[:,0]:
start = [i,j]
forward_seg, forward_pos, back_seg, back_pos,\
streamline = sl(start, vectors, ys, xs)
if (start == [0,0]):
print forward_pos
print
print forward_seg
print
print dx[0][0], dy[0][0]
temp_lic = lic(forward_seg, forward_pos, back_seg, back_pos,\
streamline, texture, ys, xs)
lics.append(temp_lic)
print temp_lic
raw_input()
if ((idx > 0) and temp_lic == 0.):
temp_lic = lics[idx - 1]
image[pix_idx(start, ys, xs)] = temp_lic
idx += 1
im = ax.imshow(image, origin="lower", cmap = "gist_heat")
plt.autoscale(False)
#ax.set(xticks = X, yticks = Y, aspect=1, title='Scratch', xlabel = 'x', ylabel = 'y')
plt.tight_layout()
return image
xsize, ysize = 800, 800
xmax, ymax = 200, 200
X = np.linspace(0, xmax, xsize)
Y = np.linspace(0, ymax, ysize)
x, y = np.meshgrid(X,Y)
Psteps = 20
"""
### point masses ###
pot = np.zeros((2*xsize, 2*ysize))
mass = [2**10, 2**10]
pos = [(15.4,15.2), (36.8,39.1)]
for i in range(len(pos)):
r = np.sqrt((x - pos[i][0])**2 + (y - pos[i][1])**2)
pot += mass[i] / r
pot[~np.isfinite(pot)] = 0.0
interp = 4
if (interp > 1):
pot = ndimage.zoom(pot, interp, order = 1)
y, x = np.mgrid[:pot.shape[0], :pot.shape[1]]
dy, dx = np.gradient(pot, np.diff(y[:2,0])[0], np.diff(x[0,:2])[0])
"""
### magnetic dipole ###
dx, dy = dipole(m=[5., 5.], r=np.meshgrid(X,Y), r0=[xmax/2. + 0.1, ymax/2. + 0.3]).astype('float32')
vectors = np.array([dx,dy])
white = np.random.rand(xsize, ysize)
with file('texture.dat', 'w') as outfile:
for row in white:
np.savetxt(outfile, row, newline = " ")
outfile.write('\n')
with file('dx.dat', 'w') as outfile:
for row in dx:
np.savetxt(outfile, row, newline = " ")
outfile.write('\n')
with file('dy.dat', 'w') as outfile:
for row in dy:
np.savetxt(outfile, row, newline = " ")
outfile.write('\n')
#plot_streams(vectors, x, y, nskip = 10, vec = True)
image = plot_lic([xsize, ysize], vectors, white)
|
Matt-Deacalion/django
|
refs/heads/master
|
tests/invalid_models_tests/test_backend_specific.py
|
191
|
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.core.checks import Error
from django.db import connections, models
from django.test import mock
from .base import IsolatedModelsTestCase
def dummy_allow_migrate(db, app_label, **hints):
# Prevent checks from being run on the 'other' database, which doesn't have
# its check_field() method mocked in the test.
return db == 'default'
class BackendSpecificChecksTests(IsolatedModelsTestCase):
@mock.patch('django.db.models.fields.router.allow_migrate', new=dummy_allow_migrate)
def test_check_field(self):
""" Test if backend specific checks are performed. """
error = Error('an error', hint=None)
class Model(models.Model):
field = models.IntegerField()
field = Model._meta.get_field('field')
with mock.patch.object(connections['default'].validation, 'check_field', return_value=[error]):
errors = field.check()
self.assertEqual(errors, [error])
|
lyceel/engine
|
refs/heads/master
|
build/win/importlibs/create_importlib_win.py
|
185
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
"""Creates an import library from an import description file."""
import ast
import logging
import optparse
import os
import os.path
import shutil
import subprocess
import sys
import tempfile
_USAGE = """\
Usage: %prog [options] [imports-file]
Creates an import library from imports-file.
Note: this script uses the microsoft assembler (ml.exe) and the library tool
(lib.exe), both of which must be in path.
"""
_ASM_STUB_HEADER = """\
; This file is autogenerated by create_importlib_win.py, do not edit.
.386
.MODEL FLAT, C
.CODE
; Stubs to provide mangled names to lib.exe for the
; correct generation of import libs.
"""
_DEF_STUB_HEADER = """\
; This file is autogenerated by create_importlib_win.py, do not edit.
; Export declarations for generating import libs.
"""
_LOGGER = logging.getLogger()
class _Error(Exception):
pass
class _ImportLibraryGenerator(object):
def __init__(self, temp_dir):
self._temp_dir = temp_dir
def _Shell(self, cmd, **kw):
ret = subprocess.call(cmd, **kw)
_LOGGER.info('Running "%s" returned %d.', cmd, ret)
if ret != 0:
raise _Error('Command "%s" returned %d.' % (cmd, ret))
def _ReadImportsFile(self, imports_file):
# Slurp the imports file.
return ast.literal_eval(open(imports_file).read())
def _WriteStubsFile(self, import_names, output_file):
output_file.write(_ASM_STUB_HEADER)
for name in import_names:
output_file.write('%s PROC\n' % name)
output_file.write('%s ENDP\n' % name)
output_file.write('END\n')
def _WriteDefFile(self, dll_name, import_names, output_file):
output_file.write(_DEF_STUB_HEADER)
output_file.write('NAME %s\n' % dll_name)
output_file.write('EXPORTS\n')
for name in import_names:
name = name.split('@')[0]
output_file.write(' %s\n' % name)
def _CreateObj(self, dll_name, imports):
"""Writes an assembly file containing empty declarations.
For each imported function of the form:
AddClipboardFormatListener@4 PROC
AddClipboardFormatListener@4 ENDP
The resulting object file is then supplied to lib.exe with a .def file
declaring the corresponding non-adorned exports as they appear on the
exporting DLL, e.g.
EXPORTS
AddClipboardFormatListener
In combination, the .def file and the .obj file cause lib.exe to generate
an x86 import lib with public symbols named like
"__imp__AddClipboardFormatListener@4", binding to exports named like
"AddClipboardFormatListener".
All of this is perpetrated in a temporary directory, as the intermediate
artifacts are quick and easy to produce, and of no interest to anyone
after the fact."""
# Create an .asm file to provide stdcall-like stub names to lib.exe.
asm_name = dll_name + '.asm'
_LOGGER.info('Writing asm file "%s".', asm_name)
with open(os.path.join(self._temp_dir, asm_name), 'wb') as stubs_file:
self._WriteStubsFile(imports, stubs_file)
# Invoke on the assembler to compile it to .obj.
obj_name = dll_name + '.obj'
cmdline = ['ml.exe', '/nologo', '/c', asm_name, '/Fo', obj_name]
self._Shell(cmdline, cwd=self._temp_dir, stdout=open(os.devnull))
return obj_name
def _CreateImportLib(self, dll_name, imports, architecture, output_file):
"""Creates an import lib binding imports to dll_name for architecture.
On success, writes the import library to output file.
"""
obj_file = None
# For x86 architecture we have to provide an object file for correct
# name mangling between the import stubs and the exported functions.
if architecture == 'x86':
obj_file = self._CreateObj(dll_name, imports)
# Create the corresponding .def file. This file has the non stdcall-adorned
# names, as exported by the destination DLL.
def_name = dll_name + '.def'
_LOGGER.info('Writing def file "%s".', def_name)
with open(os.path.join(self._temp_dir, def_name), 'wb') as def_file:
self._WriteDefFile(dll_name, imports, def_file)
# Invoke on lib.exe to create the import library.
# We generate everything into the temporary directory, as the .exp export
# files will be generated at the same path as the import library, and we
# don't want those files potentially gunking the works.
dll_base_name, ext = os.path.splitext(dll_name)
lib_name = dll_base_name + '.lib'
cmdline = ['lib.exe',
'/machine:%s' % architecture,
'/def:%s' % def_name,
'/out:%s' % lib_name]
if obj_file:
cmdline.append(obj_file)
self._Shell(cmdline, cwd=self._temp_dir, stdout=open(os.devnull))
# Copy the .lib file to the output directory.
shutil.copyfile(os.path.join(self._temp_dir, lib_name), output_file)
_LOGGER.info('Created "%s".', output_file)
def CreateImportLib(self, imports_file, output_file):
# Read the imports file.
imports = self._ReadImportsFile(imports_file)
# Creates the requested import library in the output directory.
self._CreateImportLib(imports['dll_name'],
imports['imports'],
imports.get('architecture', 'x86'),
output_file)
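# A hypothetical example of an imports description file (it is parsed with
# ast.literal_eval in _ReadImportsFile above, so it is an ordinary Python literal):
#
#   {
#     'dll_name': 'user32.dll',
#     'architecture': 'x86',
#     'imports': [
#       'AddClipboardFormatListener@4',
#     ],
#   }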
def main():
parser = optparse.OptionParser(usage=_USAGE)
parser.add_option('-o', '--output-file',
help='Specifies the output file path.')
parser.add_option('-k', '--keep-temp-dir',
action='store_true',
help='Keep the temporary directory.')
parser.add_option('-v', '--verbose',
action='store_true',
help='Verbose logging.')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('You must provide an imports file.')
if not options.output_file:
parser.error('You must provide an output file.')
options.output_file = os.path.abspath(options.output_file)
if options.verbose:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARN)
temp_dir = tempfile.mkdtemp()
_LOGGER.info('Created temporary directory "%s."', temp_dir)
try:
# Create a generator and create the import lib.
generator = _ImportLibraryGenerator(temp_dir)
ret = generator.CreateImportLib(args[0], options.output_file)
except Exception, e:
_LOGGER.exception('Failed to create import lib.')
ret = 1
finally:
if not options.keep_temp_dir:
shutil.rmtree(temp_dir)
_LOGGER.info('Deleted temporary directory "%s."', temp_dir)
return ret
if __name__ == '__main__':
sys.exit(main())
|
boomsbloom/dtm-fmri
|
refs/heads/master
|
DTM/for_gensim/lib/python2.7/site-packages/numpy/lib/info.py
|
51
|
"""
Basic functions used by several sub-packages and
useful to have in the main name-space.
Type Handling
-------------
================ ===================
iscomplexobj Test for complex object, scalar result
isrealobj Test for real object, scalar result
iscomplex Test for complex elements, array result
isreal Test for real elements, array result
imag Imaginary part
real Real part
real_if_close Turns complex number with tiny imaginary part to real
isneginf Tests for negative infinity, array result
isposinf Tests for positive infinity, array result
isnan Tests for nans, array result
isinf Tests for infinity, array result
isfinite Tests for finite numbers, array result
isscalar True if argument is a scalar
nan_to_num Replaces NaN's with 0 and infinities with large numbers
cast Dictionary of functions to force cast to each type
common_type Determine the minimum common type code for a group
of arrays
mintypecode Return minimal allowed common typecode.
================ ===================
Index Tricks
------------
================ ===================
mgrid Method which allows easy construction of N-d
'mesh-grids'
``r_`` Append and construct arrays: turns slice objects into
ranges and concatenates them, for 2d arrays appends rows.
index_exp Konrad Hinsen's index_expression class instance which
can be useful for building complicated slicing syntax.
================ ===================
Useful Functions
----------------
================ ===================
select Extension of where to multiple conditions and choices
extract Extract 1d array from flattened array according to mask
insert Insert 1d array of values into Nd array according to mask
linspace Evenly spaced samples in linear space
logspace Evenly spaced samples in logarithmic space
fix Round x to nearest integer towards zero
mod Modulo mod(x,y) = x % y except keeps sign of y
amax Array maximum along axis
amin Array minimum along axis
ptp Array max-min along axis
cumsum Cumulative sum along axis
prod Product of elements along axis
cumprod          Cumulative product along axis
diff Discrete differences along axis
angle Returns angle of complex argument
unwrap Unwrap phase along given axis (1-d algorithm)
sort_complex Sort a complex-array (based on real, then imaginary)
trim_zeros Trim the leading and trailing zeros from 1D array.
vectorize A class that wraps a Python function taking scalar
arguments into a generalized function which can handle
arrays of arguments using the broadcast rules of
numerix Python.
================ ===================
Shape Manipulation
------------------
================ ===================
squeeze Return a with length-one dimensions removed.
atleast_1d Force arrays to be >= 1D
atleast_2d Force arrays to be >= 2D
atleast_3d Force arrays to be >= 3D
vstack Stack arrays vertically (row on row)
hstack Stack arrays horizontally (column on column)
column_stack Stack 1D arrays as columns into 2D array
dstack Stack arrays depthwise (along third dimension)
stack Stack arrays along a new axis
split Divide array into a list of sub-arrays
hsplit Split into columns
vsplit Split into rows
dsplit Split along third dimension
================ ===================
Matrix (2D Array) Manipulations
-------------------------------
================ ===================
fliplr 2D array with columns flipped
flipud 2D array with rows flipped
rot90 Rotate a 2D array a multiple of 90 degrees
eye Return a 2D array with ones down a given diagonal
diag Construct a 2D array from a vector, or return a given
diagonal from a 2D array.
mat Construct a Matrix
bmat Build a Matrix from blocks
================ ===================
Polynomials
-----------
================ ===================
poly1d A one-dimensional polynomial class
poly Return polynomial coefficients from roots
roots Find roots of polynomial given coefficients
polyint Integrate polynomial
polyder Differentiate polynomial
polyadd Add polynomials
polysub Subtract polynomials
polymul Multiply polynomials
polydiv Divide polynomials
polyval Evaluate polynomial at given argument
================ ===================
Iterators
---------
================ ===================
Arrayterator A buffered iterator for big arrays.
================ ===================
Import Tricks
-------------
================ ===================
ppimport Postpone module import until trying to use it
ppimport_attr Postpone module import until trying to use its attribute
ppresolve Import postponed module and return it.
================ ===================
Machine Arithmetics
-------------------
================ ===================
machar_single Single precision floating point arithmetic parameters
machar_double Double precision floating point arithmetic parameters
================ ===================
Threading Tricks
----------------
================ ===================
ParallelExec Execute commands in parallel thread.
================ ===================
Array Set Operations
-----------------------
Set operations for numeric arrays based on sort() function.
================ ===================
unique Unique elements of an array.
isin Test whether each element of an ND array is present
anywhere within a second array.
ediff1d Array difference (auxiliary function).
intersect1d Intersection of 1D arrays with unique elements.
setxor1d Set exclusive-or of 1D arrays with unique elements.
in1d Test whether elements in a 1D array are also present in
another array.
union1d Union of 1D arrays with unique elements.
setdiff1d Set difference of 1D arrays with unique elements.
================ ===================
"""
from __future__ import division, absolute_import, print_function
depends = ['core', 'testing']
global_symbols = ['*']
|
hkawasaki/kawasaki-aio8-0
|
refs/heads/gacco2/master
|
i18n/extract.py
|
12
|
#!/usr/bin/env python
"""
See https://edx-wiki.atlassian.net/wiki/display/ENG/PO+File+workflow
This task extracts all English strings from all source code
and produces three human-readable files:
conf/locale/en/LC_MESSAGES/django-partial.po
conf/locale/en/LC_MESSAGES/djangojs-partial.po
conf/locale/en/LC_MESSAGES/mako.po
This task will clobber any existing django.po file.
This is because django-admin.py makemessages hardcodes this filename
and it cannot be overridden.
"""
from datetime import datetime
import importlib
import os
import os.path
import logging
import sys
import argparse
from path import path
from polib import pofile
from i18n.config import BASE_DIR, LOCALE_DIR, CONFIGURATION
from i18n.execute import execute, remove_file
from i18n.segment import segment_pofiles
EDX_MARKER = "edX translation file"
LOG = logging.getLogger(__name__)
DEVNULL = open(os.devnull, 'wb')
def base(path1, *paths):
"""Return a relative path from BASE_DIR to path1 / paths[0] / ... """
return BASE_DIR.relpathto(path1.joinpath(*paths))
def main(verbosity=1):
"""
Main entry point of script
"""
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOCALE_DIR.parent.makedirs_p()
source_msgs_dir = CONFIGURATION.source_messages_dir
remove_file(source_msgs_dir.joinpath('django.po'))
# Extract strings from mako templates.
verbosity_map = {
0: "-q",
1: "",
2: "-v",
}
babel_verbosity = verbosity_map.get(verbosity, "")
babel_mako_cmd = 'pybabel {verbosity} extract -F {config} -c "Translators:" . -o {output}'
babel_mako_cmd = babel_mako_cmd.format(
verbosity=babel_verbosity,
config=base(LOCALE_DIR, 'babel_mako.cfg'),
output=base(CONFIGURATION.source_messages_dir, 'mako.po'),
)
if verbosity:
stderr = None
else:
stderr = DEVNULL
execute(babel_mako_cmd, working_directory=BASE_DIR, stderr=stderr)
makemessages = "django-admin.py makemessages -l en -v{}".format(verbosity)
ignores = " ".join('--ignore="{}/*"'.format(d) for d in CONFIGURATION.ignore_dirs)
if ignores:
makemessages += " " + ignores
# Extract strings from django source files, including .py files.
make_django_cmd = makemessages + ' --extension html'
execute(make_django_cmd, working_directory=BASE_DIR, stderr=stderr)
# Extract strings from Javascript source files.
make_djangojs_cmd = makemessages + ' -d djangojs --extension js'
execute(make_djangojs_cmd, working_directory=BASE_DIR, stderr=stderr)
# makemessages creates 'django.po'. This filename is hardcoded.
# Rename it to django-partial.po to enable merging into django.po later.
os.rename(
source_msgs_dir.joinpath('django.po'),
source_msgs_dir.joinpath('django-partial.po')
)
# makemessages creates 'djangojs.po'. This filename is hardcoded.
# Rename it to djangojs-partial.po to enable merging into djangojs.po later.
os.rename(
source_msgs_dir.joinpath('djangojs.po'),
source_msgs_dir.joinpath('djangojs-partial.po')
)
files_to_clean = set()
# Extract strings from third-party applications.
for app_name in CONFIGURATION.third_party:
# Import the app to find out where it is. Then use pybabel to extract
# from that directory.
app_module = importlib.import_module(app_name)
app_dir = path(app_module.__file__).dirname().dirname()
output_file = source_msgs_dir / (app_name + ".po")
files_to_clean.add(output_file)
babel_cmd = 'pybabel {verbosity} extract -F {config} -c "Translators:" {app} -o {output}'
babel_cmd = babel_cmd.format(
verbosity=babel_verbosity,
config=LOCALE_DIR / 'babel_third_party.cfg',
app=app_name,
output=output_file,
)
execute(babel_cmd, working_directory=app_dir, stderr=stderr)
# Segment the generated files.
segmented_files = segment_pofiles("en")
files_to_clean.update(segmented_files)
# Finish each file.
for filename in files_to_clean:
LOG.info('Cleaning %s' % filename)
po = pofile(source_msgs_dir.joinpath(filename))
# replace default headers with edX headers
fix_header(po)
# replace default metadata with edX metadata
fix_metadata(po)
# remove key strings which belong in messages.po
strip_key_strings(po)
po.save()
def fix_header(po):
"""
Replace default headers with edX headers
"""
# By default, django-admin.py makemessages creates this header:
#
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the PACKAGE package.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
po.metadata_is_fuzzy = [] # remove [u'fuzzy']
header = po.header
fixes = (
('SOME DESCRIPTIVE TITLE', EDX_MARKER),
('Translations template for PROJECT.', EDX_MARKER),
('YEAR', str(datetime.utcnow().year)),
('ORGANIZATION', 'edX'),
("THE PACKAGE'S COPYRIGHT HOLDER", "EdX"),
(
'This file is distributed under the same license as the PROJECT project.',
'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'
),
(
'This file is distributed under the same license as the PACKAGE package.',
'This file is distributed under the GNU AFFERO GENERAL PUBLIC LICENSE.'
),
('FIRST AUTHOR <EMAIL@ADDRESS>', 'EdX Team <info@edx.org>'),
)
for src, dest in fixes:
header = header.replace(src, dest)
po.header = header
def fix_metadata(po):
"""
Replace default metadata with edX metadata
"""
# By default, django-admin.py makemessages creates this metadata:
#
# {u'PO-Revision-Date': u'YEAR-MO-DA HO:MI+ZONE',
# u'Language': u'',
# u'Content-Transfer-Encoding': u'8bit',
# u'Project-Id-Version': u'PACKAGE VERSION',
# u'Report-Msgid-Bugs-To': u'',
# u'Last-Translator': u'FULL NAME <EMAIL@ADDRESS>',
# u'Language-Team': u'LANGUAGE <LL@li.org>',
# u'POT-Creation-Date': u'2013-04-25 14:14-0400',
# u'Content-Type': u'text/plain; charset=UTF-8',
# u'MIME-Version': u'1.0'}
fixes = {
'PO-Revision-Date': datetime.utcnow(),
'Report-Msgid-Bugs-To': 'openedx-translation@googlegroups.com',
'Project-Id-Version': '0.1a',
'Language': 'en',
'Last-Translator': '',
'Language-Team': 'openedx-translation <openedx-translation@googlegroups.com>',
}
po.metadata.update(fixes)
def strip_key_strings(po):
"""
Removes all entries in PO which are key strings.
These entries should appear only in messages.po, not in any other po files.
"""
newlist = [entry for entry in po if not is_key_string(entry.msgid)]
del po[:]
po += newlist
def is_key_string(string):
"""
returns True if string is a key string.
Key strings begin with underscore.
"""
return len(string) > 1 and string[0] == '_'
if __name__ == '__main__':
# pylint: disable=invalid-name
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--verbose', '-v', action='count', default=0)
args = parser.parse_args()
main(verbosity=args.verbose)
|