| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, ⌀ = null) |
|---|---|---|---|---|
ryfx/gyp
|
refs/heads/master
|
test/variables/commands/test.py
|
337
|
print "sample\\path\\foo.cpp"
|
bitcraft/pyglet
|
refs/heads/master
|
contrib/experimental/buffer/streamers.py
|
1
|
#!/usr/bin/python
# $Id:$
import random
import sys
from pyglet.gl import *
from pyglet import clock
from pyglet import font
from pyglet import graphics
from pyglet import window
MAX_STREAMERS = 1000
if len(sys.argv) > 1:
MAX_STREAMERS = int(sys.argv[1])
MAX_ADD_STREAMERS = MAX_STREAMERS // 10
MIN_STREAMER_LENGTH = 6
MAX_STREAMER_LENGTH = 20
STREAMER_SEGMENT_SIZE = 5
STREAMER_PULL_FORCE = 10
GRAVITY = -250
win = window.Window(vsync=False)
batch = graphics.Batch()
streamers = list()
colors = [
[170, 0, 0],
[0, 255, 100],
[80, 100, 255],
[40, 180, 180],
[200, 255, 100],
[255, 70, 200],
]
def add_streamers():
dx = (random.random() - .5) * win.width / 4
length = random.randint(MIN_STREAMER_LENGTH, MAX_STREAMER_LENGTH)
position = list()
for i in range(length):
if i & 1:
position.append(
win.width / 2 + STREAMER_SEGMENT_SIZE - dx * i * .05)
else:
position.append(win.width / 2 - dx * i * .05)
position.append(-i * STREAMER_SEGMENT_SIZE / 2)
# Insert degenerate triangles at start and end
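# (Duplicating the first and last vertex pairs produces zero-area triangles,
# so every streamer can live in one shared GL_TRIANGLE_STRIP batch without
# visible geometry connecting it to its neighbours.)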
position = position[:2] + position + position[-2:]
length += 2
color = random.choice(colors) * length
streamer = batch.add(length, GL_TRIANGLE_STRIP, None,
('v2f/stream', position),
('c3B/static', color))
streamer.dx = dx
streamer.dy = win.height * (.8 + random.random() * .2)
streamer.dead = False
streamers.append(streamer)
def update_streamers():
global streamers
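# Note: dt is read from module scope here; it is assigned each frame by the
# main loop below before update_streamers() is called.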
for streamer in streamers:
dx = streamer.dx * dt
streamer.dy += GRAVITY * dt
vertices = streamer.vertices
vertices[2] += dx
vertices[3] += streamer.dy * dt
for i in range(4, len(vertices) - 2):
if i & 1:
dy = vertices[i - 2] - vertices[i]
vertices[i] += STREAMER_PULL_FORCE * dy * dt
else:
vertices[i] += dx
# Update degenerates
vertices[:2] = vertices[2:4]
vertices[-2:] = vertices[-4:-2]
if vertices[-1] <= -100:
streamer.delete()
streamer.dead = True
streamers = [p for p in streamers if not p.dead]
stats_text = font.Text(font.load('', 12), '',
x=win.width, y=0,
halign='right')
def update_stats(dt):
np = len(streamers)
usage = streamers[0].domain.allocator.get_usage()
fragmentation = streamers[0].domain.allocator.get_fragmentation()
blocks = len(streamers[0].domain.allocator.starts)
stats_text.text = \
'Streamers: %d Blocks: %d Usage: %d%% Fragmentation: %d%%' % \
(np, blocks, usage * 100, fragmentation * 100)
clock.schedule_interval(update_stats, 1)
fps_text = clock.ClockDisplay()
while not win.has_exit:
win.dispatch_events()
dt = clock.tick()
dt = min(dt, 0.05)
update_streamers()
for i in range(min(MAX_ADD_STREAMERS, MAX_STREAMERS - len(streamers))):
add_streamers()
win.clear()
batch.draw()
stats_text.draw()
fps_text.draw()
win.flip()
|
tgoyne/xy-vsfilter
|
refs/heads/master
|
src/thirdparty/gtest/test/gtest_list_tests_unittest.py
|
1898
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with the --gtest_list_tests flag and verifying the output.
"""
__author__ = 'phanna@google.com (Patrick Hanna)'
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
"""Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output_re, other_flag):
"""Runs gtest_list_tests_unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output_re: regular expression that matches the expected
output after running command;
other_flag: a different flag to be passed to command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
if expected_output_re:
self.assert_(
expected_output_re.match(output),
('when %s is %s, the output of "%s" is "%s",\n'
'which does not match regex "%s"' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
expected_output_re.pattern)))
else:
self.assert_(
not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
('when %s is %s, the output of "%s" is "%s"'%
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output_re=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output_re=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
|
oneminot/audacity
|
refs/heads/master
|
lib-src/lv2/lilv/waflib/Tools/irixcc.py
|
330
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Utils
from waflib.Tools import ccroot,ar
from waflib.Configure import conf
@conf
def find_irixcc(conf):
v=conf.env
cc=None
if v['CC']:cc=v['CC']
elif'CC'in conf.environ:cc=conf.environ['CC']
if not cc:cc=conf.find_program('cc',var='CC')
if not cc:conf.fatal('irixcc was not found')
cc=conf.cmd_to_list(cc)
try:
conf.cmd_and_log(cc+['-version'])
except Exception:
conf.fatal('%r -version could not be executed'%cc)
v['CC']=cc
v['CC_NAME']='irix'
@conf
def irixcc_common_flags(conf):
v=conf.env
v['CC_SRC_F']=''
v['CC_TGT_F']=['-c','-o']
v['CPPPATH_ST']='-I%s'
v['DEFINES_ST']='-D%s'
if not v['LINK_CC']:v['LINK_CC']=v['CC']
v['CCLNK_SRC_F']=''
v['CCLNK_TGT_F']=['-o']
v['LIB_ST']='-l%s'
v['LIBPATH_ST']='-L%s'
v['STLIB_ST']='-l%s'
v['STLIBPATH_ST']='-L%s'
v['cprogram_PATTERN']='%s'
v['cshlib_PATTERN']='lib%s.so'
v['cstlib_PATTERN']='lib%s.a'
def configure(conf):
conf.find_irixcc()
conf.find_cpp()
conf.find_ar()
conf.irixcc_common_flags()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
|
saopicc/killMS
|
refs/heads/master
|
Predict/ClassImageSM.py
|
1
|
#!/usr/bin/env python
"""
killMS, a package for calibration in radio interferometry.
Copyright (C) 2013-2017 Cyril Tasse, l'Observatoire de Paris,
SKA South Africa, Rhodes University
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
import numpy as np
from DDFacet.Imager.ClassDeconvMachine import ClassImagerDeconv
from pyrap.images import image
from killMS.Array import NpShared
from killMS.Other import MyLogger
log=MyLogger.getLogger("ClassImageSM")
from killMS.Other.progressbar import ProgressBar
from DDFacet.ToolsDir.GiveEdges import GiveEdges
class ClassImageSM():
def __init__(self):
self.Type="Image"
class ClassPreparePredict(ClassImagerDeconv):
def __init__(self,ModelImageName,VS,*args,**kwargs):
ClassImagerDeconv.__init__(self,**kwargs)
self.ModelImageName=ModelImageName
self.VS=VS
self.IdSharedMem=kwargs["IdSharedMem"]
self.SM=ClassImageSM()
self.InitFacetMachine()
self.LoadModel()
def LoadModel(self):
im=image(self.ModelImageName)
data=im.getdata()
nch,npol,_,_=data.shape
for ch in range(nch):
for pol in range(npol):
data[ch,pol]=data[ch,pol].T[::-1]
self.ModelImage=data
# NormImage=self.FacetMachine.GiveNormImage()
# import pylab
# pylab.clf()
# pylab.imshow(NormImage)
# pylab.draw()
# pylab.show()
# stop
print>>log, "Splitting model image"
self.FacetMachine.ImToFacets(self.ModelImage)
NFacets=len(self.FacetMachine.DicoImager)
DelFacet=np.zeros((NFacets,),bool)
for iFacet in sorted(self.FacetMachine.DicoImager.keys()):
ModelFacet=self.FacetMachine.DicoImager[iFacet]["ModelFacet"]
if np.max(np.abs(ModelFacet))==0: DelFacet[iFacet]=1
D={}
iFacetNew=0
for iFacet in sorted(self.FacetMachine.DicoImager.keys()):
if DelFacet[iFacet]==False:
D[iFacetNew]=self.FacetMachine.DicoImager[iFacet]
iFacetNew+=1
else:
print>>log, "Facet [%i] is empty, removing it from direction list"%iFacet
self.FacetMachine.DicoImager=D
NFacets=len(self.FacetMachine.DicoImager)
self.NDirs=NFacets
self.Dirs=range(self.NDirs)
ClusterCat=np.zeros((len(self.Dirs),),dtype=[('Name','|S200'),('ra',np.float),('dec',np.float),('SumI',np.float),("Cluster",int)])
ClusterCat=ClusterCat.view(np.recarray)
self.DicoImager=self.FacetMachine.DicoImager
ClusterCat.Cluster=np.arange(NFacets)
self.ClusterCat=ClusterCat
#self.BuildGridsSerial()
self.BuildGridsParallel()
self.SM.NDir=self.NDirs
self.SM.Dirs=self.Dirs
self.SM.ClusterCat=self.ClusterCat
self.SM.GD=self.FacetMachine.GD
self.SM.DicoImager=self.FacetMachine.DicoImager
self.SM.GD["Compression"]["CompDeGridMode"]=0
self.SM.rac=self.VS.MS.rac
self.SM.decc=self.VS.MS.decc
del(self.ModelImage)
#del(self.VS,self.FacetMachine)
def BuildGridsSerial(self):
print>>log, "Building the grids"
ClusterCat=self.ClusterCat
ListGrid=[]
for iFacet in sorted(self.FacetMachine.DicoImager.keys()):
GM=self.FacetMachine.GiveGM(iFacet)
ModelFacet=self.FacetMachine.DicoImager[iFacet]["ModelFacet"]
ClusterCat.SumI[iFacet]=np.sum(ModelFacet)
Grid=GM.dtype(GM.setModelIm(ModelFacet))
ra,dec=self.FacetMachine.DicoImager[iFacet]["RaDec"]
ClusterCat.ra[iFacet]=ra
ClusterCat.dec[iFacet]=dec
del(self.FacetMachine.DicoImager[iFacet]["ModelFacet"])
ListGrid.append(Grid)
NpShared.PackListArray("%sGrids"%(self.IdSharedMem),ListGrid)
del(self.ModelImage)
#del(self.VS,self.FacetMachine)
def BuildGridsParallel(self):
print>>log, "Building the grids"
ClusterCat=self.ClusterCat
ListGrid=[]
for iFacet in sorted(self.FacetMachine.DicoImager.keys()):
ModelFacet=self.FacetMachine.DicoImager[iFacet]["ModelFacet"]
_=NpShared.ToShared("%sModelFacet.%3.3i"%(self.IdSharedMem,iFacet),ModelFacet)
NCPU=self.GD["Parallel"]["NCPU"]
NFacets=len(self.DicoImager.keys())
work_queue = multiprocessing.Queue()
result_queue = multiprocessing.Queue()
NJobs=NFacets
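# Fan-out/fan-in: every facet index is queued as a job, NCPU workers grid
# facets out of shared memory, and completions are counted back off
# result_queue below.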
for iFacet in range(NFacets):
work_queue.put(iFacet)
GM=self.FacetMachine.GiveGM(0)
argsImToGrid=(GM.GridShape,GM.PaddingInnerCoord,GM.OverS,GM.Padding,GM.dtype)
workerlist=[]
for ii in range(NCPU):
W=Worker(work_queue, result_queue,argsImToGrid=argsImToGrid,
IdSharedMem=self.IdSharedMem)
workerlist.append(W)
workerlist[ii].start()
pBAR= ProgressBar('white', width=50, block='=', empty=' ',Title="Make Grids ", HeaderSize=10,TitleSize=13)
pBAR.render(0, '%4i/%i' % (0,NFacets))
iResult=0
while iResult < NJobs:
DicoResult=result_queue.get()
if DicoResult["Success"]:
iResult+=1
NDone=iResult
intPercent=int(100* NDone / float(NFacets))
pBAR.render(intPercent, '%4i/%i' % (NDone,NFacets))
for ii in range(NCPU):
workerlist[ii].shutdown()
workerlist[ii].terminate()
workerlist[ii].join()
for iFacet in sorted(self.FacetMachine.DicoImager.keys()):
# Re-fetch this facet's model; ModelFacet above is stale from the first loop
ModelFacet=self.FacetMachine.DicoImager[iFacet]["ModelFacet"]
ClusterCat.SumI[iFacet]=np.sum(ModelFacet)
Grid=NpShared.GiveArray("%sModelGrid.%3.3i"%(self.IdSharedMem,iFacet))
ra,dec=self.FacetMachine.DicoImager[iFacet]["RaDec"]
ClusterCat.ra[iFacet]=ra
ClusterCat.dec[iFacet]=dec
del(self.FacetMachine.DicoImager[iFacet]["ModelFacet"])
ListGrid.append(Grid)
NpShared.PackListArray("%sGrids"%(self.IdSharedMem),ListGrid)
NpShared.DelAll("%sModelFacet"%self.IdSharedMem)
NpShared.DelAll("%sModelGrid"%self.IdSharedMem)
return True
from DDFacet.Imager.ClassImToGrid import ClassImToGrid
import multiprocessing
from killMS.Predict.PredictGaussPoints_NumExpr5 import ClassPredict
class Worker(multiprocessing.Process):
def __init__(self,
work_queue,
result_queue,
argsImToGrid=None,
IdSharedMem=None):
multiprocessing.Process.__init__(self)
self.work_queue = work_queue
self.result_queue = result_queue
self.kill_received = False
self.exit = multiprocessing.Event()
self.IdSharedMem=IdSharedMem
self.SharedMemNameSphe="%sSpheroidal"%(self.IdSharedMem)
self.ifzfCF=NpShared.GiveArray(self.SharedMemNameSphe)
self.ClassImToGrid=ClassImToGrid(*argsImToGrid,ifzfCF=self.ifzfCF)
def shutdown(self):
self.exit.set()
def run(self):
while not self.kill_received:
try:
iFacet = self.work_queue.get()
except:
break
ModelFacet=NpShared.GiveArray("%sModelFacet.%3.3i"%(self.IdSharedMem,iFacet))
Grid=self.ClassImToGrid.setModelIm(ModelFacet)
_=NpShared.ToShared("%sModelGrid.%3.3i"%(self.IdSharedMem,iFacet),Grid)
self.result_queue.put({"Success":True})
|
jscn/django
|
refs/heads/master
|
tests/gis_tests/layermap/models.py
|
34
|
from django.utils.encoding import python_2_unicode_compatible
from ..models import models
@python_2_unicode_compatible
class NamedModel(models.Model):
name = models.CharField(max_length=25)
class Meta:
abstract = True
required_db_features = ['gis_enabled']
def __str__(self):
return self.name
class State(NamedModel):
pass
class County(NamedModel):
state = models.ForeignKey(State, models.CASCADE)
mpoly = models.MultiPolygonField(srid=4269) # Multipolygon in NAD83
class CountyFeat(NamedModel):
poly = models.PolygonField(srid=4269)
class City(NamedModel):
name_txt = models.TextField(default='')
name_short = models.CharField(max_length=5)
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
dt = models.DateField()
point = models.PointField()
class Meta:
app_label = 'layermap'
required_db_features = ['gis_enabled']
class Interstate(NamedModel):
length = models.DecimalField(max_digits=6, decimal_places=2)
path = models.LineStringField()
class Meta:
app_label = 'layermap'
required_db_features = ['gis_enabled']
# Same as `City` above, but for testing model inheritance.
class CityBase(NamedModel):
population = models.IntegerField()
density = models.DecimalField(max_digits=7, decimal_places=1)
point = models.PointField()
class ICity1(CityBase):
dt = models.DateField()
class Meta(CityBase.Meta):
pass
class ICity2(ICity1):
dt_time = models.DateTimeField(auto_now=True)
class Meta(ICity1.Meta):
pass
class Invalid(models.Model):
point = models.PointField()
class Meta:
required_db_features = ['gis_enabled']
# Mapping dictionaries for the models above.
co_mapping = {
'name': 'Name',
# ForeignKeys use another mapping dictionary for the _related_ Model (State in this case).
'state': {'name': 'State'},
'mpoly': 'MULTIPOLYGON', # Will convert POLYGON features into MULTIPOLYGONS.
}
cofeat_mapping = {'name': 'Name',
'poly': 'POLYGON',
}
city_mapping = {'name': 'Name',
'population': 'Population',
'density': 'Density',
'dt': 'Created',
'point': 'POINT',
}
inter_mapping = {'name': 'Name',
'length': 'Length',
'path': 'LINESTRING',
}
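# A minimal usage sketch (the shapefile path is hypothetical): these dicts
# are consumed by GeoDjango's LayerMapping, e.g.
#   from django.contrib.gis.utils import LayerMapping
#   lm = LayerMapping(County, '/path/to/counties.shp', co_mapping)
#   lm.save()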
|
hurricup/intellij-community
|
refs/heads/master
|
python/testData/resolve/Tuple.py
|
83
|
aa = 1
bb = 2
c = aa, b<ref>b
|
ListFranz/torngas
|
refs/heads/master
|
demo/run_gunicorn.py
|
3
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from torngas.webserver import Server
"""
Run under gunicorn:
gunicorn -c gunicorn.py.conf run_gunicorn:app
torngas settings are written in gunicorn.conf.py:
os.environ.setdefault('TORNGAS_APP_SETTINGS', 'settings.setting')
"""
server = Server()
server.parse_logger()
server.load_urls()
app = server.load_application()
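# gunicorn imports this module and serves the 'app' WSGI callable built above.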
|
LaoZhongGu/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/double_const.py
|
203
|
from test.support import TestFailed
# A test for SF bug 422177: manifest float constants varied way too much in
# precision depending on whether Python was loading a module for the first
# time, or reloading it from a precompiled .pyc. The "expected" failure
# mode is that when test_import imports this after all .pyc files have been
# erased, it passes, but when test_import imports this from
# double_const.pyc, it fails. This indicates a woeful loss of precision in
# the marshal format for doubles. It's also possible that repr() doesn't
# produce enough digits to get reasonable precision for this box.
PI = 3.14159265358979324
TWOPI = 6.28318530717958648
PI_str = "3.14159265358979324"
TWOPI_str = "6.28318530717958648"
# Verify that the double x is within a few bits of eval(x_str).
def check_ok(x, x_str):
assert x > 0.0
x2 = eval(x_str)
assert x2 > 0.0
diff = abs(x - x2)
# If diff is no larger than 3 ULP (wrt x2), then diff/8 is no larger
# than 0.375 ULP, so adding diff/8 to x2 should have no effect.
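# (Near 3.14 one ULP of a double is 2**-51, about 4.4e-16, so diff/8 is at
# most ~1.7e-16 here; anything under 0.5 ULP rounds away when added to x2.)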
if x2 + (diff / 8.) != x2:
raise TestFailed("Manifest const %s lost too much precision " % x_str)
check_ok(PI, PI_str)
check_ok(TWOPI, TWOPI_str)
|
jelugbo/ddi
|
refs/heads/master
|
lms/djangoapps/instructor_task/migrations/0001_initial.py
|
73
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'InstructorTask'
db.create_table('instructor_task_instructortask', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('task_type', self.gf('django.db.models.fields.CharField')(max_length=50, db_index=True)),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('task_key', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('task_input', self.gf('django.db.models.fields.CharField')(max_length=255)),
('task_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('task_state', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, db_index=True)),
('task_output', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True)),
('requester', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
))
db.send_create_signal('instructor_task', ['InstructorTask'])
def backwards(self, orm):
# Deleting model 'InstructorTask'
db.delete_table('instructor_task_instructortask')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'instructor_task.instructortask': {
'Meta': {'object_name': 'InstructorTask'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'requester': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'task_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'task_input': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'task_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'task_output': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
'task_state': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'db_index': 'True'}),
'task_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['instructor_task']
|
crcresearch/osf.io
|
refs/heads/develop
|
api/institutions/serializers.py
|
4
|
from rest_framework import serializers as ser
from rest_framework import exceptions
from osf.models import Node, Registration
from website.util import permissions as osf_permissions
from api.base.serializers import JSONAPISerializer, RelationshipField, LinksField, JSONAPIRelationshipSerializer, \
BaseAPISerializer
from api.base.exceptions import RelationshipPostMakesNoChanges
class InstitutionSerializer(JSONAPISerializer):
filterable_fields = frozenset([
'id',
'name',
'auth_url',
])
name = ser.CharField(read_only=True)
id = ser.CharField(read_only=True, source='_id')
logo_path = ser.CharField(read_only=True)
description = ser.CharField(read_only=True)
auth_url = ser.CharField(read_only=True)
links = LinksField({'self': 'get_api_url', })
nodes = RelationshipField(
related_view='institutions:institution-nodes',
related_view_kwargs={'institution_id': '<_id>'},
)
registrations = RelationshipField(
related_view='institutions:institution-registrations',
related_view_kwargs={'institution_id': '<_id>'}
)
users = RelationshipField(
related_view='institutions:institution-users',
related_view_kwargs={'institution_id': '<_id>'}
)
def get_api_url(self, obj):
return obj.absolute_api_v2_url
def get_absolute_url(self, obj):
return obj.absolute_api_v2_url
class Meta:
type_ = 'institutions'
class NodeRelated(JSONAPIRelationshipSerializer):
id = ser.CharField(source='_id', required=False, allow_null=True)
class Meta:
type_ = 'nodes'
class InstitutionNodesRelationshipSerializer(BaseAPISerializer):
data = ser.ListField(child=NodeRelated())
links = LinksField({'self': 'get_self_url',
'html': 'get_related_url'})
def get_self_url(self, obj):
return obj['self'].nodes_relationship_url
def get_related_url(self, obj):
return obj['self'].nodes_url
class Meta:
type_ = 'nodes'
def create(self, validated_data):
inst = self.context['view'].get_object()['self']
user = self.context['request'].user
node_dicts = validated_data['data']
changes_flag = False
for node_dict in node_dicts:
node = Node.load(node_dict['_id'])
if not node:
raise exceptions.NotFound(detail='Node with id "{}" was not found'.format(node_dict['_id']))
if not node.has_permission(user, osf_permissions.WRITE):
raise exceptions.PermissionDenied(detail='Write permission on node {} required'.format(node_dict['_id']))
if not node.is_affiliated_with_institution(inst):
node.add_affiliated_institution(inst, user, save=True)
changes_flag = True
if not changes_flag:
raise RelationshipPostMakesNoChanges
return {
'data': list(inst.nodes.filter(is_deleted=False, type='osf.node')),
'self': inst
}
class RegistrationRelated(JSONAPIRelationshipSerializer):
id = ser.CharField(source='_id', required=False, allow_null=True)
class Meta:
type_ = 'registrations'
class InstitutionRegistrationsRelationshipSerializer(BaseAPISerializer):
data = ser.ListField(child=RegistrationRelated())
links = LinksField({'self': 'get_self_url',
'html': 'get_related_url'})
def get_self_url(self, obj):
return obj['self'].registrations_relationship_url
def get_related_url(self, obj):
return obj['self'].registrations_url
class Meta:
type_ = 'registrations'
def create(self, validated_data):
inst = self.context['view'].get_object()['self']
user = self.context['request'].user
registration_dicts = validated_data['data']
changes_flag = False
for registration_dict in registration_dicts:
registration = Registration.load(registration_dict['_id'])
if not registration:
raise exceptions.NotFound(detail='Registration with id "{}" was not found'.format(registration_dict['_id']))
if not registration.has_permission(user, osf_permissions.WRITE):
raise exceptions.PermissionDenied(detail='Write permission on registration {} required'.format(registration_dict['_id']))
if not registration.is_affiliated_with_institution(inst):
registration.add_affiliated_institution(inst, user, save=True)
changes_flag = True
if not changes_flag:
raise RelationshipPostMakesNoChanges
return {
'data': list(inst.nodes.filter(is_deleted=False, type='osf.registration')),
'self': inst
}
|
flypy/flypy
|
refs/heads/master
|
flypy/runtime/primitives.py
|
1
|
# -*- coding: utf-8 -*-
"""
Primitive operations, like 'a is b'.
"""
from __future__ import print_function, division, absolute_import
import operator
from .obj.core import NoneType
from .. import jit, ijit, cjit, typeof, overlay
jit = cjit
#===------------------------------------------------------------------===
# Implementations
#===------------------------------------------------------------------===
@jit('a -> b -> bool')
def is_(a, b):
"""Support `a is b` syntax"""
return False
# TODO: Overload for variants!
@jit('NoneType[] -> NoneType[] -> bool')
def is_(a, b):
return True
@jit('a -> bool')
def not_(x):
"""Support `not x` syntax"""
if bool(x):
return False
return True
@jit
def getitem(obj, idx):
"""Support `obj[idx]` syntax"""
return obj.__getitem__(idx)
@jit
def setitem(obj, idx, value):
"""Support `obj[idx] = value` syntax"""
obj.__setitem__(idx, value)
@jit('a -> b -> bool')
def contains(item, obj):
"""Support `item in obj` syntax"""
return obj.__contains__(item)
#===------------------------------------------------------------------===
# Overlays
#===------------------------------------------------------------------===
# We overlay operator.is_ with our own implementation. This works not only
# when operator.is_ is used directly in user code, but also for 'is'
# expressions, since frontend/translation.py itself turns 'is' operations
# into operator.is_ calls.
overlay(operator.is_, is_)
overlay(operator.not_, not_)
overlay(operator.getitem, getitem)
overlay(operator.setitem, setitem)
overlay(operator.contains, contains)
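# After these overlays, a jitted 'a is b' expression resolves to the is_
# implementations defined above.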
|
allmightyspiff/softlayer-python
|
refs/heads/master
|
SoftLayer/fixtures/SoftLayer_Virtual_DedicatedHost.py
|
3
|
getObject = {
'id': 37401,
'memoryCapacity': 242,
'modifyDate': '',
'name': 'test-dedicated',
'diskCapacity': 1200,
'createDate': '2017-10-16T12:50:23-05:00',
'cpuCount': 56,
'accountId': 1199911
}
getAvailableRouters = [
{'hostname': 'bcr01a.dal05', 'id': 12345},
{'hostname': 'bcr02a.dal05', 'id': 12346},
{'hostname': 'bcr03a.dal05', 'id': 12347},
{'hostname': 'bcr04a.dal05', 'id': 12348}
]
getObjectById = {
'datacenter': {
'id': 12345,
'name': 'dal05',
'longName': 'Dallas 5'
},
'memoryCapacity': 242,
'modifyDate': '2017-11-06T11:38:20-06:00',
'name': 'test-dedicated',
'diskCapacity': 1200,
'backendRouter': {
'domain': 'test.com',
'hostname': 'bcr01a.dal05',
'id': 12345
},
'guestCount': 1,
'cpuCount': 56,
'guests': [{
'domain': 'test.com',
'hostname': 'test-dedicated',
'id': 12345,
'uuid': 'F9329795-4220-4B0A-B970-C86B950667FA'
}],
'billingItem': {
'nextInvoiceTotalRecurringAmount': 1515.556,
'orderItem': {
'id': 12345,
'order': {
'status': 'APPROVED',
'privateCloudOrderFlag': False,
'modifyDate': '2017-11-02T11:42:50-07:00',
'orderQuoteId': '',
'userRecordId': 12345,
'createDate': '2017-11-02T11:40:56-07:00',
'impersonatingUserRecordId': '',
'orderTypeId': 7,
'presaleEventId': '',
'userRecord': {
'username': 'test-dedicated'
},
'id': 12345,
'accountId': 12345
}
},
'id': 12345,
'children': [
{
'nextInvoiceTotalRecurringAmount': 0.0,
'categoryCode': 'dedicated_host_ram'
},
{
'nextInvoiceTotalRecurringAmount': 0.0,
'categoryCode': 'dedicated_host_disk'
}
]
},
'id': 12345,
'createDate': '2017-11-02T11:40:56-07:00'
}
deleteObject = True
getGuests = [{
'id': 200,
'hostname': 'vs-test1',
'domain': 'test.sftlyr.ws',
'fullyQualifiedDomainName': 'vs-test1.test.sftlyr.ws',
'status': {'keyName': 'ACTIVE', 'name': 'Active'},
'datacenter': {'id': 50, 'name': 'TEST00',
'description': 'Test Data Center'},
'powerState': {'keyName': 'RUNNING', 'name': 'Running'},
'maxCpu': 2,
'maxMemory': 1024,
'primaryIpAddress': '172.16.240.2',
'globalIdentifier': '1a2b3c-1701',
'primaryBackendIpAddress': '10.45.19.37',
'hourlyBillingFlag': False,
'billingItem': {
'id': 6327,
'recurringFee': 1.54,
'orderItem': {
'order': {
'userRecord': {
'username': 'chechu',
}
}
}
},
}, {
'id': 202,
'hostname': 'vs-test2',
'domain': 'test.sftlyr.ws',
'fullyQualifiedDomainName': 'vs-test2.test.sftlyr.ws',
'status': {'keyName': 'ACTIVE', 'name': 'Active'},
'datacenter': {'id': 50, 'name': 'TEST00',
'description': 'Test Data Center'},
'powerState': {'keyName': 'RUNNING', 'name': 'Running'},
'maxCpu': 4,
'maxMemory': 4096,
'primaryIpAddress': '172.16.240.7',
'globalIdentifier': '05a8ac-6abf0',
'primaryBackendIpAddress': '10.45.19.35',
'hourlyBillingFlag': True,
'billingItem': {
'id': 6327,
'recurringFee': 1.54,
'orderItem': {
'order': {
'userRecord': {
'username': 'chechu',
}
}
}
}
}]
|
tiwillia/openshift-tools
|
refs/heads/stg
|
ansible/roles/lib_openshift_3.2/build/ansible/oadm_policy_user.py
|
13
|
# pylint: skip-file
#Manage policy
#
#Usage:
# oadm policy [options]
#
#Available Commands:
# add-role-to-user Add users to a role in the current project
# remove-role-from-user Remove user from role in the current project
# remove-user Remove user from the current project
# add-cluster-role-to-user Add users to a role for all projects in the cluster
# remove-cluster-role-from-user Remove user from role for all projects in the cluster
# add-scc-to-user Add users to a security context constraint
# remove-scc-from-user Remove user from scc
#
#Use "oadm help <command>" for more information about a given command.
#Use "oadm options" for a list of global command-line options (applies to all commands).
#
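# A hedged playbook sketch (role and user names are hypothetical):
#   - oadm_policy_user:
#       state: present
#       user: bob
#       resource_kind: cluster-role
#       resource_name: cluster-reader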
def main():
'''
ansible oadm module for user policy
'''
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', type='str',
choices=['present', 'absent']),
debug=dict(default=False, type='bool'),
resource_name=dict(required=True, type='str'),
namespace=dict(default=None, type='str'),
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
# add-role-to-user
user=dict(required=True, type='str'),
resource_kind=dict(required=True, choices=['role', 'cluster-role', 'scc'], type='str'),
),
supports_check_mode=True,
)
state = module.params['state']
action = None
if state == 'present':
action = 'add-' + module.params['resource_kind'] + '-to-user'
else:
action = 'remove-' + module.params['resource_kind'] + '-from-user'
uconfig = OadmPolicyUserConfig(module.params['namespace'],
module.params['kubeconfig'],
{'action': {'value': action, 'include': False},
'user': {'value': module.params['user'], 'include': False},
'resource_kind': {'value': module.params['resource_kind'], 'include': False},
'name': {'value': module.params['resource_name'], 'include': False},
})
oadmpolicyuser = OadmPolicyUser(uconfig)
########
# Delete
########
if state == 'absent':
if not oadmpolicyuser.exists():
module.exit_json(changed=False, state="absent")
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a delete.')
api_rval = oadmpolicyuser.perform()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="absent")
if state == 'present':
########
# Create
########
results = oadmpolicyuser.exists()
if isinstance(results, dict) and 'returncode' in results and results['returncode'] != 0:
module.fail_json(msg=results)
if not results:
if module.check_mode:
module.exit_json(changed=False, msg='Would have performed a create.')
api_rval = oadmpolicyuser.perform()
if api_rval['returncode'] != 0:
module.fail_json(msg=api_rval)
module.exit_json(changed=True, results=api_rval, state="present")
module.exit_json(changed=False, state="present")
module.exit_json(failed=True,
changed=False,
results='Unknown state passed. %s' % state,
state="unknown")
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
# import module snippets. These are required.
from ansible.module_utils.basic import *
main()
|
nacl-webkit/chrome_deps
|
refs/heads/master
|
build/android/pylib/instrumentation/__init__.py
|
998
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
|
camillemonchicourt/Geotrek
|
refs/heads/master
|
geotrek/trekking/migrations/0002_auto__add_field_difficultylevel_pictogram.py
|
1
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from django.conf import settings
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'DifficultyLevel.pictogram'
db.add_column('o_b_difficulte', 'pictogram',
self.gf('django.db.models.fields.files.FileField')(max_length=512, null=True, db_column='picto', blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'DifficultyLevel.pictogram'
db.delete_column('o_b_difficulte', 'picto')
models = {
u'authent.structure': {
'Meta': {'ordering': "['name']", 'object_name': 'Structure'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'core.comfort': {
'Meta': {'ordering': "['comfort']", 'object_name': 'Comfort', 'db_table': "'l_b_confort'"},
'comfort': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'confort'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.datasource': {
'Meta': {'ordering': "['source']", 'object_name': 'Datasource', 'db_table': "'l_b_source'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.network': {
'Meta': {'ordering': "['network']", 'object_name': 'Network', 'db_table': "'l_b_reseau'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'reseau'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.path': {
'Meta': {'object_name': 'Path', 'db_table': "'l_t_troncon'"},
'arrival': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'db_column': "'arrivee'", 'blank': 'True'}),
'ascent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_positive'", 'blank': 'True'}),
'comfort': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'confort'", 'to': u"orm['core.Comfort']"}),
'comments': ('django.db.models.fields.TextField', [], {'null': 'True', 'db_column': "'remarques'", 'blank': 'True'}),
'datasource': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'source'", 'to': u"orm['core.Datasource']"}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
'departure': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250', 'null': 'True', 'db_column': "'depart'", 'blank': 'True'}),
'descent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_negative'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.LineStringField', [], {'srid': '%s' % settings.SRID, 'spatial_index': 'False'}),
'geom_3d': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'dim': '3', 'spatial_index': 'False', 'null': 'True', 'srid': '%s' % settings.SRID}),
'geom_cadastre': ('django.contrib.gis.db.models.fields.LineStringField', [], {'srid': '%s' % settings.SRID, 'null': 'True', 'spatial_index': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'longueur'", 'blank': 'True'}),
'max_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_maximum'", 'blank': 'True'}),
'min_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_minimum'", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'db_column': "'nom'", 'blank': 'True'}),
'networks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'paths'", 'to': u"orm['core.Network']", 'db_table': "'l_r_troncon_reseau'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'slope': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'pente'", 'blank': 'True'}),
'stake': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'paths'", 'null': 'True', 'db_column': "'enjeu'", 'to': u"orm['core.Stake']"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'usages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'paths'", 'to': u"orm['core.Usage']", 'db_table': "'l_r_troncon_usage'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'valid': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_column': "'valide'"})
},
u'core.pathaggregation': {
'Meta': {'ordering': "['id']", 'object_name': 'PathAggregation', 'db_table': "'e_r_evenement_troncon'"},
'end_position': ('django.db.models.fields.FloatField', [], {'db_column': "'pk_fin'", 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'ordre'", 'blank': 'True'}),
'path': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aggregations'", 'on_delete': 'models.DO_NOTHING', 'db_column': "'troncon'", 'to': u"orm['core.Path']"}),
'start_position': ('django.db.models.fields.FloatField', [], {'db_column': "'pk_debut'", 'db_index': 'True'}),
'topo_object': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aggregations'", 'db_column': "'evenement'", 'to': u"orm['core.Topology']"})
},
u'core.stake': {
'Meta': {'ordering': "['id']", 'object_name': 'Stake', 'db_table': "'l_b_enjeu'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stake': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'enjeu'"}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"})
},
u'core.topology': {
'Meta': {'object_name': 'Topology', 'db_table': "'e_t_evenement'"},
'ascent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_positive'", 'blank': 'True'}),
'date_insert': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_column': "'date_insert'", 'blank': 'True'}),
'date_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_column': "'date_update'", 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'supprime'"}),
'descent': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'denivelee_negative'", 'blank': 'True'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'srid': '%s' % settings.SRID, 'null': 'True', 'spatial_index': 'False'}),
'geom_3d': ('django.contrib.gis.db.models.fields.GeometryField', [], {'default': 'None', 'dim': '3', 'spatial_index': 'False', 'null': 'True', 'srid': '%s' % settings.SRID}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'length': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'longueur'", 'blank': 'True'}),
'max_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_maximum'", 'blank': 'True'}),
'min_elevation': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'db_column': "'altitude_minimum'", 'blank': 'True'}),
'offset': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_column': "'decallage'"}),
'paths': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Path']", 'through': u"orm['core.PathAggregation']", 'db_column': "'troncons'", 'symmetrical': 'False'}),
'slope': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'null': 'True', 'db_column': "'pente'", 'blank': 'True'})
},
u'core.usage': {
'Meta': {'ordering': "['usage']", 'object_name': 'Usage', 'db_table': "'l_b_usage'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'structure': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['authent.Structure']", 'db_column': "'structure'"}),
'usage': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_column': "'usage'"})
},
u'trekking.difficultylevel': {
'Meta': {'ordering': "['id']", 'object_name': 'DifficultyLevel', 'db_table': "'o_b_difficulte'"},
'difficulty': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'difficulte'"}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'null': 'True', 'db_column': "'picto'", 'blank': 'True'})
},
u'trekking.informationdesk': {
'Meta': {'ordering': "['name']", 'object_name': 'InformationDesk', 'db_table': "'o_b_renseignement'"},
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'db_column': "'nom'"})
},
u'trekking.poi': {
'Meta': {'object_name': 'POI', 'db_table': "'o_t_poi'", '_ormbases': [u'core.Topology']},
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'topo_object': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Topology']", 'unique': 'True', 'primary_key': 'True', 'db_column': "'evenement'"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pois'", 'db_column': "'type'", 'to': u"orm['trekking.POIType']"})
},
u'trekking.poitype': {
'Meta': {'ordering': "['label']", 'object_name': 'POIType', 'db_table': "'o_b_poi'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"})
},
u'trekking.route': {
'Meta': {'ordering': "['route']", 'object_name': 'Route', 'db_table': "'o_b_parcours'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'route': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'parcours'"})
},
u'trekking.theme': {
'Meta': {'ordering': "['label']", 'object_name': 'Theme', 'db_table': "'o_b_theme'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'theme'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"})
},
u'trekking.trek': {
'Meta': {'ordering': "['name']", 'object_name': 'Trek', 'db_table': "'o_t_itineraire'", '_ormbases': [u'core.Topology']},
'access': ('django.db.models.fields.TextField', [], {'db_column': "'acces'", 'blank': 'True'}),
'advice': ('django.db.models.fields.TextField', [], {'db_column': "'recommandation'", 'blank': 'True'}),
'advised_parking': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'parking'", 'blank': 'True'}),
'ambiance': ('django.db.models.fields.TextField', [], {'db_column': "'ambiance'", 'blank': 'True'}),
'arrival': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'arrivee'", 'blank': 'True'}),
'departure': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'depart'", 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'db_column': "'description'", 'blank': 'True'}),
'description_teaser': ('django.db.models.fields.TextField', [], {'db_column': "'chapeau'", 'blank': 'True'}),
'difficulty': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'treks'", 'null': 'True', 'db_column': "'difficulte'", 'to': u"orm['trekking.DifficultyLevel']"}),
'disabled_infrastructure': ('django.db.models.fields.TextField', [], {'db_column': "'handicap'", 'blank': 'True'}),
'duration': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'db_column': "'duree'", 'blank': 'True'}),
'information_desk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'treks'", 'null': 'True', 'db_column': "'renseignement'", 'to': u"orm['trekking.InformationDesk']"}),
'is_park_centered': ('django.db.models.fields.BooleanField', [], {'db_column': "'coeur'"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'networks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['trekking.TrekNetwork']", 'db_table': "'o_r_itineraire_reseau'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'parking_location': ('django.contrib.gis.db.models.fields.PointField', [], {'srid': '%s' % settings.SRID, 'null': 'True', 'spatial_index': 'False', 'db_column': "'geom_parking'", 'blank': 'True'}),
'public_transport': ('django.db.models.fields.TextField', [], {'db_column': "'transport'", 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'db_column': "'public'"}),
'related_treks': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_treks+'", 'symmetrical': 'False', 'through': u"orm['trekking.TrekRelationship']", 'to': u"orm['trekking.Trek']"}),
'route': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'treks'", 'null': 'True', 'db_column': "'parcours'", 'to': u"orm['trekking.Route']"}),
'themes': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['trekking.Theme']", 'db_table': "'o_r_itineraire_theme'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'topo_object': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Topology']", 'unique': 'True', 'primary_key': 'True', 'db_column': "'evenement'"}),
'usages': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['trekking.Usage']", 'db_table': "'o_r_itineraire_usage'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'web_links': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'treks'", 'to': u"orm['trekking.WebLink']", 'db_table': "'o_r_itineraire_web'", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'})
},
u'trekking.treknetwork': {
'Meta': {'ordering': "['network']", 'object_name': 'TrekNetwork', 'db_table': "'o_b_reseau'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'network': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'reseau'"})
},
u'trekking.trekrelationship': {
'Meta': {'unique_together': "(('trek_a', 'trek_b'),)", 'object_name': 'TrekRelationship', 'db_table': "'o_r_itineraire_itineraire'"},
'has_common_departure': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'depart_commun'"}),
'has_common_edge': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'troncons_communs'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_circuit_step': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_column': "'etape_circuit'"}),
'trek_a': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trek_relationship_a'", 'db_column': "'itineraire_a'", 'to': u"orm['trekking.Trek']"}),
'trek_b': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'trek_relationship_b'", 'db_column': "'itineraire_b'", 'to': u"orm['trekking.Trek']"})
},
u'trekking.usage': {
'Meta': {'ordering': "['usage']", 'object_name': 'Usage', 'db_table': "'o_b_usage'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"}),
'usage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'usage'"})
},
u'trekking.weblink': {
'Meta': {'ordering': "['name']", 'object_name': 'WebLink', 'db_table': "'o_t_web'"},
'category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'links'", 'null': 'True', 'db_column': "'categorie'", 'to': u"orm['trekking.WebLinkCategory']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '128', 'db_column': "'url'"})
},
u'trekking.weblinkcategory': {
'Meta': {'ordering': "['label']", 'object_name': 'WebLinkCategory', 'db_table': "'o_b_web_category'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_column': "'nom'"}),
'pictogram': ('django.db.models.fields.files.FileField', [], {'max_length': '512', 'db_column': "'picto'"})
}
}
complete_apps = ['trekking']
|
anistark/mozillians
|
refs/heads/master
|
vendor-local/lib/python/tablib/packages/odf/meta.py
|
98
|
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import METANS
from element import Element
# Autogenerated
def AutoReload(**args):
return Element(qname = (METANS,'auto-reload'), **args)
def CreationDate(**args):
return Element(qname = (METANS,'creation-date'), **args)
def DateString(**args):
return Element(qname = (METANS,'date-string'), **args)
def DocumentStatistic(**args):
return Element(qname = (METANS,'document-statistic'), **args)
def EditingCycles(**args):
return Element(qname = (METANS,'editing-cycles'), **args)
def EditingDuration(**args):
return Element(qname = (METANS,'editing-duration'), **args)
def Generator(**args):
return Element(qname = (METANS,'generator'), **args)
def HyperlinkBehaviour(**args):
return Element(qname = (METANS,'hyperlink-behaviour'), **args)
def InitialCreator(**args):
return Element(qname = (METANS,'initial-creator'), **args)
def Keyword(**args):
return Element(qname = (METANS,'keyword'), **args)
def PrintDate(**args):
return Element(qname = (METANS,'print-date'), **args)
def PrintedBy(**args):
return Element(qname = (METANS,'printed-by'), **args)
def Template(**args):
return Element(qname = (METANS,'template'), **args)
def UserDefined(**args):
return Element(qname = (METANS,'user-defined'), **args)
|
praveen-pal/edx-platform
|
refs/heads/master
|
lms/djangoapps/certificates/management/commands/ungenerated_certs.py
|
11
|
from django.core.management.base import BaseCommand
from certificates.models import certificate_status_for_student
from certificates.queue import XQueueCertInterface
from django.contrib.auth.models import User
from optparse import make_option
from django.conf import settings
from xmodule.course_module import CourseDescriptor
from xmodule.modulestore.django import modulestore
from certificates.models import CertificateStatuses
import datetime
from pytz import UTC
class Command(BaseCommand):
help = """
Find all students that need certificates
for courses that have finished and
put their cert requests on the queue.
Use the --noop option to test without actually
putting certificates on the queue to be generated.
"""
option_list = BaseCommand.option_list + (
make_option('-n', '--noop',
action='store_true',
dest='noop',
default=False,
help="Don't add certificate requests to the queue"),
make_option('-c', '--course',
metavar='COURSE_ID',
dest='course',
default=False,
help='Grade and generate certificates '
'for a specific course'),
make_option('-f', '--force-gen',
metavar='STATUS',
dest='force',
default=False,
help='Will generate new certificates for only those users '
'whose entry in the certificate table matches STATUS. '
'STATUS can be generating, unavailable, deleted, error '
'or notpassing.'),
)
def handle(self, *args, **options):
# Will only generate a certificate if the current
# status is in the unavailable state, can be set
# to something else with the force flag
if options['force']:
valid_statuses = getattr(CertificateStatuses, options['force'])
else:
valid_statuses = [CertificateStatuses.unavailable]
# Print update after this many students
STATUS_INTERVAL = 500
if options['course']:
ended_courses = [options['course']]
else:
# Find all courses that have ended
ended_courses = []
for course_id in [course # all courses in COURSE_LISTINGS
for sub in settings.COURSE_LISTINGS
for course in settings.COURSE_LISTINGS[sub]]:
course_loc = CourseDescriptor.id_to_location(course_id)
course = modulestore().get_instance(course_id, course_loc)
if course.has_ended():
ended_courses.append(course_id)
for course_id in ended_courses:
# prefetch all chapters/sequentials by saying depth=2
course = modulestore().get_instance(course_id, CourseDescriptor.id_to_location(course_id), depth=2)
print "Fetching enrolled students for {0}".format(course_id)
enrolled_students = User.objects.filter(
courseenrollment__course_id=course_id).prefetch_related(
"groups").order_by('username')
xq = XQueueCertInterface()
total = enrolled_students.count()
count = 0
start = datetime.datetime.now(UTC)
for student in enrolled_students:
count += 1
if count % STATUS_INTERVAL == 0:
# Print a status update with an approximation of
# how much time is left based on how long the last
# interval took
diff = datetime.datetime.now(UTC) - start
timeleft = diff * (total - count) / STATUS_INTERVAL
hours, remainder = divmod(timeleft.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
print "{0}/{1} completed ~{2:02}:{3:02}m remaining".format(
count, total, hours, minutes)
start = datetime.datetime.now(UTC)
if certificate_status_for_student(
student, course_id)['status'] in valid_statuses:
if not options['noop']:
# Add the certificate request to the queue
ret = xq.add_cert(student, course_id, course=course)
if ret == 'generating':
print '{0} - {1}'.format(student, ret)
|
skydark/nstools
|
refs/heads/master
|
zhtools/xpinyin.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Translate chinese hanzi to pinyin by python
Created by Eric Lo on 2010-05-20.
Copyright (c) 2010 __lxneng@gmail.com__. http://lxneng.com All rights reserved.
"""
"""
Forked by skydarkchen <skydark2 at gmail>
"""
import os.path
try:
chr = unichr
except NameError:
pass
VERSION = '0.3a'
class Pinyin(object):
"""translate chinese hanzi to pinyin by python, inspired by flyerhzm’s
`chinese\_pinyin`_ gem
.. _chinese\_pinyin: https://github.com/flyerhzm/chinese_pinyin
usage(python3)
-----
::
>>> p = Pinyin()
>>> p.get_pinyin("上海")
'shanghai'
>>> p.get_pinyin("上海", tone=True)
'shang4hai3'
>>> p.get_initials("上")
'S'
>>> print(''.join(p.py2hz('shang4')))
丄上姠尙尚蠰銄鑜
>>> print(''.join(p.py2hz('a')))
吖腌錒锕阿嗄阿阿啊阿
"""
data_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \
'Mandarin.dat')
def __init__(self):
self.dict = {}
self.revdict = {}
for line in open(self.data_path):
k, v = line.strip().split('\t')
v = v.lower().split(' ')
hz = chr(int('0x%s' % k, 16))
self.dict[hz] = v
for vkey in v:
self.revdict.setdefault(vkey, [])
self.revdict[vkey].append(hz)
def py2hz(self, pinyin):
if pinyin == '':
return []
pinyin = pinyin.lower()
if pinyin[-1].isdigit():
return self.revdict.get(pinyin, [])
ret = []
for i in range(1, 6):
key = '%s%s' % (pinyin, i)
ret += self.revdict.get(key, [])
return ret
def get_pinyin(self, chars='', splitter='', tone=False):
result = []
for char in chars:
v = self.dict.get(char, None)
if v:
v = v[0]
if not tone and v[-1].isdigit():
v = v[:-1]
else:
v = char
result.append(v)
return splitter.join(result)
def get_initials(self, char=''):
if char == '':
return ''
return self.dict.get(char, [char])[0][0].upper()
if __name__ == '__main__':
import unittest
class PinyinTestCase(unittest.TestCase):
def setUp(self):
import sys
py = sys.version_info
self.py3k = py >= (3, 0, 0)
self.py = Pinyin()
def to_unicode(self, s):
if self.py3k:
return s
return s.decode('utf-8')
def test_get_pinyin(self):  # test method names must begin with 'test'
s = self.to_unicode('上A2#海')
a = self.to_unicode('shangA2#hai')
aa = self.to_unicode('shang4A2#hai3')
aaa = self.to_unicode('shang A 2 # hai')
self.assertEqual(self.py.get_pinyin(s), a)
self.assertEqual(self.py.get_pinyin(s, tone=True), aa)
self.assertEqual(self.py.get_pinyin(s, splitter=' '), aaa)
def test_get_initials(self):
s = self.to_unicode('上')
a = self.to_unicode('S')
self.assertEqual(self.py.get_initials(s), a)
def test_py2hz(self):
s1 = self.to_unicode('shang4')
s2 = self.to_unicode('a')
a1 = self.to_unicode('丄上姠尙尚蠰銄鑜')
a2 = self.to_unicode('吖腌錒锕阿嗄阿阿啊阿')
self.assertEqual(''.join(self.py.py2hz(s1)), a1)
self.assertEqual(''.join(self.py.py2hz(s2)), a2)
unittest.main()
|
sbalde/edx-platform
|
refs/heads/master
|
lms/lib/courseware_search/test/test_lms_result_processor.py
|
151
|
"""
Tests for the lms_result_processor
"""
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.factories import UserFactory
from lms.lib.courseware_search.lms_result_processor import LmsSearchResultProcessor
class LmsSearchResultProcessorTestCase(ModuleStoreTestCase):
""" Test case class to test search result processor """
def build_course(self):
"""
Build up a course tree with an html control
"""
self.global_staff = UserFactory(is_staff=True)
self.course = CourseFactory.create(
org='Elasticsearch',
course='ES101',
run='test_run',
display_name='Elasticsearch test course',
)
self.section = ItemFactory.create(
parent=self.course,
category='chapter',
display_name='Test Section',
)
self.subsection = ItemFactory.create(
parent=self.section,
category='sequential',
display_name='Test Subsection',
)
self.vertical = ItemFactory.create(
parent=self.subsection,
category='vertical',
display_name='Test Unit',
)
self.html = ItemFactory.create(
parent=self.vertical,
category='html',
display_name='Test Html control',
)
self.ghost_subsection = ItemFactory.create(
parent=self.section,
category='sequential',
display_name=None,
)
self.ghost_vertical = ItemFactory.create(
parent=self.ghost_subsection,
category='vertical',
display_name=None,
)
self.ghost_html = ItemFactory.create(
parent=self.ghost_vertical,
category='html',
display_name='Ghost Html control',
)
def setUp(self):
# from nose.tools import set_trace
# set_trace()
super(LmsSearchResultProcessorTestCase, self).setUp()
self.build_course()
def test_url_parameter(self):
fake_url = ""
srp = LmsSearchResultProcessor({}, "test")
with self.assertRaises(ValueError):
fake_url = srp.url
self.assertEqual(fake_url, "")
srp = LmsSearchResultProcessor(
{
"course": unicode(self.course.id),
"id": unicode(self.html.scope_ids.usage_id),
"content": {"text": "This is the html text"}
},
"test"
)
self.assertEqual(
srp.url, "/courses/{}/jump_to/{}".format(unicode(self.course.id), unicode(self.html.scope_ids.usage_id)))
def test_should_remove(self):
"""
Tests that "visible_to_staff_only" overrides start date.
"""
srp = LmsSearchResultProcessor(
{
"course": unicode(self.course.id),
"id": unicode(self.html.scope_ids.usage_id),
"content": {"text": "This is html test text"}
},
"test"
)
self.assertEqual(srp.should_remove(self.global_staff), False)
|
mlorbetske/PTVS
|
refs/heads/master
|
Python/Tests/TestData/DjangoDebugProject/DjangoDebugProject/settings.py
|
18
|
# Django settings for DjangoDebugProject project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'n(bd1f1c%e8=_xad02x5qtfn%wgwpi492e$8_erx+d)!tpeoim'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'DjangoDebugProject.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'DjangoDebugProject.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'TestApp',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
PeterLValve/apitrace
|
refs/heads/master
|
scripts/convert.py
|
1
|
#!/usr/bin/env python
##########################################################################
#
# Copyright 2012 VMware Inc.
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the 'Software'), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################
'''Convert traces to/from PIX.
'''
import optparse
import os.path
import subprocess
import platform
import sys
def getPixExe():
try:
programFiles = os.environ['ProgramFiles(x86)']
except KeyError:
programFiles = os.environ['ProgramFiles']
try:
dxsdkDir = os.environ['DXSDK_DIR']
except KeyError:
dxsdkDir = os.path.join(programFiles, "Microsoft DirectX SDK (June 2010)")
pixExe = os.path.join(dxsdkDir, "Utilities", "bin", 'x86', 'PIXwin.exe')
return pixExe
def callProcess(cmd):
if options.verbose:
sys.stderr.write(' '.join(cmd) + '\n')
ret = subprocess.call(cmd)
if ret:
exeName = os.path.basename(cmd[0])
sys.stderr.write('error: %s failed with exit code %u\n' % (exeName, ret))
sys.exit(ret)
return ret
def convertToPix(inTrace, outPixrun):
pixExe = getPixExe()
pixExp = os.path.join(os.path.dirname(__file__), 'apitrace.PIXExp')
# http://social.msdn.microsoft.com/Forums/sv/devdocs/thread/15addc0c-036d-413a-854a-35637ccbb834
# http://src.chromium.org/svn/trunk/o3d/tests/test_driver.py
cmd = [
getPixExe(),
pixExp,
'-start',
'-runfile', os.path.abspath(outPixrun),
'-targetpath', os.path.abspath(options.retrace),
#'-targetstartfolder', ...,
'-targetargs', os.path.abspath(inTrace),
]
callProcess(cmd)
if os.path.exists(outPixrun):
sys.stderr.write('info: %s written\n' % outPixrun)
if options.verify:
subprocess.call([pixExe, os.path.abspath(outPixrun)])
else:
sys.stderr.write('error: %s not written\n' % outPixrun)
sys.exit(1)
def convertFromPix(inPix, outTrace):
pixExe = getPixExe()
if False:
# TODO: Use -exporttocsv option to detect which API to use
cmd = [
pixExe,
inPix,
'-exporttocsv', # XXX: output filename is ignored
]
callProcess(cmd)
cmd = [
options.apitrace,
'trace',
'-a', options.api,
'-o', outTrace,
pixExe,
inPix,
'-playstandalone',
]
callProcess(cmd)
if os.path.exists(outTrace):
sys.stderr.write('info: %s written\n' % outTrace)
if options.verify:
subprocess.call([options.retrace, os.path.abspath(outTrace)])
else:
sys.stderr.write('error: %s not written\n' % outTrace)
sys.exit(1)
def main():
global options
# Parse command line options
optparser = optparse.OptionParser(
usage='\n\t%prog [options] <trace> ...',
version='%%prog')
optparser.add_option(
'--apitrace', metavar='PROGRAM',
type='string', dest='apitrace', default='apitrace.exe',
help='retrace command [default: %default]')
optparser.add_option(
'-a', '--api', metavar='API',
type='string', dest='api', default='d3d9',
help='api [default: %default]')
optparser.add_option(
'-r', '--retrace', metavar='PROGRAM',
type='string', dest='retrace', default='d3dretrace.exe',
help='retrace command [default: %default]')
optparser.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='verbose output')
optparser.add_option(
'-o', '--output', metavar='FILE',
type="string", dest="output",
help="output file [default: stdout]")
optparser.add_option(
'--verify',
action='store_true', dest='verify', default=False,
help='verify output by replaying it')
(options, args) = optparser.parse_args(sys.argv[1:])
if not args:
optparser.error("incorrect number of arguments")
for inFile in args:
name, inExt = os.path.splitext(os.path.basename(inFile))
if inExt.lower() == '.trace':
convert = convertToPix
outExt = '.PIXRun'
elif inExt.lower() == '.pixrun':
convert = convertFromPix
outExt = '.trace'
else:
optparser.error("unexpected file extensions `%s`" % inExt)
if options.output:
outFile = options.output
else:
outFile = name + outExt
convert(inFile, outFile)
if __name__ == '__main__':
main()
|
santoshkumarsingh/Data-Wrangling-with-MongoDB
|
refs/heads/master
|
Lesson_1_Data_Extraction_Fundamentals/07-Parsing_CSV_Files/simple.py
|
2
|
# Your task is to read the input DATAFILE line by line, and for the first 10 lines (not including the header)
# split each line on "," and then for each line, create a dictionary
# where the key is the header title of the field, and the value is the value of that field in the row.
# The function parse_file should return a list of dictionaries,
# one list entry per data line in the file.
# Field names and values should not contain extra whitespace, like spaces or newline characters.
# You can use the Python string method strip() to remove the extra whitespace.
# You have to parse only the first 10 data lines in this exercise,
# so the returned list should have 10 entries!
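# For reference, dict(zip(header, values)) pairs each header field with the
# value in the same position, e.g.:
#   dict(zip(["Title", "Released"], ["Please Please Me", "22 March 1963"]))
#   => {'Title': 'Please Please Me', 'Released': '22 March 1963'}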
import os
DATADIR = ""
DATAFILE = "beatles-diskography.csv"
def parse_file(datafile):
data = []
with open(datafile, "rb") as f:
header = f.readline().strip().split(",")
for i, line in enumerate(f):
    if i >= 10:
        break  # the exercise asks for the first 10 data lines only
    data.append(dict(zip(header, line.strip().split(","))))
return data
def test():
# a simple test of your implementation
datafile = os.path.join(DATADIR, DATAFILE)
d = parse_file(datafile)
firstline = {'Title': 'Please Please Me', 'UK Chart Position': '1', 'Label': 'Parlophone(UK)', 'Released': '22 March 1963', 'US Chart Position': '-', 'RIAA Certification': 'Platinum', 'BPI Certification': 'Gold'}
tenthline = {'Title': '', 'UK Chart Position': '1', 'Label': 'Parlophone(UK)', 'Released': '10 July 1964', 'US Chart Position': '-', 'RIAA Certification': '', 'BPI Certification': 'Gold'}
assert d[0] == firstline
assert d[9] == tenthline
test()
|
mapr/hue
|
refs/heads/hue-3.9.0-mapr
|
desktop/core/ext-py/Django-1.6.10/django/dispatch/__init__.py
|
571
|
"""Multi-consumer multi-producer dispatching mechanism
Originally based on pydispatch (BSD) http://pypi.python.org/pypi/PyDispatcher/2.0.1
See license.txt for original license.
Heavily modified for Django's purposes.
"""
from django.dispatch.dispatcher import Signal, receiver
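# Minimal usage sketch (the signal and handler names are illustrative):
#
#   pizza_done = Signal(providing_args=["toppings"])
#
#   @receiver(pizza_done)
#   def handle_pizza(sender, toppings, **kwargs):
#       ...
#
#   pizza_done.send(sender=Oven, toppings=["cheese"])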
|
fo0nikens/CapTipper
|
refs/heads/master
|
CTMagic.py
|
11
|
#
# CapTipper is a malicious HTTP traffic explorer tool
# By Omri Herscovici <omriher AT gmail.com>
# http://omriher.com
# @omriher
#
#
# This file is part of CapTipper, and part of the Whatype library
# Whatype is an independent file type identification python library
# https://github.com/omriher/whatype
#
# CapTipper is a free software under the GPLv3 License
#
import os
class WhatypeErr(Exception):
def __init__(self, when, error):
self.when = when
self.error = error
def __str__(self):
return repr("Whatype Error on " + self.when + " : " + self.error)
class MagicNode(object):
def __init__(self, byte):
self.byte = byte
self.filetype = ""
self.ext = ""
self.strings = ""
self.children = []
def add_child(self, obj):
n = MagicNode(obj)
self.children.append(n)
return n
def has_child(self, data):
for child in self.children:
if child.byte.lower() == data.lower():
return child
return None
def get_childrens_by_byte(self, data):
childrens = []
for child in self.children:
if child.byte.lower() == data.lower():
#return child
childrens.append(child)
return childrens
class Whatype(object):
WTver = "0.1"
WTrev = "01"
MAGICLIST_NAME = "magics.csv"
def __init__(self,magic_file=""):
if magic_file:
if os.path.isfile(magic_file):
self.magic_list_file = magic_file
else:
raise WhatypeErr("magics list load", "Couldn't find " + magic_file)
else:
default_mgc = os.path.join(os.path.dirname(os.path.realpath(__file__)),Whatype.MAGICLIST_NAME)
if os.path.isfile(default_mgc):
self.magic_list_file = default_mgc
else:
raise WhatypeErr("loading default magics list","Couldn't find default magics list. " \
"Please provide a magics CSV file")
# Create main prefix tree graph (Trie)
self.Tree = MagicNode("all_magics")
with open(self.magic_list_file, "r") as ins:
for line in ins:
parts = line.split(",")
# parts[0] = File Type
# parts[1] = Magic bytes
# parts[2] = File Ext
# parts[3] = File Strings
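# A row of magics.csv might look like (hypothetical example):
#   PNG Image,89 50 4E 47,png,
# i.e. comma-separated: type name, space-separated hex magic bytes,
# extension, and optional semicolon-separated string hints.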
self.create_branch(0, self.Tree, parts[0], parts[1], parts[2],parts[3])
def create_branch(self, node_level, father, filetype, magic, ext, strings):
magic_bytes = magic.split(" ")
byte = magic_bytes[node_level]
son = father.has_child(byte)
node_level += 1
if (node_level < len(magic_bytes)):
if son is None:
son = father.add_child(byte)
self.create_branch(node_level, son, filetype, magic, ext,strings)
else:
if (node_level == len(magic_bytes)):
son = father.add_child(byte)
son.filetype = filetype
son.ext = ext
son.strings = strings
def print_tree(self,Node, index):
for nd in Node.children:
print "--" * index + nd.byte
if (len(nd.children) > 0):
self.print_tree(nd, index + 1)
def strings_search(self,strings_list, content):
bGood = True
for str in strings_list.split(";"):
if content.lower().find(str.lower().rstrip()) == -1:
bGood = False
return bGood
def return_magic(self,cont,Name,Ext):
if not Name:
Name = "Inconclusive. "
if self.istext(cont):
Name += "Probably text"
Ext = "TEXT"
else:
Name += "Probably binary"
Ext = "BINARY"
return Name,Ext
def istext(self,cont):
# Based on http://code.activestate.com/recipes/173220/
import string
text_characters = "".join(map(chr, range(32, 127)) + list("\n\r\t\b"))
_null_trans = string.maketrans("", "")
if not cont:
# Empty files are considered text
return True
if "\0" in cont:
# Files with null bytes are likely binary
return False
# Get the non-text characters (maps a character to itself then
# use the 'remove' option to get rid of the text characters.)
t = cont.translate(_null_trans, text_characters)
# If more than 30% non-text characters, then
# this is considered a binary file
if float(len(t))/float(len(cont)) > 0.30:
return False
return True
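# find() walks the magic-byte trie one content byte at a time, recording
# every matching node in magic_history. When the walk overshoots (no child
# matches and the current node names no file type), it returns the special
# "Rollback" marker, and the caller unwinds magic_history to the deepest
# node whose file type and string hints both match the content.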
def find(self, cont, Node, index=0, magic_history=[]):
if cont == "" or cont is None:
return "",""
curr_byte = cont[index].encode('hex')
NextNode = Node.get_childrens_by_byte(curr_byte)
if NextNode:
magic_history.extend(NextNode)
Name, Ext = self.find(cont, NextNode[0], index+1, magic_history)
if Ext == "Rollback":
for i in range(len(magic_history)):
Node = magic_history.pop()
if Node.filetype != "":
if self.strings_search(Node.strings, cont):
return Node.filetype, Node.ext
else:
return Name, Ext
return self.return_magic(cont,"","")
#return ""
else:
# last hex node found
if Node.filetype != "":
if self.strings_search(Node.strings, cont):
return Node.filetype, Node.ext
if len(magic_history) == 0:
#return "",""
return self.return_magic(cont,"","")
return "", "Rollback" # Magic search went too far, rollbacking
def identify_file(self,filepath):
try:
file_content = open(filepath).read()
# Pass a fresh history list explicitly; the mutable default argument
# of find() would otherwise be shared across calls.
return self.find(file_content, self.Tree, 0, [])
except Exception, e:
raise WhatypeErr("file identification", str(e))
def identify_buffer(self,file_content):
try:
return self.find(file_content, self.Tree,0,[])
except Exception, e:
raise WhatypeErr("buffer identification", str(e))
|
p0psicles/SickRage
|
refs/heads/master
|
lib/stevedore/__init__.py
|
77
|
# flake8: noqa
__all__ = [
'ExtensionManager',
'EnabledExtensionManager',
'NamedExtensionManager',
'HookManager',
'DriverManager',
]
from .extension import ExtensionManager
from .enabled import EnabledExtensionManager
from .named import NamedExtensionManager
from .hook import HookManager
from .driver import DriverManager
import logging
# Configure a NullHandler for our log messages in case
# the application using this library does not set up logging.
LOG = logging.getLogger('stevedore')
if hasattr(logging, 'NullHandler'):
LOG.addHandler(logging.NullHandler())
else:
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
LOG.addHandler(NullHandler())
|
honnibal/spaCy
|
refs/heads/master
|
spacy/pipeline/hooks.py
|
1
|
# coding: utf8
from __future__ import unicode_literals
from thinc.t2v import Pooling, max_pool, mean_pool
from thinc.neural._classes.difference import Siamese, CauchySimilarity
from .pipes import Pipe
from ..language import component
from .._ml import link_vectors_to_models
@component("sentencizer_hook", assigns=["doc.user_hooks"])
class SentenceSegmenter(object):
"""A simple spaCy hook, to allow custom sentence boundary detection logic
(that doesn't require the dependency parse). To change the sentence
boundary detection strategy, pass a generator function `strategy` on
initialization, or assign a new strategy to the .strategy attribute.
Sentence detection strategies should be generators that take `Doc` objects
and yield `Span` objects for each sentence.
"""
def __init__(self, vocab, strategy=None):
self.vocab = vocab
if strategy is None or strategy == "on_punct":
strategy = self.split_on_punct
self.strategy = strategy
def __call__(self, doc):
doc.user_hooks["sents"] = self.strategy
return doc
@staticmethod
def split_on_punct(doc):
start = 0
seen_period = False
for i, token in enumerate(doc):
if seen_period and not token.is_punct:
yield doc[start : token.i]
start = token.i
seen_period = False
elif token.text in [".", "!", "?"]:
seen_period = True
if start < len(doc):
yield doc[start : len(doc)]
@component("similarity", assigns=["doc.user_hooks"])
class SimilarityHook(Pipe):
"""
Experimental: A pipeline component to install a hook for supervised
similarity into `Doc` objects. Requires a `Tensorizer` to pre-process
documents. The similarity model can be any object obeying the Thinc `Model`
interface. By default, the model concatenates the elementwise mean and
elementwise max of the two tensors, and compares them using the
Cauchy-like similarity function from Chen (2013):
>>> similarity = 1. / (1. + (W * (vec1-vec2)**2).sum())
Where W is a vector of dimension weights, initialized to 1.
"""
def __init__(self, vocab, model=True, **cfg):
self.vocab = vocab
self.model = model
self.cfg = dict(cfg)
@classmethod
def Model(cls, length):
return Siamese(Pooling(max_pool, mean_pool), CauchySimilarity(length))
def __call__(self, doc):
"""Install similarity hook"""
doc.user_hooks["similarity"] = self.predict
return doc
def pipe(self, docs, **kwargs):
for doc in docs:
yield self(doc)
def predict(self, doc1, doc2):
self.require_model()
return self.model.predict([(doc1, doc2)])
def update(self, doc1_doc2, golds, sgd=None, drop=0.0):
self.require_model()
sims, bp_sims = self.model.begin_update(doc1_doc2, drop=drop)
def begin_training(self, _=tuple(), pipeline=None, sgd=None, **kwargs):
"""Allocate model, using width from tensorizer in pipeline.
gold_tuples (iterable): Gold-standard training data.
pipeline (list): The pipeline the model is part of.
"""
if self.model is True:
self.model = self.Model(pipeline[0].model.nO)
link_vectors_to_models(self.vocab)
if sgd is None:
sgd = self.create_optimizer()
return sgd
|
wiki-ai/editquality
|
refs/heads/master
|
editquality/feature_lists/cawiki.py
|
1
|
from revscoring.languages import catalan
from . import enwiki, mediawiki, wikipedia, wikitext
badwords = [
catalan.badwords.revision.diff.match_delta_sum,
catalan.badwords.revision.diff.match_delta_increase,
catalan.badwords.revision.diff.match_delta_decrease,
catalan.badwords.revision.diff.match_prop_delta_sum,
catalan.badwords.revision.diff.match_prop_delta_increase,
catalan.badwords.revision.diff.match_prop_delta_decrease
]
informals = [
catalan.informals.revision.diff.match_delta_sum,
catalan.informals.revision.diff.match_delta_increase,
catalan.informals.revision.diff.match_delta_decrease,
catalan.informals.revision.diff.match_prop_delta_sum,
catalan.informals.revision.diff.match_prop_delta_increase,
catalan.informals.revision.diff.match_prop_delta_decrease
]
dict_words = [
catalan.dictionary.revision.diff.dict_word_delta_sum,
catalan.dictionary.revision.diff.dict_word_delta_increase,
catalan.dictionary.revision.diff.dict_word_delta_decrease,
catalan.dictionary.revision.diff.dict_word_prop_delta_sum,
catalan.dictionary.revision.diff.dict_word_prop_delta_increase,
catalan.dictionary.revision.diff.dict_word_prop_delta_decrease,
catalan.dictionary.revision.diff.non_dict_word_delta_sum,
catalan.dictionary.revision.diff.non_dict_word_delta_increase,
catalan.dictionary.revision.diff.non_dict_word_delta_decrease,
catalan.dictionary.revision.diff.non_dict_word_prop_delta_sum,
catalan.dictionary.revision.diff.non_dict_word_prop_delta_increase,
catalan.dictionary.revision.diff.non_dict_word_prop_delta_decrease
]
damaging = wikipedia.page + \
wikitext.parent + wikitext.diff + mediawiki.user_rights + \
mediawiki.protected_user + mediawiki.comment + \
badwords + informals + dict_words + \
enwiki.badwords + enwiki.informals
reverted = damaging
goodfaith = damaging
|
MalmoUniversity-DA366A/calvin-base
|
refs/heads/master
|
calvin/Tools/cscompiler.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import json
import argparse
from calvin.csparser.parser import calvin_parser
from calvin.csparser.checker import check
from calvin.csparser.analyzer import generate_app_info
def compile(source_text, filename=''):
# Steps taken:
# 1) parser .calvin file -> IR. May produce syntax errors/warnings
# 2) checker IR -> IR. May produce syntax errors/warnings
# 3) analyzer IR -> app. Should not fail. Sets 'valid' property of IR to True/False
deployable = {'valid': False, 'actors': {}, 'connections': {}}
ir, errors, warnings = calvin_parser(source_text, filename)
# If there were errors during parsing no IR will be generated
if not errors:
c_errors, c_warnings = check(ir)
errors.extend(c_errors)
warnings.extend(c_warnings)
deployable = generate_app_info(ir)
if errors:
deployable['valid'] = False
return deployable, errors, warnings
def compile_file(file):
with open(file, 'r') as source:
sourceText = source.read()
return compile(sourceText, file)
def compile_generator(files):
for file in files:
deployable, errors, warnings = compile_file(file)
yield((deployable, errors, warnings, file))
def remove_debug_info(deployable):
pass
# if type(d)==type({}):
# d.pop('dbg_line', None)
# for item in d:
# _remove_debug_symbols(d[item])
# elif type(d)==type([]):
# for item in d:
# _remove_debug_symbols(item)
def main():
long_description = """
Compile a CalvinScript source file, <filename>, into a deployable JSON representation.
By default, the output will be written to file with the same name as the input file,
but with the extension replaced by 'json'.
"""
argparser = argparse.ArgumentParser(description=long_description)
argparser.add_argument('files', metavar='<filename>', type=str, nargs='+',
help='source file to compile')
argparser.add_argument('-d', '--debug', dest='debug', action='store_true', default=False,
help='leave debugging information in output')
argparser.add_argument('--stdout', dest='to_stdout', action='store_true',
help='send output to stdout instead of file (default)')
argparser.add_argument('--compact', dest='indent', action='store_const', const=None, default=4,
help='use compact JSON format instead of readable (default)')
argparser.add_argument('--sorted', dest='sorted', action='store_true', default=False,
help='sort resulting JSON output by keys')
argparser.add_argument('--issue-fmt', dest='fmt', type=str,
default='{issue_type}: {reason} {script} [{line}:{col}]',
help='custom format for issue reporting.')
argparser.add_argument('--verbose', action='store_true',
help='informational output from the compiler')
args = argparser.parse_args()
def report_issues(issues, issue_type, file=''):
sorted_issues = sorted(issues, key=lambda k: k.get('line', 0))
for issue in sorted_issues:
sys.stderr.write(args.fmt.format(script=file, issue_type=issue_type, **issue) + '\n')
exit_code = 0
for deployable, errors, warnings, file in compile_generator(args.files):
if errors:
report_issues(errors, 'Error', file)
exit_code = 1
if warnings and args.verbose:
report_issues(warnings, 'Warning', file)
if exit_code == 1:
# Don't produce output if there were errors
continue
if not args.debug:
# FIXME: Debug information is not propagated from IR to deployable by Analyzer.
# When it is, this is the place to remove it
remove_debug_info(deployable)
string_rep = json.dumps(deployable, indent=args.indent, sort_keys=args.sorted)
if args.to_stdout:
print(string_rep)
else:
path, ext = os.path.splitext(file)
dst = path + ".json"
with open(dst, 'w') as f:
f.write(string_rep)
return exit_code
if __name__ == '__main__':
sys.exit(main())
|
FHannes/intellij-community
|
refs/heads/master
|
python/testData/highlighting/unsupportedFeaturesInPython3.py
|
58
|
print(<error descr="Python version 3.0 does not support <>, use != instead.">a <> 3</error>)
<error descr="Python version 3.0 does not support backquotes, use repr() instead">`foo()`</error>
a = <error descr="Python version 3.0 does not support a trailing 'l' or 'L'.">123l</error>
a = <error descr="Python version 3.0 does not support this syntax. It requires '0o' prefix for octal literals">043</error>
a = 0X43
a = 0b1
a = 0.0
s = <error descr="Python version 3.0 does not support a 'U' prefix">u</error>"text"
<error descr="Python version 3.0 does not support this syntax.">raise a, b, c</error>
<error descr="Python version 3.0 does not support this syntax.">raise a, b</error>
try:
pass
<error descr="Python version 3.0 does not support this syntax.">except a, name:
pass</error>
[x * 2 for x in <error descr="Python version 3.0 does not support this syntax in list comprehensions.">vec1, vec2</error>]
<error descr="Python version 3.0 does not have module __builtin__">import __builtin__</error>
<warning descr="Python version 3.0 does not support this syntax. Raise with no arguments can only be used in an except block">raise</warning>
try:
pass
except:
raise
|
zahanm/foodpedia
|
refs/heads/master
|
django/db/models/fields/related.py
|
82
|
from django.conf import settings
from django.db import connection, router, transaction, connections
from django.db.backends import util
from django.db.models import signals, get_model
from django.db.models.fields import (AutoField, Field, IntegerField,
PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist)
from django.db.models.related import RelatedObject
from django.db.models.query import QuerySet
from django.db.models.query_utils import QueryWrapper
from django.db.models.deletion import CASCADE
from django.utils.encoding import smart_unicode
from django.utils.translation import (ugettext_lazy as _, string_concat,
ungettext, ugettext)
from django.utils.functional import curry
from django.core import exceptions
from django import forms
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
pending_lookups = {}
def add_lazy_relation(cls, field, relation, operation):
"""
Adds a lookup on ``cls`` when a related field is defined using a string,
i.e.::
class MyModel(Model):
fk = ForeignKey("AnotherModel")
This string can be:
* RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
relation.
* The name of a model (i.e "AnotherModel") to indicate another model in
the same app.
* An app-label and model name (i.e. "someapp.AnotherModel") to indicate
another model in a different app.
If the other model hasn't yet been loaded -- almost a given if you're using
lazy relationships -- then the relation won't be set up until the
class_prepared signal fires at the end of model initialization.
operation is the work that must be performed once the relation can be resolved.
"""
# Check for recursive relations
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
app_label = cls._meta.app_label
model_name = cls.__name__
else:
# Look for an "app.Model" relation
try:
app_label, model_name = relation.split(".")
except ValueError:
# If we can't split, assume a model in current app
app_label = cls._meta.app_label
model_name = relation
except AttributeError:
# If it doesn't have a split it's actually a model class
app_label = relation._meta.app_label
model_name = relation._meta.object_name
# Try to look up the related model, and if it's already loaded resolve the
# string right away. If get_model returns None, it means that the related
# model isn't loaded yet, so we need to pend the relation until the class
# is prepared.
model = get_model(app_label, model_name, False)
if model:
operation(field, model, cls)
else:
key = (app_label, model_name)
value = (cls, field, operation)
pending_lookups.setdefault(key, []).append(value)
def do_pending_lookups(sender, **kwargs):
"""
Handle any pending relations to the sending model. Sent from class_prepared.
"""
key = (sender._meta.app_label, sender.__name__)
for cls, field, operation in pending_lookups.pop(key, []):
operation(field, sender, cls)
signals.class_prepared.connect(do_pending_lookups)
#HACK
class RelatedField(object):
def contribute_to_class(self, cls, name):
sup = super(RelatedField, self)
# Store the opts for related_query_name()
self.opts = cls._meta
if hasattr(sup, 'contribute_to_class'):
sup.contribute_to_class(cls, name)
if not cls._meta.abstract and self.rel.related_name:
self.rel.related_name = self.rel.related_name % {
'class': cls.__name__.lower(),
'app_label': cls._meta.app_label.lower(),
}
other = self.rel.to
if isinstance(other, basestring) or other._meta.pk is None:
def resolve_related_class(field, model, cls):
field.rel.to = model
field.do_related_class(model, cls)
add_lazy_relation(cls, self, other, resolve_related_class)
else:
self.do_related_class(other, cls)
def set_attributes_from_rel(self):
self.name = self.name or (self.rel.to._meta.object_name.lower() + '_' + self.rel.to._meta.pk.name)
if self.verbose_name is None:
self.verbose_name = self.rel.to._meta.verbose_name
self.rel.field_name = self.rel.field_name or self.rel.to._meta.pk.name
def do_related_class(self, other, cls):
self.set_attributes_from_rel()
self.related = RelatedObject(other, cls, self)
if not cls._meta.abstract:
self.contribute_to_related_class(other, self.related)
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
# FIXME: lt and gt are explicitly allowed to make
# get_(next/prev)_by_date work; other lookups are not allowed since that
# gets messy pretty quick. This is a good candidate for some refactoring
# in the future.
if lookup_type in ['exact', 'gt', 'lt', 'gte', 'lte']:
return self._pk_trace(value, 'get_prep_lookup', lookup_type)
if lookup_type in ('range', 'in'):
return [self._pk_trace(v, 'get_prep_lookup', lookup_type) for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError("Related Field has invalid lookup: %s" % lookup_type)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabel_aliases method, it will need to
# be invoked before the final SQL is evaluated
if hasattr(value, 'relabel_aliases'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
# FIXME: lt and gt are explicitly allowed to make
# get_(next/prev)_by_date work; other lookups are not allowed since that
# gets messy pretty quick. This is a good candidate for some refactoring
# in the future.
if lookup_type in ['exact', 'gt', 'lt', 'gte', 'lte']:
return [self._pk_trace(value, 'get_db_prep_lookup', lookup_type,
connection=connection, prepared=prepared)]
if lookup_type in ('range', 'in'):
return [self._pk_trace(v, 'get_db_prep_lookup', lookup_type,
connection=connection, prepared=prepared)
for v in value]
elif lookup_type == 'isnull':
return []
raise TypeError("Related Field has invalid lookup: %s" % lookup_type)
def _pk_trace(self, value, prep_func, lookup_type, **kwargs):
# Value may be a primary key, or an object held in a relation.
# If it is an object, then we need to get the primary key value for
# that object. In certain conditions (especially one-to-one relations),
# the primary key may itself be an object - so we need to keep drilling
# down until we hit a value that can be used for a comparison.
v = value
# In the case of an FK to 'self', this check allows to_field to be used
# for both forwards and reverse lookups across the FK. (For normal FKs,
# it's only relevant for forward lookups).
if isinstance(v, self.rel.to):
field_name = getattr(self.rel, "field_name", None)
else:
field_name = None
try:
while True:
if field_name is None:
field_name = v._meta.pk.name
v = getattr(v, field_name)
field_name = None
except AttributeError:
pass
except exceptions.ObjectDoesNotExist:
v = None
field = self
while field.rel:
if hasattr(field.rel, 'field_name'):
field = field.rel.to._meta.get_field(field.rel.field_name)
else:
field = field.rel.to._meta.pk
if lookup_type in ('range', 'in'):
v = [v]
v = getattr(field, prep_func)(lookup_type, v, **kwargs)
if isinstance(v, list):
v = v[0]
return v
def related_query_name(self):
# This method defines the name that can be used to identify this
# related object in a table-spanning query. It uses the lower-cased
# object_name by default, but this can be overridden with the
# "related_name" option.
return self.rel.related_name or self.opts.object_name.lower()
class SingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class pointed to by a related field.
# In the example "place.restaurant", the restaurant attribute is a
# SingleRelatedObjectDescriptor instance.
def __init__(self, related):
self.related = related
self.cache_name = related.get_cache_name()
def __get__(self, instance, instance_type=None):
if instance is None:
return self
try:
return getattr(instance, self.cache_name)
except AttributeError:
params = {'%s__pk' % self.related.field.name: instance._get_pk_val()}
db = router.db_for_read(self.related.model, instance=instance)
rel_obj = self.related.model._base_manager.using(db).get(**params)
setattr(instance, self.cache_name, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
raise AttributeError("%s must be accessed via instance" % self.related.opts.object_name)
# The similarity of the code below to the code in
# ReverseSingleRelatedObjectDescriptor is annoying, but there's a bunch
# of small differences that would make a common base class convoluted.
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.related.field.null == False:
raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.related.get_accessor_name()))
elif value is not None and not isinstance(value, self.related.model):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
self.related.get_accessor_name(), self.related.opts.object_name))
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": instance is on database "%s", value is on database "%s"' %
(value, instance._state.db, value._state.db))
# Set the value of the related field to the value of the related object's related field
setattr(value, self.related.field.attname, getattr(instance, self.related.field.rel.get_related_field().attname))
# Since we already know what the related object is, seed the related
# object caches now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.cache_name, value)
setattr(value, self.related.field.get_cache_name(), instance)
class ReverseSingleRelatedObjectDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# a single "remote" value, on the class that defines the related field.
# In the example "choice.poll", the poll attribute is a
# ReverseSingleRelatedObjectDescriptor instance.
def __init__(self, field_with_rel):
self.field = field_with_rel
def __get__(self, instance, instance_type=None):
if instance is None:
return self
cache_name = self.field.get_cache_name()
try:
return getattr(instance, cache_name)
except AttributeError:
val = getattr(instance, self.field.attname)
if val is None:
# If NULL is an allowed value, return it.
if self.field.null:
return None
raise self.field.rel.to.DoesNotExist
other_field = self.field.rel.get_related_field()
if other_field.rel:
params = {'%s__pk' % self.field.rel.field_name: val}
else:
params = {'%s__exact' % self.field.rel.field_name: val}
# If the related manager indicates that it should be used for
# related fields, respect that.
rel_mgr = self.field.rel.to._default_manager
db = router.db_for_read(self.field.rel.to, instance=instance)
if getattr(rel_mgr, 'use_for_related_fields', False):
rel_obj = rel_mgr.using(db).get(**params)
else:
rel_obj = QuerySet(self.field.rel.to).using(db).get(**params)
setattr(instance, cache_name, rel_obj)
return rel_obj
def __set__(self, instance, value):
if instance is None:
raise AttributeError("%s must be accessed via instance" % self.field.name)
# If null=True, we can assign null here, but otherwise the value needs
# to be an instance of the related class.
if value is None and self.field.null == False:
raise ValueError('Cannot assign None: "%s.%s" does not allow null values.' %
(instance._meta.object_name, self.field.name))
elif value is not None and not isinstance(value, self.field.rel.to):
raise ValueError('Cannot assign "%r": "%s.%s" must be a "%s" instance.' %
(value, instance._meta.object_name,
self.field.name, self.field.rel.to._meta.object_name))
elif value is not None:
if instance._state.db is None:
instance._state.db = router.db_for_write(instance.__class__, instance=value)
elif value._state.db is None:
value._state.db = router.db_for_write(value.__class__, instance=instance)
elif value._state.db is not None and instance._state.db is not None:
if not router.allow_relation(value, instance):
raise ValueError('Cannot assign "%r": instance is on database "%s", value is on database "%s"' %
(value, instance._state.db, value._state.db))
# If we're setting the value of a OneToOneField to None, we need to clear
# out the cache on any old related object. Otherwise, deleting the
# previously-related object will also cause this object to be deleted,
# which is wrong.
if value is None:
# Look up the previously-related object, which may still be available
# since we've not yet cleared out the related field.
# Use the cache directly, instead of the accessor; if we haven't
# populated the cache, then we don't care - we're only accessing
# the object to invalidate the accessor cache, so there's no
# need to populate the cache just to expire it again.
related = getattr(instance, self.field.get_cache_name(), None)
# If we've got an old related object, we need to clear out its
# cache. This cache also might not exist if the related object
# hasn't been accessed yet.
if related:
cache_name = self.field.related.get_cache_name()
try:
delattr(related, cache_name)
except AttributeError:
pass
# Set the value of the related field
try:
val = getattr(value, self.field.rel.get_related_field().attname)
except AttributeError:
val = None
setattr(instance, self.field.attname, val)
# Since we already know what the related object is, seed the related
# object cache now, too. This avoids another db hit if you get the
# object you just set.
setattr(instance, self.field.get_cache_name(), value)
class ForeignRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ForeignKey pointed at them by
# some other model. In the example "poll.choice_set", the choice_set
# attribute is a ForeignRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
return self
return self.create_manager(instance,
self.related.model._default_manager.__class__)
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Manager must be accessed via instance")
manager = self.__get__(instance)
# If the foreign key can support nulls, then completely clear the related set.
# Otherwise, just move the named objects into the set.
if self.related.field.null:
manager.clear()
manager.add(*value)
def delete_manager(self, instance):
"""
Returns a queryset based on the related model's base manager (rather
than the default manager, as returned by __get__). Used by
Model.delete().
"""
return self.create_manager(instance,
self.related.model._base_manager.__class__)
def create_manager(self, instance, superclass):
"""
Creates the managers used by other methods (__get__() and delete()).
"""
rel_field = self.related.field
rel_model = self.related.model
class RelatedManager(superclass):
def get_query_set(self):
db = self._db or router.db_for_read(rel_model, instance=instance)
return superclass.get_query_set(self).using(db).filter(**(self.core_filters))
def add(self, *objs):
for obj in objs:
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
setattr(obj, rel_field.name, instance)
obj.save()
add.alters_data = True
def create(self, **kwargs):
kwargs.update({rel_field.name: instance})
db = router.db_for_write(rel_model, instance=instance)
return super(RelatedManager, self.db_manager(db)).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
# Update kwargs with the related object that this
# ForeignRelatedObjectsDescriptor knows about.
kwargs.update({rel_field.name: instance})
db = router.db_for_write(rel_model, instance=instance)
return super(RelatedManager, self.db_manager(db)).get_or_create(**kwargs)
get_or_create.alters_data = True
# remove() and clear() are only provided if the ForeignKey can have a value of null.
if rel_field.null:
def remove(self, *objs):
val = getattr(instance, rel_field.rel.get_related_field().attname)
for obj in objs:
# Is obj actually part of this descriptor set?
if getattr(obj, rel_field.attname) == val:
setattr(obj, rel_field.name, None)
obj.save()
else:
raise rel_field.rel.to.DoesNotExist("%r is not related to %r." % (obj, instance))
remove.alters_data = True
def clear(self):
for obj in self.all():
setattr(obj, rel_field.name, None)
obj.save()
clear.alters_data = True
manager = RelatedManager()
attname = rel_field.rel.get_related_field().name
manager.core_filters = {'%s__%s' % (rel_field.name, attname):
getattr(instance, attname)}
manager.model = self.related.model
return manager
def create_many_related_manager(superclass, rel=False):
"""Creates a manager that subclasses 'superclass' (which is a Manager)
and adds behavior for many-to-many related objects."""
through = rel.through
class ManyRelatedManager(superclass):
def __init__(self, model=None, core_filters=None, instance=None, symmetrical=None,
join_table=None, source_field_name=None, target_field_name=None,
reverse=False):
super(ManyRelatedManager, self).__init__()
self.core_filters = core_filters
self.model = model
self.symmetrical = symmetrical
self.instance = instance
self.source_field_name = source_field_name
self.target_field_name = target_field_name
self.through = through
self._pk_val = self.instance.pk
self.reverse = reverse
if self._pk_val is None:
raise ValueError("%r instance needs to have a primary key value before a many-to-many relationship can be used." % instance.__class__.__name__)
def get_query_set(self):
db = self._db or router.db_for_read(self.instance.__class__, instance=self.instance)
return superclass.get_query_set(self).using(db)._next_is_sticky().filter(**(self.core_filters))
# If the ManyToMany relation has an intermediary model,
# the add and remove methods do not exist.
if rel.through._meta.auto_created:
def add(self, *objs):
self._add_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, add the mirror entry in the m2m table
if self.symmetrical:
self._add_items(self.target_field_name, self.source_field_name, *objs)
add.alters_data = True
def remove(self, *objs):
self._remove_items(self.source_field_name, self.target_field_name, *objs)
# If this is a symmetrical m2m relation to self, remove the mirror entry in the m2m table
if self.symmetrical:
self._remove_items(self.target_field_name, self.source_field_name, *objs)
remove.alters_data = True
def clear(self):
self._clear_items(self.source_field_name)
# If this is a symmetrical m2m relation to self, clear the mirror entry in the m2m table
if self.symmetrical:
self._clear_items(self.target_field_name)
clear.alters_data = True
def create(self, **kwargs):
# This check needs to be done here, since we can't later remove this
# from the method lookup table, as we do with add and remove.
if not rel.through._meta.auto_created:
opts = through._meta
raise AttributeError("Cannot use create() on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
db = router.db_for_write(self.instance.__class__, instance=self.instance)
new_obj = super(ManyRelatedManager, self.db_manager(db)).create(**kwargs)
self.add(new_obj)
return new_obj
create.alters_data = True
def get_or_create(self, **kwargs):
db = router.db_for_write(self.instance.__class__, instance=self.instance)
obj, created = \
super(ManyRelatedManager, self.db_manager(db)).get_or_create(**kwargs)
# We only need to add() if created because if we got an object back
# from get() then the relationship already exists.
if created:
self.add(obj)
return obj, created
get_or_create.alters_data = True
def _add_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK fieldname in the join table for the source object
            # target_field_name: the PK fieldname in the join table for the target object
            # *objs - objects to add. Either object instances, or primary keys of object instances.
# If there aren't any objects, there is nothing to do.
from django.db.models import Model
if objs:
new_ids = set()
for obj in objs:
if isinstance(obj, self.model):
if not router.allow_relation(obj, self.instance):
raise ValueError('Cannot add "%r": instance is on database "%s", value is on database "%s"' %
(obj, self.instance._state.db, obj._state.db))
new_ids.add(obj.pk)
elif isinstance(obj, Model):
raise TypeError("'%s' instance expected" % self.model._meta.object_name)
else:
new_ids.add(obj)
db = router.db_for_write(self.through, instance=self.instance)
vals = self.through._default_manager.using(db).values_list(target_field_name, flat=True)
vals = vals.filter(**{
source_field_name: self._pk_val,
'%s__in' % target_field_name: new_ids,
})
new_ids = new_ids - set(vals)
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action='pre_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
# Add the ones that aren't there already
for obj_id in new_ids:
self.through._default_manager.using(db).create(**{
'%s_id' % source_field_name: self._pk_val,
'%s_id' % target_field_name: obj_id,
})
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are inserting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action='post_add',
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=new_ids, using=db)
def _remove_items(self, source_field_name, target_field_name, *objs):
            # source_field_name: the PK fieldname in the join table for the source object
            # target_field_name: the PK fieldname in the join table for the target object
            # *objs - objects to remove
# If there aren't any objects, there is nothing to do.
if objs:
# Check that all the objects are of the right type
old_ids = set()
for obj in objs:
if isinstance(obj, self.model):
old_ids.add(obj.pk)
else:
old_ids.add(obj)
# Work out what DB we're operating on
db = router.db_for_write(self.through, instance=self.instance)
# Send a signal to the other end if need be.
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are deleting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action="pre_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
# Remove the specified objects from the join table
self.through._default_manager.using(db).filter(**{
source_field_name: self._pk_val,
'%s__in' % target_field_name: old_ids
}).delete()
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are deleting the
# duplicate data row for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action="post_remove",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=old_ids, using=db)
def _clear_items(self, source_field_name):
db = router.db_for_write(self.through, instance=self.instance)
            # source_field_name: the PK fieldname in the join table for the source object
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are clearing the
# duplicate data rows for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action="pre_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
self.through._default_manager.using(db).filter(**{
source_field_name: self._pk_val
}).delete()
if self.reverse or source_field_name == self.source_field_name:
# Don't send the signal when we are clearing the
# duplicate data rows for symmetrical reverse entries.
signals.m2m_changed.send(sender=rel.through, action="post_clear",
instance=self.instance, reverse=self.reverse,
model=self.model, pk_set=None, using=db)
return ManyRelatedManager
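# Note that create_many_related_manager() is a class factory rather than a
# plain class: the returned manager subclasses whatever manager class the
# related model actually uses (its default or base manager), and 'rel' and
# 'through' are closed over once per relation rather than looked up per call.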
class ManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField pointed at them by
# some other model (rather than having a ManyToManyField themselves).
# In the example "publication.article_set", the article_set attribute is a
# ManyRelatedObjectsDescriptor instance.
def __init__(self, related):
self.related = related # RelatedObject instance
def __get__(self, instance, instance_type=None):
if instance is None:
return self
# Dynamically create a class that subclasses the related
# model's default manager.
rel_model = self.related.model
superclass = rel_model._default_manager.__class__
RelatedManager = create_many_related_manager(superclass, self.related.field.rel)
manager = RelatedManager(
model=rel_model,
core_filters={'%s__pk' % self.related.field.name: instance._get_pk_val()},
instance=instance,
symmetrical=False,
source_field_name=self.related.field.m2m_reverse_field_name(),
target_field_name=self.related.field.m2m_field_name(),
reverse=True
)
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Manager must be accessed via instance")
if not self.related.field.rel.through._meta.auto_created:
opts = self.related.field.rel.through._meta
raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
manager = self.__get__(instance)
manager.clear()
manager.add(*value)
class ReverseManyRelatedObjectsDescriptor(object):
# This class provides the functionality that makes the related-object
# managers available as attributes on a model class, for fields that have
# multiple "remote" values and have a ManyToManyField defined in their
# model (rather than having another model pointed *at* them).
# In the example "article.publications", the publications attribute is a
# ReverseManyRelatedObjectsDescriptor instance.
def __init__(self, m2m_field):
self.field = m2m_field
def _through(self):
# through is provided so that you have easy access to the through
# model (Book.authors.through) for inlines, etc. This is done as
# a property to ensure that the fully resolved value is returned.
return self.field.rel.through
through = property(_through)
def __get__(self, instance, instance_type=None):
if instance is None:
return self
# Dynamically create a class that subclasses the related
# model's default manager.
        rel_model = self.field.rel.to
superclass = rel_model._default_manager.__class__
RelatedManager = create_many_related_manager(superclass, self.field.rel)
manager = RelatedManager(
model=rel_model,
core_filters={'%s__pk' % self.field.related_query_name(): instance._get_pk_val()},
instance=instance,
symmetrical=self.field.rel.symmetrical,
source_field_name=self.field.m2m_field_name(),
target_field_name=self.field.m2m_reverse_field_name(),
reverse=False
)
return manager
def __set__(self, instance, value):
if instance is None:
raise AttributeError("Manager must be accessed via instance")
if not self.field.rel.through._meta.auto_created:
opts = self.field.rel.through._meta
raise AttributeError("Cannot set values on a ManyToManyField which specifies an intermediary model. Use %s.%s's Manager instead." % (opts.app_label, opts.object_name))
manager = self.__get__(instance)
manager.clear()
manager.add(*value)
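# Illustrative usage sketch (hypothetical Article/Publication models, not
# part of this module): with publications = ManyToManyField(Publication),
# the two descriptors above give
#
#     article.publications.all()    # instance access: a ManyRelatedManager
#     article.publications.add(p)   # only when the through model is auto-created
#     Article.publications.through  # class access: the intermediary model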
class ManyToOneRel(object):
def __init__(self, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None):
try:
to._meta
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
self.to, self.field_name = to, field_name
self.related_name = related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.multiple = True
self.parent_link = parent_link
self.on_delete = on_delete
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name and self.related_name[-1] == '+'
def get_related_field(self):
"""
Returns the Field in the 'to' object to which this relationship is
tied.
"""
data = self.to._meta.get_field_by_name(self.field_name)
if not data[2]:
raise FieldDoesNotExist("No related field named '%s'" %
self.field_name)
return data[0]
class OneToOneRel(ManyToOneRel):
def __init__(self, to, field_name, related_name=None, limit_choices_to=None,
parent_link=False, on_delete=None):
super(OneToOneRel, self).__init__(to, field_name,
related_name=related_name, limit_choices_to=limit_choices_to,
parent_link=parent_link, on_delete=on_delete
)
self.multiple = False
class ManyToManyRel(object):
def __init__(self, to, related_name=None, limit_choices_to=None,
symmetrical=True, through=None):
self.to = to
self.related_name = related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.symmetrical = symmetrical
self.multiple = True
self.through = through
def is_hidden(self):
"Should the related object be hidden?"
return self.related_name and self.related_name[-1] == '+'
def get_related_field(self):
"""
        Returns the field in the 'to' object to which this relationship is tied
(this is always the primary key on the target model). Provided for
symmetry with ManyToOneRel.
"""
return self.to._meta.pk
class ForeignKey(RelatedField, Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _('Model %(model)s with pk %(pk)r does not exist.')
}
description = _("Foreign Key (type determined by related field)")
def __init__(self, to, to_field=None, rel_class=ManyToOneRel, **kwargs):
try:
to_name = to._meta.object_name.lower()
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
else:
assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
# For backwards compatibility purposes, we need to *try* and set
# the to_field during FK construction. It won't be guaranteed to
# be correct until contribute_to_class is called. Refs #12190.
to_field = to_field or (to._meta.pk and to._meta.pk.name)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
if 'db_index' not in kwargs:
kwargs['db_index'] = True
kwargs['rel'] = rel_class(to, to_field,
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
parent_link=kwargs.pop('parent_link', False),
on_delete=kwargs.pop('on_delete', CASCADE),
)
Field.__init__(self, **kwargs)
def validate(self, value, model_instance):
if self.rel.parent_link:
return
super(ForeignKey, self).validate(value, model_instance)
if value is None:
return
using = router.db_for_read(model_instance.__class__, instance=model_instance)
qs = self.rel.to._default_manager.using(using).filter(
**{self.rel.field_name: value}
)
qs = qs.complex_filter(self.rel.limit_choices_to)
if not qs.exists():
raise exceptions.ValidationError(self.error_messages['invalid'] % {
'model': self.rel.to._meta.verbose_name, 'pk': value})
def get_attname(self):
return '%s_id' % self.name
def get_validator_unique_lookup_type(self):
return '%s__%s__exact' % (self.name, self.rel.get_related_field().name)
def get_default(self):
"Here we check if the default value is an object and return the to_field if so."
field_default = super(ForeignKey, self).get_default()
if isinstance(field_default, self.rel.to):
return getattr(field_default, self.rel.get_related_field().attname)
return field_default
def get_db_prep_save(self, value, connection):
        if value == '' or value is None:
return None
else:
return self.rel.get_related_field().get_db_prep_save(value,
connection=connections[router.db_for_read(self.rel.to)])
def value_to_string(self, obj):
if not obj:
# In required many-to-one fields with only one available choice,
# select that one available choice. Note: For SelectFields
# we have to check that the length of choices is *2*, not 1,
# because SelectFields always have an initial "blank" value.
if not self.blank and self.choices:
choice_list = self.get_choices_default()
if len(choice_list) == 2:
return smart_unicode(choice_list[1][0])
return Field.value_to_string(self, obj)
def contribute_to_class(self, cls, name):
super(ForeignKey, self).contribute_to_class(cls, name)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
if isinstance(self.rel.to, basestring):
target = self.rel.to
else:
target = self.rel.to._meta.db_table
cls._meta.duplicate_targets[self.column] = (target, "o2m")
def contribute_to_related_class(self, cls, related):
# Internal FK's - i.e., those with a related name ending with '+' -
# don't get a related descriptor.
if not self.rel.is_hidden():
setattr(cls, related.get_accessor_name(), ForeignRelatedObjectsDescriptor(related))
if self.rel.limit_choices_to:
cls._meta.related_fkey_lookups.append(self.rel.limit_choices_to)
if self.rel.field_name is None:
self.rel.field_name = cls._meta.pk.name
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
defaults = {
'form_class': forms.ModelChoiceField,
'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to),
'to_field_name': self.rel.field_name,
}
defaults.update(kwargs)
return super(ForeignKey, self).formfield(**defaults)
def db_type(self, connection):
# The database column type of a ForeignKey is the column type
# of the field to which it points. An exception is if the ForeignKey
# points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
# in which case the column type is simply that of an IntegerField.
        # If the database needs similar types for key fields, however, the only
        # thing we can do is make AutoField behave like an IntegerField.
rel_field = self.rel.get_related_field()
return rel_field.related_db_type(connection=connections[router.db_for_read(rel_field.model)])
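# Illustrative sketch (hypothetical models): with
#
#     class Account(models.Model):
#         number = models.IntegerField(unique=True)
#
#     class Payment(models.Model):
#         account = models.ForeignKey(Account, to_field='number')
#
# the attribute Payment.account_id (see get_attname) stores Account.number
# values, and db_type() borrows its column type from the 'number' field.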
class OneToOneField(ForeignKey):
"""
A OneToOneField is essentially the same as a ForeignKey, with the exception
    that it always carries a "unique" constraint with it and the reverse relation
always returns the object pointed to (since there will only ever be one),
rather than returning a list.
"""
description = _("One-to-one relationship")
def __init__(self, to, to_field=None, **kwargs):
kwargs['unique'] = True
super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
def contribute_to_related_class(self, cls, related):
setattr(cls, related.get_accessor_name(),
SingleRelatedObjectDescriptor(related))
def formfield(self, **kwargs):
if self.rel.parent_link:
return None
return super(OneToOneField, self).formfield(**kwargs)
def save_form_data(self, instance, data):
if isinstance(data, self.rel.to):
setattr(instance, self.name, data)
else:
setattr(instance, self.attname, data)
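# Illustrative sketch (hypothetical Place/Restaurant models): because
# OneToOneField forces unique=True and installs SingleRelatedObjectDescriptor
# on the related class, the reverse access
#
#     place.restaurant
#
# returns a single Restaurant instance rather than a manager.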
def create_many_to_many_intermediary_model(field, klass):
from django.db import models
managed = True
if isinstance(field.rel.to, basestring) and field.rel.to != RECURSIVE_RELATIONSHIP_CONSTANT:
to_model = field.rel.to
to = to_model.split('.')[-1]
def set_managed(field, model, cls):
field.rel.through._meta.managed = model._meta.managed or cls._meta.managed
add_lazy_relation(klass, field, to_model, set_managed)
elif isinstance(field.rel.to, basestring):
to = klass._meta.object_name
to_model = klass
managed = klass._meta.managed
else:
to = field.rel.to._meta.object_name
to_model = field.rel.to
managed = klass._meta.managed or to_model._meta.managed
name = '%s_%s' % (klass._meta.object_name, field.name)
if field.rel.to == RECURSIVE_RELATIONSHIP_CONSTANT or to == klass._meta.object_name:
from_ = 'from_%s' % to.lower()
to = 'to_%s' % to.lower()
else:
from_ = klass._meta.object_name.lower()
to = to.lower()
meta = type('Meta', (object,), {
'db_table': field._get_m2m_db_table(klass._meta),
'managed': managed,
'auto_created': klass,
'app_label': klass._meta.app_label,
'unique_together': (from_, to),
'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
})
# Construct and return the new class.
return type(name, (models.Model,), {
'Meta': meta,
'__module__': klass.__module__,
from_: models.ForeignKey(klass, related_name='%s+' % name),
to: models.ForeignKey(to_model, related_name='%s+' % name)
})
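# For example (hypothetical Article/Publication models), an Article.publications
# field with no explicit 'through' gets an auto-created model roughly
# equivalent to:
#
#     class Article_publications(models.Model):
#         article = models.ForeignKey(Article, related_name='Article_publications+')
#         publication = models.ForeignKey(Publication, related_name='Article_publications+')
#         class Meta:
#             auto_created = Article
#             unique_together = ('article', 'publication')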
class ManyToManyField(RelatedField, Field):
description = _("Many-to-many relationship")
def __init__(self, to, **kwargs):
try:
assert not to._meta.abstract, "%s cannot define a relation with abstract class %s" % (self.__class__.__name__, to._meta.object_name)
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ManyToManyField must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
kwargs['rel'] = ManyToManyRel(to,
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
symmetrical=kwargs.pop('symmetrical', to==RECURSIVE_RELATIONSHIP_CONSTANT),
through=kwargs.pop('through', None))
self.db_table = kwargs.pop('db_table', None)
if kwargs['rel'].through is not None:
assert self.db_table is None, "Cannot specify a db_table if an intermediary model is used."
Field.__init__(self, **kwargs)
msg = _('Hold down "Control", or "Command" on a Mac, to select more than one.')
self.help_text = string_concat(self.help_text, ' ', msg)
def get_choices_default(self):
return Field.get_choices(self, include_blank=False)
def _get_m2m_db_table(self, opts):
"Function that can be curried to provide the m2m table name for this relation"
if self.rel.through is not None:
return self.rel.through._meta.db_table
elif self.db_table:
return self.db_table
else:
return util.truncate_name('%s_%s' % (opts.db_table, self.name),
connection.ops.max_name_length())
def _get_m2m_attr(self, related, attr):
"Function that can be curried to provide the source accessor or DB column name for the m2m table"
cache_attr = '_m2m_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
for f in self.rel.through._meta.fields:
            if hasattr(f, 'rel') and f.rel and f.rel.to == related.model:
setattr(self, cache_attr, getattr(f, attr))
return getattr(self, cache_attr)
def _get_m2m_reverse_attr(self, related, attr):
"Function that can be curried to provide the related accessor or DB column name for the m2m table"
cache_attr = '_m2m_reverse_%s_cache' % attr
if hasattr(self, cache_attr):
return getattr(self, cache_attr)
found = False
for f in self.rel.through._meta.fields:
            if hasattr(f, 'rel') and f.rel and f.rel.to == related.parent_model:
if related.model == related.parent_model:
# If this is an m2m-intermediate to self,
# the first foreign key you find will be
# the source column. Keep searching for
# the second foreign key.
if found:
setattr(self, cache_attr, getattr(f, attr))
break
else:
found = True
else:
setattr(self, cache_attr, getattr(f, attr))
break
return getattr(self, cache_attr)
def value_to_string(self, obj):
data = ''
if obj:
qs = getattr(obj, self.name).all()
data = [instance._get_pk_val() for instance in qs]
else:
# In required many-to-many fields with only one available choice,
# select that one available choice.
if not self.blank:
choices_list = self.get_choices_default()
if len(choices_list) == 1:
data = [choices_list[0][0]]
return smart_unicode(data)
def contribute_to_class(self, cls, name):
# To support multiple relations to self, it's useful to have a non-None
# related name on symmetrical relations for internal reasons. The
# concept doesn't make a lot of sense externally ("you want me to
# specify *what* on my non-reversible relation?!"), so we set it up
# automatically. The funky name reduces the chance of an accidental
# clash.
if self.rel.symmetrical and (self.rel.to == "self" or self.rel.to == cls._meta.object_name):
self.rel.related_name = "%s_rel_+" % name
super(ManyToManyField, self).contribute_to_class(cls, name)
# The intermediate m2m model is not auto created if:
# 1) There is a manually specified intermediate, or
# 2) The class owning the m2m field is abstract.
if not self.rel.through and not cls._meta.abstract:
self.rel.through = create_many_to_many_intermediary_model(self, cls)
# Add the descriptor for the m2m relation
setattr(cls, self.name, ReverseManyRelatedObjectsDescriptor(self))
# Set up the accessor for the m2m table name for the relation
self.m2m_db_table = curry(self._get_m2m_db_table, cls._meta)
# Populate some necessary rel arguments so that cross-app relations
# work correctly.
if isinstance(self.rel.through, basestring):
def resolve_through_model(field, model, cls):
field.rel.through = model
add_lazy_relation(cls, self, self.rel.through, resolve_through_model)
if isinstance(self.rel.to, basestring):
target = self.rel.to
else:
target = self.rel.to._meta.db_table
cls._meta.duplicate_targets[self.column] = (target, "m2m")
def contribute_to_related_class(self, cls, related):
# Internal M2Ms (i.e., those with a related name ending with '+')
# don't get a related descriptor.
if not self.rel.is_hidden():
setattr(cls, related.get_accessor_name(), ManyRelatedObjectsDescriptor(related))
# Set up the accessors for the column names on the m2m table
self.m2m_column_name = curry(self._get_m2m_attr, related, 'column')
self.m2m_reverse_name = curry(self._get_m2m_reverse_attr, related, 'column')
self.m2m_field_name = curry(self._get_m2m_attr, related, 'name')
self.m2m_reverse_field_name = curry(self._get_m2m_reverse_attr, related, 'name')
get_m2m_rel = curry(self._get_m2m_attr, related, 'rel')
self.m2m_target_field_name = lambda: get_m2m_rel().field_name
get_m2m_reverse_rel = curry(self._get_m2m_reverse_attr, related, 'rel')
self.m2m_reverse_target_field_name = lambda: get_m2m_reverse_rel().field_name
def set_attributes_from_rel(self):
pass
def value_from_object(self, obj):
"Returns the value of this field in the given model instance."
return getattr(obj, self.attname).all()
def save_form_data(self, instance, data):
setattr(instance, self.attname, data)
def formfield(self, **kwargs):
db = kwargs.pop('using', None)
defaults = {
'form_class': forms.ModelMultipleChoiceField,
'queryset': self.rel.to._default_manager.using(db).complex_filter(self.rel.limit_choices_to)
}
defaults.update(kwargs)
# If initial is passed in, it's a list of related objects, but the
# MultipleChoiceField takes a list of IDs.
if defaults.get('initial') is not None:
initial = defaults['initial']
if callable(initial):
initial = initial()
defaults['initial'] = [i._get_pk_val() for i in initial]
return super(ManyToManyField, self).formfield(**defaults)
def db_type(self, connection):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
|
Scorpio92/linux_kernel_3.16.1
|
refs/heads/master
|
tools/perf/util/setup.py
|
989
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter']
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
libapikfs = getenv('LIBAPIKFS')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent, libapikfs],
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
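# Sketch of the expected invocation (inferred from the getenv() lookups above;
# the exact command line lives in the perf Makefile, not here):
#
#   PYTHON_EXTBUILD_LIB=.../lib PYTHON_EXTBUILD_TMP=.../tmp \
#   LIBTRACEEVENT=.../libtraceevent.a LIBAPIKFS=.../libapikfs.a \
#   python2 util/setup.py --quiet build_ext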
|
jolevq/odoopub
|
refs/heads/master
|
addons/portal_project/tests/test_access_rights.py
|
65
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.addons.project.tests.test_project_base import TestProjectBase
from openerp.exceptions import AccessError
from openerp.osv.orm import except_orm
from openerp.tools import mute_logger
class TestPortalProjectBase(TestProjectBase):
def setUp(self):
super(TestPortalProjectBase, self).setUp()
cr, uid = self.cr, self.uid
# Find Portal group
group_portal_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_portal')
self.group_portal_id = group_portal_ref and group_portal_ref[1] or False
# Find Public group
group_public_ref = self.registry('ir.model.data').get_object_reference(cr, uid, 'base', 'group_public')
self.group_public_id = group_public_ref and group_public_ref[1] or False
        # Test users used throughout the various tests
self.user_portal_id = self.res_users.create(cr, uid, {
'name': 'Chell Portal',
'login': 'chell',
'alias_name': 'chell',
'groups_id': [(6, 0, [self.group_portal_id])]
})
self.user_public_id = self.res_users.create(cr, uid, {
'name': 'Donovan Public',
'login': 'donovan',
'alias_name': 'donovan',
'groups_id': [(6, 0, [self.group_public_id])]
})
self.user_manager_id = self.res_users.create(cr, uid, {
'name': 'Eustache Manager',
'login': 'eustache',
'alias_name': 'eustache',
'groups_id': [(6, 0, [self.group_project_manager_id])]
})
# Test 'Pigs' project
self.project_pigs_id = self.project_project.create(cr, uid, {
'name': 'Pigs', 'privacy_visibility': 'public'}, {'mail_create_nolog': True})
# Various test tasks
self.task_1_id = self.project_task.create(cr, uid, {
'name': 'Test1', 'user_id': False, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
self.task_2_id = self.project_task.create(cr, uid, {
'name': 'Test2', 'user_id': False, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
self.task_3_id = self.project_task.create(cr, uid, {
'name': 'Test3', 'user_id': False, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
self.task_4_id = self.project_task.create(cr, uid, {
'name': 'Test4', 'user_id': self.user_projectuser_id, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
self.task_5_id = self.project_task.create(cr, uid, {
'name': 'Test5', 'user_id': self.user_portal_id, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
self.task_6_id = self.project_task.create(cr, uid, {
'name': 'Test6', 'user_id': self.user_public_id, 'project_id': self.project_pigs_id}, {'mail_create_nolog': True})
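# Fixture summary: the public 'Pigs' project carries six tasks -- Test1-3
# unassigned, Test4 assigned to the project user, Test5 to the portal user
# (Chell) and Test6 to the public user (Donovan). The test below toggles
# privacy_visibility through public/portal/employees/followers and checks
# who can read, search and write at each level.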
class TestPortalProject(TestPortalProjectBase):
@mute_logger('openerp.addons.base.ir.ir_model', 'openerp.models')
def test_00_project_access_rights(self):
""" Test basic project access rights, for project and portal_project """
cr, uid, pigs_id = self.cr, self.uid, self.project_pigs_id
# ----------------------------------------
# CASE1: public project
# ----------------------------------------
# Do: Alfred reads project -> ok (employee ok public)
self.project_project.read(cr, self.user_projectuser_id, [pigs_id], ['state'])
# Test: all project tasks visible
task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)])
test_task_ids = set([self.task_1_id, self.task_2_id, self.task_3_id, self.task_4_id, self.task_5_id, self.task_6_id])
self.assertEqual(set(task_ids), test_task_ids,
'access rights: project user cannot see all tasks of a public project')
# Test: all project tasks readable
self.project_task.read(cr, self.user_projectuser_id, task_ids, ['name'])
# Test: all project tasks writable
self.project_task.write(cr, self.user_projectuser_id, task_ids, {'description': 'TestDescription'})
# Do: Bert reads project -> crash, no group
self.assertRaises(AccessError, self.project_project.read, cr, self.user_none_id, [pigs_id], ['state'])
# Test: no project task visible
self.assertRaises(AccessError, self.project_task.search, cr, self.user_none_id, [('project_id', '=', pigs_id)])
# Test: no project task readable
self.assertRaises(AccessError, self.project_task.read, cr, self.user_none_id, task_ids, ['name'])
# Test: no project task writable
self.assertRaises(AccessError, self.project_task.write, cr, self.user_none_id, task_ids, {'description': 'TestDescription'})
# Do: Chell reads project -> ok (portal ok public)
self.project_project.read(cr, self.user_portal_id, [pigs_id], ['state'])
# Test: all project tasks visible
task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)])
        self.assertEqual(set(task_ids), test_task_ids,
                         'access rights: portal user cannot see all tasks of a public project')
# Test: all project tasks readable
self.project_task.read(cr, self.user_portal_id, task_ids, ['name'])
# Test: no project task writable
self.assertRaises(AccessError, self.project_task.write, cr, self.user_portal_id, task_ids, {'description': 'TestDescription'})
# Do: Donovan reads project -> ok (public)
self.project_project.read(cr, self.user_public_id, [pigs_id], ['state'])
# Test: all project tasks visible
task_ids = self.project_task.search(cr, self.user_public_id, [('project_id', '=', pigs_id)])
self.assertEqual(set(task_ids), test_task_ids,
'access rights: public user cannot see all tasks of a public project')
# Test: all project tasks readable
self.project_task.read(cr, self.user_public_id, task_ids, ['name'])
# Test: no project task writable
self.assertRaises(AccessError, self.project_task.write, cr, self.user_public_id, task_ids, {'description': 'TestDescription'})
# ----------------------------------------
# CASE2: portal project
# ----------------------------------------
self.project_project.write(cr, uid, [pigs_id], {'privacy_visibility': 'portal'})
self.project_project.invalidate_cache(cr, uid)
        # Do: Alfred reads project -> ok (employee ok portal)
self.project_project.read(cr, self.user_projectuser_id, [pigs_id], ['state'])
# Test: all project tasks visible
task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)])
self.assertEqual(set(task_ids), test_task_ids,
'access rights: project user cannot see all tasks of a portal project')
# Do: Bert reads project -> crash, no group
self.assertRaises(AccessError, self.project_project.read, cr, self.user_none_id, [pigs_id], ['state'])
# Test: no project task searchable
self.assertRaises(AccessError, self.project_task.search, cr, self.user_none_id, [('project_id', '=', pigs_id)])
# Data: task follower
self.project_task.message_subscribe_users(cr, self.user_projectuser_id, [self.task_1_id, self.task_3_id], [self.user_portal_id])
        # Do: Chell reads project -> ok (portal ok portal)
self.project_project.read(cr, self.user_portal_id, [pigs_id], ['state'])
        # Test: only followed and assigned project tasks visible
task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)])
test_task_ids = set([self.task_1_id, self.task_3_id, self.task_5_id])
self.assertEqual(set(task_ids), test_task_ids,
'access rights: portal user should see the followed tasks of a portal project')
# Do: Donovan reads project -> ko (public ko portal)
self.assertRaises(except_orm, self.project_project.read, cr, self.user_public_id, [pigs_id], ['state'])
# Test: no project task visible
task_ids = self.project_task.search(cr, self.user_public_id, [('project_id', '=', pigs_id)])
self.assertFalse(task_ids, 'access rights: public user should not see tasks of a portal project')
# Data: task follower cleaning
self.project_task.message_unsubscribe_users(cr, self.user_projectuser_id, [self.task_1_id, self.task_3_id], [self.user_portal_id])
# ----------------------------------------
# CASE3: employee project
# ----------------------------------------
self.project_project.write(cr, uid, [pigs_id], {'privacy_visibility': 'employees'})
self.project_project.invalidate_cache(cr, uid)
# Do: Alfred reads project -> ok (employee ok employee)
self.project_project.read(cr, self.user_projectuser_id, [pigs_id], ['state'])
# Test: all project tasks visible
task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)])
test_task_ids = set([self.task_1_id, self.task_2_id, self.task_3_id, self.task_4_id, self.task_5_id, self.task_6_id])
self.assertEqual(set(task_ids), test_task_ids,
'access rights: project user cannot see all tasks of an employees project')
# Do: Bert reads project -> crash, no group
self.assertRaises(AccessError, self.project_project.read, cr, self.user_none_id, [pigs_id], ['state'])
# Do: Chell reads project -> ko (portal ko employee)
self.assertRaises(except_orm, self.project_project.read, cr, self.user_portal_id, [pigs_id], ['state'])
        # Test: no project task visible, not even assigned ones
task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)])
self.assertFalse(task_ids, 'access rights: portal user should not see tasks of an employees project, even if assigned')
# Do: Donovan reads project -> ko (public ko employee)
self.assertRaises(except_orm, self.project_project.read, cr, self.user_public_id, [pigs_id], ['state'])
# Test: no project task visible
task_ids = self.project_task.search(cr, self.user_public_id, [('project_id', '=', pigs_id)])
self.assertFalse(task_ids, 'access rights: public user should not see tasks of an employees project')
# ----------------------------------------
# CASE4: followers project
# ----------------------------------------
self.project_project.write(cr, uid, [pigs_id], {'privacy_visibility': 'followers'})
self.project_project.invalidate_cache(cr, uid)
# Do: Alfred reads project -> ko (employee ko followers)
self.assertRaises(except_orm, self.project_project.read, cr, self.user_projectuser_id, [pigs_id], ['state'])
        # Test: only the assigned task visible
task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)])
test_task_ids = set([self.task_4_id])
self.assertEqual(set(task_ids), test_task_ids,
'access rights: employee user should not see tasks of a not-followed followers project, only assigned')
# Do: Bert reads project -> crash, no group
self.assertRaises(AccessError, self.project_project.read, cr, self.user_none_id, [pigs_id], ['state'])
        # Do: Chell reads project -> ko (portal ko followers)
self.assertRaises(except_orm, self.project_project.read, cr, self.user_portal_id, [pigs_id], ['state'])
        # Test: only the assigned task visible
task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)])
test_task_ids = set([self.task_5_id])
self.assertEqual(set(task_ids), test_task_ids,
'access rights: portal user should not see tasks of a not-followed followers project, only assigned')
        # Do: Donovan reads project -> ko (public ko followers)
self.assertRaises(except_orm, self.project_project.read, cr, self.user_public_id, [pigs_id], ['state'])
# Test: no project task visible
task_ids = self.project_task.search(cr, self.user_public_id, [('project_id', '=', pigs_id)])
self.assertFalse(task_ids, 'access rights: public user should not see tasks of a followers project')
# Data: subscribe Alfred, Chell and Donovan as follower
self.project_project.message_subscribe_users(cr, uid, [pigs_id], [self.user_projectuser_id, self.user_portal_id, self.user_public_id])
self.project_task.message_subscribe_users(cr, self.user_manager_id, [self.task_1_id, self.task_3_id], [self.user_portal_id, self.user_projectuser_id])
# Do: Alfred reads project -> ok (follower ok followers)
self.project_project.read(cr, self.user_projectuser_id, [pigs_id], ['state'])
# Test: followed + assigned tasks visible
task_ids = self.project_task.search(cr, self.user_projectuser_id, [('project_id', '=', pigs_id)])
test_task_ids = set([self.task_1_id, self.task_3_id, self.task_4_id])
        self.assertEqual(set(task_ids), test_task_ids,
                         'access rights: employee user should see followed and assigned tasks of a followers project')
# Do: Chell reads project -> ok (follower ok follower)
self.project_project.read(cr, self.user_portal_id, [pigs_id], ['state'])
# Test: followed + assigned tasks visible
task_ids = self.project_task.search(cr, self.user_portal_id, [('project_id', '=', pigs_id)])
test_task_ids = set([self.task_1_id, self.task_3_id, self.task_5_id])
        self.assertEqual(set(task_ids), test_task_ids,
                         'access rights: portal user should see followed and assigned tasks of a followers project')
# Do: Donovan reads project -> ko (public ko follower even if follower)
self.assertRaises(except_orm, self.project_project.read, cr, self.user_public_id, [pigs_id], ['state'])
|
rue89-tech/edx-platform
|
refs/heads/master
|
cms/djangoapps/contentstore/features/textbooks.py
|
116
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from django.conf import settings
from common import upload_file
from nose.tools import assert_equal
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
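# These step definitions back a lettuce (Gherkin) feature file elsewhere in
# the repo; a scenario exercising them might read (hypothetical example):
#
#   Scenario: Add a textbook
#       Given I go to the textbooks page
#       When I click the New Textbook button
#       And I name my textbook "Economics"
#       And I save the textbook
#       Then I should see a textbook named "Economics" with 1 chapters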
@step(u'I go to the textbooks page')
def go_to_uploads(_step):
world.wait_for_js_to_load()
world.click_course_content()
menu_css = 'li.nav-course-courseware-textbooks a'
world.css_click(menu_css)
@step(u'I should see a message telling me to create a new textbook')
def assert_create_new_textbook_msg(_step):
css = ".wrapper-content .no-textbook-content"
assert world.is_css_present(css)
no_tb = world.css_find(css)
assert "You haven't added any textbooks" in no_tb.text
@step(u'I upload the textbook "([^"]*)"$')
def upload_textbook(_step, file_name):
upload_file(file_name, sub_path="uploads/")
@step(u'I click (on )?the New Textbook button')
def click_new_textbook(_step, on):
button_css = ".nav-actions .new-button"
button = world.css_find(button_css)
button.click()
@step(u'I name my textbook "([^"]*)"')
def name_textbook(_step, name):
input_css = ".textbook input[name=textbook-name]"
world.css_fill(input_css, name)
if world.is_firefox():
world.trigger_event(input_css)
@step(u'I name the (first|second|third) chapter "([^"]*)"')
def name_chapter(_step, ordinal, name):
index = ["first", "second", "third"].index(ordinal)
input_css = ".textbook .chapter{i} input.chapter-name".format(i=index + 1)
world.css_fill(input_css, name)
if world.is_firefox():
world.trigger_event(input_css)
@step(u'I type in "([^"]*)" for the (first|second|third) chapter asset')
def asset_chapter(_step, name, ordinal):
index = ["first", "second", "third"].index(ordinal)
input_css = ".textbook .chapter{i} input.chapter-asset-path".format(i=index + 1)
world.css_fill(input_css, name)
if world.is_firefox():
world.trigger_event(input_css)
@step(u'I click the Upload Asset link for the (first|second|third) chapter')
def click_upload_asset(_step, ordinal):
index = ["first", "second", "third"].index(ordinal)
button_css = ".textbook .chapter{i} .action-upload".format(i=index + 1)
world.css_click(button_css)
@step(u'I click Add a Chapter')
def click_add_chapter(_step):
button_css = ".textbook .action-add-chapter"
world.css_click(button_css)
@step(u'I save the textbook')
def save_textbook(_step):
submit_css = "form.edit-textbook button[type=submit]"
world.css_click(submit_css)
@step(u'I should see a textbook named "([^"]*)" with a chapter path containing "([^"]*)"')
def check_textbook(_step, textbook_name, chapter_name):
title = world.css_text(".textbook h3.textbook-title", index=0)
chapter = world.css_text(".textbook .wrap-textbook p", index=0)
assert_equal(title, textbook_name)
assert_equal(chapter, chapter_name)
@step(u'I should see a textbook named "([^"]*)" with (\d+) chapters')
def check_textbook_chapters(_step, textbook_name, num_chapters_str):
num_chapters = int(num_chapters_str)
title = world.css_text(".textbook .view-textbook h3.textbook-title", index=0)
toggle_text = world.css_text(".textbook .view-textbook .chapter-toggle", index=0)
assert_equal(title, textbook_name)
assert_equal(
toggle_text,
"{num} PDF Chapters".format(num=num_chapters),
"Expected {num} chapters, found {real}".format(num=num_chapters, real=toggle_text)
)
@step(u'I click the textbook chapters')
def click_chapters(_step):
world.css_click(".textbook a.chapter-toggle")
@step(u'the (first|second|third) chapter should be named "([^"]*)"')
def check_chapter_name(_step, ordinal, name):
index = ["first", "second", "third"].index(ordinal)
chapter = world.css_find(".textbook .view-textbook ol.chapters li")[index]
element = chapter.find_by_css(".chapter-name")
assert element.text == name, "Expected chapter named {expected}, found chapter named {actual}".format(
expected=name, actual=element.text)
@step(u'the (first|second|third) chapter should have an asset called "([^"]*)"')
def check_chapter_asset(_step, ordinal, name):
index = ["first", "second", "third"].index(ordinal)
chapter = world.css_find(".textbook .view-textbook ol.chapters li")[index]
element = chapter.find_by_css(".chapter-asset-path")
assert element.text == name, "Expected chapter with asset {expected}, found chapter with asset {actual}".format(
expected=name, actual=element.text)
|
google-code-export/pyglet
|
refs/heads/master
|
pyglet/libs/darwin/constants.py
|
46
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Carbon and Core Foundation constants used by pyglet's Mac OS X (darwin)
platform support.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
# CFString.h
kCFStringEncodingMacRoman = 0
kCFStringEncodingWindowsLatin1 = 0x0500
kCFStringEncodingISOLatin1 = 0x0201
kCFStringEncodingNextStepLatin = 0x0B01
kCFStringEncodingASCII = 0x0600
kCFStringEncodingUnicode = 0x0100
kCFStringEncodingUTF8 = 0x08000100
kCFStringEncodingNonLossyASCII = 0x0BFF
# MacTypes.h
noErr = 0
# CarbonEventsCore.h
eventLoopTimedOutErr = -9875
eventLoopQuitErr = -9876
kEventPriorityStandard = 1
# MacApplication.h
kUIModeNormal = 0
kUIModeContentSuppressed = 1
kUIModeContentHidden = 2
kUIModeAllSuppressed = 4
kUIModeAllHidden = 3
kUIOptionAutoShowMenuBar = 1 << 0
kUIOptionDisableAppleMenu = 1 << 2
kUIOptionDisableProcessSwitch = 1 << 3
kUIOptionDisableForceQuit = 1 << 4
kUIOptionDisableSessionTerminate = 1 << 5
kUIOptionDisableHide = 1 << 6
# MacWindows.h
kAlertWindowClass = 1
kMovableAlertWindowClass = 2
kModalWindowClass = 3
kMovableModalWindowClass = 4
kFloatingWindowClass = 5
kDocumentWindowClass = 6
kUtilityWindowClass = 8
kHelpWindowClass = 10
kSheetWindowClass = 11
kToolbarWindowClass = 12
kPlainWindowClass = 13
kOverlayWindowClass = 14
kSheetAlertWindowClass = 15
kAltPlainWindowClass = 16
kSimpleWindowClass = 18 # no window frame
kDrawerWindowClass = 20
kWindowNoAttributes = 0x0
kWindowCloseBoxAttribute = 0x1
kWindowHorizontalZoomAttribute = 0x2
kWindowVerticalZoomAttribute = 0x4
kWindowFullZoomAttribute = kWindowHorizontalZoomAttribute | \
kWindowVerticalZoomAttribute
kWindowCollapseBoxAttribute = 0x8
kWindowResizableAttribute = 0x10
kWindowSideTitlebarAttribute = 0x20
kWindowToolbarAttribute = 0x40
kWindowMetalAttribute = 1 << 8
kWindowDoesNotCycleAttribute = 1 << 15
kWindowNoUpdatesAttribute = 1 << 16
kWindowNoActivatesAttribute = 1 << 17
kWindowOpaqueForEventsAttribute = 1 << 18
kWindowCompositingAttribute = 1 << 19
kWindowNoShadowAttribute = 1 << 21
kWindowHideOnSuspendAttribute = 1 << 24
kWindowAsyncDragAttribute = 1 << 23
kWindowStandardHandlerAttribute = 1 << 25
kWindowHideOnFullScreenAttribute = 1 << 26
kWindowInWindowMenuAttribute = 1 << 27
kWindowLiveResizeAttribute = 1 << 28
kWindowIgnoreClicksAttribute = 1 << 29
kWindowNoConstrainAttribute = 1 << 31
kWindowStandardDocumentAttributes = kWindowCloseBoxAttribute | \
kWindowFullZoomAttribute | \
kWindowCollapseBoxAttribute | \
kWindowResizableAttribute
kWindowStandardFloatingAttributes = kWindowCloseBoxAttribute | \
kWindowCollapseBoxAttribute
kWindowCenterOnMainScreen = 1
kWindowCenterOnParentWindow = 2
kWindowCenterOnParentWindowScreen = 3
kWindowCascadeOnMainScreen = 4
kWindowCascadeOnParentWindow = 5
kWindowCascadeOnParentWindowScreen = 6
kWindowCascadeStartAtParentWindowScreen = 10
kWindowAlertPositionOnMainScreen = 7
kWindowAlertPositionOnParentWindow = 8
kWindowAlertPositionOnParentWindowScreen = 9
kWindowTitleBarRgn = 0
kWindowTitleTextRgn = 1
kWindowCloseBoxRgn = 2
kWindowZoomBoxRgn = 3
kWindowDragRgn = 5
kWindowGrowRgn = 6
kWindowCollapseBoxRgn = 7
kWindowTitleProxyIconRgn = 8
kWindowStructureRgn = 32
kWindowContentRgn = 33
kWindowUpdateRgn = 34
kWindowOpaqueRgn = 35
kWindowGlobalPortRgn = 40
kWindowToolbarButtonRgn = 41
inDesk = 0
inNoWindow = 0
inMenuBar = 1
inSysWindow = 2
inContent = 3
inDrag = 4
inGrow = 5
inGoAway = 6
inZoomIn = 7
inZoomOut = 8
inCollapseBox = 11
inProxyIcon = 12
inToolbarButton = 13
inStructure = 15
def _name(name):
return ord(name[0]) << 24 | \
ord(name[1]) << 16 | \
ord(name[2]) << 8 | \
ord(name[3])
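# _name() packs a four-character code into a big-endian 32-bit integer, the
# layout Carbon uses for OSType values; for example _name('TEXT') == 0x54455854.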
# AEDataModel.h
typeBoolean = _name('bool')
typeChar = _name('TEXT')
typeSInt16 = _name('shor')
typeSInt32 = _name('long')
typeUInt32 = _name('magn')
typeSInt64 = _name('comp')
typeIEEE32BitFloatingPoint = _name('sing')
typeIEEE64BitFloatingPoint = _name('doub')
type128BitFloatingPoint = _name('ldbl')
typeDecimalStruct = _name('decm')
# AERegistry.h
typeUnicodeText = _name('utxt')
typeStyledUnicodeText = _name('sutx')
typeUTF8Text = _name('utf8')
typeEncodedString = _name('encs')
typeCString = _name('cstr')
typePString = _name('pstr')
typeEventRef = _name('evrf')
# CarbonEvents.h
kEventParamWindowRef = _name('wind')
kEventParamWindowPartCode = _name('wpar')
kEventParamGrafPort = _name('graf')
kEventParamMenuRef = _name('menu')
kEventParamEventRef = _name('evnt')
kEventParamControlRef = _name('ctrl')
kEventParamRgnHandle = _name('rgnh')
kEventParamEnabled = _name('enab')
kEventParamDimensions = _name('dims')
kEventParamBounds = _name('boun')
kEventParamAvailableBounds = _name('avlb')
#kEventParamAEEventID = keyAEEventID
#kEventParamAEEventClass = keyAEEventClass
kEventParamCGContextRef = _name('cntx')
kEventParamDeviceDepth = _name('devd')
kEventParamDeviceColor = _name('devc')
kEventParamMutableArray = _name('marr')
kEventParamResult = _name('ansr')
kEventParamMinimumSize = _name('mnsz')
kEventParamMaximumSize = _name('mxsz')
kEventParamAttributes = _name('attr')
kEventParamReason = _name('why?')
kEventParamTransactionID = _name('trns')
kEventParamGDevice = _name('gdev')
kEventParamIndex = _name('indx')
kEventParamUserData = _name('usrd')
kEventParamShape = _name('shap')
typeWindowRef = _name('wind')
typeWindowPartCode = _name('wpar')
typeGrafPtr = _name('graf')
typeGWorldPtr = _name('gwld')
typeMenuRef = _name('menu')
typeControlRef = _name('ctrl')
typeCollection = _name('cltn')
typeQDRgnHandle = _name('rgnh')
typeOSStatus = _name('osst')
typeCFIndex = _name('cfix')
typeCGContextRef = _name('cntx')
typeQDPoint = _name('QDpt')
typeHICommand = _name('hcmd')
typeHIPoint = _name('hipt')
typeHISize = _name('hisz')
typeHIRect = _name('hirc')
typeHIShapeRef = _name('shap')
typeVoidPtr = _name('void')
typeGDHandle = _name('gdev')
kCoreEventClass = _name('aevt')
kEventClassMouse = _name('mous')
kEventClassKeyboard = _name('keyb')
kEventClassTextInput = _name('text')
kEventClassApplication = _name('appl')
kEventClassAppleEvent = _name('eppc')
kEventClassMenu = _name('menu')
kEventClassWindow = _name('wind')
kEventClassControl = _name('cntl')
kEventClassCommand = _name('cmds')
kEventClassTablet = _name('tblt')
kEventClassVolume = _name('vol ')
kEventClassAppearance = _name('appm')
kEventClassService = _name('serv')
kEventClassToolbar = _name('tbar')
kEventClassToolbarItem = _name('tbit')
kEventClassToolbarItemView = _name('tbiv')
kEventClassAccessibility = _name('acce')
kEventClassSystem = _name('macs')
kEventClassInk = _name('ink ')
kEventClassTSMDocumentAccess = _name('tdac')
kEventDurationForever = -1.0
# Appearance.h
kThemeArrowCursor = 0
kThemeCopyArrowCursor = 1
kThemeAliasArrowCursor = 2
kThemeContextualMenuArrowCursor = 3
kThemeIBeamCursor = 4
kThemeCrossCursor = 5
kThemePlusCursor = 6
kThemeWatchCursor = 7
kThemeClosedHandCursor = 8
kThemeOpenHandCursor = 9
kThemePointingHandCursor = 10
kThemeCountingUpHandCursor = 11
kThemeCountingDownHandCursor = 12
kThemeCountingUpAndDownHandCursor = 13
kThemeSpinningCursor = 14
kThemeResizeLeftCursor = 15
kThemeResizeRightCursor = 16
kThemeResizeLeftRightCursor = 17
kThemeNotAllowedCursor = 18
kThemeResizeUpCursor = 19
kThemeResizeDownCursor = 20
kThemeResizeUpDownCursor = 21
kThemePoofCursor = 22
# AE
kEventAppleEvent = 1
kEventAppQuit = 3
kAEQuitApplication = _name('quit')
# Commands
kEventProcessCommand = 1
kEventParamHICommand = _name('hcmd')
kEventParamDirectObject = _name('----')
kHICommandQuit = _name('quit')
# Keyboard
kEventRawKeyDown = 1
kEventRawKeyRepeat = 2
kEventRawKeyUp = 3
kEventRawKeyModifiersChanged = 4
kEventHotKeyPressed = 5
kEventHotKeyReleased = 6
kEventParamKeyCode = _name('kcod')
kEventParamKeyMacCharCodes = _name('kchr')
kEventParamKeyModifiers = _name('kmod')
kEventParamKeyUnicodes = _name('kuni')
kEventParamKeyboardType = _name('kbdt')
typeEventHotKeyID = _name('hkid')
activeFlagBit = 0
btnStateBit = 7
cmdKeyBit = 8
shiftKeyBit = 9
alphaLockBit = 10
optionKeyBit = 11
controlKeyBit = 12
rightShiftKeyBit = 13
rightOptionKeyBit = 14
rightControlKeyBit = 15
numLockBit = 16
activeFlag = 1 << activeFlagBit
btnState = 1 << btnStateBit
cmdKey = 1 << cmdKeyBit
shiftKey = 1 << shiftKeyBit
alphaLock = 1 << alphaLockBit
optionKey = 1 << optionKeyBit
controlKey = 1 << controlKeyBit
rightShiftKey = 1 << rightShiftKeyBit
rightOptionKey = 1 << rightOptionKeyBit
rightControlKey = 1 << rightControlKeyBit
numLock = 1 << numLockBit
# TextInput
kEventTextInputUpdateActiveInputArea = 1
kEventTextInputUnicodeForKeyEvent = 2
kEventTextInputOffsetToPos = 3
kEventTextInputPosToOffset = 4
kEventTextInputShowHideBottomWindow = 5
kEventTextInputGetSelectedText = 6
kEventTextInputUnicodeText = 7
kEventParamTextInputSendText = _name('tstx')
kEventParamTextInputSendKeyboardEvent = _name('tske')
# Mouse
kEventMouseDown = 1
kEventMouseUp = 2
kEventMouseMoved = 5
kEventMouseDragged = 6
kEventMouseEntered = 8
kEventMouseExited = 9
kEventMouseWheelMoved = 10
kEventParamMouseLocation = _name('mloc')
kEventParamWindowMouseLocation = _name('wmou')
kEventParamMouseButton = _name('mbtn')
kEventParamClickCount = _name('ccnt')
kEventParamMouseWheelAxis = _name('mwax')
kEventParamMouseWheelDelta = _name('mwdl')
kEventParamMouseDelta = _name('mdta')
kEventParamMouseChord = _name('chor')
kEventParamTabletEventType = _name('tblt')
kEventParamMouseTrackingRef = _name('mtrf')
typeMouseButton = _name('mbtn')
typeMouseWheelAxis = _name('mwax')
typeMouseTrackingRef = _name('mtrf')
kMouseTrackingOptionsLocalClip = 0
kMouseTrackingOptionsGlobalClip = 1
kEventMouseButtonPrimary = 1
kEventMouseButtonSecondary = 2
kEventMouseButtonTertiary = 3
kEventMouseWheelAxisX = 0
kEventMouseWheelAxisY = 1
DEFAULT_CREATOR_CODE = _name('PYGL') # <ah> this is registered for Pyglet
# apps. register your own at:
# http://developer.apple.com/datatype
# Window
kEventWindowUpdate = 1
kEventWindowDrawContent = 2
# -- window activation events --
kEventWindowActivated = 5
kEventWindowDeactivated = 6
kEventWindowHandleActivate = 91
kEventWindowHandleDeactivate = 92
kEventWindowGetClickActivation = 7
kEventWindowGetClickModality = 8
# -- window state change events --
kEventWindowShowing = 22
kEventWindowHiding = 23
kEventWindowShown = 24
kEventWindowHidden = 25
kEventWindowCollapsing = 86
kEventWindowCollapsed = 67
kEventWindowExpanding = 87
kEventWindowExpanded = 70
kEventWindowZoomed = 76
kEventWindowBoundsChanging = 26
kEventWindowBoundsChanged = 27
kEventWindowResizeStarted = 28
kEventWindowResizeCompleted = 29
kEventWindowDragStarted = 30
kEventWindowDragCompleted = 31
kEventWindowClosed = 73
kEventWindowTransitionStarted = 88
kEventWindowTransitionCompleted = 89
# -- window click events --
kEventWindowClickDragRgn = 32
kEventWindowClickResizeRgn = 33
kEventWindowClickCollapseRgn = 34
kEventWindowClickCloseRgn = 35
kEventWindowClickZoomRgn = 36
kEventWindowClickContentRgn = 37
kEventWindowClickProxyIconRgn = 38
kEventWindowClickToolbarButtonRgn = 41
kEventWindowClickStructureRgn = 42
# -- window cursor change events --
kEventWindowCursorChange = 40
# -- window action events --
kEventWindowCollapse = 66
kEventWindowCollapsed = 67
kEventWindowCollapseAll = 68
kEventWindowExpand = 69
kEventWindowExpanded = 70
kEventWindowExpandAll = 71
kEventWindowClose = 72
kEventWindowClosed = 73
kEventWindowCloseAll = 74
kEventWindowZoom = 75
kEventWindowZoomed = 76
kEventWindowZoomAll = 77
kEventWindowContextualMenuSelect = 78
kEventWindowPathSelect = 79
kEventWindowGetIdealSize = 80
kEventWindowGetMinimumSize = 81
kEventWindowGetMaximumSize = 82
kEventWindowConstrain = 83
kEventWindowHandleContentClick = 85
kEventWindowCollapsing = 86
kEventWindowExpanding = 87
kEventWindowTransitionStarted = 88
kEventWindowTransitionCompleted = 89
kEventWindowGetDockTileMenu = 90
kEventWindowHandleActivate = 91
kEventWindowHandleDeactivate = 92
kEventWindowProxyBeginDrag = 128
kEventWindowProxyEndDrag = 129
kEventWindowToolbarSwitchMode = 150
# -- window focus events --
kEventWindowFocusAcquired = 200
kEventWindowFocusRelinquish = 201
kEventWindowFocusContent = 202
kEventWindowFocusToolbar = 203
kEventWindowFocusDrawer = 204
# -- sheet events --
kEventWindowSheetOpening = 210
kEventWindowSheetOpened = 211
kEventWindowSheetClosing = 212
kEventWindowSheetClosed = 213
# -- drawer events --
kEventWindowDrawerOpening = 220
kEventWindowDrawerOpened = 221
kEventWindowDrawerClosing = 222
kEventWindowDrawerClosed = 223
# -- window definition events --
kEventWindowDrawFrame = 1000
kEventWindowDrawPart = 1001
kEventWindowGetRegion = 1002
kEventWindowHitTest = 1003
kEventWindowInit = 1004
kEventWindowDispose = 1005
kEventWindowDragHilite = 1006
kEventWindowModified = 1007
kEventWindowSetupProxyDragImage = 1008
kEventWindowStateChanged = 1009
kEventWindowMeasureTitle = 1010
kEventWindowDrawGrowBox = 1011
kEventWindowGetGrowImageRegion = 1012
kEventWindowPaint = 1013
# Process.h
kNoProcess = 0
kSystemProcess = 1
kCurrentProcess = 2
# CGColorSpace.h
kCGRenderingIntentDefault = 0
# CGImage.h
kCGImageAlphaNone = 0
kCGImageAlphaPremultipliedLast = 1
kCGImageAlphaPremultipliedFirst = 2
kCGImageAlphaLast = 3
kCGImageAlphaFirst = 4
kCGImageAlphaNoneSkipLast = 5
kCGImageAlphaNoneSkipFirst = 6
kCGImageAlphaOnly = 7
# Tablet
kEventTabletPoint = 1
kEventTabletProximity = 2
kEventParamTabletPointRec = _name('tbrc')
kEventParamTabletProximityRec = _name('tbpx')
typeTabletPointRec = _name('tbrc')
typeTabletProximityRec = _name('tbpx')
|
leekchan/djangae
|
refs/heads/master
|
djangae/contrib/gauth/datastore/backends.py
|
14
|
# STANDARD LIB
from itertools import chain
# DJANGAE
from djangae.db import transaction
from djangae.contrib.gauth.common.backends import BaseAppEngineUserAPIBackend
from djangae.contrib.gauth.datastore.permissions import get_permission_choices
class AppEngineUserAPIBackend(BaseAppEngineUserAPIBackend):
atomic = transaction.atomic
atomic_kwargs = {'xg': True}
def get_group_permissions(self, user_obj, obj=None):
"""
Returns a set of permission strings that this user has through his/her
groups.
"""
if user_obj.is_anonymous() or obj is not None:
return set()
if not hasattr(user_obj, '_group_perm_cache'):
if user_obj.is_superuser:
perms = (perm for perm, name in get_permission_choices())
else:
perms = chain.from_iterable((group.permissions for group in user_obj.groups.all()))
user_obj._group_perm_cache = set(perms)
return user_obj._group_perm_cache
def get_all_permissions(self, user_obj, obj=None):
if user_obj.is_anonymous() or obj is not None:
return set()
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = set(user_obj.user_permissions)
user_obj._perm_cache.update(self.get_group_permissions(user_obj))
return user_obj._perm_cache
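# A framework-free sketch (hypothetical class, not part of djangae) of the
# attribute-caching pattern both methods above rely on: compute the
# permission set once per object, stash it on the instance, and reuse it.
class _CachedPermsExample(object):
    def __init__(self, perms):
        self._source_perms = perms

    def get_all_permissions(self):
        if not hasattr(self, '_perm_cache'):
            self._perm_cache = set(self._source_perms)
        return self._perm_cache

# e.g. _CachedPermsExample(['app.add_thing']).get_all_permissions()
# returns set(['app.add_thing']); a second call skips the set() rebuild.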
|
evansd/django
|
refs/heads/master
|
tests/flatpages_tests/test_middleware.py
|
130
|
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import TestCase, modify_settings, override_settings
from .settings import FLATPAGES_TEMPLATES
class TestDataMixin:
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
cls.fp1 = FlatPage.objects.create(
url='/flatpage/', title='A Flatpage', content="Isn't it flat!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp2 = FlatPage.objects.create(
url='/location/flatpage/', title='A Nested Flatpage', content="Isn't it flat and deep!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp3 = FlatPage.objects.create(
url='/sekrit/', title='Sekrit Flatpage', content="Isn't it sekrit!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp4 = FlatPage.objects.create(
url='/location/sekrit/', title='Sekrit Nested Flatpage', content="Isn't it sekrit and deep!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp1.sites.add(cls.site1)
cls.fp2.sites.add(cls.site1)
cls.fp3.sites.add(cls.site1)
cls.fp4.sites.add(cls.site1)
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
LOGIN_URL='/accounts/login/',
MIDDLEWARE=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageMiddlewareTests(TestDataMixin, TestCase):
def test_view_flatpage(self):
"A flatpage can be served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/flatpage/')
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"""
A nonexistent flatpage raises 404 when served through a view, even when
the middleware is in use.
"""
response = self.client.get('/flatpage_root/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get('/flatpage_root/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
user = User.objects.create_user('testuser', 'test@example.com', 's3krit')
self.client.force_login(user)
response = self.client.get('/flatpage_root/sekrit/')
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A flatpage can be served by the fallback middleware"
response = self.client.get('/flatpage/')
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_fallback_non_existent_flatpage(self):
"""
A nonexistent flatpage raises a 404 when served by the fallback
middleware.
"""
response = self.client.get('/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_fallback_authenticated_flatpage(self):
"A flatpage served by the middleware can require authentication"
response = self.client.get('/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/sekrit/')
user = User.objects.create_user('testuser', 'test@example.com', 's3krit')
self.client.force_login(user)
response = self.client.get('/sekrit/')
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served by the fallback middleware"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/some.very_special~chars-here/')
self.assertContains(response, "<p>Isn't it special!</p>")
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
APPEND_SLASH=True,
LOGIN_URL='/accounts/login/',
MIDDLEWARE=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageMiddlewareAppendSlashTests(TestDataMixin, TestCase):
def test_redirect_view_flatpage(self):
"A flatpage can be served through a view and should add a slash"
response = self.client.get('/flatpage_root/flatpage')
self.assertRedirects(response, '/flatpage_root/flatpage/', status_code=301)
def test_redirect_view_non_existent_flatpage(self):
"""
A nonexistent flatpage raises 404 when served through a view and
should not add a slash.
"""
response = self.client.get('/flatpage_root/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage(self):
"A flatpage can be served by the fallback middleware and should add a slash"
response = self.client.get('/flatpage')
self.assertRedirects(response, '/flatpage/', status_code=301)
def test_redirect_fallback_non_existent_flatpage(self):
"""
A nonexistent flatpage raises a 404 when served by the fallback
middleware and should not add a slash.
"""
response = self.client.get('/no_such_flatpage')
self.assertEqual(response.status_code, 404)
def test_redirect_fallback_flatpage_special_chars(self):
"A flatpage with special chars in the URL can be served by the fallback middleware and should add a slash"
fp = FlatPage.objects.create(
url="/some.very_special~chars-here/",
title="A very special page",
content="Isn't it special!",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/some.very_special~chars-here')
self.assertRedirects(response, '/some.very_special~chars-here/', status_code=301)
def test_redirect_fallback_flatpage_root(self):
"A flatpage at / should not cause a redirect loop when APPEND_SLASH is set"
fp = FlatPage.objects.create(
url="/",
title="Root",
content="Root",
enable_comments=False,
registration_required=False,
)
fp.sites.add(settings.SITE_ID)
response = self.client.get('/')
self.assertContains(response, "<p>Root</p>")
|
simonwydooghe/ansible
|
refs/heads/devel
|
test/units/module_utils/facts/system/distribution/test_parse_distribution_file_ClearLinux.py
|
44
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from units.compat.mock import Mock
from ansible.module_utils.facts.system.distribution import DistributionFiles
def mock_module():
mock_module = Mock()
mock_module.params = {'gather_subset': ['all'],
'gather_timeout': 5,
'filter': '*'}
mock_module.get_bin_path = Mock(return_value=None)
return mock_module
def test_parse_distribution_file_clear_linux():
test_input = {
'name': 'Clearlinux',
'data': 'NAME="Clear Linux OS"\nVERSION=1\nID=clear-linux-os\nID_LIKE=clear-linux-os\nVERSION_ID=28120\nPRETTY_NAME="Clear Linux OS"\nANSI_COLOR="1;35"'
'\nHOME_URL="https://clearlinux.org"\nSUPPORT_URL="https://clearlinux.org"\nBUG_REPORT_URL="mailto:dev@lists.clearlinux.org"',
'path': '/usr/lib/os-release',
'collected_facts': None,
}
result = (
True,
{
'distribution': 'Clear Linux OS',
'distribution_major_version': '28120',
'distribution_release': 'clear-linux-os',
'distribution_version': '28120'
}
)
distribution = DistributionFiles(module=mock_module())
assert result == distribution.parse_distribution_file_ClearLinux(**test_input)
def test_parse_distribution_file_clear_linux_no_match():
# Test against data from Linux Mint and CoreOS to ensure we do not get a reported
# match from parse_distribution_file_ClearLinux()
scenarios = [
{
# CoreOS
'case': {
'name': 'Clearlinux',
'data': 'NAME="Container Linux by CoreOS"\nID=coreos\nVERSION=1911.5.0\nVERSION_ID=1911.5.0\nBUILD_ID=2018-12-15-2317\nPRETTY_NAME="Container L'
'inux by CoreOS 1911.5.0 (Rhyolite)"\nANSI_COLOR="38;5;75"\nHOME_URL="https://coreos.com/"\nBUG_REPORT_URL="https://issues.coreos.com"'
'\nCOREOS_BOARD="amd64-usr"',
'path': '/usr/lib/os-release',
'collected_facts': None,
},
'result': (False, {}),
},
{
# Linux Mint
'case': {
'name': 'Clearlinux',
'data': 'NAME="Linux Mint"\nVERSION="19.1 (Tessa)"\nID=linuxmint\nID_LIKE=ubuntu\nPRETTY_NAME="Linux Mint 19.1"\nVERSION_ID="19.1"\nHOME_URL="h'
'ttps://www.linuxmint.com/"\nSUPPORT_URL="https://forums.ubuntu.com/"\nBUG_REPORT_URL="http://linuxmint-troubleshooting-guide.readthedo'
'cs.io/en/latest/"\nPRIVACY_POLICY_URL="https://www.linuxmint.com/"\nVERSION_CODENAME=tessa\nUBUNTU_CODENAME=bionic',
'path': '/usr/lib/os-release',
'collected_facts': None,
},
'result': (False, {}),
},
]
distribution = DistributionFiles(module=mock_module())
for scenario in scenarios:
assert scenario['result'] == distribution.parse_distribution_file_ClearLinux(**scenario['case'])
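# A standalone sketch (hypothetical helper, not the Ansible implementation)
# of turning os-release style data like the fixtures above into a dict,
# which is roughly what the distribution parsers match against.
def _parse_os_release(data):
    facts = {}
    for line in data.splitlines():
        if '=' not in line:
            continue
        key, _, value = line.partition('=')
        facts[key.strip()] = value.strip().strip('"')
    return facts

# e.g. _parse_os_release('NAME="Clear Linux OS"\nVERSION_ID=28120')
# -> {'NAME': 'Clear Linux OS', 'VERSION_ID': '28120'}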
|
c0710204/mirrorsBistu
|
refs/heads/master
|
pypi/bandersnatch/lib/python2.7/site-packages/pip/download.py
|
78
|
import cgi
import getpass
import hashlib
import mimetypes
import os
import platform
import re
import shutil
import socket
import ssl
import sys
import tempfile
import pip
from pip.backwardcompat import (urllib, urllib2, httplib,
urlparse, string_types, get_http_message_param,
match_hostname, CertificateError)
from pip.exceptions import InstallationError, HashMismatch
from pip.util import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
create_download_cache_folder, cache_download)
from pip.vcs import vcs
from pip.log import logger
from pip.locations import default_cert_path
__all__ = ['get_file_content', 'urlopen',
'is_url', 'url_to_path', 'path_to_url', 'path_to_url2',
'geturl', 'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url', 'unpack_http_url']
def build_user_agent():
"""Return a string representing the user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([_implementation_version, sys.pypy_version_info.releaselevel])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['pip/%s' % pip.__version__,
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
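# A small sketch (not pip code) of the shape build_user_agent() returns --
# "pip/<version> <implementation>/<version> <system>/<release>" -- and one
# way to sanity-check it, e.g. when filtering server logs:
def _looks_like_pip_user_agent(ua):
    parts = ua.split(' ')
    return len(parts) == 3 and parts[0].startswith('pip/')

# e.g. _looks_like_pip_user_agent(build_user_agent()) is normally True,
# assuming platform.system()/platform.release() contain no spaces.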
def get_file_content(url, comes_from=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from
and comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
## FIXME: catch some errors
resp = urlopen(url)
encoding = get_http_message_param(resp.headers, 'charset', 'utf-8')
return geturl(resp), resp.read().decode(encoding)
try:
f = open(url)
content = f.read()
except IOError:
e = sys.exc_info()[1]
raise InstallationError('Could not open requirements file: %s' % str(e))
else:
f.close()
return url, content
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
class VerifiedHTTPSConnection(httplib.HTTPSConnection):
"""
A connection that wraps connections with ssl certificate verification.
"""
def connect(self):
self.connection_kwargs = {}
#TODO: refactor compatibility logic into backwardcompat?
# for > py2.5
if hasattr(self, 'timeout'):
self.connection_kwargs.update(timeout=self.timeout)
# for >= py2.7
if hasattr(self, 'source_address'):
self.connection_kwargs.update(source_address=self.source_address)
sock = socket.create_connection((self.host, self.port), **self.connection_kwargs)
# for >= py2.7
if getattr(self, '_tunnel_host', None):
self.sock = sock
self._tunnel()
# get alternate bundle or use our included bundle
cert_path = os.environ.get('PIP_CERT', '') or default_cert_path
self.sock = ssl.wrap_socket(sock,
self.key_file,
self.cert_file,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=cert_path)
try:
match_hostname(self.sock.getpeercert(), self.host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class VerifiedHTTPSHandler(urllib2.HTTPSHandler):
"""
A HTTPSHandler that uses our own VerifiedHTTPSConnection.
"""
def __init__(self, connection_class=VerifiedHTTPSConnection):
self.specialized_conn_class = connection_class
urllib2.HTTPSHandler.__init__(self)
def https_open(self, req):
return self.do_open(self.specialized_conn_class, req)
class URLOpener(object):
"""
pip's own URL helper that adds HTTP auth and proxy support
"""
def __init__(self):
self.passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
self.proxy_handler = None
def __call__(self, url):
"""
If the given url contains auth info or if a normal request gets a 401
response, an attempt is made to fetch the resource using basic HTTP
auth.
"""
url, username, password, scheme = self.extract_credentials(url)
if username is None:
try:
response = self.get_opener(scheme=scheme).open(url)
except urllib2.HTTPError:
e = sys.exc_info()[1]
if e.code != 401:
raise
response = self.get_response(url)
else:
response = self.get_response(url, username, password)
return response
def get_request(self, url):
"""
Wraps the URL to retrieve, to protect against "creative"
interpretation of the RFC: http://bugs.python.org/issue8732
"""
if isinstance(url, string_types):
url = urllib2.Request(url, headers={'Accept-encoding': 'identity'})
return url
def get_response(self, url, username=None, password=None):
"""
Does the dirty work of actually getting the response object using
urllib2 and its HTTP auth builtins.
"""
scheme, netloc, path, query, frag = urlparse.urlsplit(url)
req = self.get_request(url)
stored_username, stored_password = self.passman.find_user_password(None, netloc)
# see if we have a password stored
if stored_username is None:
if username is None and self.prompting:
username = urllib.quote(raw_input('User for %s: ' % netloc))
password = urllib.quote(getpass.getpass('Password: '))
if username and password:
self.passman.add_password(None, netloc, username, password)
stored_username, stored_password = self.passman.find_user_password(None, netloc)
authhandler = urllib2.HTTPBasicAuthHandler(self.passman)
opener = self.get_opener(authhandler, scheme=scheme)
# FIXME: should catch a 401 and offer to let the user reenter credentials
return opener.open(req)
def get_opener(self, *args, **kwargs):
"""
Build an OpenerDirector instance based on the scheme and proxy option
"""
args = list(args)
if self.proxy_handler:
args.extend([self.proxy_handler, urllib2.CacheFTPHandler])
if kwargs.get('scheme') == 'https':
https_handler = VerifiedHTTPSHandler()
director = urllib2.build_opener(https_handler, *args)
# strip out HTTPHandler to prevent MITM spoofing
for handler in director.handlers:
if isinstance(handler, urllib2.HTTPHandler):
director.handlers.remove(handler)
else:
director = urllib2.build_opener(*args)
# Add our new headers to the opener
headers = [x for x in director.addheaders if x[0].lower() != "user-agent"]
headers.append(("User-agent", build_user_agent()))
director.addheaders = headers
return director
def setup(self, proxystr='', prompting=True):
"""
Sets the proxy handler given the option passed on the command
line. If an empty string is passed it looks at the HTTP_PROXY
environment variable.
"""
self.prompting = prompting
proxy = self.get_proxy(proxystr)
if proxy:
self.proxy_handler = urllib2.ProxyHandler({"http": proxy, "ftp": proxy, "https": proxy})
def parse_credentials(self, netloc):
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)
return userinfo, None
return None, None
def extract_credentials(self, url):
"""
Extracts user/password from a url.
Returns a tuple:
(url-without-auth, username, password, scheme)
"""
if isinstance(url, urllib2.Request):
result = urlparse.urlsplit(url.get_full_url())
else:
result = urlparse.urlsplit(url)
scheme, netloc, path, query, frag = result
username, password = self.parse_credentials(netloc)
if username is None:
return url, None, None, scheme
elif password is None and self.prompting:
# remove the auth credentials from the url part
netloc = netloc.replace('%s@' % username, '', 1)
# prompt for the password
prompt = 'Password for %s@%s: ' % (username, netloc)
password = urllib.quote(getpass.getpass(prompt))
else:
# remove the auth credentials from the url part
netloc = netloc.replace('%s:%s@' % (username, password), '', 1)
target_url = urlparse.urlunsplit((scheme, netloc, path, query, frag))
return target_url, username, password, scheme
def get_proxy(self, proxystr=''):
"""
Get the proxy given the option passed on the command line.
If an empty string is passed it looks at the HTTP_PROXY
environment variable.
"""
if not proxystr:
proxystr = os.environ.get('HTTP_PROXY', '')
if proxystr:
if '@' in proxystr:
user_password, server_port = proxystr.split('@', 1)
if ':' in user_password:
user, password = user_password.split(':', 1)
else:
user = user_password
prompt = 'Password for %s@%s: ' % (user, server_port)
password = urllib.quote(getpass.getpass(prompt))
return '%s:%s@%s' % (user, password, server_port)
else:
return proxystr
else:
return None
urlopen = URLOpener()
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
path = url[len('file:'):].lstrip('/')
path = urllib.unquote(path)
if _url_drive_re.match(path):
path = path[0] + ':' + path[2:]
else:
path = '/' + path
return path
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute.
"""
path = os.path.normcase(os.path.abspath(path))
if _drive_re.match(path):
path = path[0] + '|' + path[2:]
url = urllib.quote(path)
url = url.replace(os.path.sep, '/')
url = url.lstrip('/')
return 'file:///' + url
def path_to_url2(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
drive, path = os.path.splitdrive(path)
filepath = path.split(os.path.sep)
url = '/'.join([urllib.quote(part) for part in filepath])
if not drive:
url = url.lstrip('/')
return 'file:///' + drive + url
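# A POSIX-only sketch (not pip code) showing that url_to_path() and
# path_to_url2() round-trip an absolute, already-normalized path:
def _roundtrip_ok(path):
    return url_to_path(path_to_url2(path)) == path

# e.g. _roundtrip_ok('/etc/hosts') -> True. Windows drive letters follow
# the _drive_re/_url_drive_re handling above and are not covered here.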
def geturl(urllib2_resp):
"""
Use instead of urllib.addinfourl.geturl(), which appears to have
some issues with dropping the double slash for certain schemes
(e.g. file://). This implementation is probably over-eager, as it
always restores '://' if it is missing, and it appears some url
schemata aren't always followed by '//' after the colon, but as
far as I know pip doesn't need any of those.
The URI RFC can be found at: http://tools.ietf.org/html/rfc1630
This function assumes that
scheme:/foo/bar
is the same as
scheme:///foo/bar
"""
url = urllib2_resp.geturl()
scheme, rest = url.split(':', 1)
if rest.startswith('//'):
return url
else:
# FIXME: write a good test to cover it
return '%s://%s' % (scheme, rest)
def is_archive_file(name):
"""Return True if `name` is a considered as an archive file."""
archives = ('.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.pybundle',
'.whl')
ext = splitext(name)[1].lower()
if ext in archives:
return True
return False
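# Illustrative values (not part of pip); note that pip.util.splitext is
# assumed to treat '.tar.gz' and '.tar.bz2' as single extensions, unlike
# os.path.splitext:
#
#   is_archive_file('foo-1.0.tar.gz')  -> True
#   is_archive_file('foo-1.0.whl')     -> True
#   is_archive_file('foo-1.0.txt')     -> False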
def unpack_vcs_link(link, location, only_download=False):
vcs_backend = _get_used_vcs_backend(link)
if only_download:
vcs_backend.export(location)
else:
vcs_backend.unpack(location)
def unpack_file_url(link, location):
source = url_to_path(link.url)
content_type = mimetypes.guess_type(source)[0]
if os.path.isdir(source):
# delete the location since shutil will create it again :(
if os.path.isdir(location):
rmtree(location)
shutil.copytree(source, location)
else:
unpack_file(source, location, content_type, link)
def _get_used_vcs_backend(link):
for backend in vcs.backends:
if link.scheme in backend.schemes:
vcs_backend = backend(link.url)
return vcs_backend
def is_vcs_url(link):
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
return link.url.lower().startswith('file:')
def _check_hash(download_hash, link):
if download_hash.digest_size != hashlib.new(link.hash_name).digest_size:
logger.fatal("Hash digest size of the package %d (%s) doesn't match the expected hash name %s!"
% (download_hash.digest_size, link, link.hash_name))
raise HashMismatch('Hash name mismatch for package %s' % link)
if download_hash.hexdigest() != link.hash:
logger.fatal("Hash of the package %s (%s) doesn't match the expected hash %s!"
% (link, download_hash.hexdigest(), link.hash))
raise HashMismatch('Bad %s hash for package %s' % (link.hash_name, link))
def _get_hash_from_file(target_file, link):
try:
download_hash = hashlib.new(link.hash_name)
except (ValueError, TypeError):
logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
return None
fp = open(target_file, 'rb')
while True:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
fp.close()
return download_hash
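# A self-contained sketch (not pip code) of the chunked-hashing pattern
# shared by _get_hash_from_file() and _download_url(): feed fixed-size
# chunks to a hashlib object so large files never have to fit in memory.
def _sha256_of_file(path, chunk_size=4096):
    digest = hashlib.sha256()
    fp = open(path, 'rb')
    try:
        while True:
            chunk = fp.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
    finally:
        fp.close()
    return digest.hexdigest()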
def _download_url(resp, link, temp_location):
fp = open(temp_location, 'wb')
download_hash = None
if link.hash and link.hash_name:
try:
download_hash = hashlib.new(link.hash_name)
except ValueError:
logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
try:
total_length = int(resp.info()['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
downloaded = 0
show_progress = total_length > 40 * 1000 or not total_length
show_url = link.show_url
try:
if show_progress:
## FIXME: the URL can get really long in this message:
if total_length:
logger.start_progress('Downloading %s (%s): ' % (show_url, format_size(total_length)))
else:
logger.start_progress('Downloading %s (unknown size): ' % show_url)
else:
logger.notify('Downloading %s' % show_url)
logger.info('Downloading from URL %s' % link)
while True:
chunk = resp.read(4096)
if not chunk:
break
downloaded += len(chunk)
if show_progress:
if not total_length:
logger.show_progress('%s' % format_size(downloaded))
else:
logger.show_progress('%3i%% %s' % (100 * downloaded / total_length, format_size(downloaded)))
if download_hash is not None:
download_hash.update(chunk)
fp.write(chunk)
fp.close()
finally:
if show_progress:
logger.end_progress('%s downloaded' % format_size(downloaded))
return download_hash
def _copy_file(filename, location, content_type, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(download_location), ('i', 'w', 'b'))
if response == 'i':
copy = False
elif response == 'w':
logger.warn('Deleting %s' % display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warn('Backing up %s to %s'
% (display_path(download_location), display_path(dest_file)))
shutil.move(download_location, dest_file)
if copy:
shutil.copy(filename, download_location)
logger.indent -= 2
logger.notify('Saved %s' % display_path(download_location))
def unpack_http_url(link, location, download_cache, download_dir=None):
temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
temp_location = None
target_url = link.url.split('#', 1)[0]
already_cached = False
cache_file = None
cache_content_type_file = None
download_hash = None
if download_cache:
cache_file = os.path.join(download_cache,
urllib.quote(target_url, ''))
cache_content_type_file = cache_file + '.content-type'
already_cached = (
os.path.exists(cache_file) and
os.path.exists(cache_content_type_file)
)
if not os.path.isdir(download_cache):
create_download_cache_folder(download_cache)
already_downloaded = None
if download_dir:
already_downloaded = os.path.join(download_dir, link.filename)
if not os.path.exists(already_downloaded):
already_downloaded = None
if already_downloaded:
temp_location = already_downloaded
content_type = mimetypes.guess_type(already_downloaded)[0]
logger.notify('File was already downloaded %s' % already_downloaded)
if link.hash:
download_hash = _get_hash_from_file(temp_location, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warn(
'Previously-downloaded file %s has bad hash, '
're-downloading.' % temp_location
)
temp_location = None
os.unlink(already_downloaded)
already_downloaded = None
# We have a cached file, and we haven't already found a good downloaded copy
if already_cached and not temp_location:
with open(cache_content_type_file) as fp:
content_type = fp.read().strip()
temp_location = cache_file
logger.notify('Using download cache from %s' % cache_file)
if link.hash and link.hash_name:
download_hash = _get_hash_from_file(cache_file, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warn(
'Cached file %s has bad hash, '
're-downloading.' % temp_location
)
temp_location = None
os.unlink(cache_file)
os.unlink(cache_content_type_file)
already_cached = False
# We don't have either a cached or a downloaded copy
if not temp_location:
resp = _get_response_from_url(target_url, link)
content_type = resp.info().get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.info().get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param.
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != geturl(resp):
ext = os.path.splitext(geturl(resp))[1]
if ext:
filename += ext
temp_location = os.path.join(temp_dir, filename)
download_hash = _download_url(resp, link, temp_location)
if link.hash and link.hash_name:
_check_hash(download_hash, link)
if download_dir and not already_downloaded:
_copy_file(temp_location, download_dir, content_type, link)
unpack_file(temp_location, location, content_type, link)
if cache_file and not already_cached:
cache_download(cache_file, temp_location, content_type)
if not (already_cached or already_downloaded):
os.unlink(temp_location)
os.rmdir(temp_dir)
def _get_response_from_url(target_url, link):
try:
resp = urlopen(target_url)
except urllib2.HTTPError:
e = sys.exc_info()[1]
logger.fatal("HTTP error %s while getting %s" % (e.code, link))
raise
except IOError:
e = sys.exc_info()[1]
# Typically an FTP error
logger.fatal("Error %s while getting %s" % (e, link))
raise
return resp
class Urllib2HeadRequest(urllib2.Request):
def get_method(self):
return "HEAD"
|
robovm/robovm-studio
|
refs/heads/master
|
python/testData/formatter/wrapDefinitionWithLongLine_after.py
|
79
|
def my_function_name_which_is_rather_long:
pass
|
ryfeus/lambda-packs
|
refs/heads/master
|
Tensorflow_LightGBM_Scipy_nightly/source/tensorflow/contrib/learn/python/learn/datasets/text_datasets.py
|
124
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Text datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.platform import gfile
DBPEDIA_URL = 'https://github.com/le-scientifique/torchDatasets/raw/master/dbpedia_csv.tar.gz'
def maybe_download_dbpedia(data_dir):
"""Download if DBpedia data is not present."""
train_path = os.path.join(data_dir, 'dbpedia_csv/train.csv')
test_path = os.path.join(data_dir, 'dbpedia_csv/test.csv')
if not (gfile.Exists(train_path) and gfile.Exists(test_path)):
archive_path = base.maybe_download(
'dbpedia_csv.tar.gz', data_dir, DBPEDIA_URL)
tfile = tarfile.open(archive_path, 'r:*')
tfile.extractall(data_dir)
def load_dbpedia(size='small', test_with_fake_data=False):
"""Get DBpedia datasets from CSV files."""
if not test_with_fake_data:
data_dir = os.path.join(os.getenv('TF_EXP_BASE_DIR', ''), 'dbpedia_data')
maybe_download_dbpedia(data_dir)
train_path = os.path.join(data_dir, 'dbpedia_csv', 'train.csv')
test_path = os.path.join(data_dir, 'dbpedia_csv', 'test.csv')
if size == 'small':
# Reduce the size of original data by a factor of 1000.
base.shrink_csv(train_path, 1000)
base.shrink_csv(test_path, 1000)
train_path = train_path.replace('train.csv', 'train_small.csv')
test_path = test_path.replace('test.csv', 'test_small.csv')
else:
module_path = os.path.dirname(__file__)
train_path = os.path.join(module_path, 'data', 'text_train.csv')
test_path = os.path.join(module_path, 'data', 'text_test.csv')
train = base.load_csv_without_header(
train_path, target_dtype=np.int32, features_dtype=np.str, target_column=0)
test = base.load_csv_without_header(
test_path, target_dtype=np.int32, features_dtype=np.str, target_column=0)
return base.Datasets(train=train, validation=None, test=test)
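# A usage sketch (illustrative only; running it downloads the dataset):
# `train.data` holds the text columns and `train.target` the int32 labels,
# per load_csv_without_header() above.
if __name__ == '__main__':
  dbpedia = load_dbpedia(size='small')
  print('train examples: %d' % len(dbpedia.train.data))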
|
legorovers/legoflask
|
refs/heads/master
|
app/rules.py
|
1
|
class Rule(object):
def __init__(self, trigger, actions):
self.trigger = trigger
print "trigger: %s" % trigger
self.code = []
time = 0
for a in actions:
print "action: %s" % a
if a == 'back':
action = ('reverse', 40)
elif a == 'stop':
action = (None, 0)
else: # forward, left, right, speak, light-*
action = (a, 40)
self.code.append(time)
self.code.append(action)
time += 0.5
print "code: %s" % self.code
class RuleEngine(object):
def __init__(self, control):
self.control = control
self.rules = []
def check(self, color, touch, direction):
for rule in self.rules:
if (rule.trigger == 'collision' and touch) \
or (rule.trigger == 'dark ground' and color < 40) \
or (rule.trigger == 'light ground' and color >= 40):
self.control.program(*rule.code)
def activate(self, rules):
self.rules = rules
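# A usage sketch with a hypothetical `control` object (anything exposing a
# program(*steps) method, as called from RuleEngine.check above):
#
#   engine = RuleEngine(control)
#   engine.activate([Rule('collision', ['back', 'left', 'stop'])])
#   engine.check(color=50, touch=True, direction=0)  # fires the rule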
|
Endika/commission
|
refs/heads/8.0
|
sale_commission/models/sale_order.py
|
8
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Pexego Sistemas Informáticos (<http://www.pexego.es>).
# Copyright (C) 2015 Avanzosc (<http://www.avanzosc.es>)
# Copyright (C) 2015 Pedro M. Baeza (<http://www.serviciosbaeza.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
class SaleOrder(models.Model):
_inherit = "sale.order"
@api.one
@api.depends('order_line.agents.amount')
def _get_commission_total(self):
self.commission_total = 0.0
for line in self.order_line:
self.commission_total += sum(x.amount for x in line.agents)
commission_total = fields.Float(
string="Commissions", compute="_get_commission_total",
store=True)
class SaleOrderLine(models.Model):
_inherit = "sale.order.line"
@api.model
def _default_agents(self):
agents = []
if self.env.context.get('partner_id'):
partner = self.env['res.partner'].browse(
self.env.context['partner_id'])
for agent in partner.agents:
agents.append({'agent': agent.id,
'commission': agent.commission.id})
return [(0, 0, x) for x in agents]
agents = fields.One2many(
string="Agents & commissions",
comodel_name='sale.order.line.agent', inverse_name='sale_line',
copy=True, readonly=True, default=_default_agents)
commission_free = fields.Boolean(
string="Comm. free", related="product_id.commission_free",
store=True, readonly=True)
@api.model
def _prepare_order_line_invoice_line(self, line, account_id=False):
vals = super(SaleOrderLine, self)._prepare_order_line_invoice_line(
line, account_id=account_id)
vals['agents'] = [
(0, 0, {'agent': x.agent.id,
'commission': x.commission.id}) for x in line.agents]
return vals
class SaleOrderLineAgent(models.Model):
_name = "sale.order.line.agent"
_rec_name = "agent"
sale_line = fields.Many2one(
comodel_name="sale.order.line", required=True, ondelete="cascade")
agent = fields.Many2one(
comodel_name="res.partner", required=True, ondelete="restrict",
domain="[('agent', '=', True')]")
commission = fields.Many2one(
comodel_name="sale.commission", required=True, ondelete="restrict")
amount = fields.Float(compute="_get_amount", store=True)
_sql_constraints = [
('unique_agent', 'UNIQUE(sale_line, agent)',
'You can only add each agent once.')
]
@api.one
@api.onchange('agent')
def onchange_agent(self):
self.commission = self.agent.commission
@api.one
@api.depends('commission.commission_type', 'sale_line.price_subtotal')
def _get_amount(self):
self.amount = 0.0
if (not self.sale_line.product_id.commission_free and
self.commission):
subtotal = self.sale_line.price_subtotal
if self.commission.commission_type == 'fixed':
self.amount = subtotal * (self.commission.fix_qty / 100.0)
else:
self.amount = self.commission.calculate_section(subtotal)
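# A worked example (illustrative, outside Odoo) of the 'fixed' branch in
# _get_amount() above, where fix_qty is a percentage of the line subtotal:
def _fixed_commission(subtotal, fix_qty):
    return subtotal * (fix_qty / 100.0)

# e.g. _fixed_commission(200.0, 10.0) -> 20.0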
|
hellerve/hawkweed
|
refs/heads/master
|
hawkweed/functional/__init__.py
|
1
|
"""A collection of useful functions"""
from hawkweed.functional.primitives import *
from hawkweed.functional.logical import *
from hawkweed.functional.mathematical import *
from hawkweed.functional.list_prims import *
|
yamahata/neutron
|
refs/heads/master
|
neutron/tests/unit/cisco/test_network_plugin.py
|
1
|
# Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import copy
import inspect
import logging
import mock
import six
import webob.exc as wexc
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.api.v2 import base
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import db_base_plugin_v2 as base_plugin
from neutron.db import l3_db
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron.manager import NeutronManager
from neutron.openstack.common import gettextutils
from neutron.plugins.cisco.common import cisco_constants as const
from neutron.plugins.cisco.common import cisco_exceptions as c_exc
from neutron.plugins.cisco.common import config as cisco_config
from neutron.plugins.cisco.db import network_db_v2
from neutron.plugins.cisco.db import nexus_db_v2
from neutron.plugins.cisco.models import virt_phy_sw_v2
from neutron.plugins.openvswitch.common import config as ovs_config
from neutron.plugins.openvswitch import ovs_db_v2
from neutron.tests.unit import _test_extension_portbindings as test_bindings
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import test_extensions
LOG = logging.getLogger(__name__)
CORE_PLUGIN = 'neutron.plugins.cisco.network_plugin.PluginV2'
NEXUS_PLUGIN = 'neutron.plugins.cisco.nexus.cisco_nexus_plugin_v2.NexusPlugin'
NEXUS_DRIVER = ('neutron.plugins.cisco.nexus.'
'cisco_nexus_network_driver_v2.CiscoNEXUSDriver')
PHYS_NET = 'physnet1'
BRIDGE_NAME = 'br-eth1'
VLAN_START = 1000
VLAN_END = 1100
COMP_HOST_NAME = 'testhost'
COMP_HOST_NAME_2 = 'testhost_2'
NEXUS_IP_ADDR = '1.1.1.1'
NEXUS_DEV_ID = 'NEXUS_SWITCH'
NEXUS_USERNAME = 'admin'
NEXUS_PASSWORD = 'mySecretPassword'
NEXUS_SSH_PORT = 22
NEXUS_INTERFACE = '1/1'
NEXUS_INTERFACE_2 = '1/2'
NEXUS_PORT_1 = 'ethernet:1/1'
NEXUS_PORT_2 = 'ethernet:1/2'
NETWORK_NAME = 'test_network'
CIDR_1 = '10.0.0.0/24'
CIDR_2 = '10.0.1.0/24'
DEVICE_ID_1 = '11111111-1111-1111-1111-111111111111'
DEVICE_ID_2 = '22222222-2222-2222-2222-222222222222'
DEVICE_OWNER = 'compute:None'
class CiscoNetworkPluginV2TestCase(test_db_plugin.NeutronDbPluginV2TestCase):
def setUp(self):
"""Configure for end-to-end neutron testing using a mock ncclient.
This setup includes:
- Configure the OVS plugin to use VLANs in the range of
VLAN_START-VLAN_END.
- Configure the Cisco plugin model to use the Nexus driver.
- Configure the Nexus driver to use an imaginary switch
at NEXUS_IP_ADDR.
"""
# Configure the OVS and Cisco plugins
phys_bridge = ':'.join([PHYS_NET, BRIDGE_NAME])
phys_vlan_range = ':'.join([PHYS_NET, str(VLAN_START), str(VLAN_END)])
config = {
ovs_config: {
'OVS': {'bridge_mappings': phys_bridge,
'network_vlan_ranges': [phys_vlan_range],
'tenant_network_type': 'vlan'}
},
cisco_config: {
'CISCO': {'nexus_driver': NEXUS_DRIVER},
'CISCO_PLUGINS': {'nexus_plugin': NEXUS_PLUGIN},
}
}
for module in config:
for group in config[module]:
for opt, val in config[module][group].items():
module.cfg.CONF.set_override(opt, val, group)
# Configure the Nexus switch dictionary
# TODO(Henry): add tests for other devices
nexus_config = {
(NEXUS_DEV_ID, NEXUS_IP_ADDR, 'username'): NEXUS_USERNAME,
(NEXUS_DEV_ID, NEXUS_IP_ADDR, 'password'): NEXUS_PASSWORD,
(NEXUS_DEV_ID, NEXUS_IP_ADDR, 'ssh_port'): NEXUS_SSH_PORT,
(NEXUS_DEV_ID, NEXUS_IP_ADDR, COMP_HOST_NAME): NEXUS_INTERFACE,
(NEXUS_DEV_ID, NEXUS_IP_ADDR, COMP_HOST_NAME_2): NEXUS_INTERFACE_2,
}
nexus_patch = mock.patch.dict(cisco_config.device_dictionary,
nexus_config)
nexus_patch.start()
self.addCleanup(nexus_patch.stop)
# Use a mock netconf client
self.mock_ncclient = mock.Mock()
ncclient_patch = mock.patch.dict('sys.modules',
{'ncclient': self.mock_ncclient})
ncclient_patch.start()
self.addCleanup(ncclient_patch.stop)
# Call the parent setUp, start the core plugin
super(CiscoNetworkPluginV2TestCase, self).setUp(CORE_PLUGIN)
self.port_create_status = 'DOWN'
# Set Cisco config module's first configured Nexus IP address.
# Used for SVI placement when round-robin placement is disabled.
mock.patch.object(cisco_config, 'first_device_ip',
new=NEXUS_IP_ADDR).start()
def _get_plugin_ref(self):
return getattr(NeutronManager.get_plugin(),
"_model")._plugins[const.VSWITCH_PLUGIN]
@contextlib.contextmanager
def _patch_ncclient(self, attr, value):
"""Configure an attribute on the mock ncclient module.
This method can be used to inject errors by setting a side effect
or a return value for an ncclient method.
:param attr: ncclient attribute (typically method) to be configured.
:param value: Value to be configured on the attribute.
"""
# Configure attribute.
config = {attr: value}
self.mock_ncclient.configure_mock(**config)
# Continue testing
yield
# Unconfigure attribute
config = {attr: None}
self.mock_ncclient.configure_mock(**config)
@staticmethod
def _config_dependent_side_effect(match_config, exc):
"""Generates a config-dependent side effect for ncclient edit_config.
This method generates a mock side-effect function which can be
configured on the mock ncclient module for the edit_config method.
This side effect will cause a given exception to be raised whenever
the XML config string that is passed to edit_config contains all
words in a given match config string.
:param match_config: String containing keywords to be matched
:param exc: Exception to be raised when match is found
:return: Side effect function for the mock ncclient module's
edit_config method.
"""
keywords = match_config.split()
def _side_effect_function(target, config):
if all(word in config for word in keywords):
raise exc
return _side_effect_function
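# Illustration (not used by the tests): the callable returned above is
# installed as edit_config's side_effect, so a side effect built from
# match_config='state active' raises the given exception only for configs
# containing both the words 'state' and 'active'.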
def _is_in_nexus_cfg(self, words):
"""Check if any config sent to Nexus contains all words in a list."""
for call in (self.mock_ncclient.manager.connect.return_value.
edit_config.mock_calls):
configlet = call[2]['config']
if all(word in configlet for word in words):
return True
return False
def _is_in_last_nexus_cfg(self, words):
"""Check if last config sent to Nexus contains all words in a list."""
last_cfg = (self.mock_ncclient.manager.connect.return_value.
edit_config.mock_calls[-1][2]['config'])
return all(word in last_cfg for word in words)
def _is_vlan_configured(self, vlan_creation_expected=True,
add_keyword_expected=False):
vlan_created = self._is_in_nexus_cfg(['vlan', 'vlan-name'])
add_appears = self._is_in_last_nexus_cfg(['add'])
return (self._is_in_last_nexus_cfg(['allowed', 'vlan']) and
vlan_created == vlan_creation_expected and
add_appears == add_keyword_expected)
def _is_vlan_unconfigured(self, vlan_deletion_expected=True,
vlan_untrunk_expected=True):
vlan_deleted = self._is_in_nexus_cfg(
['no', 'vlan', 'vlan-id-create-delete'])
vlan_untrunked = self._is_in_nexus_cfg(['allowed', 'vlan', 'remove'])
return (vlan_deleted == vlan_deletion_expected and
vlan_untrunked == vlan_untrunk_expected)
def _assertExpectedHTTP(self, status, exc):
"""Confirm that an HTTP status corresponds to an expected exception.
Confirm that an HTTP status which has been returned for an
neutron API request matches the HTTP status corresponding
to an expected exception.
:param status: HTTP status
:param exc: Expected exception
"""
if exc in base.FAULT_MAP:
expected_http = base.FAULT_MAP[exc].code
else:
expected_http = wexc.HTTPInternalServerError.code
self.assertEqual(status, expected_http)
class TestCiscoGetAttribute(CiscoNetworkPluginV2TestCase):
def test_get_unsupported_attr_in_lazy_gettext_mode(self):
"""Test get of unsupported attribute in lazy gettext mode.
This test also checks that this operation does not cause
excessive nesting of calls to deepcopy.
"""
plugin = NeutronManager.get_plugin()
def _lazy_gettext(msg):
return gettextutils.Message(msg, domain='neutron')
with mock.patch.dict(six.moves.builtins.__dict__,
{'_': _lazy_gettext}):
self.nesting_count = 0
def _count_nesting(*args, **kwargs):
self.nesting_count += 1
with mock.patch.object(copy, 'deepcopy',
side_effect=_count_nesting,
wraps=copy.deepcopy):
self.assertRaises(AttributeError, getattr, plugin,
'an_unsupported_attribute')
# If there were no nested calls to deepcopy, then the total
# number of calls to deepcopy should be 2 (1 call for
# each mod'd field in the AttributeError message raised
# by the plugin).
self.assertEqual(self.nesting_count, 2)
class TestCiscoBasicGet(CiscoNetworkPluginV2TestCase,
test_db_plugin.TestBasicGet):
pass
class TestCiscoV2HTTPResponse(CiscoNetworkPluginV2TestCase,
test_db_plugin.TestV2HTTPResponse):
pass
class TestCiscoPortsV2(CiscoNetworkPluginV2TestCase,
test_db_plugin.TestPortsV2,
test_bindings.PortBindingsHostTestCaseMixin):
@contextlib.contextmanager
def _create_port_res(self, name=NETWORK_NAME, cidr=CIDR_1,
do_delete=True, host_id=COMP_HOST_NAME):
"""Create a network, subnet, and port and yield the result.
Create a network, subnet, and port, yield the result,
then delete the port, subnet, and network.
:param name: Name of network to be created
:param cidr: CIDR address of the subnetwork to be created
:param do_delete: If set to True, delete the port at the
end of testing
:param host_id: Name of compute host to use for testing
"""
ctx = context.get_admin_context()
with self.network(name=name) as network:
with self.subnet(network=network, cidr=cidr) as subnet:
net_id = subnet['subnet']['network_id']
args = (portbindings.HOST_ID, 'device_id', 'device_owner')
port_dict = {portbindings.HOST_ID: host_id,
'device_id': DEVICE_ID_1,
'device_owner': DEVICE_OWNER}
res = self._create_port(self.fmt, net_id, arg_list=args,
context=ctx, **port_dict)
port = self.deserialize(self.fmt, res)
try:
yield res
finally:
if do_delete:
self._delete('ports', port['port']['id'])
def test_create_ports_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
# ensures the API chooses the emulation code path
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
plugin_ref = self._get_plugin_ref()
orig = plugin_ref.create_port
with mock.patch.object(plugin_ref,
'create_port') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_port_bulk(self.fmt, 2,
net['network']['id'],
'test',
True)
# Expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'ports',
wexc.HTTPInternalServerError.code)
def test_create_ports_bulk_native(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
def test_create_ports_bulk_emulated(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
def test_create_ports_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk port create")
ctx = context.get_admin_context()
with self.network() as net:
plugin_ref = self._get_plugin_ref()
orig = plugin_ref.create_port
with mock.patch.object(plugin_ref,
'create_port') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_port_bulk(self.fmt, 2,
net['network']['id'],
'test', True, context=ctx)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'ports',
wexc.HTTPInternalServerError.code)
def test_nexus_enable_vlan_cmd(self):
"""Verify the syntax of the command to enable a vlan on an intf."""
# First vlan should be configured without 'add' keyword
with self._create_port_res(name='net1', cidr=CIDR_1):
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=True,
add_keyword_expected=False))
self.mock_ncclient.reset_mock()
# Second vlan should be configured with 'add' keyword
with self._create_port_res(name='net2', cidr=CIDR_2):
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=True,
add_keyword_expected=True))
def test_nexus_vlan_config_two_hosts(self):
"""Verify config/unconfig of vlan on two compute hosts."""
@contextlib.contextmanager
def _create_port_check_vlan(comp_host_name, device_id,
vlan_creation_expected=True):
arg_list = (portbindings.HOST_ID,)
port_dict = {portbindings.HOST_ID: comp_host_name,
'device_id': device_id,
'device_owner': DEVICE_OWNER}
with self.port(subnet=subnet, fmt=self.fmt,
arg_list=arg_list, **port_dict):
self.assertTrue(self._is_vlan_configured(
vlan_creation_expected=vlan_creation_expected,
add_keyword_expected=False))
self.mock_ncclient.reset_mock()
yield
# Create network and subnet
with self.network(name=NETWORK_NAME) as network:
with self.subnet(network=network, cidr=CIDR_1) as subnet:
# Create an instance on first compute host
with _create_port_check_vlan(
COMP_HOST_NAME, DEVICE_ID_1, vlan_creation_expected=True):
# Create an instance on second compute host
with _create_port_check_vlan(
COMP_HOST_NAME_2, DEVICE_ID_2,
vlan_creation_expected=False):
pass
# Instance on second host is now terminated.
# Vlan should be untrunked from port, but vlan should
# still exist on the switch.
self.assertTrue(self._is_vlan_unconfigured(
vlan_deletion_expected=False))
self.mock_ncclient.reset_mock()
# Instance on first host is now terminated.
# Vlan should be untrunked from port and vlan should have
# been deleted from the switch.
self.assertTrue(self._is_vlan_unconfigured(
vlan_deletion_expected=True))
def test_nexus_connect_fail(self):
"""Test failure to connect to a Nexus switch.
While creating a network, subnet, and port, simulate a connection
failure to a nexus switch. Confirm that the expected HTTP code
is returned for the create port operation.
"""
with self._patch_ncclient('manager.connect.side_effect',
AttributeError):
with self._create_port_res(do_delete=False) as res:
self._assertExpectedHTTP(res.status_int,
c_exc.NexusConnectFailed)
def test_nexus_config_fail(self):
"""Test a Nexus switch configuration failure.
While creating a network, subnet, and port, simulate a nexus
switch configuration error. Confirm that the expected HTTP code
is returned for the create port operation.
"""
with self._patch_ncclient(
'manager.connect.return_value.edit_config.side_effect',
AttributeError):
with self._create_port_res(do_delete=False) as res:
self._assertExpectedHTTP(res.status_int,
c_exc.NexusConfigFailed)
def test_nexus_extended_vlan_range_failure(self):
"""Test that extended VLAN range config errors are ignored.
Some versions of Nexus switch do not allow state changes for
the extended VLAN range (1006-4094), but these errors can be
ignored (default values are appropriate). Test that such errors
are ignored by the Nexus plugin.
"""
config_err_strings = {
"state active": "Can't modify state for extended",
"no shutdown": "Command is only allowed on VLAN",
}
for config, err_string in config_err_strings.items():
with self._patch_ncclient(
'manager.connect.return_value.edit_config.side_effect',
self._config_dependent_side_effect(config,
Exception(err_string))):
with self._create_port_res() as res:
self.assertEqual(res.status_int, wexc.HTTPCreated.code)
def test_nexus_vlan_config_rollback(self):
"""Test rollback following Nexus VLAN state config failure.
Test that the Cisco Nexus plugin correctly deletes the VLAN
on the Nexus switch when the 'state active' command fails (for
a reason other than state configuration change is rejected
for the extended VLAN range).
"""
vlan_state_configs = ['state active', 'no shutdown']
for config in vlan_state_configs:
with self._patch_ncclient(
'manager.connect.return_value.edit_config.side_effect',
self._config_dependent_side_effect(config, ValueError)):
with self._create_port_res(do_delete=False) as res:
# Confirm that the last configuration sent to the Nexus
# switch was deletion of the VLAN.
self.assertTrue(
self._is_in_last_nexus_cfg(['<no>', '<vlan>'])
)
self._assertExpectedHTTP(res.status_int,
c_exc.NexusConfigFailed)
def test_get_seg_id_fail(self):
"""Test handling of a NetworkSegmentIDNotFound exception.
Test the Cisco NetworkSegmentIDNotFound exception by simulating
a return of None by the OVS DB get_network_binding method
during port creation.
"""
orig = ovs_db_v2.get_network_binding
def _return_none_if_nexus_caller(self, *args, **kwargs):
def _calling_func_name(offset=0):
"""Get name of the calling function 'offset' frames back."""
return inspect.stack()[1 + offset][3]
if (_calling_func_name(1) == '_get_segmentation_id' and
_calling_func_name(2) == '_invoke_nexus_for_net_create'):
return None
else:
return orig(self, *args, **kwargs)
with mock.patch.object(ovs_db_v2, 'get_network_binding',
new=_return_none_if_nexus_caller):
with self._create_port_res(do_delete=False) as res:
self._assertExpectedHTTP(res.status_int,
c_exc.NetworkSegmentIDNotFound)
def test_nexus_host_non_configured(self):
"""Test handling of a NexusComputeHostNotConfigured exception.
Test the Cisco NexusComputeHostNotConfigured exception by using
a fictitious host name during port creation.
"""
with self._create_port_res(do_delete=False,
host_id='fakehost') as res:
self._assertExpectedHTTP(res.status_int,
c_exc.NexusComputeHostNotConfigured)
def _check_rollback_on_bind_failure(self,
vlan_deletion_expected,
vlan_untrunk_expected):
"""Test for proper rollback following add Nexus DB binding failure.
Test that the Cisco Nexus plugin correctly rolls back the vlan
configuration on the Nexus switch when add_nexusport_binding fails
within the plugin's create_port() method.
"""
inserted_exc = KeyError
with mock.patch.object(nexus_db_v2, 'add_nexusport_binding',
side_effect=inserted_exc):
with self._create_port_res(do_delete=False) as res:
# Confirm that the configuration sent to the Nexus
# switch includes deletion of the vlan (if expected)
# and untrunking of the vlan from the ethernet interface
# (if expected).
self.assertTrue(self._is_vlan_unconfigured(
vlan_deletion_expected=vlan_deletion_expected,
vlan_untrunk_expected=vlan_untrunk_expected))
self._assertExpectedHTTP(res.status_int, inserted_exc)
def test_nexus_rollback_on_bind_failure_non_provider_vlan(self):
"""Test rollback upon DB binding failure for non-provider vlan."""
self._check_rollback_on_bind_failure(vlan_deletion_expected=True,
vlan_untrunk_expected=True)
def test_nexus_rollback_on_bind_failure_prov_vlan_no_auto_create(self):
"""Test rollback on bind fail for prov vlan w auto-create disabled."""
with mock.patch.object(network_db_v2, 'is_provider_vlan',
return_value=True):
# Disable auto-create. This config change will be cleared based
# on cleanup scheduled in the CiscoNetworkPluginV2TestCase
# class' setUp() method.
cisco_config.CONF.set_override('provider_vlan_auto_create',
False, 'CISCO')
self._check_rollback_on_bind_failure(vlan_deletion_expected=False,
vlan_untrunk_expected=True)
def test_nexus_rollback_on_bind_failure_prov_vlan_no_auto_trunk(self):
"""Test rollback on bind fail for prov vlan w auto-trunk disabled."""
with mock.patch.object(network_db_v2, 'is_provider_vlan',
return_value=True):
# Disable auto-trunk. This config change will be cleared
# based on post-test cleanup scheduled in the
# CiscoNetworkPluginV2TestCase class' setUp() method.
cisco_config.CONF.set_override('provider_vlan_auto_trunk',
False, 'CISCO')
self._check_rollback_on_bind_failure(vlan_deletion_expected=True,
vlan_untrunk_expected=False)
def test_model_update_port_rollback(self):
"""Test for proper rollback for Cisco model layer update port failure.
Test that the vSwitch plugin port configuration is rolled back
(restored) by the Cisco plugin model layer when there is a
failure in the Nexus sub-plugin for an update port operation.
The update port operation simulates a port attachment scenario:
first a port is created with no instance (null device_id),
and then a port update is requested with a non-null device_id
to simulate the port attachment.
"""
with self.port(fmt=self.fmt, device_id='',
device_owner=DEVICE_OWNER) as orig_port:
inserted_exc = ValueError
with mock.patch.object(
virt_phy_sw_v2.VirtualPhysicalSwitchModelV2,
'_invoke_nexus_for_net_create',
side_effect=inserted_exc):
# Send an update port request including a non-null device ID
data = {'port': {'device_id': DEVICE_ID_2,
'device_owner': DEVICE_OWNER,
portbindings.HOST_ID: COMP_HOST_NAME}}
port_id = orig_port['port']['id']
req = self.new_update_request('ports', data, port_id)
res = req.get_response(self.api)
# Sanity check failure result code
self._assertExpectedHTTP(res.status_int, inserted_exc)
# Check that the port still has the original device ID
plugin = base_plugin.NeutronDbPluginV2()
ctx = context.get_admin_context()
db_port = plugin._get_port(ctx, port_id)
self.assertEqual(db_port['device_id'],
orig_port['port']['device_id'])
def test_model_delete_port_rollback(self):
"""Test for proper rollback for OVS plugin delete port failure.
Test that the nexus port configuration is rolled back (restored)
by the Cisco model plugin when there is a failure in the OVS
plugin for a delete port operation.
"""
with self._create_port_res() as res:
# After port is created, we should have one binding for this
# vlan/nexus switch.
port = self.deserialize(self.fmt, res)
start_rows = nexus_db_v2.get_nexusvlan_binding(VLAN_START,
NEXUS_IP_ADDR)
self.assertEqual(len(start_rows), 1)
# Inject an exception in the OVS plugin delete_port
# processing, and attempt a port deletion.
inserted_exc = n_exc.Conflict
expected_http = base.FAULT_MAP[inserted_exc].code
with mock.patch.object(l3_db.L3_NAT_db_mixin,
'disassociate_floatingips',
side_effect=inserted_exc):
self._delete('ports', port['port']['id'],
expected_code=expected_http)
# Confirm that the Cisco model plugin has restored
# the nexus configuration for this port after deletion failure.
end_rows = nexus_db_v2.get_nexusvlan_binding(VLAN_START,
NEXUS_IP_ADDR)
self.assertEqual(start_rows, end_rows)
def test_nexus_delete_port_rollback(self):
"""Test for proper rollback for nexus plugin delete port failure.
Test for rollback (i.e. restoration) of a VLAN entry in the
nexus database whenever the nexus plugin fails to reconfigure the
nexus switch during a delete_port operation.
"""
with self._create_port_res() as res:
port = self.deserialize(self.fmt, res)
# Check that there is only one binding in the nexus database
# for this VLAN/nexus switch.
start_rows = nexus_db_v2.get_nexusvlan_binding(VLAN_START,
NEXUS_IP_ADDR)
self.assertEqual(len(start_rows), 1)
# Simulate a Nexus switch configuration error during
# port deletion.
with self._patch_ncclient(
'manager.connect.return_value.edit_config.side_effect',
AttributeError):
self._delete('ports', port['port']['id'],
base.FAULT_MAP[c_exc.NexusConfigFailed].code)
# Confirm that the binding has been restored (rolled back).
end_rows = nexus_db_v2.get_nexusvlan_binding(VLAN_START,
NEXUS_IP_ADDR)
self.assertEqual(start_rows, end_rows)
def test_model_update_port_attach(self):
"""Test the model for update_port in attaching to an instance.
Mock the routines that call into the plugin code, and make sure they
are called with correct arguments.
"""
with contextlib.nested(
self.port(),
mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2,
'_invoke_plugin_per_device'),
mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2,
'_invoke_nexus_for_net_create')
) as (port, invoke_plugin_per_device, invoke_nexus_for_net_create):
data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME,
'device_id': DEVICE_ID_1,
'device_owner': DEVICE_OWNER}}
req = self.new_update_request('ports', data, port['port']['id'])
# Note, due to mocking out the two model routines, response won't
# contain any useful data
req.get_response(self.api)
# Note that call_args_list is used instead of
# assert_called_once_with which requires exact match of arguments.
# This is because the mocked routines contain variable number of
# arguments and/or dynamic objects.
self.assertEqual(invoke_plugin_per_device.call_count, 1)
self.assertEqual(
invoke_plugin_per_device.call_args_list[0][0][0:2],
(const.VSWITCH_PLUGIN, 'update_port'))
self.assertEqual(invoke_nexus_for_net_create.call_count, 1)
self.assertEqual(
invoke_nexus_for_net_create.call_args_list[0][0][1:],
(port['port']['tenant_id'], port['port']['network_id'],
data['port']['device_id'],
data['port'][portbindings.HOST_ID],))
def test_model_update_port_migrate(self):
"""Test the model for update_port in migrating an instance.
Mock the routines that call into the plugin code, and make sure they
are called with correct arguments.
"""
arg_list = (portbindings.HOST_ID,)
data = {portbindings.HOST_ID: COMP_HOST_NAME,
'device_id': DEVICE_ID_1,
'device_owner': DEVICE_OWNER}
with contextlib.nested(
self.port(arg_list=arg_list, **data),
mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2,
'_invoke_plugin_per_device'),
mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2,
'_invoke_nexus_for_net_create')
) as (port, invoke_plugin_per_device, invoke_nexus_for_net_create):
data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME_2}}
req = self.new_update_request('ports', data, port['port']['id'])
# Note, due to mocking out the two model routines, response won't
# contain any useful data
req.get_response(self.api)
# Note that call_args_list is used instead of
# assert_called_once_with which requires exact match of arguments.
# This is because the mocked routines contain variable number of
# arguments and/or dynamic objects.
self.assertEqual(invoke_plugin_per_device.call_count, 2)
self.assertEqual(
invoke_plugin_per_device.call_args_list[0][0][0:2],
(const.VSWITCH_PLUGIN, 'update_port'))
self.assertEqual(
invoke_plugin_per_device.call_args_list[1][0][0:2],
(const.NEXUS_PLUGIN, 'delete_port'))
self.assertEqual(invoke_nexus_for_net_create.call_count, 1)
self.assertEqual(
invoke_nexus_for_net_create.call_args_list[0][0][1:],
(port['port']['tenant_id'], port['port']['network_id'],
port['port']['device_id'],
data['port'][portbindings.HOST_ID],))
def test_model_update_port_net_create_not_needed(self):
"""Test the model for update_port when no action is needed.
Mock the routines that call into the plugin code, and make sure that
VSWITCH plugin is called with correct arguments, while NEXUS plugin is
not called at all.
"""
arg_list = (portbindings.HOST_ID,)
data = {portbindings.HOST_ID: COMP_HOST_NAME,
'device_id': DEVICE_ID_1,
'device_owner': DEVICE_OWNER}
with contextlib.nested(
self.port(arg_list=arg_list, **data),
mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2,
'_invoke_plugin_per_device'),
mock.patch.object(virt_phy_sw_v2.VirtualPhysicalSwitchModelV2,
'_invoke_nexus_for_net_create')
) as (port, invoke_plugin_per_device, invoke_nexus_for_net_create):
data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME,
'device_id': DEVICE_ID_1,
'device_owner': DEVICE_OWNER}}
req = self.new_update_request('ports', data, port['port']['id'])
# Note, due to mocking out the two model routines, response won't
# contain any useful data
req.get_response(self.api)
# Note that call_args_list is used instead of
# assert_called_once_with which requires exact match of arguments.
# This is because the mocked routines contain variable number of
# arguments and/or dynamic objects.
self.assertEqual(invoke_plugin_per_device.call_count, 1)
self.assertEqual(
invoke_plugin_per_device.call_args_list[0][0][0:2],
(const.VSWITCH_PLUGIN, 'update_port'))
self.assertFalse(invoke_nexus_for_net_create.called)
def verify_portbinding(self, host_id1, host_id2,
vlan, device_id, binding_port):
"""Verify a port binding entry in the DB is correct."""
self.assertEqual(host_id1, host_id2)
pb = nexus_db_v2.get_nexusvm_bindings(vlan, device_id)
self.assertEqual(len(pb), 1)
self.assertEqual(pb[0].port_id, binding_port)
self.assertEqual(pb[0].switch_ip, NEXUS_IP_ADDR)
def test_db_update_port_attach(self):
"""Test DB for update_port in attaching to an instance.
Query DB for the port binding entry corresponding to the search key
        (vlan, device_id), and make sure that it's bound to the correct switch port.
"""
with self.port() as port:
data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME,
'device_id': DEVICE_ID_1,
'device_owner': DEVICE_OWNER}}
req = self.new_update_request('ports', data, port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
ctx = context.get_admin_context()
net = self._show('networks', res['port']['network_id'],
neutron_context=ctx)['network']
self.assertTrue(attributes.is_attr_set(
net.get(provider.SEGMENTATION_ID)))
vlan = net[provider.SEGMENTATION_ID]
self.assertEqual(vlan, VLAN_START)
self.verify_portbinding(res['port'][portbindings.HOST_ID],
data['port'][portbindings.HOST_ID],
vlan,
data['port']['device_id'],
NEXUS_PORT_1)
def test_db_update_port_migrate(self):
"""Test DB for update_port in migrating an instance.
Query DB for the port binding entry corresponding to the search key
        (vlan, device_id), and make sure that it's bound to the correct switch port
before and after the migration.
"""
arg_list = (portbindings.HOST_ID,)
data = {portbindings.HOST_ID: COMP_HOST_NAME,
'device_id': DEVICE_ID_1,
'device_owner': DEVICE_OWNER}
with self.port(arg_list=arg_list, **data) as port:
ctx = context.get_admin_context()
net = self._show('networks', port['port']['network_id'],
neutron_context=ctx)['network']
self.assertTrue(attributes.is_attr_set(
net.get(provider.SEGMENTATION_ID)))
vlan = net[provider.SEGMENTATION_ID]
self.assertEqual(vlan, VLAN_START)
self.verify_portbinding(port['port'][portbindings.HOST_ID],
data[portbindings.HOST_ID],
vlan,
data['device_id'],
NEXUS_PORT_1)
new_data = {'port': {portbindings.HOST_ID: COMP_HOST_NAME_2}}
req = self.new_update_request('ports',
new_data, port['port']['id'])
res = self.deserialize(self.fmt, req.get_response(self.api))
self.verify_portbinding(res['port'][portbindings.HOST_ID],
new_data['port'][portbindings.HOST_ID],
vlan,
data['device_id'],
NEXUS_PORT_2)
def test_delete_ports_by_device_id_second_call_failure(self):
plugin_ref = self._get_plugin_ref()
self._test_delete_ports_by_device_id_second_call_failure(plugin_ref)
def test_delete_ports_ignores_port_not_found(self):
plugin_ref = self._get_plugin_ref()
self._test_delete_ports_ignores_port_not_found(plugin_ref)
class TestCiscoNetworksV2(CiscoNetworkPluginV2TestCase,
test_db_plugin.TestNetworksV2):
def test_create_networks_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
plugin_ref = self._get_plugin_ref()
orig = plugin_ref.create_network
        # Ensure the API chooses the emulation code path.
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
with mock.patch.object(plugin_ref,
'create_network') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_network_bulk(self.fmt, 2, 'test', True)
                LOG.debug("response is %s", res)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'networks',
wexc.HTTPInternalServerError.code)
def test_create_networks_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk network create")
plugin_ref = self._get_plugin_ref()
orig = plugin_ref.create_network
with mock.patch.object(plugin_ref,
'create_network') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
res = self._create_network_bulk(self.fmt, 2, 'test', True)
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'networks',
wexc.HTTPInternalServerError.code)
@contextlib.contextmanager
def _provider_vlan_network(self, phys_net, segment_id, net_name):
provider_attrs = {provider.NETWORK_TYPE: 'vlan',
provider.PHYSICAL_NETWORK: phys_net,
provider.SEGMENTATION_ID: segment_id}
arg_list = tuple(provider_attrs.keys())
res = self._create_network(self.fmt, net_name, True,
arg_list=arg_list, **provider_attrs)
network = self.deserialize(self.fmt, res)['network']
try:
yield network
finally:
req = self.new_delete_request('networks', network['id'])
req.get_response(self.api)
def test_create_provider_vlan_network(self):
with self._provider_vlan_network(PHYS_NET, '1234',
'pvnet1') as network:
expected = [('name', 'pvnet1'),
('admin_state_up', True),
('status', 'ACTIVE'),
('shared', False),
(provider.NETWORK_TYPE, 'vlan'),
(provider.PHYSICAL_NETWORK, PHYS_NET),
(provider.SEGMENTATION_ID, 1234)]
for k, v in expected:
self.assertEqual(network[k], v)
self.assertTrue(network_db_v2.is_provider_network(network['id']))
def test_delete_provider_vlan_network(self):
with self._provider_vlan_network(PHYS_NET, '1234',
'pvnet1') as network:
network_id = network['id']
# Provider network should now be deleted
self.assertFalse(network_db_v2.is_provider_network(network_id))
class TestCiscoSubnetsV2(CiscoNetworkPluginV2TestCase,
test_db_plugin.TestSubnetsV2):
def test_create_subnets_bulk_emulated_plugin_failure(self):
real_has_attr = hasattr
        # Ensure the API chooses the emulation code path.
def fakehasattr(item, attr):
if attr.endswith('__native_bulk_support'):
return False
return real_has_attr(item, attr)
with mock.patch('__builtin__.hasattr',
new=fakehasattr):
plugin_ref = self._get_plugin_ref()
orig = plugin_ref.create_subnet
with mock.patch.object(plugin_ref,
'create_subnet') as patched_plugin:
def side_effect(*args, **kwargs):
self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'subnets',
wexc.HTTPInternalServerError.code)
def test_create_subnets_bulk_native_plugin_failure(self):
if self._skip_native_bulk:
self.skipTest("Plugin does not support native bulk subnet create")
plugin_ref = self._get_plugin_ref()
orig = plugin_ref.create_subnet
with mock.patch.object(plugin_ref,
'create_subnet') as patched_plugin:
def side_effect(*args, **kwargs):
return self._do_side_effect(patched_plugin, orig,
*args, **kwargs)
patched_plugin.side_effect = side_effect
with self.network() as net:
res = self._create_subnet_bulk(self.fmt, 2,
net['network']['id'],
'test')
# We expect an internal server error as we injected a fault
self._validate_behavior_on_bulk_failure(
res,
'subnets',
wexc.HTTPInternalServerError.code)
class TestCiscoRouterInterfacesV2(CiscoNetworkPluginV2TestCase):
def setUp(self):
"""Configure a log exception counter and an API extension manager."""
self.log_exc_count = 0
def _count_exception_logs(*args, **kwargs):
self.log_exc_count += 1
mock.patch.object(logging.LoggerAdapter, 'exception',
autospec=True,
side_effect=_count_exception_logs,
wraps=logging.LoggerAdapter.exception).start()
super(TestCiscoRouterInterfacesV2, self).setUp()
ext_mgr = extensions.PluginAwareExtensionManager.get_instance()
self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
@contextlib.contextmanager
def _network_subnet_router(self):
"""Context mgr for creating/deleting a net, subnet, and router."""
with self.network() as network:
with self.subnet(network=network) as subnet:
data = {'router': {'tenant_id': 'test_tenant_id'}}
request = self.new_create_request('routers', data, self.fmt)
response = request.get_response(self.ext_api)
router = self.deserialize(self.fmt, response)
try:
yield network, subnet, router
finally:
self._delete('routers', router['router']['id'])
@contextlib.contextmanager
def _router_interface(self, router, subnet, **kwargs):
"""Create a router interface, yield the response, then delete it."""
interface_data = {}
if subnet:
interface_data['subnet_id'] = subnet['subnet']['id']
interface_data.update(kwargs)
request = self.new_action_request('routers', interface_data,
router['router']['id'],
'add_router_interface')
response = request.get_response(self.ext_api)
try:
yield response
finally:
# If router interface was created successfully, delete it now.
if response.status_int == wexc.HTTPOk.code:
request = self.new_action_request('routers', interface_data,
router['router']['id'],
'remove_router_interface')
request.get_response(self.ext_api)
@contextlib.contextmanager
def _network_subnet_router_interface(self, **kwargs):
"""Context mgr for create/deleting a net, subnet, router and intf."""
with self._network_subnet_router() as (network, subnet, router):
with self._router_interface(router, subnet,
**kwargs) as response:
yield response
def test_port_list_filtered_by_router_id(self):
"""Test port list command filtered by router ID."""
with self._network_subnet_router() as (network, subnet, router):
with self._router_interface(router, subnet):
query_params = "device_id=%s" % router['router']['id']
req = self.new_list_request('ports', self.fmt, query_params)
res = self.deserialize(self.fmt, req.get_response(self.api))
self.assertEqual(len(res['ports']), 1)
self.assertEqual(res['ports'][0]['device_id'],
router['router']['id'])
self.assertFalse(self.log_exc_count)
def test_add_remove_router_intf_with_nexus_l3_enabled(self):
"""Verifies proper add/remove intf operation with Nexus L3 enabled.
With 'nexus_l3_enable' configured to True, confirm that a switched
virtual interface (SVI) is created/deleted on the Nexus switch when
a virtual router interface is created/deleted.
"""
cisco_config.CONF.set_override('nexus_l3_enable', True, 'CISCO')
with self._network_subnet_router_interface():
self.assertTrue(self._is_in_last_nexus_cfg(
['interface', 'vlan', 'ip', 'address']))
# Clear list of calls made to mock ncclient
            self.mock_ncclient.reset_mock()
# Router interface is now deleted. Confirm that SVI
# has been deleted from the Nexus switch.
self.assertTrue(self._is_in_nexus_cfg(['no', 'interface', 'vlan']))
self.assertTrue(self._is_in_last_nexus_cfg(['no', 'vlan']))
def test_add_remove_router_intf_with_nexus_l3_disabled(self):
"""Verifies proper add/remove intf operation with Nexus L3 disabled.
With 'nexus_l3_enable' configured to False, confirm that no changes
are made to the Nexus switch running configuration when a virtual
router interface is created and then deleted.
"""
cisco_config.CONF.set_override('nexus_l3_enable', False, 'CISCO')
with self._network_subnet_router_interface():
self.assertFalse(self.mock_ncclient.manager.connect.
return_value.edit_config.called)
def test_create_svi_but_subnet_not_specified_exception(self):
"""Tests raising of SubnetNotSpecified exception.
Tests that a SubnetNotSpecified exception is raised when an
add_router_interface request is made for creating a switch virtual
interface (SVI), but the request does not specify a subnet.
"""
cisco_config.CONF.set_override('nexus_l3_enable', True, 'CISCO')
with self._network_subnet_router() as (network, subnet, router):
with self._router_interface(router, subnet=None) as response:
self._assertExpectedHTTP(response.status_int,
c_exc.SubnetNotSpecified)
def test_create_svi_but_port_id_included_exception(self):
"""Tests raising of PortIdForNexusSvi exception.
Tests that a PortIdForNexusSvi exception is raised when an
add_router_interface request is made for creating a switch virtual
interface (SVI), but the request includes a virtual port ID.
"""
cisco_config.CONF.set_override('nexus_l3_enable', True, 'CISCO')
with self._network_subnet_router_interface(
port_id='my_port_id') as response:
self._assertExpectedHTTP(response.status_int,
c_exc.PortIdForNexusSvi)
class TestCiscoPortsV2XML(TestCiscoPortsV2):
fmt = 'xml'
class TestCiscoNetworksV2XML(TestCiscoNetworksV2):
fmt = 'xml'
class TestCiscoSubnetsV2XML(TestCiscoSubnetsV2):
fmt = 'xml'
class TestCiscoRouterInterfacesV2XML(TestCiscoRouterInterfacesV2):
fmt = 'xml'
|
hadronproject/lpms
|
refs/heads/master
|
lpms/operations/remove.py
|
1
|
# Copyright 2009 - 2011 Burak Sezer <purak@hadronproject.org>
#
# This file is part of lpms
#
# lpms is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# lpms is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with lpms. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import lpms
from lpms import out
from lpms import utils
from lpms import shelltools
from lpms import constants as cst
from lpms.db import api as dbapi
# TODO:
# (-) config protect
# (-) directory symlinks
# (-) warning messages
class Remove:
def __init__(self, repo, category, name, version, real_root):
self.repo = repo
self.category = category
self.name = name
self.version = version
self.real_root = real_root
if self.real_root is None:
self.real_root = cst.root
self.filesdb = dbapi.FilesDB()
def remove_content(self):
dirs = []
for _file in self.filesdb.get_paths_by_package(self.name, category=self.category, version=self.version):
_file = _file[0]
target = os.path.join(self.real_root, _file[1:])
if os.path.dirname(_file[1:]) == cst.info:
utils.update_info_index(target, dir_path=os.path.join(self.real_root, cst.info, "dir"), delete=True)
if os.path.islink(target):
os.unlink(target)
            elif os.path.isfile(target):
                shelltools.remove_file(target)
else:
dirs.append(target)
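        # Reverse the recorded order so nested directories are removed
        # before their (by then empty) parents.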
dirs.reverse()
for target in dirs:
if os.path.isdir(target) and not os.listdir(target):
shelltools.remove_dir(target)
def main(pkgname, real_root):
instdb = dbapi.InstallDB()
filesdb = dbapi.FilesDB()
# start remove operation
repo, category, name, version = pkgname
# initialize remove class
rmpkg = Remove(repo, category, name, version, real_root)
lpms.logger.info("removing %s/%s/%s-%s from %s" % \
(repo, category, name, version, rmpkg.real_root))
out.normal("removing %s/%s/%s-%s from %s" % \
(repo, category, name, version, rmpkg.real_root))
# remove the package content
rmpkg.remove_content()
# remove entries from the database
package_id = instdb.find_package(package_repo=repo, package_category=category, \
package_name=name, package_version=version).get(0).id
instdb.database.delete_build_info(package_id)
instdb.delete_conditional_versions(package_id=package_id)
instdb.delete_inline_options(package_id=package_id)
instdb.delete_package(package_repo=repo, package_category=category, \
package_name=name, package_version=version, commit=True)
# remove paths from files table
filesdb.delete_item_by_pkgdata(category, name, version, commit=True)
# unlock
if shelltools.is_exists(cst.lock_file):
shelltools.remove_file(cst.lock_file)
|
MenZil/kuma
|
refs/heads/master
|
scripts/pth_pydev.py
|
31
|
#!/usr/bin/env python
import sys
import os
from shutil import copyfile
from xml.etree.ElementTree import Element, ElementTree, tostring
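# Usage (per the sys.argv reads below): pth_pydev.py <pth_file> <pydevproject_file> <path_prefix>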
pth_file = sys.argv[1]
pydevproject_file = sys.argv[2]
prefix = sys.argv[3]
copyfile(pydevproject_file, pydevproject_file+'.bak')
tree = ElementTree()
tree.parse(pydevproject_file)
pydev_pathproperty = tree.find("pydev_pathproperty")
# Track existing entries by their text content so duplicates are skipped;
# a membership test against the Element objects themselves never matches.
paths = [element.text for element in pydev_pathproperty.getiterator('path')]
with open(pth_file) as f:
    for line in f:
        pydev_entry = prefix + line.rstrip()
        if pydev_entry not in paths:
            pydev_element = Element('path')
            pydev_element.text = pydev_entry
            pydev_pathproperty.append(pydev_element)
            paths.append(pydev_entry)
print tostring(pydev_pathproperty)
tree.write(pydevproject_file)
|
DanielAttia/namebench
|
refs/heads/master
|
nb_third_party/dns/rdtypes/ANY/NS.py
|
248
|
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.nsbase
class NS(dns.rdtypes.nsbase.NSBase):
"""NS record"""
pass
|
ffu/DSA-3.2.2
|
refs/heads/master
|
gnuradio-core/src/python/gnuradio/gruimpl/__init__.py
|
140
|
# make this a package
|
caorun0728/shadowsocks
|
refs/heads/master
|
shadowsocks/asyncdns.py
|
2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import struct
import re
import logging
if __name__ == '__main__':
import sys
import inspect
file_path = os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe())))
sys.path.insert(0, os.path.join(file_path, '../'))
from shadowsocks import common, lru_cache, eventloop, shell
CACHE_SWEEP_INTERVAL = 30
VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d_-]{1,63}(?<!-)$", re.IGNORECASE)
common.patch_socket()
# rfc1035
# format
# +---------------------+
# | Header |
# +---------------------+
# | Question | the question for the name server
# +---------------------+
# | Answer | RRs answering the question
# +---------------------+
# | Authority | RRs pointing toward an authority
# +---------------------+
# | Additional | RRs holding additional information
# +---------------------+
#
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
QTYPE_ANY = 255
QTYPE_A = 1
QTYPE_AAAA = 28
QTYPE_CNAME = 5
QTYPE_NS = 2
QCLASS_IN = 1
def detect_ipv6_support():
    if 'has_ipv6' in dir(socket):
        try:
            s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
            s.connect(('::1', 0))
            s.close()
            print('IPv6 supported')
            return True
        except Exception:
            pass
    print('IPv6 not supported')
    return False
IPV6_CONNECTION_SUPPORT = detect_ipv6_support()
def build_address(address):
address = address.strip(b'.')
labels = address.split(b'.')
results = []
for label in labels:
l = len(label)
if l > 63:
return None
results.append(common.chr(l))
results.append(label)
results.append(b'\0')
return b''.join(results)
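# Illustrative example: build_address(b'example.com') -> b'\x07example\x03com\x00';
# the function returns None if any label exceeds the 63-octet RFC 1035 limit.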
def build_request(address, qtype):
request_id = os.urandom(2)
header = struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0)
addr = build_address(address)
qtype_qclass = struct.pack('!HH', qtype, QCLASS_IN)
return request_id + header + addr + qtype_qclass
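# Illustrative example: build_request(b'example.com', QTYPE_A) yields a random
# 2-byte ID, the fixed header b'\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00'
# (RD=1, QDCOUNT=1), the encoded name, and QTYPE/QCLASS b'\x00\x01\x00\x01'.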
def parse_ip(addrtype, data, length, offset):
if addrtype == QTYPE_A:
return socket.inet_ntop(socket.AF_INET, data[offset:offset + length])
elif addrtype == QTYPE_AAAA:
return socket.inet_ntop(socket.AF_INET6, data[offset:offset + length])
elif addrtype in [QTYPE_CNAME, QTYPE_NS]:
return parse_name(data, offset)[1]
else:
return data[offset:offset + length]
def parse_name(data, offset):
p = offset
labels = []
l = common.ord(data[p])
while l > 0:
if (l & (128 + 64)) == (128 + 64):
# pointer
pointer = struct.unpack('!H', data[p:p + 2])[0]
pointer &= 0x3FFF
r = parse_name(data, pointer)
labels.append(r[1])
p += 2
# pointer is the end
return p - offset, b'.'.join(labels)
else:
labels.append(data[p + 1:p + 1 + l])
p += 1 + l
l = common.ord(data[p])
return p - offset + 1, b'.'.join(labels)
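# Illustrative example: parse_name(b'\x03www\x07example\x03com\x00', 0)
# -> (17, b'www.example.com'); compression pointers are followed recursively.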
# rfc1035
# record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def parse_record(data, offset, question=False):
nlen, name = parse_name(data, offset)
if not question:
record_type, record_class, record_ttl, record_rdlength = struct.unpack(
'!HHiH', data[offset + nlen:offset + nlen + 10]
)
ip = parse_ip(record_type, data, record_rdlength, offset + nlen + 10)
return nlen + 10 + record_rdlength, \
(name, ip, record_type, record_class, record_ttl)
else:
record_type, record_class = struct.unpack(
'!HH', data[offset + nlen:offset + nlen + 4]
)
return nlen + 4, (name, None, record_type, record_class, None, None)
def parse_header(data):
if len(data) >= 12:
header = struct.unpack('!HBBHHHH', data[:12])
res_id = header[0]
res_qr = header[1] & 128
res_tc = header[1] & 2
res_ra = header[2] & 128
res_rcode = header[2] & 15
# assert res_tc == 0
# assert res_rcode in [0, 3]
res_qdcount = header[3]
res_ancount = header[4]
res_nscount = header[5]
res_arcount = header[6]
return (res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount,
res_ancount, res_nscount, res_arcount)
return None
def parse_response(data):
try:
if len(data) >= 12:
header = parse_header(data)
if not header:
return None
res_id, res_qr, res_tc, res_ra, res_rcode, res_qdcount, \
res_ancount, res_nscount, res_arcount = header
qds = []
ans = []
offset = 12
for i in range(0, res_qdcount):
l, r = parse_record(data, offset, True)
offset += l
if r:
qds.append(r)
for i in range(0, res_ancount):
l, r = parse_record(data, offset)
offset += l
if r:
ans.append(r)
for i in range(0, res_nscount):
l, r = parse_record(data, offset)
offset += l
for i in range(0, res_arcount):
l, r = parse_record(data, offset)
offset += l
response = DNSResponse()
if qds:
response.hostname = qds[0][0]
for an in qds:
response.questions.append((an[1], an[2], an[3]))
for an in ans:
response.answers.append((an[1], an[2], an[3]))
return response
except Exception as e:
shell.print_exception(e)
return None
def is_valid_hostname(hostname):
if len(hostname) > 255:
return False
    # Use endswith() so this also works on Python 3, where indexing bytes yields an int.
    if hostname.endswith(b'.'):
        hostname = hostname[:-1]
return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
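# e.g. is_valid_hostname(b'example.com') is True: total length is capped at
# 255 octets and each dot-separated label at 63, per RFC 1035.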
class DNSResponse(object):
def __init__(self):
self.hostname = None
self.questions = [] # each: (addr, type, class)
self.answers = [] # each: (addr, type, class)
def __str__(self):
return '%s: %s' % (self.hostname, str(self.answers))
STATUS_IPV4 = 0
STATUS_IPV6 = 1
class DNSResolver(object):
def __init__(self):
self._loop = None
self._hosts = {}
self._hostname_status = {}
self._hostname_to_cb = {}
self._cb_to_hostname = {}
self._cache = lru_cache.LRUCache(timeout=300)
self._sock = None
self._servers = None
self._parse_resolv()
self._parse_hosts()
# TODO monitor hosts change and reload hosts
# TODO parse /etc/gai.conf and follow its rules
def _parse_resolv(self):
self._servers = []
try:
with open('dns.conf', 'rb') as f:
content = f.readlines()
for line in content:
line = line.strip()
if line:
parts = line.split(b' ', 1)
if len(parts) >= 2:
server = parts[0]
port = int(parts[1])
else:
server = parts[0]
port = 53
if common.is_ip(server) == socket.AF_INET:
                            if not isinstance(server, str):
                                server = server.decode('utf8')
self._servers.append((server, port))
except IOError:
pass
if not self._servers:
try:
with open('/etc/resolv.conf', 'rb') as f:
content = f.readlines()
for line in content:
line = line.strip()
if line:
if line.startswith(b'nameserver'):
parts = line.split()
if len(parts) >= 2:
server = parts[1]
if common.is_ip(server) == socket.AF_INET:
                                        if not isinstance(server, str):
                                            server = server.decode('utf8')
self._servers.append((server, 53))
except IOError:
pass
if not self._servers:
self._servers = [('8.8.4.4', 53), ('8.8.8.8', 53)]
logging.info('dns server: %s' % (self._servers,))
def _parse_hosts(self):
etc_path = '/etc/hosts'
if 'WINDIR' in os.environ:
etc_path = os.environ['WINDIR'] + '/system32/drivers/etc/hosts'
try:
with open(etc_path, 'rb') as f:
for line in f.readlines():
line = line.strip()
if b"#" in line:
line = line[:line.find(b'#')]
parts = line.split()
if len(parts) >= 2:
ip = parts[0]
if common.is_ip(ip):
for i in range(1, len(parts)):
hostname = parts[i]
if hostname:
self._hosts[hostname] = ip
except IOError:
self._hosts['localhost'] = '127.0.0.1'
def add_to_loop(self, loop):
if self._loop:
            raise Exception('already added to loop')
self._loop = loop
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
loop.add(self._sock, eventloop.POLL_IN, self)
loop.add_periodic(self.handle_periodic)
def _call_callback(self, hostname, ip, error=None):
callbacks = self._hostname_to_cb.get(hostname, [])
for callback in callbacks:
if callback in self._cb_to_hostname:
del self._cb_to_hostname[callback]
if ip or error:
callback((hostname, ip), error)
else:
callback((hostname, None),
Exception('unable to parse hostname %s' % hostname))
if hostname in self._hostname_to_cb:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _handle_data(self, data):
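        # Falls back between record types: if the preferred AAAA/A query yields
        # no usable address, retry once with the other type before failing.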
response = parse_response(data)
if response and response.hostname:
hostname = response.hostname
ip = None
for answer in response.answers:
if answer[1] in (QTYPE_A, QTYPE_AAAA) and \
answer[2] == QCLASS_IN:
ip = answer[0]
break
if IPV6_CONNECTION_SUPPORT:
if not ip and self._hostname_status.get(hostname, STATUS_IPV4) \
== STATUS_IPV6:
self._hostname_status[hostname] = STATUS_IPV4
self._send_req(hostname, QTYPE_A)
else:
if ip:
self._cache[hostname] = ip
self._call_callback(hostname, ip)
elif self._hostname_status.get(hostname, None) == STATUS_IPV4:
for question in response.questions:
if question[1] == QTYPE_A:
self._call_callback(hostname, None)
break
else:
if not ip and self._hostname_status.get(hostname, STATUS_IPV6) \
== STATUS_IPV4:
self._hostname_status[hostname] = STATUS_IPV6
self._send_req(hostname, QTYPE_AAAA)
else:
if ip:
self._cache[hostname] = ip
self._call_callback(hostname, ip)
elif self._hostname_status.get(hostname, None) == STATUS_IPV6:
for question in response.questions:
if question[1] == QTYPE_AAAA:
self._call_callback(hostname, None)
break
def handle_event(self, sock, fd, event):
if sock != self._sock:
return
if event & eventloop.POLL_ERR:
logging.error('dns socket err')
self._loop.remove(self._sock)
self._sock.close()
# TODO when dns server is IPv6
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.SOL_UDP)
self._sock.setblocking(False)
self._loop.add(self._sock, eventloop.POLL_IN, self)
else:
data, addr = sock.recvfrom(1024)
if addr not in self._servers:
                logging.warning('received a packet from an unexpected DNS server')
return
self._handle_data(data)
def handle_periodic(self):
self._cache.sweep()
def remove_callback(self, callback):
hostname = self._cb_to_hostname.get(callback)
if hostname:
del self._cb_to_hostname[callback]
arr = self._hostname_to_cb.get(hostname, None)
if arr:
arr.remove(callback)
if not arr:
del self._hostname_to_cb[hostname]
if hostname in self._hostname_status:
del self._hostname_status[hostname]
def _send_req(self, hostname, qtype):
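        # The query is sent to every configured server; the first valid response
        # wins, since _call_callback() removes the pending callbacks afterwards.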
req = build_request(hostname, qtype)
for server in self._servers:
logging.debug('resolving %s with type %d using server %s',
hostname, qtype, server)
self._sock.sendto(req, server)
def resolve(self, hostname, callback):
        if not isinstance(hostname, bytes):
            hostname = hostname.encode('utf8')
if not hostname:
callback(None, Exception('empty hostname'))
elif common.is_ip(hostname):
callback((hostname, hostname), None)
elif hostname in self._hosts:
logging.debug('hit hosts: %s', hostname)
ip = self._hosts[hostname]
callback((hostname, ip), None)
elif hostname in self._cache:
logging.debug('hit cache: %s', hostname)
ip = self._cache[hostname]
callback((hostname, ip), None)
else:
if not is_valid_hostname(hostname):
callback(None, Exception('invalid hostname: %s' % hostname))
return
            if False:  # Disabled code path: blocking getaddrinfo() resolution.
addrs = socket.getaddrinfo(hostname, 0, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if addrs:
af, socktype, proto, canonname, sa = addrs[0]
logging.debug('DNS resolve %s %s' % (hostname, sa[0]) )
self._cache[hostname] = sa[0]
callback((hostname, sa[0]), None)
return
arr = self._hostname_to_cb.get(hostname, None)
if not arr:
if IPV6_CONNECTION_SUPPORT:
self._hostname_status[hostname] = STATUS_IPV6
self._send_req(hostname, QTYPE_AAAA)
else:
self._hostname_status[hostname] = STATUS_IPV4
self._send_req(hostname, QTYPE_A)
self._hostname_to_cb[hostname] = [callback]
self._cb_to_hostname[callback] = hostname
else:
arr.append(callback)
# TODO send again only if waited too long
if IPV6_CONNECTION_SUPPORT:
self._send_req(hostname, QTYPE_AAAA)
else:
self._send_req(hostname, QTYPE_A)
def close(self):
if self._sock:
if self._loop:
self._loop.remove_periodic(self.handle_periodic)
self._loop.remove(self._sock)
self._sock.close()
self._sock = None
def test():
dns_resolver = DNSResolver()
loop = eventloop.EventLoop()
dns_resolver.add_to_loop(loop)
global counter
counter = 0
def make_callback():
global counter
def callback(result, error):
global counter
# TODO: what can we assert?
print(result, error)
counter += 1
if counter == 9:
dns_resolver.close()
loop.stop()
a_callback = callback
return a_callback
assert(make_callback() != make_callback())
dns_resolver.resolve(b'google.com', make_callback())
dns_resolver.resolve('google.com', make_callback())
dns_resolver.resolve('example.com', make_callback())
dns_resolver.resolve('ipv6.google.com', make_callback())
dns_resolver.resolve('www.facebook.com', make_callback())
dns_resolver.resolve('ns2.google.com', make_callback())
dns_resolver.resolve('invalid.@!#$%^&$@.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'ooooooooooooooooooooooooooooooooooooooooooooooooooo'
'long.hostname', make_callback())
loop.run()
if __name__ == '__main__':
test()
|
beezee/GAE-Django-base-app
|
refs/heads/master
|
django/contrib/messages/models.py
|
634
|
# Models module required so tests are discovered.
|
midgetspy/Sick-Beard
|
refs/heads/development
|
sickbeard/nzbSplitter.py
|
8
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import xml.etree.cElementTree as etree
import xml.etree
import re
from name_parser.parser import NameParser, InvalidNameException
from sickbeard import logger, classes, helpers
from sickbeard.common import Quality
from sickbeard import encodingKludge as ek
from sickbeard.exceptions import ex
def getSeasonNZBs(name, urlData, season):
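    # Splits a season NZB's <file> elements into per-episode groups, returning
    # ({episode_name: [file elements]}, xmlns), or ({}, '') on a parse failure.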
try:
showXML = etree.ElementTree(etree.XML(urlData))
except SyntaxError:
logger.log(u"Unable to parse the XML of " + name + ", not splitting it", logger.ERROR)
return ({}, '')
filename = name.replace(".nzb", "")
nzbElement = showXML.getroot()
    regex = r'([\w\._\ ]+)[\. ]S%02d[\. ]([\w\._\-\ ]+)[\- ]([\w_\-\ ]+?)' % season
sceneNameMatch = re.search(regex, filename, re.I)
if sceneNameMatch:
showName, qualitySection, groupName = sceneNameMatch.groups() # @UnusedVariable
else:
        logger.log(u"Unable to parse " + name + " into a scene name. If it's a valid one, log a bug.", logger.ERROR)
return ({}, '')
    regex = '(' + re.escape(showName) + r'\.S%02d(?:[E0-9]+)\.[\w\._]+\-\w+' % season + ')'
regex = regex.replace(' ', '.')
epFiles = {}
xmlns = None
for curFile in nzbElement.getchildren():
        xmlnsMatch = re.match(r"\{(http:\/\/[A-Za-z0-9_\.\/]+\/nzb)\}file", curFile.tag)
if not xmlnsMatch:
continue
else:
xmlns = xmlnsMatch.group(1)
match = re.search(regex, curFile.get("subject"), re.I)
if not match:
#print curFile.get("subject"), "doesn't match", regex
continue
curEp = match.group(1)
if curEp not in epFiles:
epFiles[curEp] = [curFile]
else:
epFiles[curEp].append(curFile)
return (epFiles, xmlns)
def createNZBString(fileElements, xmlns):
rootElement = etree.Element("nzb")
if xmlns:
rootElement.set("xmlns", xmlns)
for curFile in fileElements:
rootElement.append(stripNS(curFile, xmlns))
return xml.etree.ElementTree.tostring(rootElement, 'utf-8')
def saveNZB(nzbName, nzbString):
try:
with ek.ek(open, nzbName + ".nzb", 'w') as nzb_fh:
nzb_fh.write(nzbString)
except EnvironmentError, e:
logger.log(u"Unable to save NZB: " + ex(e), logger.ERROR)
def stripNS(element, ns):
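    # Recursively strips the given XML namespace from element tags,
    # e.g. '{http://.../nzb}file' -> 'file'.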
element.tag = element.tag.replace("{" + ns + "}", "")
for curChild in element.getchildren():
stripNS(curChild, ns)
return element
def splitResult(result):
urlData = helpers.getURL(result.url)
if urlData is None:
logger.log(u"Unable to load url " + result.url + ", can't download season NZB", logger.ERROR)
return False
# parse the season ep name
try:
np = NameParser(False)
parse_result = np.parse(result.name)
except InvalidNameException:
logger.log(u"Unable to parse the filename " + result.name + " into a valid episode", logger.WARNING)
return False
# bust it up
    season = parse_result.season_number if parse_result.season_number is not None else 1
separateNZBs, xmlns = getSeasonNZBs(result.name, urlData, season)
resultList = []
for newNZB in separateNZBs:
logger.log(u"Split out " + newNZB + " from " + result.name, logger.DEBUG)
# parse the name
try:
np = NameParser(False)
parse_result = np.parse(newNZB)
except InvalidNameException:
logger.log(u"Unable to parse the filename " + newNZB + " into a valid episode", logger.WARNING)
return False
# make sure the result is sane
        if (parse_result.season_number is not None and parse_result.season_number != season) or (parse_result.season_number is None and season != 1):
logger.log(u"Found " + newNZB + " inside " + result.name + " but it doesn't seem to belong to the same season, ignoring it", logger.WARNING)
continue
elif len(parse_result.episode_numbers) == 0:
logger.log(u"Found " + newNZB + " inside " + result.name + " but it doesn't seem to be a valid episode NZB, ignoring it", logger.WARNING)
continue
wantEp = True
for epNo in parse_result.episode_numbers:
if not result.extraInfo[0].wantEpisode(season, epNo, result.quality):
logger.log(u"Ignoring result " + newNZB + " because we don't want an episode that is " + Quality.qualityStrings[result.quality], logger.DEBUG)
wantEp = False
break
if not wantEp:
continue
# get all the associated episode objects
epObjList = []
for curEp in parse_result.episode_numbers:
epObjList.append(result.extraInfo[0].getEpisode(season, curEp))
# make a result
curResult = classes.NZBDataSearchResult(epObjList)
curResult.name = newNZB
curResult.provider = result.provider
curResult.quality = result.quality
curResult.extraInfo = [createNZBString(separateNZBs[newNZB], xmlns)]
resultList.append(curResult)
return resultList
|
volatilityfoundation/volatility
|
refs/heads/master
|
volatility/plugins/mac/machine_info.py
|
58
|
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.obj as obj
import volatility.plugins.mac.common as common
class mac_machine_info(common.AbstractMacCommand):
""" Prints machine information about the sample """
def calculate(self):
common.set_plugin_members(self)
machine_info = obj.Object("machine_info", offset = self.addr_space.profile.get_symbol("_machine_info"), vm = self.addr_space)
yield machine_info
def render_text(self, outfd, data):
for machine_info in data:
info = (("Major Version:", machine_info.major_version),
("Minor Version:", machine_info.minor_version),
("Memory Size:", machine_info.max_mem),
("Max CPUs:", machine_info.max_cpus),
("Physical CPUs:", machine_info.physical_cpu),
("Logical CPUs:", machine_info.logical_cpu),
)
for i in info:
outfd.write("{0:15} {1}\n".format(i[0], i[1]))
|
nickpack/django-oscar
|
refs/heads/master
|
src/oscar/apps/dashboard/catalogue/views.py
|
22
|
from django.views import generic
from django.db.models import Q
from django.http import HttpResponseRedirect
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.shortcuts import get_object_or_404, redirect
from django.template.loader import render_to_string
from django.conf import settings
from oscar.core.loading import get_classes, get_model
from django_tables2 import SingleTableMixin
from oscar.views.generic import ObjectLookupView
(ProductForm,
ProductClassSelectForm,
ProductSearchForm,
ProductClassForm,
CategoryForm,
StockRecordFormSet,
StockAlertSearchForm,
ProductCategoryFormSet,
ProductImageFormSet,
ProductRecommendationFormSet,
ProductAttributesFormSet) \
= get_classes('dashboard.catalogue.forms',
('ProductForm',
'ProductClassSelectForm',
'ProductSearchForm',
'ProductClassForm',
'CategoryForm',
'StockRecordFormSet',
'StockAlertSearchForm',
'ProductCategoryFormSet',
'ProductImageFormSet',
'ProductRecommendationFormSet',
'ProductAttributesFormSet'))
ProductTable, CategoryTable \
= get_classes('dashboard.catalogue.tables',
('ProductTable', 'CategoryTable'))
Product = get_model('catalogue', 'Product')
Category = get_model('catalogue', 'Category')
ProductImage = get_model('catalogue', 'ProductImage')
ProductCategory = get_model('catalogue', 'ProductCategory')
ProductClass = get_model('catalogue', 'ProductClass')
StockRecord = get_model('partner', 'StockRecord')
StockAlert = get_model('partner', 'StockAlert')
Partner = get_model('partner', 'Partner')
def filter_products(queryset, user):
"""
Restrict the queryset to products the given user has access to.
A staff user is allowed to access all Products.
A non-staff user is only allowed access to a product if they are in at
least one stock record's partner user list.
"""
if user.is_staff:
return queryset
return queryset.filter(stockrecords__partner__users__pk=user.pk).distinct()
class ProductListView(SingleTableMixin, generic.TemplateView):
"""
Dashboard view of the product list.
Supports the permission-based dashboard.
"""
template_name = 'dashboard/catalogue/product_list.html'
form_class = ProductSearchForm
productclass_form_class = ProductClassSelectForm
table_class = ProductTable
context_table_name = 'products'
def get_context_data(self, **kwargs):
ctx = super(ProductListView, self).get_context_data(**kwargs)
ctx['form'] = self.form
ctx['productclass_form'] = self.productclass_form_class()
return ctx
def get_description(self, form):
if form.is_valid() and any(form.cleaned_data.values()):
return _('Product search results')
return _('Products')
def get_table(self, **kwargs):
if 'recently_edited' in self.request.GET:
kwargs.update(dict(orderable=False))
table = super(ProductListView, self).get_table(**kwargs)
table.caption = self.get_description(self.form)
return table
def get_table_pagination(self):
return dict(per_page=20)
def filter_queryset(self, queryset):
"""
Apply any filters to restrict the products that appear on the list
"""
return filter_products(queryset, self.request.user)
def get_queryset(self):
"""
Build the queryset for this list
"""
queryset = Product.browsable.base_queryset()
queryset = self.filter_queryset(queryset)
queryset = self.apply_search(queryset)
return queryset
def apply_search(self, queryset):
"""
Filter the queryset and set the description according to the search
parameters given
"""
self.form = self.form_class(self.request.GET)
if not self.form.is_valid():
return queryset
data = self.form.cleaned_data
if data.get('upc'):
# Filter the queryset by upc
# If there's an exact match, return it, otherwise return results
# that contain the UPC
matches_upc = Product.objects.filter(upc=data['upc'])
qs_match = queryset.filter(
Q(id=matches_upc.values('id')) |
Q(id=matches_upc.values('parent_id')))
if qs_match.exists():
queryset = qs_match
else:
matches_upc = Product.objects.filter(upc__icontains=data['upc'])
queryset = queryset.filter(
Q(id=matches_upc.values('id')) | Q(id=matches_upc.values('parent_id')))
if data.get('title'):
queryset = queryset.filter(title__icontains=data['title'])
return queryset
class ProductCreateRedirectView(generic.RedirectView):
permanent = False
productclass_form_class = ProductClassSelectForm
def get_product_create_url(self, product_class):
""" Allow site to provide custom URL """
return reverse('dashboard:catalogue-product-create',
kwargs={'product_class_slug': product_class.slug})
def get_invalid_product_class_url(self):
messages.error(self.request, _("Please choose a product type"))
return reverse('dashboard:catalogue-product-list')
def get_redirect_url(self, **kwargs):
form = self.productclass_form_class(self.request.GET)
if form.is_valid():
product_class = form.cleaned_data['product_class']
return self.get_product_create_url(product_class)
else:
return self.get_invalid_product_class_url()
class ProductCreateUpdateView(generic.UpdateView):
"""
    Dashboard view that can both create and update products of all kinds.
It can be used in three different ways, each of them with a unique URL
pattern:
- When creating a new standalone product, this view is called with the
desired product class
- When editing an existing product, this view is called with the product's
primary key. If the product is a child product, the template considerably
reduces the available form fields.
- When creating a new child product, this view is called with the parent's
primary key.
Supports the permission-based dashboard.
"""
template_name = 'dashboard/catalogue/product_update.html'
model = Product
context_object_name = 'product'
form_class = ProductForm
category_formset = ProductCategoryFormSet
image_formset = ProductImageFormSet
recommendations_formset = ProductRecommendationFormSet
stockrecord_formset = StockRecordFormSet
def __init__(self, *args, **kwargs):
super(ProductCreateUpdateView, self).__init__(*args, **kwargs)
self.formsets = {'category_formset': self.category_formset,
'image_formset': self.image_formset,
'recommended_formset': self.recommendations_formset,
'stockrecord_formset': self.stockrecord_formset}
def dispatch(self, request, *args, **kwargs):
resp = super(ProductCreateUpdateView, self).dispatch(
request, *args, **kwargs)
return self.check_objects_or_redirect() or resp
def check_objects_or_redirect(self):
"""
Allows checking the objects fetched by get_object and redirecting
if they don't satisfy our needs.
It is used to redirect when creating a new variant and the specified
parent product can't actually be turned into a parent product.
"""
if self.creating and self.parent is not None:
is_valid, reason = self.parent.can_be_parent(give_reason=True)
if not is_valid:
messages.error(self.request, reason)
return redirect('dashboard:catalogue-product-list')
def get_queryset(self):
"""
Filter products that the user doesn't have permission to update
"""
return filter_products(Product.objects.all(), self.request.user)
def get_object(self, queryset=None):
"""
This part allows generic.UpdateView to handle creating products as
well. The only distinction between an UpdateView and a CreateView
is that self.object is None. We emulate this behavior.
This method is also responsible for setting self.product_class and
self.parent.
"""
self.creating = 'pk' not in self.kwargs
if self.creating:
# Specifying a parent product is only done when creating a child
# product.
parent_pk = self.kwargs.get('parent_pk')
if parent_pk is None:
self.parent = None
# A product class needs to be specified when creating a
# standalone product.
product_class_slug = self.kwargs.get('product_class_slug')
self.product_class = get_object_or_404(
ProductClass, slug=product_class_slug)
else:
self.parent = get_object_or_404(Product, pk=parent_pk)
self.product_class = self.parent.product_class
return None # success
else:
product = super(ProductCreateUpdateView, self).get_object(queryset)
self.product_class = product.get_product_class()
self.parent = product.parent
return product
def get_context_data(self, **kwargs):
ctx = super(ProductCreateUpdateView, self).get_context_data(**kwargs)
ctx['product_class'] = self.product_class
ctx['parent'] = self.parent
ctx['title'] = self.get_page_title()
for ctx_name, formset_class in self.formsets.items():
if ctx_name not in ctx:
ctx[ctx_name] = formset_class(self.product_class,
self.request.user,
instance=self.object)
return ctx
def get_page_title(self):
if self.creating:
if self.parent is None:
return _('Create new %(product_class)s product') % {
'product_class': self.product_class.name}
else:
return _('Create new variant of %(parent_product)s') % {
'parent_product': self.parent.title}
else:
if self.object.title or not self.parent:
return self.object.title
else:
return _('Editing variant of %(parent_product)s') % {
'parent_product': self.parent.title}
def get_form_kwargs(self):
kwargs = super(ProductCreateUpdateView, self).get_form_kwargs()
kwargs['product_class'] = self.product_class
kwargs['parent'] = self.parent
return kwargs
def process_all_forms(self, form):
"""
Short-circuits the regular logic so there is a single place for our
logic that checks all forms
"""
# Need to create the product here because the inline forms need it
# can't use commit=False because ProductForm does not support it
if self.creating and form.is_valid():
self.object = form.save()
formsets = {}
for ctx_name, formset_class in self.formsets.items():
formsets[ctx_name] = formset_class(self.product_class,
self.request.user,
self.request.POST,
self.request.FILES,
instance=self.object)
is_valid = form.is_valid() and all([formset.is_valid()
for formset in formsets.values()])
cross_form_validation_result = self.clean(form, formsets)
if is_valid and cross_form_validation_result:
return self.forms_valid(form, formsets)
else:
return self.forms_invalid(form, formsets)
# form_valid and form_invalid are called depending on the validation result
# of just the product form; they redisplay the form or return a redirect to
# the success URL, respectively. In both cases we need to check our formsets
# as well, so both methods do the same thing: process_all_forms then calls
# forms_valid or forms_invalid, which do the redisplay or redirect.
form_valid = form_invalid = process_all_forms
def clean(self, form, formsets):
"""
Perform any cross-form/formset validation. If there are errors, attach
errors to a form or a form field so that they are displayed to the user
and return False. If everything is valid, return True. This method will
be called regardless of whether the individual forms are valid.
"""
return True
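# A hedged sketch of a cross-form check a subclass might add here; the
# ``is_discountable`` field is an illustrative assumption, while
# ``stockrecord_formset`` matches the formsets dict above:
#
#   def clean(self, form, formsets):
#       if form.cleaned_data.get('is_discountable') \
#               and not formsets['stockrecord_formset'].forms:
#           form.add_error(None, "Discountable products need a stock record")
#           return False
#       return True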
def forms_valid(self, form, formsets):
"""
Save all changes and redirect to the success URL.
When creating the first child product, this method also sets the new
parent's structure accordingly.
"""
if self.creating:
self.handle_adding_child(self.parent)
else:
# when updating, the product still needs saving here; a just-created
# product was already saved in process_all_forms()
self.object = form.save()
# Save formsets
for formset in formsets.values():
formset.save()
return HttpResponseRedirect(self.get_success_url())
def handle_adding_child(self, parent):
"""
When creating the first child product, the parent product needs
to be implicitly converted from a standalone product to a
parent product.
"""
# ProductForm eagerly sets the future parent's structure to PARENT to
# pass validation, but it's not persisted in the database. We ensure
# it's persisted by calling save()
if parent is not None:
parent.structure = Product.PARENT
parent.save()
def forms_invalid(self, form, formsets):
# delete the temporary product again
if self.creating and self.object and self.object.pk is not None:
self.object.delete()
self.object = None
messages.error(self.request,
_("Your submitted data was not valid - please "
"correct the errors below"))
ctx = self.get_context_data(form=form, **formsets)
return self.render_to_response(ctx)
def get_url_with_querystring(self, url):
url_parts = [url]
if self.request.GET.urlencode():
url_parts += [self.request.GET.urlencode()]
return "?".join(url_parts)
def get_success_url(self):
"""
Renders a success message and redirects depending on the button:
- Standard case is pressing "Save"; redirects to the product list
- When "Save and continue" is pressed, we stay on the same page
- When "Create (another) child product" is pressed, it redirects
to a new product creation page
"""
msg = render_to_string(
'dashboard/catalogue/messages/product_saved.html',
{
'product': self.object,
'creating': self.creating,
'request': self.request
})
messages.success(self.request, msg, extra_tags="safe noicon")
action = self.request.POST.get('action')
if action == 'continue':
url = reverse(
'dashboard:catalogue-product', kwargs={"pk": self.object.id})
elif action == 'create-another-child' and self.parent:
url = reverse(
'dashboard:catalogue-product-create-child',
kwargs={'parent_pk': self.parent.pk})
elif action == 'create-child':
url = reverse(
'dashboard:catalogue-product-create-child',
kwargs={'parent_pk': self.object.pk})
else:
url = reverse('dashboard:catalogue-product-list')
return self.get_url_with_querystring(url)
class ProductDeleteView(generic.DeleteView):
"""
Dashboard view to delete a product. Has special logic for deleting the
last child product.
Supports the permission-based dashboard.
"""
template_name = 'dashboard/catalogue/product_delete.html'
model = Product
context_object_name = 'product'
def get_queryset(self):
"""
Filter products that the user doesn't have permission to update
"""
return filter_products(Product.objects.all(), self.request.user)
def get_context_data(self, **kwargs):
ctx = super(ProductDeleteView, self).get_context_data(**kwargs)
if self.object.is_child:
ctx['title'] = _("Delete product variant?")
else:
ctx['title'] = _("Delete product?")
return ctx
def delete(self, request, *args, **kwargs):
# We override the core delete method and don't call super in order to
# apply more sophisticated logic around handling child products.
# Calling super makes it difficult to test if the product being deleted
# is the last child.
self.object = self.get_object()
# Before performing the delete, record whether this product is the last
# child.
is_last_child = False
if self.object.is_child:
parent = self.object.parent
is_last_child = parent.children.count() == 1
# This also deletes any child products.
self.object.delete()
# If the product being deleted is the last child, then pass control
# to a method that can adjust the parent itself.
if is_last_child:
self.handle_deleting_last_child(parent)
return HttpResponseRedirect(self.get_success_url())
def handle_deleting_last_child(self, parent):
# If the last child product is deleted, this view defaults to turning
# the parent product into a standalone product. While this is
# appropriate for many scenarios, it is intentionally easily
# overridable and not automatically done in e.g. a Product's delete()
# method as it is more a UX helper than hard business logic.
parent.structure = parent.STANDALONE
parent.save()
def get_success_url(self):
"""
When deleting child products, this view redirects to editing the
parent product. When deleting any other product, it redirects to the
product list view.
"""
if self.object.is_child:
msg = _("Deleted product variant '%s'") % self.object.get_title()
messages.success(self.request, msg)
return reverse(
'dashboard:catalogue-product',
kwargs={'pk': self.object.parent_id})
else:
msg = _("Deleted product '%s'") % self.object.title
messages.success(self.request, msg)
return reverse('dashboard:catalogue-product-list')
class StockAlertListView(generic.ListView):
template_name = 'dashboard/catalogue/stockalert_list.html'
model = StockAlert
context_object_name = 'alerts'
paginate_by = settings.OSCAR_STOCK_ALERTS_PER_PAGE
def get_context_data(self, **kwargs):
ctx = super(StockAlertListView, self).get_context_data(**kwargs)
ctx['form'] = self.form
ctx['description'] = self.description
return ctx
def get_queryset(self):
if 'status' in self.request.GET:
self.form = StockAlertSearchForm(self.request.GET)
if self.form.is_valid():
status = self.form.cleaned_data['status']
self.description = _('Alerts with status "%s"') % status
return self.model.objects.filter(status=status)
else:
self.description = _('All alerts')
self.form = StockAlertSearchForm()
return self.model.objects.all()
class CategoryListView(SingleTableMixin, generic.TemplateView):
template_name = 'dashboard/catalogue/category_list.html'
table_class = CategoryTable
context_table_name = 'categories'
def get_queryset(self):
return Category.get_root_nodes()
def get_context_data(self, *args, **kwargs):
ctx = super(CategoryListView, self).get_context_data(*args, **kwargs)
ctx['child_categories'] = Category.get_root_nodes()
return ctx
class CategoryDetailListView(SingleTableMixin, generic.DetailView):
template_name = 'dashboard/catalogue/category_list.html'
model = Category
context_object_name = 'category'
table_class = CategoryTable
context_table_name = 'categories'
def get_table_data(self):
return self.object.get_children()
def get_context_data(self, *args, **kwargs):
ctx = super(CategoryDetailListView, self).get_context_data(*args,
**kwargs)
ctx['child_categories'] = self.object.get_children()
ctx['ancestors'] = self.object.get_ancestors_and_self()
return ctx
class CategoryListMixin(object):
def get_success_url(self):
parent = self.object.get_parent()
if parent is None:
return reverse("dashboard:catalogue-category-list")
else:
return reverse("dashboard:catalogue-category-detail-list",
args=(parent.pk,))
class CategoryCreateView(CategoryListMixin, generic.CreateView):
template_name = 'dashboard/catalogue/category_form.html'
model = Category
form_class = CategoryForm
def get_context_data(self, **kwargs):
ctx = super(CategoryCreateView, self).get_context_data(**kwargs)
ctx['title'] = _("Add a new category")
return ctx
def get_success_url(self):
messages.info(self.request, _("Category created successfully"))
return super(CategoryCreateView, self).get_success_url()
def get_initial(self):
# set child category if set in the URL kwargs
initial = super(CategoryCreateView, self).get_initial()
if 'parent' in self.kwargs:
initial['_ref_node_id'] = self.kwargs['parent']
return initial
class CategoryUpdateView(CategoryListMixin, generic.UpdateView):
template_name = 'dashboard/catalogue/category_form.html'
model = Category
form_class = CategoryForm
def get_context_data(self, **kwargs):
ctx = super(CategoryUpdateView, self).get_context_data(**kwargs)
ctx['title'] = _("Update category '%s'") % self.object.name
return ctx
def get_success_url(self):
messages.info(self.request, _("Category updated successfully"))
return super(CategoryUpdateView, self).get_success_url()
class CategoryDeleteView(CategoryListMixin, generic.DeleteView):
template_name = 'dashboard/catalogue/category_delete.html'
model = Category
def get_context_data(self, *args, **kwargs):
ctx = super(CategoryDeleteView, self).get_context_data(*args, **kwargs)
ctx['parent'] = self.object.get_parent()
return ctx
def get_success_url(self):
messages.info(self.request, _("Category deleted successfully"))
return super(CategoryDeleteView, self).get_success_url()
class ProductLookupView(ObjectLookupView):
model = Product
def get_queryset(self):
return self.model.browsable.all()
def lookup_filter(self, qs, term):
return qs.filter(Q(title__icontains=term)
| Q(parent__title__icontains=term))
class ProductClassCreateUpdateView(generic.UpdateView):
template_name = 'dashboard/catalogue/product_class_form.html'
model = ProductClass
form_class = ProductClassForm
product_attributes_formset = ProductAttributesFormSet
def process_all_forms(self, form):
"""
This validates both the ProductClass form and the
ProductClassAttributes formset at once,
making it possible to display all their errors together.
"""
if self.creating and form.is_valid():
# the object will be needed by the product_attributes_formset
self.object = form.save(commit=False)
attributes_formset = self.product_attributes_formset(
self.request.POST, self.request.FILES, instance=self.object)
is_valid = form.is_valid() and attributes_formset.is_valid()
if is_valid:
return self.forms_valid(form, attributes_formset)
else:
return self.forms_invalid(form, attributes_formset)
def forms_valid(self, form, attributes_formset):
form.save()
attributes_formset.save()
return HttpResponseRedirect(self.get_success_url())
def forms_invalid(self, form, attributes_formset):
messages.error(self.request,
_("Your submitted data was not valid - please "
"correct the errors below"
))
ctx = self.get_context_data(form=form,
attributes_formset=attributes_formset)
return self.render_to_response(ctx)
form_valid = form_invalid = process_all_forms
def get_context_data(self, *args, **kwargs):
ctx = super(ProductClassCreateUpdateView, self).get_context_data(
*args, **kwargs)
if "attributes_formset" not in ctx:
ctx["attributes_formset"] = self.product_attributes_formset(
instance=self.object)
ctx["title"] = self.get_title()
return ctx
class ProductClassCreateView(ProductClassCreateUpdateView):
creating = True
def get_object(self):
return None
def get_title(self):
return _("Add a new product type")
def get_success_url(self):
messages.info(self.request, _("Product type created successfully"))
return reverse("dashboard:catalogue-class-list")
class ProductClassUpdateView(ProductClassCreateUpdateView):
creating = False
def get_title(self):
return _("Update product type '%s'") % self.object.name
def get_success_url(self):
messages.info(self.request, _("Product type updated successfully"))
return reverse("dashboard:catalogue-class-list")
def get_object(self):
product_class = get_object_or_404(ProductClass, pk=self.kwargs['pk'])
return product_class
class ProductClassListView(generic.ListView):
template_name = 'dashboard/catalogue/product_class_list.html'
context_object_name = 'classes'
model = ProductClass
def get_context_data(self, *args, **kwargs):
ctx = super(ProductClassListView, self).get_context_data(*args,
**kwargs)
ctx['title'] = _("Product Types")
return ctx
class ProductClassDeleteView(generic.DeleteView):
template_name = 'dashboard/catalogue/product_class_delete.html'
model = ProductClass
form_class = ProductClassForm
def get_context_data(self, *args, **kwargs):
ctx = super(ProductClassDeleteView, self).get_context_data(*args,
**kwargs)
ctx['title'] = _("Delete product type '%s'") % self.object.name
product_count = self.object.products.count()
if product_count > 0:
ctx['disallow'] = True
ctx['title'] = _("Unable to delete '%s'") % self.object.name
messages.error(self.request,
_("%i products are still assigned to this type") %
product_count)
return ctx
def get_success_url(self):
messages.info(self.request, _("Product type deleted successfully"))
return reverse("dashboard:catalogue-class-list")
|
vatsala/python_koans
|
refs/heads/master
|
python2/libs/colorama/ansi.py
|
134
|
'''
This module generates ANSI character codes for printing colors to terminals.
See: http://en.wikipedia.org/wiki/ANSI_escape_code
'''
CSI = '\033['
def code_to_chars(code):
return CSI + str(code) + 'm'
class AnsiCodes(object):
def __init__(self, codes):
for name in dir(codes):
if not name.startswith('_'):
value = getattr(codes, name)
setattr(self, name, code_to_chars(value))
class AnsiFore:
BLACK = 30
RED = 31
GREEN = 32
YELLOW = 33
BLUE = 34
MAGENTA = 35
CYAN = 36
WHITE = 37
RESET = 39
class AnsiBack:
BLACK = 40
RED = 41
GREEN = 42
YELLOW = 43
BLUE = 44
MAGENTA = 45
CYAN = 46
WHITE = 47
RESET = 49
class AnsiStyle:
BRIGHT = 1
DIM = 2
NORMAL = 22
RESET_ALL = 0
Fore = AnsiCodes(AnsiFore)
Back = AnsiCodes(AnsiBack)
Style = AnsiCodes(AnsiStyle)
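# Minimal usage sketch (assumes an ANSI-capable terminal; Python 2 print
# syntax to match this module):
#   print Fore.RED + 'error' + Fore.RESET
#   print Style.BRIGHT + 'loud' + Style.RESET_ALL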
|
manderson23/NewsBlur
|
refs/heads/master
|
apps/rss_feeds/migrations/0010_stories_per_month.py
|
18
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'StoriesPerMonth'
db.create_table('rss_feeds_storiespermonth', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('feed', self.gf('django.db.models.fields.related.ForeignKey')(related_name='stories_per_month', to=orm['rss_feeds.Feed'])),
('year', self.gf('django.db.models.fields.IntegerField')()),
('month', self.gf('django.db.models.fields.IntegerField')()),
('story_count', self.gf('django.db.models.fields.IntegerField')()),
('beginning_of_month', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('rss_feeds', ['StoriesPerMonth'])
# Renaming field 'Feed.stories_per_month'
db.rename_column('feeds', 'stories_per_month', 'stories_last_month')
# Adding field 'Feed.average_stories_per_month'
db.add_column('feeds', 'average_stories_per_month', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Feed.stories_last_year'
db.add_column('feeds', 'stories_last_year', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True, blank=True), keep_default=False)
# Changing field 'Feed.feed_link'
db.alter_column('feeds', 'feed_link', self.gf('django.db.models.fields.URLField')(max_length=1000, null=True, blank=True))
# Changing field 'Story.story_tags'
db.alter_column('stories', 'story_tags', self.gf('django.db.models.fields.CharField')(max_length=2000, null=True, blank=True))
# Adding unique constraint on 'Story', fields ['story_feed', 'story_guid_hash']
# db.create_unique('stories', ['story_feed_id', 'story_guid_hash'])
def backwards(self, orm):
# Deleting model 'StoriesPerMonth'
db.delete_table('rss_feeds_storiespermonth')
# Adding field 'Feed.stories_per_month'
db.add_column('feeds', 'stories_per_month', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Renaming field 'Feed.stories_last_month'
db.rename_column('feeds', 'stories_last_month', 'stories_per_month')
# Deleting field 'Feed.average_stories_per_month'
db.delete_column('feeds', 'average_stories_per_month')
# Deleting field 'Feed.stories_last_year'
db.delete_column('feeds', 'stories_last_year')
# Changing field 'Feed.feed_link'
db.alter_column('feeds', 'feed_link', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True))
# Changing field 'Story.story_tags'
db.alter_column('stories', 'story_tags', self.gf('django.db.models.fields.CharField')(max_length=2000))
# Removing unique constraint on 'Story', fields ['story_feed', 'story_guid_hash']
db.delete_unique('stories', ['story_feed_id', 'story_guid_hash'])
models = {
'rss_feeds.feed': {
'Meta': {'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': '0', 'auto_now': 'True', 'blank': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '15'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'stories_last_year': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedfetchhistory': {
'Meta': {'object_name': 'FeedFetchHistory'},
'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feed_fetch_history'", 'to': "orm['rss_feeds.Feed']"}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'status_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedpage': {
'Meta': {'object_name': 'FeedPage'},
'feed': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'feed_page'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_data': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedupdatehistory': {
'Meta': {'object_name': 'FeedUpdateHistory'},
'average_per_feed': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number_of_feeds': ('django.db.models.fields.IntegerField', [], {}),
'seconds_taken': ('django.db.models.fields.IntegerField', [], {})
},
'rss_feeds.feedxml': {
'Meta': {'object_name': 'FeedXML'},
'feed': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'feed_xml'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rss_xml': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.pagefetchhistory': {
'Meta': {'object_name': 'PageFetchHistory'},
'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'page_fetch_history'", 'to': "orm['rss_feeds.Feed']"}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'status_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'rss_feeds.storiespermonth': {
'Meta': {'object_name': 'StoriesPerMonth'},
'beginning_of_month': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stories_per_month'", 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'month': ('django.db.models.fields.IntegerField', [], {}),
'story_count': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'rss_feeds.story': {
'Meta': {'unique_together': "(('story_feed', 'story_guid_hash'),)", 'object_name': 'Story', 'db_table': "'stories'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'story_author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.StoryAuthor']"}),
'story_author_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'story_content': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_content_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'story_date': ('django.db.models.fields.DateTimeField', [], {}),
'story_feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stories'", 'to': "orm['rss_feeds.Feed']"}),
'story_guid': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_guid_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'story_original_content': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_past_trim_date': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'story_permalink': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_tags': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'story_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['rss_feeds.Tag']", 'symmetrical': 'False'})
},
'rss_feeds.storyauthor': {
'Meta': {'object_name': 'StoryAuthor'},
'author_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.tag': {
'Meta': {'object_name': 'Tag'},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['rss_feeds']
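# Hedged usage note: with South installed, this migration is applied with
#   ./manage.py migrate rss_feeds 0010_stories_per_month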
|
kionetworks/openstack-dashboard-havana
|
refs/heads/master
|
openstack_dashboard/dashboards/project/networks/subnets/tabs.py
|
6
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
class OverviewTab(tabs.Tab):
name = _("Overview")
slug = "overview"
template_name = "project/networks/subnets/_detail_overview.html"
def get_context_data(self, request):
subnet_id = self.tab_group.kwargs['subnet_id']
try:
subnet = api.neutron.subnet_get(self.request, subnet_id)
except Exception:
redirect = reverse('horizon:project:networks:index')
msg = _('Unable to retrieve subnet details.')
exceptions.handle(request, msg, redirect=redirect)
return {'subnet': subnet}
class SubnetDetailTabs(tabs.TabGroup):
slug = "subnet_details"
tabs = (OverviewTab,)
|
bitcrystal/p2pool-bitcrystal
|
refs/heads/master
|
wstools/__init__.py
|
293
|
#! /usr/bin/env python
"""WSDL parsing services package for Web Services for Python."""
ident = "$Id$"
import WSDLTools
import XMLname
import logging
|
OneAPI/GSMA-OneAPI
|
refs/heads/master
|
Python/response/location/ErrorInformation.py
|
2
|
from response.ServiceException import ServiceException
from response.PolicyException import PolicyException
class ErrorInformation:
"""dedicated error response for the location API"""
def __init__(self, jsondict=None):
"""Class constructor; optionally initialises the instance from a parsed JSON data block"""
self.serviceException=None
if jsondict is not None and 'serviceException' in jsondict and jsondict['serviceException'] is not None:
self.serviceException=ServiceException(jsondict['serviceException'])
self.policyException=None
if jsondict is not None and 'policyException' in jsondict and jsondict['policyException'] is not None:
self.policyException=PolicyException(jsondict['policyException'])
def getServiceException(self):
"""getter for serviceException : details of a service exception"""
return self.serviceException
def setServiceException(self,serviceException):
"""setter for serviceException : details of a service exception"""
self.serviceException=serviceException
def getPolicyException(self):
"""getter for policyException : details of a service exception"""
return self.policyException
def setPolicyException(self,policyException):
"""setter for policyException : details of a service exception"""
self.policyException=policyException
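# Hedged usage sketch; the 'requestError' wrapper key below is an
# assumption about the surrounding OneAPI response format:
#   import json
#   info = ErrorInformation(json.loads(body).get('requestError'))
#   if info.getServiceException() is not None:
#       handle(info.getServiceException())  # 'handle' is illustrative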
|
LouisPlisso/pytomo
|
refs/heads/master
|
pytomo/dns/rdtypes/ANY/TXT.py
|
2
|
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import absolute_import
from .. import txtbase
class TXT(txtbase.TXTBase):
"""TXT record"""
pass
|
EE/bestja
|
refs/heads/master
|
addons/bestja_project_files/__init__.py
|
2355
|
# -*- coding: utf-8 -*-
import models
|
idiap/rgbd
|
refs/heads/master
|
Streaming/videoStreamer.py
|
1
|
"""
Copyright (c) 2014 Idiap Research Institute, http://www.idiap.ch/
Written by Kenneth Funes <kenneth.funes@idiap.ch>
This file contains the definition of a class able of loading and recording
video files. It is mostly based on opencv and ffmpeg.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
"""
import sys
import cv2
import subprocess
from threading import Thread, Lock
import numpy as np
import time
cv = cv2.cv
class VideoStreamer():
"""
Reader and recorder class for video files.
If write is True, then the video is opened for writing. If it is False, then
it is opened for reading.
callback is a callable object, valid only when reading videos. Once an instance
of this class is created for reading a file, if callback is given, then a
separate thread will retrieve the frames. Each time a frame becomes available
the callback function will be invoked.
If callback is None and the class is used for reading videos (write=False),
then no thread will be created and the getFrame function will do the waiting
for the new frame.
All videos are assumed to be RGB and the compression is minimal. The
codec is Zlib, a lossless compressor, and ffmpeg (or avconv, as young people
call it nowadays :) ) is used as the recording backend. OpenCV is used for
reading the videos and there is no restriction regarding the codec (as long
as ffmpeg, which is the backend of OpenCV, can handle it). Of course, zlib is
among those codecs.
WARNING: LARGE VIDEOS ARE GENERATED
The variable map provides a mechanism to skip frames when reading a video.
Here map is assumed to be of size M, where M is the length of the video
in frames. map[i] indicates whether the frame i is used (True) or ignored
(False). This is intended to synchronize videos with lost frames, so it is
expected for map to be almost full of "Trues", i.e. only a few frames
are missing, otherwise this can become quite inefficient.
"""
def __init__(self, filename, callback = None, write = False, width = 100, height = 60, fps = 29.7, lossy = False, map = None, keep_CV_BGR = False):
""" Constructor """
self.filename = filename
self.keep_CV_BGR = keep_CV_BGR # OpenCV reads as BGR, whether to give back the frames like this, or as RGB (default)
self.write = write
self.callback = callback
self.frameAvailable = False
self.frame = None
self.frameIndex = -1
self.map = map
if not write:
self.streamer = cv2.VideoCapture(filename)
if not self.streamer.isOpened() and self.callback:
self.callback()  # Signalling there is a new frame; none being given is
# the way to indicate it has finished
if self.callback is not None:
self.lock = Lock()
self.reading = True
self.readThread = Thread(target=self.readLoop)
self.readThread.start()
self.width = int(self.streamer.get(cv.CV_CAP_PROP_FRAME_WIDTH ))
self.height = int(self.streamer.get(cv.CV_CAP_PROP_FRAME_HEIGHT))
self.fps = int(self.streamer.get(cv.CV_CAP_PROP_FPS ))
print 'Opening Video ', self.width,'x',self.height, ' at ', self.fps, 'fps'
self.N_frames = 0
self.updateMap(self.map)
else:
self.width = width
self.height = height
self.fps = fps
self.N_frames = -1
args = ['ffmpeg', '-f', 'rawvideo', '-r', str(fps), '-y', '-s', str(width)+'x'+str(height), '-pix_fmt', 'rgb24', '-v', '2','-i', 'pipe:0', '-an', '-vcodec']
if lossy:
args = args + ['mpeg4', '-qscale','1']
else:
args = args + ['zlib',]
args.append(filename)
print '------ STARTING TO WRITE VIDEO -------'
print args
self.streamer = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=None, stderr=None)
def updateMap(self, map = None):
self.N_frames = int(self.streamer.get(cv.CV_CAP_PROP_FRAME_COUNT ))
self.map = map
if self.map is not None:
# Transform from the index understood outside, to the actual index of the video
self.mapDirect = np.arange(0,len(self.map))
self.mapDirect = self.mapDirect[self.map]
# Transforms from the index seen in the video file, to the index understood outside (a -1 indicates the frame is not included)
self.mapInverse = np.cumsum(self.map)-1
self.mapInverse[~self.map] = -1
# The number of frames, as seen from the caller is the following
self.N_frames = len(self.mapDirect)
def jump(self, frame_idx_in):
"""
Does a jump to the requested frame. Actually to the one before, such that
the next read is the desired frame
"""
if self.callback:
self.lock.acquire()
frame_idx_in = min(frame_idx_in, self.N_frames - 1)
# This is an inefficient thing to do, but it is a safe jump, as opencv/ffmpeg
# is sometimes not very accurate when jumping to a frame, so we will go a few
# frames before what it's actually asked and then start asking for frames
# until we get to the desired position
frame_idx = frame_idx_in
if self.map is not None:
frame_idx = self.mapDirect[frame_idx_in]
cv_frametojump = frame_idx-10# max(frame_idx-10, -1)
count_tries = 0
while True:
#print 'Trying to jump to ',cv_frametojump
self.streamer.set(cv.CV_CAP_PROP_POS_FRAMES, cv_frametojump)
self.frameIndex = int(self.streamer.get(cv.CV_CAP_PROP_POS_FRAMES ) - 1)
#print 'frameIndex is ',self.frameIndex
if self.frameIndex <= frame_idx_in - 1:  # landed at or before the desired position, so stop; otherwise jump further back and retry
break
cv_frametojump -= 10
count_tries += 1
if count_tries > 5:
print 'failed to jump as current frame index is ',self.frameIndex, ' and wanted to go to ',frame_idx
raise
count_tries = 0
#print 'Before calling readFrame ' , int(self.streamer.get(cv.CV_CAP_PROP_POS_FRAMES ) - 1)
ok, frame = self.streamer.read()
#print 'After calling readFrame ' , int(self.streamer.get(cv.CV_CAP_PROP_POS_FRAMES ) - 1)
# Go to the frame before frame_idx because, whether or not we have a callback,
# the next frame read will be the desired one (frame_idx)
while self.frameIndex < frame_idx_in - 1:
self.readFrame()
count_tries += 1
if count_tries> 10*self.fps: # 10 seconds max of reading
print 'failed to jump as current frame index is ',self.frameIndex, ' and wanted to go to ',frame_idx
raise
if self.callback:
self.frameAvailable = False
self.lock.release()
def readLoop(self):
if self.callback is not None:
self.alive = True
while self.reading:
if not self.frameAvailable:
self.lock.acquire()
self.readFrame()
self.frameAvailable = True
self.callback()
self.lock.release()
else:
time.sleep(0.00001)
def stop(self):
if not self.write:
if self.callback:
self.reading = False
self.readThread.join()
self.streamer.release()
else:
self.streamer.stdin.flush()
time.sleep(1.0)
self.streamer.stdin.close()
def readFrame(self):
new_frame = None
ok, frame = self.streamer.read()
if ok:
#cv2.imwrite('/idiap/temp/kfunes/tmp/video_cv.png', frame)
if self.keep_CV_BGR:
new_frame = frame
else:
new_frame = cv2.cvtColor(frame, cv.CV_BGR2RGB)
self.frameIndex = int(self.streamer.get(cv.CV_CAP_PROP_POS_FRAMES ) - 1) # Frame just read
if self.map is not None:
while self.mapInverse[self.frameIndex] == -1:# If there are frames to ignore
print 'missing frame ',self.frameIndex
ok, frame = self.streamer.read()
if frame is not None:
new_frame = cv2.cvtColor(frame, cv.CV_BGR2RGB)
self.frameIndex = int(self.streamer.get(cv.CV_CAP_PROP_POS_FRAMES ) - 1) # Frame just read
self.frameIndex = self.mapInverse[self.frameIndex]# Translate the index into what is understood from the caller
self.frame = new_frame, self.frameIndex
def writeFrame(self, frame, header = None):
if self.write:
#self.streamer.stdin.write(frame.tostring())
if header is not None:
drawHeaderString(frame, header)
self.streamer.stdin.write(np.getbuffer(frame))
self.streamer.stdin.flush()
def getFrame(self):
"""
This function will take the available frame and give it if is working with
the callback functionality, or it will read it and give it if it isn't
"""
if not self.write:
if self.callback is None:
self.readFrame()# When there is no callback, it reads the file here
else:
self.lock.acquire()
frame = self.frame
self.frame = None
self.frameAvailable = False
if self.callback is not None:
self.lock.release()
return frame
|
sivaprakashniet/push_pull
|
refs/heads/master
|
p2p/lib/python2.7/site-packages/celery/worker/autoscale.py
|
4
|
# -*- coding: utf-8 -*-
"""
celery.worker.autoscale
~~~~~~~~~~~~~~~~~~~~~~~
This module implements the internal thread responsible
for growing and shrinking the pool according to the
current autoscale settings.
The autoscale thread is only enabled if :option:`--autoscale`
has been enabled on the command-line.
"""
from __future__ import absolute_import
import os
import threading
from time import sleep
from kombu.async.semaphore import DummyLock
from celery import bootsteps
from celery.five import monotonic
from celery.utils.log import get_logger
from celery.utils.threads import bgThread
from . import state
from .components import Pool
__all__ = ['Autoscaler', 'WorkerComponent']
logger = get_logger(__name__)
debug, info, error = logger.debug, logger.info, logger.error
AUTOSCALE_KEEPALIVE = float(os.environ.get('AUTOSCALE_KEEPALIVE', 30))
class WorkerComponent(bootsteps.StartStopStep):
label = 'Autoscaler'
conditional = True
requires = (Pool, )
def __init__(self, w, **kwargs):
self.enabled = w.autoscale
w.autoscaler = None
def create(self, w):
scaler = w.autoscaler = self.instantiate(
w.autoscaler_cls,
w.pool, w.max_concurrency, w.min_concurrency,
worker=w, mutex=DummyLock() if w.use_eventloop else None,
)
return scaler if not w.use_eventloop else None
def register_with_event_loop(self, w, hub):
w.consumer.on_task_message.add(w.autoscaler.maybe_scale)
hub.call_repeatedly(
w.autoscaler.keepalive, w.autoscaler.maybe_scale,
)
class Autoscaler(bgThread):
def __init__(self, pool, max_concurrency,
min_concurrency=0, worker=None,
keepalive=AUTOSCALE_KEEPALIVE, mutex=None):
super(Autoscaler, self).__init__()
self.pool = pool
self.mutex = mutex or threading.Lock()
self.max_concurrency = max_concurrency
self.min_concurrency = min_concurrency
self.keepalive = keepalive
self._last_action = None
self.worker = worker
assert self.keepalive, 'cannot scale down too fast.'
def body(self):
with self.mutex:
self.maybe_scale()
sleep(1.0)
def _maybe_scale(self):
procs = self.processes
cur = min(self.qty, self.max_concurrency)
if cur > procs:
self.scale_up(cur - procs)
return True
elif cur < procs:
self.scale_down((procs - cur) - self.min_concurrency)
return True
def maybe_scale(self):
if self._maybe_scale():
self.pool.maintain_pool()
def update(self, max=None, min=None):
with self.mutex:
if max is not None:
if max < self.max_concurrency:
self._shrink(self.processes - max)
self.max_concurrency = max
if min is not None:
if min > self.min_concurrency:
self._grow(min - self.min_concurrency)
self.min_concurrency = min
return self.max_concurrency, self.min_concurrency
def force_scale_up(self, n):
with self.mutex:
new = self.processes + n
if new > self.max_concurrency:
self.max_concurrency = new
self.min_concurrency += 1
self._grow(n)
def force_scale_down(self, n):
with self.mutex:
new = self.processes - n
if new < self.min_concurrency:
self.min_concurrency = max(new, 0)
self._shrink(min(n, self.processes))
def scale_up(self, n):
self._last_action = monotonic()
return self._grow(n)
def scale_down(self, n):
if n and self._last_action and (
monotonic() - self._last_action > self.keepalive):
self._last_action = monotonic()
return self._shrink(n)
def _grow(self, n):
info('Scaling up %s processes.', n)
self.pool.grow(n)
self.worker.consumer._update_prefetch_count(n)
def _shrink(self, n):
info('Scaling down %s processes.', n)
try:
self.pool.shrink(n)
except ValueError:
debug("Autoscaler won't scale down: all processes busy.")
except Exception as exc:
error('Autoscaler: scale_down: %r', exc, exc_info=True)
self.worker.consumer._update_prefetch_count(-n)
def info(self):
return {'max': self.max_concurrency,
'min': self.min_concurrency,
'current': self.processes,
'qty': self.qty}
@property
def qty(self):
return len(state.reserved_requests)
@property
def processes(self):
return self.pool.num_processes
|
nghia-huynh/gem5-stable
|
refs/heads/master
|
ext/ply/test/yacc_badrule.py
|
174
|
# -----------------------------------------------------------------------------
# yacc_badrule.py
#
# Syntax problems in the rule strings
# -----------------------------------------------------------------------------
import sys
if ".." not in sys.path: sys.path.insert(0,"..")
import ply.yacc as yacc
from calclex import tokens
# Parsing rules
precedence = (
('left','PLUS','MINUS'),
('left','TIMES','DIVIDE'),
('right','UMINUS'),
)
# dictionary of names
names = { }
def p_statement_assign(t):
'statement NAME EQUALS expression'
names[t[1]] = t[3]
def p_statement_expr(t):
'statement'
print(t[1])
def p_expression_binop(t):
'''expression : expression PLUS expression
expression MINUS expression
| expression TIMES expression
| expression DIVIDE expression'''
if t[2] == '+' : t[0] = t[1] + t[3]
elif t[2] == '-': t[0] = t[1] - t[3]
elif t[2] == '*': t[0] = t[1] * t[3]
elif t[2] == '/': t[0] = t[1] / t[3]
def p_expression_uminus(t):
'expression: MINUS expression %prec UMINUS'
t[0] = -t[2]
def p_expression_group(t):
'expression : LPAREN expression RPAREN'
t[0] = t[2]
def p_expression_number(t):
'expression : NUMBER'
t[0] = t[1]
def p_expression_name(t):
'expression : NAME'
try:
t[0] = names[t[1]]
except LookupError:
print("Undefined name '%s'" % t[1])
t[0] = 0
def p_error(t):
print("Syntax error at '%s'" % t.value)
yacc.yacc()
|
wreckoner/PyEuler
|
refs/heads/master
|
src/problem012.py
|
1
|
# -*- coding:utf-8 -*-
"""
Problem 12: Highly divisible triangular number
The sequence of triangle numbers is generated by adding the natural numbers. So the 7th triangle number would be 1 + 2 + 3 + 4 + 5 + 6 + 7 = 28. The first ten terms would be:
1, 3, 6, 10, 15, 21, 28, 36, 45, 55, ...
Let us list the factors of the first seven triangle numbers:
1: 1
3: 1,3
6: 1,2,3,6
10: 1,2,5,10
15: 1,3,5,15
21: 1,3,7,21
28: 1,2,4,7,14,28
We can see that 28 is the first triangle number to have over five divisors.
What is the value of the first triangle number to have over five hundred divisors?
Answer: 76576500 (12375 th divisor)
"""
import math
def highly_divisible_triangular_number(num):
"""
Simple brute force: keep generating triangle numbers and counting their divisors until one has more than the requested number.
"""
n = 1
while True:
triangle_number = (n * (n+1))/2
divisors = []
# iterate up to and including the square root; count the root only once
for i in xrange(1, int(math.sqrt(triangle_number)) + 1):
if triangle_number % i == 0:
divisors.append(i)
if i != triangle_number / i:
divisors.append(triangle_number / i)
if len(divisors) > num:
return triangle_number
n += 1
if __name__ == '__main__':
print highly_divisible_triangular_number(500)
|
djnugent/mavlink
|
refs/heads/master
|
pymavlink/tools/mavtogpx.py
|
33
|
#!/usr/bin/env python
'''
example program to extract GPS data from a mavlink log, and create a GPX
file, for loading into google earth
'''
import sys, struct, time, os
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--condition", default=None, help="select packets by a condition")
parser.add_argument("--nofixcheck", default=False, action='store_true', help="don't check for GPS fix")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
def mav_to_gpx(infilename, outfilename):
'''convert a mavlink log file to a GPX file'''
mlog = mavutil.mavlink_connection(infilename)
outf = open(outfilename, mode='w')
def process_packet(timestamp, lat, lon, alt, hdg, v):
t = time.localtime(timestamp)
outf.write('''<trkpt lat="%s" lon="%s">
<ele>%s</ele>
<time>%s</time>
<course>%s</course>
<speed>%s</speed>
<fix>3d</fix>
</trkpt>
''' % (lat, lon, alt,
time.strftime("%Y-%m-%dT%H:%M:%SZ", t),
hdg, v))
def add_header():
outf.write('''<?xml version="1.0" encoding="UTF-8"?>
<gpx
version="1.0"
creator="pymavlink"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns="http://www.topografix.com/GPX/1/0"
xsi:schemaLocation="http://www.topografix.com/GPX/1/0 http://www.topografix.com/GPX/1/0/gpx.xsd">
<trk>
<trkseg>
''')
def add_footer():
outf.write('''</trkseg>
</trk>
</gpx>
''')
add_header()
count=0
lat=0
lon=0
fix=0
while True:
m = mlog.recv_match(type=['GPS_RAW', 'GPS_RAW_INT', 'GPS', 'GPS2'], condition=args.condition)
if m is None:
break
if m.get_type() == 'GPS_RAW_INT':
lat = m.lat/1.0e7
lon = m.lon/1.0e7
alt = m.alt/1.0e3
v = m.vel/100.0
hdg = m.cog/100.0
timestamp = m._timestamp
fix = m.fix_type
elif m.get_type() == 'GPS_RAW':
lat = m.lat
lon = m.lon
alt = m.alt
v = m.v
hdg = m.hdg
timestamp = m._timestamp
fix = m.fix_type
elif m.get_type() == 'GPS' or m.get_type() == 'GPS2':
lat = m.Lat
lon = m.Lng
alt = m.Alt
v = m.Spd
hdg = m.GCrs
timestamp = m._timestamp
fix = m.Status
else:
pass
if fix < 2 and not args.nofixcheck:
continue
if lat == 0.0 or lon == 0.0:
continue
process_packet(timestamp, lat, lon, alt, hdg, v)
count += 1
add_footer()
print("Created %s with %u points" % (outfilename, count))
for infilename in args.logs:
outfilename = infilename + '.gpx'
mav_to_gpx(infilename, outfilename)
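# Hedged usage sketch ('flight.tlog' is an illustrative filename); each
# input produces a sibling .gpx file:
#   python mavtogpx.py flight.tlog
#   python mavtogpx.py --condition='GPS_RAW_INT.fix_type>=3' flight.tlog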
|
jxtech/wechatpy
|
refs/heads/master
|
wechatpy/client/api/media.py
|
2
|
# -*- coding: utf-8 -*-
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatMedia(BaseWeChatAPI):
"""素材管理
https://developers.weixin.qq.com/doc/offiaccount/Asset_Management/New_temporary_materials.html
"""
def upload(self, media_type, media_file):
"""
新增临时素材
详情请参考
https://developers.weixin.qq.com/doc/offiaccount/Asset_Management/New_temporary_materials.html
:param media_type: 媒体文件类型,分别有图片(image)、语音(voice)、视频(video)和缩略图(thumb)
:param media_file: 要上传的文件,一个 File-object
:return: 返回的 JSON 数据包
"""
return self._post(url="media/upload", params={"type": media_type}, files={"media": media_file})
def download(self, media_id):
"""
获取临时素材
详情请参考
https://developers.weixin.qq.com/doc/offiaccount/Asset_Management/Get_temporary_materials.html
:param media_id: 媒体文件 ID
:return: requests 的 Response 实例
"""
return self._get("media/get", params={"media_id": media_id})
def get_url(self, media_id):
"""
获取临时素材下载地址
:param media_id: 媒体文件 ID
:return: 临时素材下载地址
"""
return f"https://api.weixin.qq.com/cgi-bin/media/get?access_token={self.access_token}&media_id={media_id}"
def upload_video(self, media_id, title, description):
"""
群发视频消息时获取视频 media_id
详情请参考
http://mp.weixin.qq.com/wiki/15/5380a4e6f02f2ffdc7981a8ed7a40753.html
:param media_id: 需通过基础支持中的上传下载多媒体文件 :func:`upload` 来得到
:param title: 视频标题
:param description: 视频描述
:return: 返回的 JSON 数据包
"""
return self._post(
url="media/uploadvideo",
data={"media_id": media_id, "title": title, "description": description},
)
def upload_articles(self, articles):
"""
上传图文消息素材
详情请参考
http://mp.weixin.qq.com/wiki/15/5380a4e6f02f2ffdc7981a8ed7a40753.html
:param articles: 图文消息数组
:return: 返回的 JSON 数据包
"""
articles_data = []
for article in articles:
articles_data.append(
{
"thumb_media_id": article["thumb_media_id"],
"title": article["title"],
"content": article["content"],
"author": article.get("author", ""),
"content_source_url": article.get("content_source_url", ""),
"digest": article.get("digest", ""),
"show_cover_pic": article.get("show_cover_pic", 0),
}
)
return self._post("media/uploadnews", data={"articles": articles_data})
def upload_image(self, media_file):
"""
上传群发消息内的图片
详情请参考
https://developers.weixin.qq.com/doc/offiaccount/Asset_Management/Adding_Permanent_Assets.html
:param media_file: 要上传的文件,一个 File-object
:return: 上传成功时返回图片 URL
"""
res = self._post(
url="media/uploadimg",
files={"media": media_file},
result_processor=lambda x: x["url"],
)
return res
upload_mass_image = upload_image
|
PaddlePaddle/Paddle
|
refs/heads/develop
|
python/paddle/vision/datasets/voc2012.py
|
1
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import io
import tarfile
import numpy as np
from PIL import Image
import paddle
from paddle.io import Dataset
from paddle.dataset.common import _check_exists_and_download
__all__ = []
VOC_URL = 'https://dataset.bj.bcebos.com/voc/VOCtrainval_11-May-2012.tar'
VOC_MD5 = '6cd6e144f989b92b3379bac3b3de84fd'
SET_FILE = 'VOCdevkit/VOC2012/ImageSets/Segmentation/{}.txt'
DATA_FILE = 'VOCdevkit/VOC2012/JPEGImages/{}.jpg'
LABEL_FILE = 'VOCdevkit/VOC2012/SegmentationClass/{}.png'
CACHE_DIR = 'voc2012'
MODE_FLAG_MAP = {'train': 'trainval', 'test': 'train', 'valid': "val"}
class VOC2012(Dataset):
"""
Implementation of `VOC2012 <http://host.robots.ox.ac.uk/pascal/VOC/voc2012/>`_ dataset
To speed up the download, we put the data on https://dataset.bj.bcebos.com/voc/VOCtrainval_11-May-2012.tar.
Original data can get from http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar.
Args:
data_file(str): path to data file, can be set None if
:attr:`download` is True. Default None, default data path: ~/.cache/paddle/dataset/voc2012
mode(str): 'train', 'valid' or 'test' mode. Default 'train'.
transform(callable): transform to perform on image, None for no transform. Default None
download(bool): download dataset automatically if :attr:`data_file` is None. Default True
backend(str, optional): Specifies which type of image to be returned:
PIL.Image or numpy.ndarray. Should be one of {'pil', 'cv2'}.
If this option is not set, will get backend from ``paddle.vision.get_image_backend`` ,
default backend is 'pil'. Default: None.
Examples:
.. code-block:: python
import paddle
from paddle.vision.datasets import VOC2012
from paddle.vision.transforms import Normalize
class SimpleNet(paddle.nn.Layer):
def __init__(self):
super(SimpleNet, self).__init__()
def forward(self, image, label):
return paddle.sum(image), label
normalize = Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5],
data_format='HWC')
voc2012 = VOC2012(mode='train', transform=normalize, backend='cv2')
for i in range(10):
image, label= voc2012[i]
image = paddle.cast(paddle.to_tensor(image), 'float32')
label = paddle.to_tensor(label)
model = SimpleNet()
image, label= model(image, label)
print(image.numpy().shape, label.numpy().shape)
"""
def __init__(self,
data_file=None,
mode='train',
transform=None,
download=True,
backend=None):
assert mode.lower() in ['train', 'valid', 'test'], \
"mode should be 'train', 'valid' or 'test', but got {}".format(mode)
if backend is None:
backend = paddle.vision.get_image_backend()
if backend not in ['pil', 'cv2']:
raise ValueError(
"Expected backend are one of ['pil', 'cv2'], but got {}"
.format(backend))
self.backend = backend
self.flag = MODE_FLAG_MAP[mode.lower()]
self.data_file = data_file
if self.data_file is None:
assert download, "data_file is not set and downloading automatically is disabled"
self.data_file = _check_exists_and_download(
data_file, VOC_URL, VOC_MD5, CACHE_DIR, download)
self.transform = transform
# read dataset into memory
self._load_anno()
self.dtype = paddle.get_default_dtype()
def _load_anno(self):
self.name2mem = {}
self.data_tar = tarfile.open(self.data_file)
for ele in self.data_tar.getmembers():
self.name2mem[ele.name] = ele
set_file = SET_FILE.format(self.flag)
sets = self.data_tar.extractfile(self.name2mem[set_file])
self.data = []
self.labels = []
for line in sets:
line = line.strip()
data = DATA_FILE.format(line.decode('utf-8'))
label = LABEL_FILE.format(line.decode('utf-8'))
self.data.append(data)
self.labels.append(label)
def __getitem__(self, idx):
data_file = self.data[idx]
label_file = self.labels[idx]
data = self.data_tar.extractfile(self.name2mem[data_file]).read()
label = self.data_tar.extractfile(self.name2mem[label_file]).read()
data = Image.open(io.BytesIO(data))
label = Image.open(io.BytesIO(label))
if self.backend == 'cv2':
data = np.array(data)
label = np.array(label)
if self.transform is not None:
data = self.transform(data)
if self.backend == 'cv2':
return data.astype(self.dtype), label.astype(self.dtype)
return data, label
def __len__(self):
return len(self.data)
def __del__(self):
if self.data_tar:
self.data_tar.close()
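# Illustrative usage (comment-only sketch; the mode and index are arbitrary
# examples, not values prescribed by the class):
#   ds = VOC2012(mode='valid', backend='cv2')
#   img, seg = ds[0]   # HWC image array and its segmentation mask, cast to the default dtype
#   len(ds)            # number of (image, label) pairs in the split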
|
bernardokyotoku/skillplant
|
refs/heads/master
|
django/contrib/admindocs/models.py
|
12
|
# Empty models.py to allow for specifying admindocs as a test label.
|
yashodhank/erpnext
|
refs/heads/develop
|
erpnext/setup/setup_wizard/test_setup_wizard.py
|
30
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from erpnext.setup.setup_wizard.test_setup_data import args
from frappe.desk.page.setup_wizard.setup_wizard import setup_complete
import frappe.utils.scheduler
if __name__ == "__main__":
frappe.connect()
frappe.local.form_dict = frappe._dict(args)
setup_complete()
frappe.utils.scheduler.disable_scheduler()
|
BCLab-UNM/SwarmBaseCode-ROS
|
refs/heads/master
|
arduino/libraries/ros_lib/examples/ServiceClient/client.py
|
61
|
#!/usr/bin/env python
"""
Sample code to use with ServiceClient.pde
"""
import roslib; roslib.load_manifest("rosserial_arduino")
import rospy
from rosserial_arduino.srv import *
def callback(req):
print "The arduino is calling! Please send it a message:"
t = TestResponse()
t.output = raw_input()
return t
rospy.init_node("service_client_test")
rospy.Service("test_srv", Test, callback)
rospy.spin()
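# Illustrative (comment-only; device path and launch style are assumptions, not
# part of this sample): this node is typically run next to the rosserial bridge:
#   rosrun rosserial_python serial_node.py /dev/ttyUSB0   # connect the Arduino
#   python client.py                                      # start this service server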
|
dpgeorge/micropython
|
refs/heads/master
|
tests/micropython/extreme_exc.py
|
9
|
# test some extreme cases of allocating exceptions and tracebacks
import micropython
# Check for stackless build, which can't call functions without
# allocating a frame on the heap.
try:
def stackless():
pass
micropython.heap_lock()
stackless()
micropython.heap_unlock()
except RuntimeError:
print("SKIP")
raise SystemExit
# some ports need to allocate heap for the emergency exception
try:
micropython.alloc_emergency_exception_buf(256)
except AttributeError:
pass
def main():
# create an exception with many args while heap is locked
# should revert to empty tuple for args
micropython.heap_lock()
e = Exception(
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
)
micropython.heap_unlock()
print(repr(e))
# create an exception with a long formatted error message while heap is locked
# should use emergency exception buffer and truncate the message
def f():
pass
micropython.heap_lock()
try:
f(
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=1
)
except Exception as er:
e = er
micropython.heap_unlock()
print(repr(e)[:10])
# create an exception with a long formatted error message while heap is low
# should use the heap and truncate the message
lst = []
while 1:
try:
lst = [lst]
except MemoryError:
break
try:
f(
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz=1
)
except Exception as er:
e = er
lst[0][0] = None
lst = None
print(repr(e)[:10])
# raise a deep exception with the heap locked
# should use emergency exception and be unable to resize traceback array
def g():
g()
micropython.heap_lock()
try:
g()
except Exception as er:
e = er
micropython.heap_unlock()
print(repr(e)[:13])
# create an exception on the heap with some traceback on the heap, but then
# raise it with the heap locked so it can't allocate any more traceback
exc = Exception("my exception")
try:
raise exc
except:
pass
def h(e):
raise e
micropython.heap_lock()
try:
h(exc)
except Exception as er:
e = er
micropython.heap_unlock()
print(repr(e))
main()
|
yashu-seth/networkx
|
refs/heads/master
|
networkx/linalg/__init__.py
|
61
|
from networkx.linalg.attrmatrix import *
import networkx.linalg.attrmatrix
from networkx.linalg.spectrum import *
import networkx.linalg.spectrum
from networkx.linalg.graphmatrix import *
import networkx.linalg.graphmatrix
from networkx.linalg.laplacianmatrix import *
import networkx.linalg.laplacianmatrix
from networkx.linalg.algebraicconnectivity import *
from networkx.linalg.modularitymatrix import *
import networkx.linalg.modularitymatrix
|
MattCrystal/Haunted-X-series
|
refs/heads/master
|
scripts/build-all.py
|
1182
|
#! /usr/bin/env python
# Copyright (c) 2009-2011, The Linux Foundation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Linux Foundation nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Build the kernel for all targets using the Android build environment.
#
# TODO: Accept arguments to indicate what to build.
import glob
from optparse import OptionParser
import subprocess
import os
import os.path
import shutil
import errno
import sys
version = 'build-all.py, version 0.01'
build_dir = '../all-kernels'
make_command = ["vmlinux", "modules"]
make_env = os.environ
make_env.update({
'ARCH': 'arm',
'CROSS_COMPILE': 'arm-none-linux-gnueabi-',
'KCONFIG_NOTIMESTAMP': 'true' })
all_options = {}
def error(msg):
sys.stderr.write("error: %s\n" % msg)
def fail(msg):
"""Fail with a user-printed message"""
error(msg)
sys.exit(1)
def check_kernel():
"""Ensure that PWD is a kernel directory"""
if (not os.path.isfile('MAINTAINERS') or
not os.path.isfile('arch/arm/mach-msm/Kconfig')):
fail("This doesn't seem to be an MSM kernel dir")
def check_build():
"""Ensure that the build directory is present."""
if not os.path.isdir(build_dir):
try:
os.makedirs(build_dir)
except OSError as exc:
if exc.errno == errno.EEXIST:
pass
else:
raise
def update_config(file, str):
print 'Updating %s with \'%s\'\n' % (file, str)
defconfig = open(file, 'a')
defconfig.write(str + '\n')
defconfig.close()
def scan_configs():
"""Get the full list of defconfigs appropriate for this tree."""
names = {}
for n in glob.glob('arch/arm/configs/[fm]sm[0-9-]*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/qsd*_defconfig'):
names[os.path.basename(n)[:-10]] = n
for n in glob.glob('arch/arm/configs/apq*_defconfig'):
names[os.path.basename(n)[:-10]] = n
return names
class Builder:
def __init__(self, logname):
self.logname = logname
self.fd = open(logname, 'w')
def run(self, args):
devnull = open('/dev/null', 'r')
proc = subprocess.Popen(args, stdin=devnull,
env=make_env,
bufsize=0,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
count = 0
# for line in proc.stdout:
rawfd = proc.stdout.fileno()
while True:
line = os.read(rawfd, 1024)
if not line:
break
self.fd.write(line)
self.fd.flush()
if all_options.verbose:
sys.stdout.write(line)
sys.stdout.flush()
else:
for i in range(line.count('\n')):
count += 1
if count == 64:
count = 0
print
sys.stdout.write('.')
sys.stdout.flush()
print
result = proc.wait()
self.fd.close()
return result
failed_targets = []
def build(target):
dest_dir = os.path.join(build_dir, target)
log_name = '%s/log-%s.log' % (build_dir, target)
print 'Building %s in %s log %s' % (target, dest_dir, log_name)
if not os.path.isdir(dest_dir):
os.mkdir(dest_dir)
defconfig = 'arch/arm/configs/%s_defconfig' % target
dotconfig = '%s/.config' % dest_dir
savedefconfig = '%s/defconfig' % dest_dir
shutil.copyfile(defconfig, dotconfig)
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'%s_defconfig' % target], env=make_env, stdin=devnull)
devnull.close()
if not all_options.updateconfigs:
build = Builder(log_name)
result = build.run(['make', 'O=%s' % dest_dir] + make_command)
if result != 0:
if all_options.keep_going:
failed_targets.append(target)
fail_or_error = error
else:
fail_or_error = fail
fail_or_error("Failed to build %s, see %s" % (target, build.logname))
# Copy the defconfig back.
if all_options.configs or all_options.updateconfigs:
devnull = open('/dev/null', 'r')
subprocess.check_call(['make', 'O=%s' % dest_dir,
'savedefconfig'], env=make_env, stdin=devnull)
devnull.close()
shutil.copyfile(savedefconfig, defconfig)
def build_many(allconf, targets):
print "Building %d target(s)" % len(targets)
for target in targets:
if all_options.updateconfigs:
update_config(allconf[target], all_options.updateconfigs)
build(target)
if failed_targets:
fail('\n '.join(["Failed targets:"] +
[target for target in failed_targets]))
def main():
global make_command
check_kernel()
check_build()
configs = scan_configs()
usage = ("""
%prog [options] all -- Build all targets
%prog [options] target target ... -- List specific targets
%prog [options] perf -- Build all perf targets
%prog [options] noperf -- Build all non-perf targets""")
parser = OptionParser(usage=usage, version=version)
parser.add_option('--configs', action='store_true',
dest='configs',
help="Copy configs back into tree")
parser.add_option('--list', action='store_true',
dest='list',
help='List available targets')
parser.add_option('-v', '--verbose', action='store_true',
dest='verbose',
help='Output to stdout in addition to log file')
parser.add_option('--oldconfig', action='store_true',
dest='oldconfig',
help='Only process "make oldconfig"')
parser.add_option('--updateconfigs',
dest='updateconfigs',
help="Update defconfigs with provided option setting, "
"e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
parser.add_option('-j', '--jobs', type='int', dest="jobs",
help="Number of simultaneous jobs")
parser.add_option('-l', '--load-average', type='int',
dest='load_average',
help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
parser.add_option('-k', '--keep-going', action='store_true',
dest='keep_going', default=False,
help="Keep building other targets if a target fails")
parser.add_option('-m', '--make-target', action='append',
help='Build the indicated make target (default: %s)' %
' '.join(make_command))
(options, args) = parser.parse_args()
global all_options
all_options = options
if options.list:
print "Available targets:"
for target in configs.keys():
print " %s" % target
sys.exit(0)
if options.oldconfig:
make_command = ["oldconfig"]
elif options.make_target:
make_command = options.make_target
if options.jobs:
make_command.append("-j%d" % options.jobs)
if options.load_average:
make_command.append("-l%d" % options.load_average)
if args == ['all']:
build_many(configs, configs.keys())
elif args == ['perf']:
targets = []
for t in configs.keys():
if "perf" in t:
targets.append(t)
build_many(configs, targets)
elif args == ['noperf']:
targets = []
for t in configs.keys():
if "perf" not in t:
targets.append(t)
build_many(configs, targets)
elif len(args) > 0:
targets = []
for t in args:
if t not in configs.keys():
parser.error("Target '%s' not one of %s" % (t, configs.keys()))
targets.append(t)
build_many(configs, targets)
else:
parser.error("Must specify a target to build, or 'all'")
if __name__ == "__main__":
main()
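# Illustrative invocations (comment-only sketch; the target name below is
# hypothetical and depends on the defconfigs present in your tree):
#   ./build-all.py --list                 # show available targets
#   ./build-all.py -j 8 all               # build every target with 8 jobs
#   ./build-all.py --oldconfig msm8960    # only run "make oldconfig" for one target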
|
Bystroushaak/pyDHTMLParser
|
refs/heads/master
|
setup.py
|
1
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
from setuptools import setup
from setuptools import find_packages
from docs import get_version
# Variables ===================================================================
CHANGELOG = open('CHANGES.rst').read()
LONG_DESCRIPTION = "\n\n".join([
open('README.rst').read(),
CHANGELOG
])
# Actual setup definition =====================================================
setup(
name='pyDHTMLParser',
version=get_version(CHANGELOG),
py_modules=['dhtmlparser'],
author='Bystroushaak',
author_email='bystrousak@kitakitsune.org',
url='https://github.com/Bystroushaak/pyDHTMLParser',
license='MIT',
description='Python HTML/XML parser for easy web scraping.',
long_description=LONG_DESCRIPTION,
packages=find_packages('src'),
package_dir={'': 'src'},
include_package_data=True,
classifiers=[
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup :: HTML",
"Topic :: Text Processing :: Markup :: XML"
],
extras_require={
"test": [
"pytest",
"pytest-cov",
],
"docs": [
"sphinx",
"sphinxcontrib-napoleon",
]
}
)
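# Illustrative (comment-only): the extras_require block above maps to
#   pip install pyDHTMLParser[test]   # pulls in pytest + pytest-cov
#   pip install pyDHTMLParser[docs]   # pulls in sphinx + sphinxcontrib-napoleon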
|
lseyesl/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/lint_test_expectations_unittest.py
|
122
|
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
import StringIO
import unittest2 as unittest
from webkitpy.common.host_mock import MockHost
from webkitpy.layout_tests import lint_test_expectations
class FakePort(object):
def __init__(self, host, name, path):
self.host = host
self.name = name
self.path = path
def test_configuration(self):
return None
def expectations_dict(self):
self.host.ports_parsed.append(self.name)
return {self.path: ''}
def skipped_layout_tests(self, _):
return set([])
def all_test_configurations(self):
return []
def configuration_specifier_macros(self):
return []
def get_option(self, _, val):
return val
def path_to_generic_test_expectations_file(self):
return ''
class FakeFactory(object):
def __init__(self, host, ports):
self.host = host
self.ports = {}
for port in ports:
self.ports[port.name] = port
def get(self, port_name, *args, **kwargs): # pylint: disable=W0613,E0202
return self.ports[port_name]
def all_port_names(self, platform=None): # pylint: disable=W0613,E0202
return sorted(self.ports.keys())
class LintTest(unittest.TestCase):
def test_all_configurations(self):
host = MockHost()
host.ports_parsed = []
host.port_factory = FakeFactory(host, (FakePort(host, 'a', 'path-to-a'),
FakePort(host, 'b', 'path-to-b'),
FakePort(host, 'b-win', 'path-to-b')))
logging_stream = StringIO.StringIO()
options = optparse.Values({'platform': None})
res = lint_test_expectations.lint(host, options, logging_stream)
self.assertEqual(res, 0)
self.assertEqual(host.ports_parsed, ['a', 'b', 'b-win'])
def test_lint_test_files(self):
logging_stream = StringIO.StringIO()
options = optparse.Values({'platform': 'test-mac-leopard'})
host = MockHost()
# pylint appears to complain incorrectly about the method overrides pylint: disable=E0202,C0322
# FIXME: incorrect complaints about spacing pylint: disable=C0322
host.port_factory.all_port_names = lambda platform=None: [platform]
res = lint_test_expectations.lint(host, options, logging_stream)
self.assertEqual(res, 0)
self.assertIn('Lint succeeded', logging_stream.getvalue())
def test_lint_test_files__errors(self):
options = optparse.Values({'platform': 'test', 'debug_rwt_logging': False})
host = MockHost()
# FIXME: incorrect complaints about spacing pylint: disable=C0322
port = host.port_factory.get(options.platform, options=options)
port.expectations_dict = lambda: {'foo': '-- syntax error1', 'bar': '-- syntax error2'}
host.port_factory.get = lambda platform, options=None: port
host.port_factory.all_port_names = lambda platform=None: [port.name()]
logging_stream = StringIO.StringIO()
res = lint_test_expectations.lint(host, options, logging_stream)
self.assertEqual(res, -1)
self.assertIn('Lint failed', logging_stream.getvalue())
self.assertIn('foo:1', logging_stream.getvalue())
self.assertIn('bar:1', logging_stream.getvalue())
class MainTest(unittest.TestCase):
def test_success(self):
orig_lint_fn = lint_test_expectations.lint
# unused args pylint: disable=W0613
def interrupting_lint(host, options, logging_stream):
raise KeyboardInterrupt
def successful_lint(host, options, logging_stream):
return 0
def exception_raising_lint(host, options, logging_stream):
assert False
stdout = StringIO.StringIO()
stderr = StringIO.StringIO()
try:
lint_test_expectations.lint = interrupting_lint
res = lint_test_expectations.main([], stdout, stderr)
self.assertEqual(res, lint_test_expectations.INTERRUPTED_EXIT_STATUS)
lint_test_expectations.lint = successful_lint
res = lint_test_expectations.main(['--platform', 'test'], stdout, stderr)
self.assertEqual(res, 0)
lint_test_expectations.lint = exception_raising_lint
res = lint_test_expectations.main([], stdout, stderr)
self.assertEqual(res, lint_test_expectations.EXCEPTIONAL_EXIT_STATUS)
finally:
lint_test_expectations.lint = orig_lint_fn
|
vuteam/BlackHole-New
|
refs/heads/master
|
lib/python/Tools/Notifications.py
|
3
|
notifications = [ ]
notificationAdded = [ ]
# notifications which are currently on screen (and might be closed by similar notifications)
current_notifications = [ ]
def __AddNotification(fnc, screen, id, *args, **kwargs):
if ".MessageBox'>" in `screen`:
kwargs["simple"] = True
if ".Standby'>" in `screen`:
removeCIdialog()
notifications.append((fnc, screen, args, kwargs, id))
for x in notificationAdded:
x()
def AddNotification(screen, *args, **kwargs):
AddNotificationWithCallback(None, screen, *args, **kwargs)
def AddNotificationWithCallback(fnc, screen, *args, **kwargs):
__AddNotification(fnc, screen, None, *args, **kwargs)
def AddNotificationParentalControl(fnc, screen, *args, **kwargs):
RemovePopup("Parental control")
__AddNotification(fnc, screen, "Parental control", *args, **kwargs)
def AddNotificationWithID(id, screen, *args, **kwargs):
__AddNotification(None, screen, id, *args, **kwargs)
# we don't support notifications with callback and ID as this
# would require manually calling the callback on cancelled popups.
def RemovePopup(id):
# remove similar notifications
print "RemovePopup, id =", id
for x in notifications:
if x[4] and x[4] == id:
print "(found in notifications)"
notifications.remove(x)
for x in current_notifications:
if x[0] == id:
print "(found in current notifications)"
x[1].close()
from Screens.MessageBox import MessageBox
def AddPopup(text, type, timeout, id = None):
if id is not None:
RemovePopup(id)
print "AddPopup, id =", id
AddNotificationWithID(id, MessageBox, text = text, type = type, timeout = timeout, close_on_any_key = True)
def removeCIdialog():
import NavigationInstance
if NavigationInstance.instance and NavigationInstance.instance.wasTimerWakeup():
import Screens.Ci
for slot in Screens.Ci.CiHandler.dlgs:
if hasattr(Screens.Ci.CiHandler.dlgs[slot], "forceExit"):
Screens.Ci.CiHandler.dlgs[slot].tag = "WAIT"
Screens.Ci.CiHandler.dlgs[slot].forceExit()
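# Illustrative usage (comment-only sketch; the message text and id value are
# arbitrary examples, MessageBox.TYPE_INFO is the standard enigma2 info type):
#   AddPopup(text="Recording started", type=MessageBox.TYPE_INFO, timeout=5, id="RecordNotification")
#   RemovePopup(id="RecordNotification")   # dismiss it again, queued or on screen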
|
lucafavatella/intellij-community
|
refs/heads/cli-wip
|
python/lib/Lib/socket.py
|
73
|
"""
This is an updated socket module for use on JVMs > 1.5; it is derived from the old jython socket module.
It is documented, along with known issues and workarounds, on the jython wiki.
http://wiki.python.org/jython/NewSocketModule
"""
_defaulttimeout = None
import errno
import jarray
import string
import struct
import sys
import threading
import time
import types
# Java.io classes
import java.io.BufferedInputStream
import java.io.BufferedOutputStream
# Java.io exceptions
import java.io.InterruptedIOException
import java.io.IOException
# Java.lang classes
import java.lang.String
# Java.lang exceptions
import java.lang.Exception
# Java.net classes
import java.net.DatagramPacket
import java.net.InetAddress
import java.net.InetSocketAddress
import java.net.Socket
# Java.net exceptions
import java.net.BindException
import java.net.ConnectException
import java.net.NoRouteToHostException
import java.net.PortUnreachableException
import java.net.ProtocolException
import java.net.SocketException
import java.net.SocketTimeoutException
import java.net.UnknownHostException
# Java.nio classes
import java.nio.ByteBuffer
import java.nio.channels.DatagramChannel
import java.nio.channels.ServerSocketChannel
import java.nio.channels.SocketChannel
# Java.nio exceptions
import java.nio.channels.AlreadyConnectedException
import java.nio.channels.AsynchronousCloseException
import java.nio.channels.CancelledKeyException
import java.nio.channels.ClosedByInterruptException
import java.nio.channels.ClosedChannelException
import java.nio.channels.ClosedSelectorException
import java.nio.channels.ConnectionPendingException
import java.nio.channels.IllegalBlockingModeException
import java.nio.channels.IllegalSelectorException
import java.nio.channels.NoConnectionPendingException
import java.nio.channels.NonReadableChannelException
import java.nio.channels.NonWritableChannelException
import java.nio.channels.NotYetBoundException
import java.nio.channels.NotYetConnectedException
import java.nio.channels.UnresolvedAddressException
import java.nio.channels.UnsupportedAddressTypeException
# Javax.net.ssl classes
import javax.net.ssl.SSLSocketFactory
# Javax.net.ssl exceptions
import javax.net.ssl.SSLException
import javax.net.ssl.SSLHandshakeException
import javax.net.ssl.SSLKeyException
import javax.net.ssl.SSLPeerUnverifiedException
import javax.net.ssl.SSLProtocolException
import org.python.core.io.DatagramSocketIO
import org.python.core.io.ServerSocketIO
import org.python.core.io.SocketIO
from org.python.core.Py import newString as asPyString
class error(Exception): pass
class herror(error): pass
class gaierror(error): pass
class timeout(error): pass
class sslerror(error): pass
def _unmapped_exception(exc):
return error(-1, 'Unmapped exception: %s' % exc)
def java_net_socketexception_handler(exc):
if exc.message.startswith("Address family not supported by protocol family"):
return error(errno.EAFNOSUPPORT, 'Address family not supported by protocol family: See http://wiki.python.org/jython/NewSocketModule#IPV6addresssupport')
return _unmapped_exception(exc)
def would_block_error(exc=None):
return error(errno.EWOULDBLOCK, 'The socket operation could not complete without blocking')
ALL = None
_exception_map = {
# (<javaexception>, <circumstance>) : callable that raises the python equivalent exception, or None to stub out as unmapped
(java.io.IOException, ALL) : lambda x: error(errno.ECONNRESET, 'Software caused connection abort'),
(java.io.InterruptedIOException, ALL) : lambda x: timeout('timed out'),
(java.net.BindException, ALL) : lambda x: error(errno.EADDRINUSE, 'Address already in use'),
(java.net.ConnectException, ALL) : lambda x: error(errno.ECONNREFUSED, 'Connection refused'),
(java.net.NoRouteToHostException, ALL) : None,
(java.net.PortUnreachableException, ALL) : None,
(java.net.ProtocolException, ALL) : None,
(java.net.SocketException, ALL) : java_net_socketexception_handler,
(java.net.SocketTimeoutException, ALL) : lambda x: timeout('timed out'),
(java.net.UnknownHostException, ALL) : lambda x: gaierror(errno.EGETADDRINFOFAILED, 'getaddrinfo failed'),
(java.nio.channels.AlreadyConnectedException, ALL) : lambda x: error(errno.EISCONN, 'Socket is already connected'),
(java.nio.channels.AsynchronousCloseException, ALL) : None,
(java.nio.channels.CancelledKeyException, ALL) : None,
(java.nio.channels.ClosedByInterruptException, ALL) : None,
(java.nio.channels.ClosedChannelException, ALL) : lambda x: error(errno.EPIPE, 'Socket closed'),
(java.nio.channels.ClosedSelectorException, ALL) : None,
(java.nio.channels.ConnectionPendingException, ALL) : None,
(java.nio.channels.IllegalBlockingModeException, ALL) : None,
(java.nio.channels.IllegalSelectorException, ALL) : None,
(java.nio.channels.NoConnectionPendingException, ALL) : None,
(java.nio.channels.NonReadableChannelException, ALL) : None,
(java.nio.channels.NonWritableChannelException, ALL) : None,
(java.nio.channels.NotYetBoundException, ALL) : None,
(java.nio.channels.NotYetConnectedException, ALL) : None,
(java.nio.channels.UnresolvedAddressException, ALL) : lambda x: gaierror(errno.EGETADDRINFOFAILED, 'getaddrinfo failed'),
(java.nio.channels.UnsupportedAddressTypeException, ALL) : None,
# These error codes are currently wrong: getting them correct is going to require
# some investigation. Cpython 2.6 introduced extensive SSL support.
(javax.net.ssl.SSLException, ALL) : lambda x: sslerror(-1, 'SSL exception'),
(javax.net.ssl.SSLHandshakeException, ALL) : lambda x: sslerror(-1, 'SSL handshake exception'),
(javax.net.ssl.SSLKeyException, ALL) : lambda x: sslerror(-1, 'SSL key exception'),
(javax.net.ssl.SSLPeerUnverifiedException, ALL) : lambda x: sslerror(-1, 'SSL peer unverified exception'),
(javax.net.ssl.SSLProtocolException, ALL) : lambda x: sslerror(-1, 'SSL protocol exception'),
}
def _map_exception(exc, circumstance=ALL):
# print "Mapping exception: %s" % exc
mapped_exception = _exception_map.get((exc.__class__, circumstance))
if mapped_exception:
exception = mapped_exception(exc)
else:
exception = error(-1, 'Unmapped exception: %s' % exc)
exception.java_exception = exc
return exception
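# Illustrative (comment-only): how a caught Java exception is converted. Assuming
# `jlx` is a java.net.BindException caught in an except clause, then
#   py_exc = _map_exception(jlx)
# yields error(errno.EADDRINUSE, 'Address already in use') per _exception_map,
# with the original Java exception preserved on py_exc.java_exception.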
_feature_support_map = {
'ipv6': True,
'idna': False,
'tipc': False,
}
def supports(feature, *args):
if len(args) == 1:
_feature_support_map[feature] = args[0]
return _feature_support_map.get(feature, False)
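# Illustrative (comment-only): querying and overriding feature support.
#   supports('ipv6')          # -> True on this implementation
#   supports('idna', False)   # a second argument forcibly (re)sets the flag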
MODE_BLOCKING = 'block'
MODE_NONBLOCKING = 'nonblock'
MODE_TIMEOUT = 'timeout'
_permitted_modes = (MODE_BLOCKING, MODE_NONBLOCKING, MODE_TIMEOUT)
SHUT_RD = 0
SHUT_WR = 1
SHUT_RDWR = 2
AF_UNSPEC = 0
AF_INET = 2
AF_INET6 = 23
AI_PASSIVE=1
AI_CANONNAME=2
# For some reason, probably historical, SOCK_DGRAM and SOCK_STREAM are opposite values of what they are on cpython.
# I.E. The following is the way they are on cpython
# SOCK_STREAM = 1
# SOCK_DGRAM = 2
# At some point, we should probably switch them around, which *should* not affect anybody
SOCK_DGRAM = 1
SOCK_STREAM = 2
SOCK_RAW = 3 # not supported
SOCK_RDM = 4 # not supported
SOCK_SEQPACKET = 5 # not supported
SOL_SOCKET = 0xFFFF
IPPROTO_TCP = 6
IPPROTO_UDP = 17
SO_BROADCAST = 1
SO_KEEPALIVE = 2
SO_LINGER = 4
SO_OOBINLINE = 8
SO_RCVBUF = 16
SO_REUSEADDR = 32
SO_SNDBUF = 64
SO_TIMEOUT = 128
TCP_NODELAY = 256
INADDR_ANY = "0.0.0.0"
INADDR_BROADCAST = "255.255.255.255"
# Options with negative constants are not supported
# They are being added here so that code that refers to them
# will not break with an AttributeError
SO_ACCEPTCONN = -1
SO_DEBUG = -2
SO_DONTROUTE = -4
SO_ERROR = -8
SO_EXCLUSIVEADDRUSE = -16
SO_RCVLOWAT = -32
SO_RCVTIMEO = -64
SO_REUSEPORT = -128
SO_SNDLOWAT = -256
SO_SNDTIMEO = -512
SO_TYPE = -1024
SO_USELOOPBACK = -2048
__all__ = ['AF_UNSPEC', 'AF_INET', 'AF_INET6', 'AI_PASSIVE', 'SOCK_DGRAM',
'SOCK_RAW', 'SOCK_RDM', 'SOCK_SEQPACKET', 'SOCK_STREAM', 'SOL_SOCKET',
'SO_BROADCAST', 'SO_ERROR', 'SO_KEEPALIVE', 'SO_LINGER', 'SO_OOBINLINE',
'SO_RCVBUF', 'SO_REUSEADDR', 'SO_SNDBUF', 'SO_TIMEOUT', 'TCP_NODELAY',
'INADDR_ANY', 'INADDR_BROADCAST', 'IPPROTO_TCP', 'IPPROTO_UDP',
'SocketType', 'error', 'herror', 'gaierror', 'timeout',
'getfqdn', 'gethostbyaddr', 'gethostbyname', 'gethostname',
'socket', 'getaddrinfo', 'getdefaulttimeout', 'setdefaulttimeout',
'has_ipv6', 'htons', 'htonl', 'ntohs', 'ntohl',
'SHUT_RD', 'SHUT_WR', 'SHUT_RDWR',
]
def _constant_to_name(const_value):
sock_module = sys.modules['socket']
try:
for name in dir(sock_module):
if getattr(sock_module, name) is const_value:
return name
return "Unknown"
finally:
sock_module = None
class _nio_impl:
timeout = None
mode = MODE_BLOCKING
def getpeername(self):
return (self.jsocket.getInetAddress().getHostAddress(), self.jsocket.getPort() )
def config(self, mode, timeout):
self.mode = mode
if self.mode == MODE_BLOCKING:
self.jchannel.configureBlocking(1)
if self.mode == MODE_NONBLOCKING:
self.jchannel.configureBlocking(0)
if self.mode == MODE_TIMEOUT:
self.jchannel.configureBlocking(1)
self._timeout_millis = int(timeout*1000)
self.jsocket.setSoTimeout(self._timeout_millis)
def getsockopt(self, level, option):
if (level, option) in self.options:
result = getattr(self.jsocket, "get%s" % self.options[ (level, option) ])()
if option == SO_LINGER:
if result == -1:
enabled, linger_time = 0, 0
else:
enabled, linger_time = 1, result
return struct.pack('ii', enabled, linger_time)
return result
else:
raise error(errno.ENOPROTOOPT, "Socket option '%s' (level '%s') not supported on socket(%s)" % (_constant_to_name(option), _constant_to_name(level), str(self.jsocket)))
def setsockopt(self, level, option, value):
if (level, option) in self.options:
if option == SO_LINGER:
values = struct.unpack('ii', value)
self.jsocket.setSoLinger(*values)
else:
getattr(self.jsocket, "set%s" % self.options[ (level, option) ])(value)
else:
raise error(errno.ENOPROTOOPT, "Socket option '%s' (level '%s') not supported on socket(%s)" % (_constant_to_name(option), _constant_to_name(level), str(self.jsocket)))
def close(self):
self.jsocket.close()
def getchannel(self):
return self.jchannel
def fileno(self):
return self.socketio
class _client_socket_impl(_nio_impl):
options = {
(SOL_SOCKET, SO_KEEPALIVE): 'KeepAlive',
(SOL_SOCKET, SO_LINGER): 'SoLinger',
(SOL_SOCKET, SO_OOBINLINE): 'OOBInline',
(SOL_SOCKET, SO_RCVBUF): 'ReceiveBufferSize',
(SOL_SOCKET, SO_REUSEADDR): 'ReuseAddress',
(SOL_SOCKET, SO_SNDBUF): 'SendBufferSize',
(SOL_SOCKET, SO_TIMEOUT): 'SoTimeout',
(IPPROTO_TCP, TCP_NODELAY): 'TcpNoDelay',
}
def __init__(self, socket=None):
if socket:
self.jchannel = socket.getChannel()
else:
self.jchannel = java.nio.channels.SocketChannel.open()
self.jsocket = self.jchannel.socket()
self.socketio = org.python.core.io.SocketIO(self.jchannel, 'rw')
def bind(self, jsockaddr, reuse_addr):
self.jsocket.setReuseAddress(reuse_addr)
self.jsocket.bind(jsockaddr)
def connect(self, jsockaddr):
if self.mode == MODE_TIMEOUT:
self.jsocket.connect (jsockaddr, self._timeout_millis)
else:
self.jchannel.connect(jsockaddr)
def finish_connect(self):
return self.jchannel.finishConnect()
def _do_read_net(self, buf):
# Need two separate implementations because the java.nio APIs do not support timeouts
return self.jsocket.getInputStream().read(buf)
def _do_read_nio(self, buf):
bytebuf = java.nio.ByteBuffer.wrap(buf)
count = self.jchannel.read(bytebuf)
return count
def _do_write_net(self, buf):
self.jsocket.getOutputStream().write(buf)
return len(buf)
def _do_write_nio(self, buf):
bytebuf = java.nio.ByteBuffer.wrap(buf)
count = self.jchannel.write(bytebuf)
return count
def read(self, buf):
if self.mode == MODE_TIMEOUT:
return self._do_read_net(buf)
else:
return self._do_read_nio(buf)
def write(self, buf):
if self.mode == MODE_TIMEOUT:
return self._do_write_net(buf)
else:
return self._do_write_nio(buf)
def shutdown(self, how):
if how in (SHUT_RD, SHUT_RDWR):
self.jsocket.shutdownInput()
if how in (SHUT_WR, SHUT_RDWR):
self.jsocket.shutdownOutput()
class _server_socket_impl(_nio_impl):
options = {
(SOL_SOCKET, SO_RCVBUF): 'ReceiveBufferSize',
(SOL_SOCKET, SO_REUSEADDR): 'ReuseAddress',
(SOL_SOCKET, SO_TIMEOUT): 'SoTimeout',
}
def __init__(self, jsockaddr, backlog, reuse_addr):
self.jchannel = java.nio.channels.ServerSocketChannel.open()
self.jsocket = self.jchannel.socket()
self.jsocket.setReuseAddress(reuse_addr)
self.jsocket.bind(jsockaddr, backlog)
self.socketio = org.python.core.io.ServerSocketIO(self.jchannel, 'rw')
def accept(self):
if self.mode in (MODE_BLOCKING, MODE_NONBLOCKING):
new_cli_chan = self.jchannel.accept()
if new_cli_chan is not None:
return _client_socket_impl(new_cli_chan.socket())
else:
return None
else:
# In timeout mode now
new_cli_sock = self.jsocket.accept()
return _client_socket_impl(new_cli_sock)
def shutdown(self, how):
# This is no-op on java, for server sockets.
# What the user wants to achieve is achieved by calling close() on
# java/jython. But we can't call that here because that would then
# later cause the user explicit close() call to fail
pass
class _datagram_socket_impl(_nio_impl):
options = {
(SOL_SOCKET, SO_BROADCAST): 'Broadcast',
(SOL_SOCKET, SO_RCVBUF): 'ReceiveBufferSize',
(SOL_SOCKET, SO_REUSEADDR): 'ReuseAddress',
(SOL_SOCKET, SO_SNDBUF): 'SendBufferSize',
(SOL_SOCKET, SO_TIMEOUT): 'SoTimeout',
}
def __init__(self, jsockaddr=None, reuse_addr=0):
self.jchannel = java.nio.channels.DatagramChannel.open()
self.jsocket = self.jchannel.socket()
if jsockaddr is not None:
self.jsocket.setReuseAddress(reuse_addr)
self.jsocket.bind(jsockaddr)
self.socketio = org.python.core.io.DatagramSocketIO(self.jchannel, 'rw')
def connect(self, jsockaddr):
self.jchannel.connect(jsockaddr)
def disconnect(self):
"""
Disconnect the datagram socket.
cpython appears not to have this operation
"""
self.jchannel.disconnect()
def shutdown(self, how):
# This is no-op on java, for datagram sockets.
# What the user wants to achieve is achieved by calling close() on
# java/jython. But we can't call that here because that would then
# later cause the user explicit close() call to fail
pass
def _do_send_net(self, byte_array, socket_address, flags):
# Need two separate implementations because the java.nio APIs do not support timeouts
num_bytes = len(byte_array)
if self.jsocket.isConnected() and socket_address is None:
packet = java.net.DatagramPacket(byte_array, num_bytes)
else:
packet = java.net.DatagramPacket(byte_array, num_bytes, socket_address)
self.jsocket.send(packet)
return num_bytes
def _do_send_nio(self, byte_array, socket_address, flags):
byte_buf = java.nio.ByteBuffer.wrap(byte_array)
if self.jchannel.isConnected() and socket_address is None:
bytes_sent = self.jchannel.write(byte_buf)
else:
bytes_sent = self.jchannel.send(byte_buf, socket_address)
return bytes_sent
def sendto(self, byte_array, jsockaddr, flags):
if self.mode == MODE_TIMEOUT:
return self._do_send_net(byte_array, jsockaddr, flags)
else:
return self._do_send_nio(byte_array, jsockaddr, flags)
def send(self, byte_array, flags):
if self.mode == MODE_TIMEOUT:
return self._do_send_net(byte_array, None, flags)
else:
return self._do_send_nio(byte_array, None, flags)
def _do_receive_net(self, return_source_address, num_bytes, flags):
byte_array = jarray.zeros(num_bytes, 'b')
packet = java.net.DatagramPacket(byte_array, num_bytes)
self.jsocket.receive(packet)
bytes_rcvd = packet.getLength()
if bytes_rcvd < num_bytes:
byte_array = byte_array[:bytes_rcvd]
return_data = byte_array.tostring()
if return_source_address:
host = None
if packet.getAddress():
host = packet.getAddress().getHostAddress()
port = packet.getPort()
return return_data, (host, port)
else:
return return_data
def _do_receive_nio(self, return_source_address, num_bytes, flags):
byte_array = jarray.zeros(num_bytes, 'b')
byte_buf = java.nio.ByteBuffer.wrap(byte_array)
source_address = self.jchannel.receive(byte_buf)
if source_address is None and not self.jchannel.isBlocking():
raise would_block_error()
byte_buf.flip()
bytes_read = byte_buf.remaining()
if bytes_read < num_bytes:
byte_array = byte_array[:bytes_read]
return_data = byte_array.tostring()
if return_source_address:
return return_data, (source_address.getAddress().getHostAddress(), source_address.getPort())
else:
return return_data
def recvfrom(self, num_bytes, flags):
if self.mode == MODE_TIMEOUT:
return self._do_receive_net(1, num_bytes, flags)
else:
return self._do_receive_nio(1, num_bytes, flags)
def recv(self, num_bytes, flags):
if self.mode == MODE_TIMEOUT:
return self._do_receive_net(0, num_bytes, flags)
else:
return self._do_receive_nio(0, num_bytes, flags)
has_ipv6 = True # IPV6 FTW!
# Name and address functions
def _gethostbyaddr(name):
# This is as close as I can get; at least the types are correct...
addresses = java.net.InetAddress.getAllByName(gethostbyname(name))
names = []
addrs = []
for addr in addresses:
names.append(asPyString(addr.getHostName()))
addrs.append(asPyString(addr.getHostAddress()))
return (names, addrs)
def getfqdn(name=None):
"""
Return a fully qualified domain name for name. If name is omitted or empty
it is interpreted as the local host. To find the fully qualified name,
the hostname returned by gethostbyaddr() is checked, then aliases for the
host, if available. The first name which includes a period is selected.
In case no fully qualified domain name is available, the hostname is returned.
New in version 2.0.
"""
if not name:
name = gethostname()
names, addrs = _gethostbyaddr(name)
for a in names:
if a.find(".") >= 0:
return a
return name
def gethostname():
try:
return asPyString(java.net.InetAddress.getLocalHost().getHostName())
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def gethostbyname(name):
try:
return asPyString(java.net.InetAddress.getByName(name).getHostAddress())
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def gethostbyaddr(name):
names, addrs = _gethostbyaddr(name)
return (names[0], names, addrs)
def getservbyname(service_name, protocol_name=None):
try:
from jnr.netdb import Service
except ImportError:
return None
return Service.getServiceByName(service_name, protocol_name).getPort()
def getservbyport(port, protocol_name=None):
try:
from jnr.netdb import Service
except ImportError:
return None
return Service.getServiceByPort(port, protocol_name).getName()
def getprotobyname(protocol_name=None):
try:
from jnr.netdb import Protocol
except ImportError:
return None
return Protocol.getProtocolByName(protocol_name).getProto()
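# Illustrative (comment-only): these lookups delegate to jnr.netdb when it is on
# the classpath and return None otherwise, so the results here are assumptions:
#   getservbyname('http', 'tcp')   # -> 80 when jnr.netdb is available
#   getprotobyname('tcp')          # -> 6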
def _realsocket(family = AF_INET, type = SOCK_STREAM, protocol=0):
assert family in (AF_INET, AF_INET6), "Only AF_INET and AF_INET6 sockets are currently supported on jython"
assert type in (SOCK_DGRAM, SOCK_STREAM), "Only SOCK_STREAM and SOCK_DGRAM sockets are currently supported on jython"
if type == SOCK_STREAM:
if protocol != 0:
assert protocol == IPPROTO_TCP, "Only IPPROTO_TCP supported on SOCK_STREAM sockets"
return _tcpsocket()
else:
if protocol != 0:
assert protocol == IPPROTO_UDP, "Only IPPROTO_UDP supported on SOCK_DGRAM sockets"
return _udpsocket()
#
# Attempt to provide IDNA (RFC 3490) support.
#
# Try java.net.IDN, built into java 6
#
idna_libraries = [
('java.net.IDN', 'toASCII', java.lang.IllegalArgumentException)
]
for idna_lib, idna_fn_name, exc in idna_libraries:
try:
m = __import__(idna_lib, globals(), locals(), [idna_fn_name])
idna_fn = getattr(m, idna_fn_name)
def _encode_idna(name):
try:
return idna_fn(name)
except exc:
raise UnicodeEncodeError(name)
supports('idna', True)
break
except (AttributeError, ImportError), e:
pass
else:
_encode_idna = lambda x: x.encode("ascii")
#
# Define data structures to support IPV4 and IPV6.
#
class _ip_address_t: pass
class _ipv4_address_t(_ip_address_t):
def __init__(self, sockaddr, port, jaddress):
self.sockaddr = sockaddr
self.port = port
self.jaddress = jaddress
def __getitem__(self, index):
if 0 == index:
return self.sockaddr
elif 1 == index:
return self.port
else:
raise IndexError()
def __len__(self):
return 2
def __str__(self):
return "('%s', %d)" % (self.sockaddr, self.port)
__repr__ = __str__
class _ipv6_address_t(_ip_address_t):
def __init__(self, sockaddr, port, jaddress):
self.sockaddr = sockaddr
self.port = port
self.jaddress = jaddress
def __getitem__(self, index):
if 0 == index:
return self.sockaddr
elif 1 == index:
return self.port
elif 2 == index:
return 0
elif 3 == index:
return self.jaddress.scopeId
else:
raise IndexError()
def __len__(self):
return 4
def __str__(self):
return "('%s', %d, 0, %d)" % (self.sockaddr, self.port, self.jaddress.scopeId)
__repr__ = __str__
def _get_jsockaddr(address_object, for_udp=False):
if address_object is None:
return java.net.InetSocketAddress(0) # Let the system pick an ephemeral port
if isinstance(address_object, _ip_address_t):
return java.net.InetSocketAddress(address_object.jaddress, address_object[1])
error_message = "Address must be a 2-tuple (ipv4: (host, port)) or a 4-tuple (ipv6: (host, port, flow, scope))"
if not isinstance(address_object, tuple) or \
len(address_object) not in [2,4] or \
not isinstance(address_object[0], basestring) or \
not isinstance(address_object[1], (int, long)):
raise TypeError(error_message)
if len(address_object) == 4 and not isinstance(address_object[3], (int, long)):
raise TypeError(error_message)
hostname, port = address_object[0].strip(), address_object[1]
if for_udp:
if hostname == "":
hostname = INADDR_ANY
elif hostname == "<broadcast>":
hostname = INADDR_BROADCAST
else:
if hostname == "":
hostname = None
if hostname is None:
return java.net.InetSocketAddress(port)
if isinstance(hostname, unicode):
hostname = _encode_idna(hostname)
if len(address_object) == 4:
# There is no way to get an Inet6Address here: Inet6Address.getByName() simply calls
# InetAddress.getByName(), which also returns Inet4Address objects.
# If users want to use IPv6 addresses, scoped or not,
# they should use getaddrinfo(family=AF_INET6)
pass
return java.net.InetSocketAddress(java.net.InetAddress.getByName(hostname), port)
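# Illustrative (comment-only) address forms accepted by _get_jsockaddr; hosts and
# ports are arbitrary examples:
#   _get_jsockaddr(('localhost', 8080))       # ipv4-style 2-tuple (host, port)
#   _get_jsockaddr(('::1', 8080, 0, 0))       # ipv6-style 4-tuple (host, port, flow, scope)
#   _get_jsockaddr(('', 5353), for_udp=True)  # empty host maps to INADDR_ANY for UDP
#   _get_jsockaddr(None)                      # system-chosen ephemeral port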
_ipv4_addresses_only = False
def _use_ipv4_addresses_only(value):
global _ipv4_addresses_only
_ipv4_addresses_only = value
def getaddrinfo(host, port, family=AF_INET, socktype=None, proto=0, flags=None):
try:
if not family in [AF_INET, AF_INET6, AF_UNSPEC]:
raise gaierror(errno.EIO, 'ai_family not supported')
filter_fns = []
if _ipv4_addresses_only:
filter_fns.append( lambda x: isinstance(x, java.net.Inet4Address) )
else:
filter_fns.append({
AF_INET: lambda x: isinstance(x, java.net.Inet4Address),
AF_INET6: lambda x: isinstance(x, java.net.Inet6Address),
AF_UNSPEC: lambda x: isinstance(x, java.net.InetAddress),
}[family])
if host == "":
host = java.net.InetAddress.getLocalHost().getHostName()
if isinstance(host, unicode):
host = _encode_idna(host)
passive_mode = flags is not None and flags & AI_PASSIVE
canonname_mode = flags is not None and flags & AI_CANONNAME
results = []
for a in java.net.InetAddress.getAllByName(host):
if len([f for f in filter_fns if f(a)]):
family = {java.net.Inet4Address: AF_INET, java.net.Inet6Address: AF_INET6}[a.getClass()]
if passive_mode and not canonname_mode:
canonname = ""
else:
canonname = asPyString(a.getCanonicalHostName())
if host is None and passive_mode and not canonname_mode:
sockaddr = INADDR_ANY
else:
sockaddr = asPyString(a.getHostAddress())
# TODO: Include flowinfo and scopeid in a 4-tuple for IPv6 addresses
sock_tuple = {AF_INET : _ipv4_address_t, AF_INET6 : _ipv6_address_t}[family](sockaddr, port, a)
results.append((family, socktype, proto, canonname, sock_tuple))
return results
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def getnameinfo(sock_addr, flags):
raise NotImplementedError("getnameinfo not yet supported on jython.")
def getdefaulttimeout():
return _defaulttimeout
def _calctimeoutvalue(value):
if value is None:
return None
try:
floatvalue = float(value)
except:
raise TypeError('Socket timeout value must be a number or None')
if floatvalue < 0.0:
raise ValueError("Socket timeout value cannot be negative")
if floatvalue < 0.000001:
return 0.0
return floatvalue
def setdefaulttimeout(timeout):
global _defaulttimeout
try:
_defaulttimeout = _calctimeoutvalue(timeout)
finally:
_nonblocking_api_mixin.timeout = _defaulttimeout
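# Illustrative (comment-only): module-wide default timeout handling.
#   setdefaulttimeout(10.0)   # sockets created afterwards start in MODE_TIMEOUT
#   getdefaulttimeout()       # -> 10.0
#   setdefaulttimeout(None)   # restore blocking mode as the default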
def htons(x): return x
def htonl(x): return x
def ntohs(x): return x
def ntohl(x): return x
def inet_pton(family, ip_string):
try:
ia = java.net.InetAddress.getByName(ip_string)
bytes = []
for byte in ia.getAddress():
if byte < 0:
bytes.append(byte+256)
else:
bytes.append(byte)
return "".join([chr(byte) for byte in bytes])
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def inet_ntop(family, packed_ip):
try:
jByteArray = jarray.array(packed_ip, 'b')
ia = java.net.InetAddress.getByAddress(jByteArray)
return ia.getHostAddress()
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def inet_aton(ip_string):
return inet_pton(AF_INET, ip_string)
def inet_ntoa(packed_ip):
return inet_ntop(AF_INET, packed_ip)
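# Illustrative (comment-only) round trip for the IPv4 helpers:
#   inet_aton('127.0.0.1')           # -> '\x7f\x00\x00\x01' (4 packed bytes)
#   inet_ntoa('\x7f\x00\x00\x01')    # -> '127.0.0.1'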
class _nonblocking_api_mixin:
mode = MODE_BLOCKING
reference_count = 0
close_lock = threading.Lock()
def __init__(self):
self.timeout = _defaulttimeout
if self.timeout is not None:
self.mode = MODE_TIMEOUT
self.pending_options = {
(SOL_SOCKET, SO_REUSEADDR): 0,
}
def gettimeout(self):
return self.timeout
def settimeout(self, timeout):
self.timeout = _calctimeoutvalue(timeout)
if self.timeout is None:
self.mode = MODE_BLOCKING
elif self.timeout < 0.000001:
self.mode = MODE_NONBLOCKING
else:
self.mode = MODE_TIMEOUT
self._config()
def setblocking(self, flag):
if flag:
self.mode = MODE_BLOCKING
self.timeout = None
else:
self.mode = MODE_NONBLOCKING
self.timeout = 0.0
self._config()
def getblocking(self):
return self.mode == MODE_BLOCKING
def setsockopt(self, level, optname, value):
try:
if self.sock_impl:
self.sock_impl.setsockopt(level, optname, value)
else:
self.pending_options[ (level, optname) ] = value
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def getsockopt(self, level, optname):
try:
if self.sock_impl:
return self.sock_impl.getsockopt(level, optname)
else:
return self.pending_options.get( (level, optname), None)
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def shutdown(self, how):
assert how in (SHUT_RD, SHUT_WR, SHUT_RDWR)
if not self.sock_impl:
raise error(errno.ENOTCONN, "Transport endpoint is not connected")
try:
self.sock_impl.shutdown(how)
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def close(self):
try:
if self.sock_impl:
self.sock_impl.close()
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _config(self):
assert self.mode in _permitted_modes
if self.sock_impl:
self.sock_impl.config(self.mode, self.timeout)
for level, optname in self.pending_options.keys():
if optname != SO_REUSEADDR:
self.sock_impl.setsockopt(level, optname, self.pending_options[ (level, optname) ])
def getchannel(self):
if not self.sock_impl:
return None
return self.sock_impl.getchannel()
def fileno(self):
if not self.sock_impl:
return None
return self.sock_impl.fileno()
def _get_jsocket(self):
return self.sock_impl.jsocket
class _tcpsocket(_nonblocking_api_mixin):
sock_impl = None
istream = None
ostream = None
local_addr = None
server = 0
def __init__(self):
_nonblocking_api_mixin.__init__(self)
def bind(self, addr):
assert not self.sock_impl
assert not self.local_addr
# Do the address format check
_get_jsockaddr(addr)
self.local_addr = addr
def listen(self, backlog):
"This signifies a server socket"
try:
assert not self.sock_impl
self.server = 1
self.sock_impl = _server_socket_impl(_get_jsockaddr(self.local_addr), backlog, self.pending_options[ (SOL_SOCKET, SO_REUSEADDR) ])
self._config()
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def accept(self):
"This signifies a server socket"
try:
if not self.sock_impl:
self.listen(5)  # listen() requires an explicit backlog; 5 is a conventional default
assert self.server
new_sock = self.sock_impl.accept()
if not new_sock:
raise would_block_error()
cliconn = _tcpsocket()
cliconn.pending_options[ (SOL_SOCKET, SO_REUSEADDR) ] = new_sock.jsocket.getReuseAddress()
cliconn.sock_impl = new_sock
cliconn._setup()
return cliconn, new_sock.getpeername()
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _do_connect(self, addr):
try:
assert not self.sock_impl
self.sock_impl = _client_socket_impl()
if self.local_addr: # Has the socket been bound to a local address?
self.sock_impl.bind(_get_jsockaddr(self.local_addr), self.pending_options[ (SOL_SOCKET, SO_REUSEADDR) ])
self._config() # Configure timeouts, etc, now that the socket exists
self.sock_impl.connect(_get_jsockaddr(addr))
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def connect(self, addr):
"This signifies a client socket"
self._do_connect(addr)
self._setup()
def connect_ex(self, addr):
"This signifies a client socket"
if not self.sock_impl:
self._do_connect(addr)
if self.sock_impl.finish_connect():
self._setup()
if self.mode == MODE_NONBLOCKING:
return errno.EISCONN
return 0
return errno.EINPROGRESS
def _setup(self):
if self.mode != MODE_NONBLOCKING:
self.istream = self.sock_impl.jsocket.getInputStream()
self.ostream = self.sock_impl.jsocket.getOutputStream()
def recv(self, n):
try:
if not self.sock_impl: raise error(errno.ENOTCONN, 'Socket is not connected')
if self.sock_impl.jchannel.isConnectionPending():
self.sock_impl.jchannel.finishConnect()
data = jarray.zeros(n, 'b')
m = self.sock_impl.read(data)
if m == -1:  # indicates EOF has been reached, so we just return the empty string
return ""
elif m <= 0:
if self.mode == MODE_NONBLOCKING:
raise would_block_error()
return ""
if m < n:
data = data[:m]
return data.tostring()
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def recvfrom(self, n):
return self.recv(n), None
def send(self, s):
try:
if not self.sock_impl: raise error(errno.ENOTCONN, 'Socket is not connected')
if self.sock_impl.jchannel.isConnectionPending():
self.sock_impl.jchannel.finishConnect()
numwritten = self.sock_impl.write(s)
if numwritten == 0 and self.mode == MODE_NONBLOCKING:
raise would_block_error()
return numwritten
except java.lang.Exception, jlx:
raise _map_exception(jlx)
sendall = send
def getsockname(self):
try:
if not self.sock_impl:
host, port = self.local_addr or ("", 0)
host = java.net.InetAddress.getByName(host).getHostAddress()
else:
if self.server:
host = self.sock_impl.jsocket.getInetAddress().getHostAddress()
else:
host = self.sock_impl.jsocket.getLocalAddress().getHostAddress()
port = self.sock_impl.jsocket.getLocalPort()
return (host, port)
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def getpeername(self):
try:
assert self.sock_impl
assert not self.server
host = self.sock_impl.jsocket.getInetAddress().getHostAddress()
port = self.sock_impl.jsocket.getPort()
return (host, port)
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def close(self):
try:
if self.istream:
self.istream.close()
if self.ostream:
self.ostream.close()
if self.sock_impl:
self.sock_impl.close()
except java.lang.Exception, jlx:
raise _map_exception(jlx)
class _udpsocket(_nonblocking_api_mixin):
sock_impl = None
connected = False
def __init__(self):
_nonblocking_api_mixin.__init__(self)
def bind(self, addr):
try:
assert not self.sock_impl
self.sock_impl = _datagram_socket_impl(_get_jsockaddr(addr, True), self.pending_options[ (SOL_SOCKET, SO_REUSEADDR) ])
self._config()
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _do_connect(self, addr):
try:
assert not self.connected, "Datagram Socket is already connected"
if not self.sock_impl:
self.sock_impl = _datagram_socket_impl()
self._config()
self.sock_impl.connect(_get_jsockaddr(addr))
self.connected = True
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def connect(self, addr):
self._do_connect(addr)
def connect_ex(self, addr):
if not self.sock_impl:
self._do_connect(addr)
return 0
def sendto(self, data, p1, p2=None):
try:
if not p2:
flags, addr = 0, p1
else:
flags, addr = p1, p2
if not self.sock_impl:
self.sock_impl = _datagram_socket_impl()
self._config()
byte_array = java.lang.String(data).getBytes('iso-8859-1')
result = self.sock_impl.sendto(byte_array, _get_jsockaddr(addr, True), flags)
return result
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def send(self, data, flags=None):
if not self.connected: raise error(errno.ENOTCONN, "Socket is not connected")
byte_array = java.lang.String(data).getBytes('iso-8859-1')
return self.sock_impl.send(byte_array, flags)
def recvfrom(self, num_bytes, flags=None):
"""
There is some disagreement as to what the behaviour should be if
a recvfrom operation is requested on an unbound socket.
See the following links for more information
http://bugs.jython.org/issue1005
http://bugs.sun.com/view_bug.do?bug_id=6621689
"""
try:
# This is the old 2.1 behaviour
#assert self.sock_impl
# This is amak's preferred interpretation
#raise error(errno.ENOTCONN, "Recvfrom on unbound udp socket meaningless operation")
# And this is the option for cpython compatibility
if not self.sock_impl:
self.sock_impl = _datagram_socket_impl()
self._config()
return self.sock_impl.recvfrom(num_bytes, flags)
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def recv(self, num_bytes, flags=None):
if not self.sock_impl: raise error(errno.ENOTCONN, "Socket is not connected")
try:
return self.sock_impl.recv(num_bytes, flags)
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def getsockname(self):
try:
assert self.sock_impl
host = self.sock_impl.jsocket.getLocalAddress().getHostAddress()
port = self.sock_impl.jsocket.getLocalPort()
return (host, port)
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def getpeername(self):
try:
assert self.sock_impl
host = self.sock_impl.jsocket.getInetAddress().getHostAddress()
port = self.sock_impl.jsocket.getPort()
return (host, port)
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def __del__(self):
self.close()
_socketmethods = (
'bind', 'connect', 'connect_ex', 'fileno', 'listen',
'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
'sendall', 'setblocking',
'settimeout', 'gettimeout', 'shutdown', 'getchannel')
# All the method names that must be delegated to either the real socket
# object or the _closedsocket object.
_delegate_methods = ("recv", "recvfrom", "recv_into", "recvfrom_into",
"send", "sendto")
class _closedsocket(object):
__slots__ = []
def _dummy(*args):
raise error(errno.EBADF, 'Bad file descriptor')
# All _delegate_methods must also be initialized here.
send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy
__getattr__ = _dummy
_active_sockets = set()
def _closeActiveSockets():
for socket in _active_sockets.copy():
try:
socket.close()
except error:
msg = 'Problem closing socket: %s: %r' % (socket, sys.exc_info())
print >> sys.stderr, msg
class _socketobject(object):
__doc__ = _realsocket.__doc__
__slots__ = ["_sock", "__weakref__"] + list(_delegate_methods)
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
if _sock is None:
_sock = _realsocket(family, type, proto)
_sock.reference_count += 1
elif isinstance(_sock, _nonblocking_api_mixin):
_sock.reference_count += 1
self._sock = _sock
for method in _delegate_methods:
meth = getattr(_sock, method, None)
if meth:
setattr(self, method, meth)
_active_sockets.add(self)
def close(self):
try:
_active_sockets.remove(self)
except KeyError:
pass
_sock = self._sock
if isinstance(_sock, _nonblocking_api_mixin):
_sock.close_lock.acquire()
try:
_sock.reference_count -=1
if not _sock.reference_count:
_sock.close()
self._sock = _closedsocket()
dummy = self._sock._dummy
for method in _delegate_methods:
setattr(self, method, dummy)
self.send = self.recv = self.sendto = self.recvfrom = \
self._sock._dummy
finally:
_sock.close_lock.release()
#close.__doc__ = _realsocket.close.__doc__
def accept(self):
sock, addr = self._sock.accept()
return _socketobject(_sock=sock), addr
#accept.__doc__ = _realsocket.accept.__doc__
def dup(self):
"""dup() -> socket object
Return a new socket object connected to the same system resource."""
_sock = self._sock
if not isinstance(_sock, _nonblocking_api_mixin):
return _socketobject(_sock=_sock)
_sock.close_lock.acquire()
try:
duped = _socketobject(_sock=_sock)
finally:
_sock.close_lock.release()
return duped
def makefile(self, mode='r', bufsize=-1):
"""makefile([mode[, bufsize]]) -> file object
Return a regular file object corresponding to the socket. The mode
and bufsize arguments are as for the built-in open() function."""
_sock = self._sock
if not isinstance(_sock, _nonblocking_api_mixin):
return _fileobject(_sock, mode, bufsize)
_sock.close_lock.acquire()
try:
fileobject = _fileobject(_sock, mode, bufsize)
finally:
_sock.close_lock.release()
return fileobject
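    # Hypothetical usage sketch (host name illustrative, not from the
    # original source):
    #   s = socket(AF_INET, SOCK_STREAM)
    #   s.connect(('example.com', 80))
    #   f = s.makefile('rb')       # buffered file-like view of the socket
    #   status_line = f.readline()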
family = property(lambda self: self._sock.family, doc="the socket family")
type = property(lambda self: self._sock.type, doc="the socket type")
proto = property(lambda self: self._sock.proto, doc="the socket protocol")
_s = ("def %s(self, *args): return self._sock.%s(*args)\n\n"
#"%s.__doc__ = _realsocket.%s.__doc__\n")
)
for _m in _socketmethods:
#exec _s % (_m, _m, _m, _m)
exec _s % (_m, _m)
del _m, _s
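    # The exec loop above stamps out thin delegating wrappers, one per name
    # in _socketmethods; each generated method is equivalent to (illustrative):
    #   def bind(self, *args): return self._sock.bind(*args)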
socket = SocketType = _socketobject
class _fileobject(object):
"""Faux file object attached to a socket object."""
default_bufsize = 8192
name = "<socket>"
__slots__ = ["mode", "bufsize", "softspace",
# "closed" is a property, see below
"_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf",
"_close"]
def __init__(self, sock, mode='rb', bufsize=-1, close=False):
self._sock = sock
if isinstance(sock, _nonblocking_api_mixin):
sock.reference_count += 1
self.mode = mode # Not actually used in this version
if bufsize < 0:
bufsize = self.default_bufsize
self.bufsize = bufsize
self.softspace = False
if bufsize == 0:
self._rbufsize = 1
elif bufsize == 1:
self._rbufsize = self.default_bufsize
else:
self._rbufsize = bufsize
self._wbufsize = bufsize
self._rbuf = "" # A string
self._wbuf = [] # A list of strings
self._close = close
def _getclosed(self):
return self._sock is None
closed = property(_getclosed, doc="True if the file is closed")
def close(self):
try:
if self._sock:
self.flush()
finally:
if self._sock:
if isinstance(self._sock, _nonblocking_api_mixin):
self._sock.reference_count -= 1
if not self._sock.reference_count or self._close:
self._sock.close()
elif self._close:
self._sock.close()
self._sock = None
def __del__(self):
try:
self.close()
except:
# close() may fail if __init__ didn't complete
pass
def flush(self):
if self._wbuf:
buffer = "".join(self._wbuf)
self._wbuf = []
self._sock.sendall(buffer)
def fileno(self):
return self._sock.fileno()
def write(self, data):
data = str(data) # XXX Should really reject non-string non-buffers
if not data:
return
self._wbuf.append(data)
if (self._wbufsize == 0 or
self._wbufsize == 1 and '\n' in data or
self._get_wbuf_len() >= self._wbufsize):
self.flush()
def writelines(self, list):
# XXX We could do better here for very long lists
# XXX Should really reject non-string non-buffers
self._wbuf.extend(filter(None, map(str, list)))
if (self._wbufsize <= 1 or
self._get_wbuf_len() >= self._wbufsize):
self.flush()
def _get_wbuf_len(self):
buf_len = 0
for x in self._wbuf:
buf_len += len(x)
return buf_len
def read(self, size=-1):
data = self._rbuf
if size < 0:
# Read until EOF
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
if self._rbufsize <= 1:
recv_size = self.default_bufsize
else:
recv_size = self._rbufsize
while True:
data = self._sock.recv(recv_size)
if not data:
break
buffers.append(data)
return "".join(buffers)
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
left = size - buf_len
recv_size = max(self._rbufsize, left)
data = self._sock.recv(recv_size)
if not data:
break
buffers.append(data)
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readline(self, size=-1):
data = self._rbuf
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
assert data == ""
buffers = []
recv = self._sock.recv
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
return "".join(buffers)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self._sock.recv(self._rbufsize)
if not data:
break
buffers.append(data)
nl = data.find('\n')
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
return "".join(buffers)
else:
# Read until size bytes or \n or EOF seen, whichever comes first
nl = data.find('\n', 0, size)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
return data[:nl]
buf_len = len(data)
if buf_len >= size:
self._rbuf = data[size:]
return data[:size]
buffers = []
if data:
buffers.append(data)
self._rbuf = ""
while True:
data = self._sock.recv(self._rbufsize)
if not data:
break
buffers.append(data)
left = size - buf_len
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
self._rbuf = data[nl:]
buffers[-1] = data[:nl]
break
n = len(data)
if n >= left:
self._rbuf = data[left:]
buffers[-1] = data[:left]
break
buf_len += n
return "".join(buffers)
def readlines(self, sizehint=0):
total = 0
list = []
while True:
line = self.readline()
if not line:
break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
# Iterator protocols
def __iter__(self):
return self
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
# Define the SSL support
class ssl:
def __init__(self, plain_sock, keyfile=None, certfile=None):
try:
self.ssl_sock = self._make_ssl_socket(plain_sock)
self._in_buf = java.io.BufferedInputStream(self.ssl_sock.getInputStream())
self._out_buf = java.io.BufferedOutputStream(self.ssl_sock.getOutputStream())
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _make_ssl_socket(self, plain_socket, auto_close=0):
java_net_socket = plain_socket._get_jsocket()
assert isinstance(java_net_socket, java.net.Socket)
host = java_net_socket.getInetAddress().getHostAddress()
port = java_net_socket.getPort()
        factory = javax.net.ssl.SSLSocketFactory.getDefault()
ssl_socket = factory.createSocket(java_net_socket, host, port, auto_close)
ssl_socket.setEnabledCipherSuites(ssl_socket.getSupportedCipherSuites())
ssl_socket.startHandshake()
return ssl_socket
def read(self, n=4096):
try:
data = jarray.zeros(n, 'b')
m = self._in_buf.read(data, 0, n)
if m <= 0:
return ""
if m < n:
data = data[:m]
return data.tostring()
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def write(self, s):
try:
self._out_buf.write(s)
self._out_buf.flush()
return len(s)
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def _get_server_cert(self):
try:
return self.ssl_sock.getSession().getPeerCertificates()[0]
except java.lang.Exception, jlx:
raise _map_exception(jlx)
def server(self):
cert = self._get_server_cert()
return cert.getSubjectDN().toString()
def issuer(self):
cert = self._get_server_cert()
return cert.getIssuerDN().toString()
_realssl = ssl
def ssl(sock, keyfile=None, certfile=None):
if hasattr(sock, "_sock"):
sock = sock._sock
return _realssl(sock, keyfile, certfile)
def test():
s = socket(AF_INET, SOCK_STREAM)
s.connect(("", 80))
s.send("GET / HTTP/1.0\r\n\r\n")
while 1:
data = s.recv(2000)
print data
if not data:
break
if __name__ == '__main__':
test()
|
tseaver/google-cloud-python
|
refs/heads/master
|
core/tests/unit/test_client.py
|
2
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import unittest
import mock
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
class Test_ClientFactoryMixin(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.client import _ClientFactoryMixin
return _ClientFactoryMixin
def test_virtual(self):
klass = self._get_target_class()
self.assertFalse("__init__" in klass.__dict__)
class TestClient(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.client import Client
return Client
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_unpickleable(self):
import pickle
CREDENTIALS = _make_credentials()
HTTP = object()
client_obj = self._make_one(credentials=CREDENTIALS, _http=HTTP)
with self.assertRaises(pickle.PicklingError):
pickle.dumps(client_obj)
def test_constructor_defaults(self):
credentials = _make_credentials()
patch = mock.patch("google.auth.default", return_value=(credentials, None))
with patch as default:
client_obj = self._make_one()
self.assertIs(client_obj._credentials, credentials)
self.assertIsNone(client_obj._http_internal)
default.assert_called_once_with()
def test_constructor_explicit(self):
credentials = _make_credentials()
http = mock.sentinel.http
client_obj = self._make_one(credentials=credentials, _http=http)
self.assertIs(client_obj._credentials, credentials)
self.assertIs(client_obj._http_internal, http)
def test_constructor_bad_credentials(self):
credentials = mock.sentinel.credentials
with self.assertRaises(ValueError):
self._make_one(credentials=credentials)
def test_from_service_account_json(self):
from google.cloud import _helpers
klass = self._get_target_class()
# Mock both the file opening and the credentials constructor.
info = {"dummy": "value", "valid": "json"}
json_fi = io.StringIO(_helpers._bytes_to_unicode(json.dumps(info)))
file_open_patch = mock.patch("io.open", return_value=json_fi)
constructor_patch = mock.patch(
"google.oauth2.service_account.Credentials." "from_service_account_info",
return_value=_make_credentials(),
)
with file_open_patch as file_open:
with constructor_patch as constructor:
client_obj = klass.from_service_account_json(mock.sentinel.filename)
self.assertIs(client_obj._credentials, constructor.return_value)
self.assertIsNone(client_obj._http_internal)
# Check that mocks were called as expected.
file_open.assert_called_once_with(mock.sentinel.filename, "r", encoding="utf-8")
constructor.assert_called_once_with(info)
def test_from_service_account_json_bad_args(self):
KLASS = self._get_target_class()
with self.assertRaises(TypeError):
KLASS.from_service_account_json(
mock.sentinel.filename, credentials=mock.sentinel.credentials
)
def test__http_property_existing(self):
credentials = _make_credentials()
http = object()
client = self._make_one(credentials=credentials, _http=http)
self.assertIs(client._http_internal, http)
self.assertIs(client._http, http)
def test__http_property_new(self):
from google.cloud.client import _CREDENTIALS_REFRESH_TIMEOUT
credentials = _make_credentials()
client = self._make_one(credentials=credentials)
self.assertIsNone(client._http_internal)
authorized_session_patch = mock.patch(
"google.auth.transport.requests.AuthorizedSession",
return_value=mock.sentinel.http,
)
with authorized_session_patch as AuthorizedSession:
self.assertIs(client._http, mock.sentinel.http)
# Check the mock.
AuthorizedSession.assert_called_once_with(credentials, refresh_timeout=_CREDENTIALS_REFRESH_TIMEOUT)
# Make sure the cached value is used on subsequent access.
self.assertIs(client._http_internal, mock.sentinel.http)
self.assertIs(client._http, mock.sentinel.http)
self.assertEqual(AuthorizedSession.call_count, 1)
class TestClientWithProject(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.client import ClientWithProject
return ClientWithProject
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_constructor_defaults(self):
credentials = _make_credentials()
patch1 = mock.patch("google.auth.default", return_value=(credentials, None))
project = "prahj-ekt"
patch2 = mock.patch(
"google.cloud.client._determine_default_project", return_value=project
)
with patch1 as default:
with patch2 as _determine_default_project:
client_obj = self._make_one()
self.assertEqual(client_obj.project, project)
self.assertIs(client_obj._credentials, credentials)
self.assertIsNone(client_obj._http_internal)
default.assert_called_once_with()
_determine_default_project.assert_called_once_with(None)
def test_constructor_missing_project(self):
from google.cloud._testing import _Monkey
from google.cloud import client
FUNC_CALLS = []
def mock_determine_proj(project):
FUNC_CALLS.append((project, "_determine_default_project"))
return None
with _Monkey(client, _determine_default_project=mock_determine_proj):
self.assertRaises(EnvironmentError, self._make_one)
self.assertEqual(FUNC_CALLS, [(None, "_determine_default_project")])
def test_constructor_w_invalid_project(self):
CREDENTIALS = _make_credentials()
HTTP = object()
with self.assertRaises(ValueError):
self._make_one(project=object(), credentials=CREDENTIALS, _http=HTTP)
def _explicit_ctor_helper(self, project):
import six
CREDENTIALS = _make_credentials()
HTTP = object()
client_obj = self._make_one(
project=project, credentials=CREDENTIALS, _http=HTTP
)
if isinstance(project, six.binary_type):
self.assertEqual(client_obj.project, project.decode("utf-8"))
else:
self.assertEqual(client_obj.project, project)
self.assertIs(client_obj._credentials, CREDENTIALS)
self.assertIs(client_obj._http_internal, HTTP)
def test_constructor_explicit_bytes(self):
PROJECT = b"PROJECT"
self._explicit_ctor_helper(PROJECT)
def test_constructor_explicit_unicode(self):
PROJECT = u"PROJECT"
self._explicit_ctor_helper(PROJECT)
def _from_service_account_json_helper(self, project=None):
from google.cloud import _helpers
klass = self._get_target_class()
info = {"dummy": "value", "valid": "json"}
if project is None:
expected_project = "eye-d-of-project"
else:
expected_project = project
info["project_id"] = expected_project
# Mock both the file opening and the credentials constructor.
json_fi = io.StringIO(_helpers._bytes_to_unicode(json.dumps(info)))
file_open_patch = mock.patch("io.open", return_value=json_fi)
constructor_patch = mock.patch(
"google.oauth2.service_account.Credentials." "from_service_account_info",
return_value=_make_credentials(),
)
with file_open_patch as file_open:
with constructor_patch as constructor:
kwargs = {}
if project is not None:
kwargs["project"] = project
client_obj = klass.from_service_account_json(
mock.sentinel.filename, **kwargs
)
self.assertIs(client_obj._credentials, constructor.return_value)
self.assertIsNone(client_obj._http_internal)
self.assertEqual(client_obj.project, expected_project)
# Check that mocks were called as expected.
file_open.assert_called_once_with(mock.sentinel.filename, "r", encoding="utf-8")
constructor.assert_called_once_with(info)
def test_from_service_account_json(self):
self._from_service_account_json_helper()
def test_from_service_account_json_project_set(self):
self._from_service_account_json_helper(project="prah-jekt")
|
leppa/home-assistant
|
refs/heads/dev
|
homeassistant/components/caldav/__init__.py
|
36
|
"""The caldav component."""
|
jcshen007/cloudstack
|
refs/heads/master
|
systemvm/test/python/TestCsRoute.py
|
3
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from cs.CsRoute import CsRoute
import merge
class TestCsRoute(unittest.TestCase):
def setUp(self):
merge.DataBag.DPATH = "."
def test_init(self):
csroute = CsRoute()
self.assertIsInstance(csroute, CsRoute)
def test_defaultroute_exists(self):
csroute = CsRoute()
self.assertFalse(csroute.defaultroute_exists())
def test_add_defaultroute(self):
csroute = CsRoute()
self.assertTrue(csroute.add_defaultroute("192.168.1.1"))
def test_get_tablename(self):
csroute = CsRoute()
name = "eth1"
self.assertEqual("Table_eth1", csroute.get_tablename(name))
if __name__ == '__main__':
unittest.main()
|
Onager/l2tdevtools
|
refs/heads/master
|
l2tdevtools/py2to3.py
|
2
|
# -*- coding: utf-8 -*-
"""The Python 2 and 3 compatible type definitions."""
import sys
# pylint: disable=invalid-name,undefined-variable
if sys.version_info[0] < 3:
BYTES_TYPE = str
INTEGER_TYPES = (int, long)
STRING_TYPES = (basestring, )
UNICODE_TYPE = unicode
else:
BYTES_TYPE = bytes
INTEGER_TYPES = (int, )
STRING_TYPES = (str, )
UNICODE_TYPE = str
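# Illustrative usage (not part of the original module): the aliases enable
# version-agnostic type checks, e.g.
#   if isinstance(value, STRING_TYPES) and not isinstance(value, UNICODE_TYPE):
#       value = value.decode('utf-8')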
|
defionscode/ansible-modules-extras
|
refs/heads/devel
|
cloud/cloudstack/cs_instance.py
|
11
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_instance
short_description: Manages instances and virtual machines on Apache CloudStack based clouds.
description:
- Deploy, start, update, scale, restart, restore, stop and destroy instances.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Host name of the instance. C(name) can only contain ASCII letters.
- Name will be generated (UUID) by CloudStack if not specified and can not be changed afterwards.
- Either C(name) or C(display_name) is required.
required: false
default: null
display_name:
description:
- Custom display name of the instances.
- Display name will be set to C(name) if not specified.
- Either C(name) or C(display_name) is required.
required: false
default: null
group:
description:
      - Group the new instance should be placed in.
required: false
default: null
state:
description:
- State of the instance.
required: false
default: 'present'
choices: [ 'deployed', 'started', 'stopped', 'restarted', 'restored', 'destroyed', 'expunged', 'present', 'absent' ]
service_offering:
description:
- Name or id of the service offering of the new instance.
      - If not set, the first service offering found is used.
required: false
default: null
cpu:
description:
- The number of CPUs to allocate to the instance, used with custom service offerings
required: false
default: null
cpu_speed:
description:
- The clock speed/shares allocated to the instance, used with custom service offerings
required: false
default: null
memory:
description:
- The memory allocated to the instance, used with custom service offerings
required: false
default: null
template:
description:
- Name or id of the template to be used for creating the new instance.
- Required when using C(state=present).
- Mutually exclusive with C(ISO) option.
required: false
default: null
iso:
description:
- Name or id of the ISO to be used for creating the new instance.
- Required when using C(state=present).
- Mutually exclusive with C(template) option.
required: false
default: null
hypervisor:
description:
      - Name of the hypervisor to be used for creating the new instance.
- Relevant when using C(state=present), but only considered if not set on ISO/template.
      - If not set or found on ISO/template, the first hypervisor found will be used.
required: false
default: null
choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ]
keyboard:
description:
- Keyboard device type for the instance.
required: false
default: null
choices: [ 'de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us' ]
networks:
description:
- List of networks to use for the new instance.
required: false
default: []
aliases: [ 'network' ]
ip_address:
description:
- IPv4 address for default instance's network during creation.
required: false
default: null
ip6_address:
description:
- IPv6 address for default instance's network.
required: false
default: null
ip_to_networks:
description:
- "List of mappings in the form {'network': NetworkName, 'ip': 1.2.3.4}"
- Mutually exclusive with C(networks) option.
required: false
default: null
aliases: [ 'ip_to_network' ]
disk_offering:
description:
- Name of the disk offering to be used.
required: false
default: null
disk_size:
description:
- Disk size in GByte required if deploying instance from ISO.
required: false
default: null
root_disk_size:
description:
      - Root disk size in GByte. Required when deploying an instance on the KVM hypervisor if the root disk should be resized at startup (needs CloudStack >= 4.4, and cloud-initramfs-growroot installed and enabled in the template).
required: false
default: null
security_groups:
description:
      - List of security groups to be applied to the instance.
required: false
default: null
aliases: [ 'security_group' ]
domain:
description:
- Domain the instance is related to.
required: false
default: null
account:
description:
- Account the instance is related to.
required: false
default: null
project:
description:
      - Name of the project the instance is to be deployed in.
required: false
default: null
zone:
description:
      - Name of the zone in which the instance should be deployed.
- If not set, default zone is used.
required: false
default: null
ssh_key:
description:
- Name of the SSH key to be deployed on the new instance.
required: false
default: null
affinity_groups:
description:
- Affinity groups names to be applied to the new instance.
required: false
default: []
aliases: [ 'affinity_group' ]
user_data:
description:
- Optional data (ASCII) that can be sent to the instance upon a successful deployment.
- The data will be automatically base64 encoded.
      - Consider switching to HTTP_POST by using C(CLOUDSTACK_METHOD=post) to raise the 2 KB HTTP_GET size limit to 32 KB.
required: false
default: null
force:
description:
- Force stop/start the instance if required to apply changes, otherwise a running instance will not be changed.
required: false
default: false
tags:
description:
- List of tags. Tags are a list of dictionaries having keys C(key) and C(value).
- "If you want to delete all tags, set a empty list e.g. C(tags: [])."
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Create an instance from an ISO
# NOTE: Names of offerings and ISOs depend on the CloudStack configuration.
- local_action:
module: cs_instance
name: web-vm-1
iso: Linux Debian 7 64-bit
hypervisor: VMware
project: Integration
zone: ch-zrh-ix-01
service_offering: 1cpu_1gb
disk_offering: PerfPlus Storage
disk_size: 20
networks:
- Server Integration
- Sync Integration
- Storage Integration
# For changing a running instance, use the 'force' parameter
- local_action:
module: cs_instance
name: web-vm-1
display_name: web-vm-01.example.com
iso: Linux Debian 7 64-bit
service_offering: 2cpu_2gb
force: yes
# Create or update an instance on Exoscale's public cloud using display_name.
# Note: user_data can be used to kickstart the instance using cloud-init yaml config.
- local_action:
module: cs_instance
display_name: web-vm-1
template: Linux Debian 7 64-bit
service_offering: Tiny
ssh_key: john@example.com
tags:
- { key: admin, value: john }
- { key: foo, value: bar }
user_data: |
#cloud-config
packages:
- nginx
# Create an instance with multiple interfaces specifying the IP addresses
- local_action:
module: cs_instance
name: web-vm-1
template: Linux Debian 7 64-bit
service_offering: Tiny
ip_to_networks:
- {'network': NetworkA, 'ip': '10.1.1.1'}
- {'network': NetworkB, 'ip': '192.168.1.1'}
# Ensure an instance is stopped
- local_action: cs_instance name=web-vm-1 state=stopped
# Ensure an instance is running
- local_action: cs_instance name=web-vm-1 state=started
# Remove an instance
- local_action: cs_instance name=web-vm-1 state=absent
'''
RETURN = '''
---
id:
description: UUID of the instance.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the instance.
returned: success
type: string
sample: web-01
display_name:
description: Display name of the instance.
returned: success
type: string
sample: web-01
group:
  description: Group name the instance is related to.
returned: success
type: string
sample: web
created:
  description: Date the instance was created.
returned: success
type: string
sample: 2014-12-01T14:57:57+0100
password_enabled:
description: True if password setting is enabled.
returned: success
type: boolean
sample: true
password:
  description: The password of the instance, if it exists.
returned: success
type: string
sample: Ge2oe7Do
ssh_key:
description: Name of SSH key deployed to instance.
returned: success
type: string
sample: key@work
domain:
description: Domain the instance is related to.
returned: success
type: string
sample: example domain
account:
description: Account the instance is related to.
returned: success
type: string
sample: example account
project:
  description: Name of the project the instance is related to.
returned: success
type: string
sample: Production
default_ip:
description: Default IP address of the instance.
returned: success
type: string
sample: 10.23.37.42
public_ip:
  description: Public IP address associated with the instance via static NAT rule.
returned: success
type: string
sample: 1.2.3.4
iso:
description: Name of ISO the instance was deployed with.
returned: success
type: string
sample: Debian-8-64bit
template:
description: Name of template the instance was deployed with.
returned: success
type: string
sample: Debian-8-64bit
service_offering:
description: Name of the service offering the instance has.
returned: success
type: string
sample: 2cpu_2gb
zone:
description: Name of zone the instance is in.
returned: success
type: string
sample: ch-gva-2
state:
description: State of the instance.
returned: success
type: string
sample: Running
security_groups:
description: Security groups the instance is in.
returned: success
type: list
sample: '[ "default" ]'
affinity_groups:
description: Affinity groups the instance is in.
returned: success
type: list
sample: '[ "webservers" ]'
tags:
description: List of resource tags associated with the instance.
returned: success
type: dict
sample: '[ { "key": "foo", "value": "bar" } ]'
hypervisor:
description: Hypervisor related to this instance.
returned: success
type: string
sample: KVM
instance_name:
description: Internal name of the instance (ROOT admin only).
returned: success
type: string
sample: i-44-3992-VM
'''
import base64
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackInstance(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackInstance, self).__init__(module)
self.returns = {
'group': 'group',
'hypervisor': 'hypervisor',
'instancename': 'instance_name',
'publicip': 'public_ip',
'passwordenabled': 'password_enabled',
'password': 'password',
'serviceofferingname': 'service_offering',
'isoname': 'iso',
'templatename': 'template',
'keypair': 'ssh_key',
}
self.instance = None
self.template = None
self.iso = None
def get_service_offering_id(self):
service_offering = self.module.params.get('service_offering')
service_offerings = self.cs.listServiceOfferings()
if service_offerings:
if not service_offering:
return service_offerings['serviceoffering'][0]['id']
for s in service_offerings['serviceoffering']:
if service_offering in [ s['name'], s['id'] ]:
return s['id']
self.module.fail_json(msg="Service offering '%s' not found" % service_offering)
def get_template_or_iso(self, key=None):
template = self.module.params.get('template')
iso = self.module.params.get('iso')
if not template and not iso:
return None
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['zoneid'] = self.get_zone(key='id')
args['isrecursive'] = True
if template:
if self.template:
return self._get_by_key(key, self.template)
args['templatefilter'] = 'executable'
templates = self.cs.listTemplates(**args)
if templates:
for t in templates['template']:
if template in [ t['displaytext'], t['name'], t['id'] ]:
self.template = t
return self._get_by_key(key, self.template)
self.module.fail_json(msg="Template '%s' not found" % template)
elif iso:
if self.iso:
return self._get_by_key(key, self.iso)
args['isofilter'] = 'executable'
isos = self.cs.listIsos(**args)
if isos:
for i in isos['iso']:
if iso in [ i['displaytext'], i['name'], i['id'] ]:
self.iso = i
return self._get_by_key(key, self.iso)
self.module.fail_json(msg="ISO '%s' not found" % iso)
def get_disk_offering_id(self):
disk_offering = self.module.params.get('disk_offering')
if not disk_offering:
return None
disk_offerings = self.cs.listDiskOfferings()
if disk_offerings:
for d in disk_offerings['diskoffering']:
if disk_offering in [ d['displaytext'], d['name'], d['id'] ]:
return d['id']
self.module.fail_json(msg="Disk offering '%s' not found" % disk_offering)
def get_instance(self):
instance = self.instance
if not instance:
instance_name = self.get_or_fallback('name', 'display_name')
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
# Do not pass zoneid, as the instance name must be unique across zones.
instances = self.cs.listVirtualMachines(**args)
if instances:
for v in instances['virtualmachine']:
if instance_name.lower() in [ v['name'].lower(), v['displayname'].lower(), v['id'] ]:
self.instance = v
break
return self.instance
def get_iptonetwork_mappings(self):
network_mappings = self.module.params.get('ip_to_networks')
if network_mappings is None:
return
if network_mappings and self.module.params.get('networks'):
self.module.fail_json(msg="networks and ip_to_networks are mutually exclusive.")
network_names = [n['network'] for n in network_mappings]
ids = self.get_network_ids(network_names)
res = []
for i, data in enumerate(network_mappings):
res.append({'networkid': ids[i], 'ip': data['ip']})
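        # Illustrative shape: [{'network': 'NetA', 'ip': '10.1.1.1'}] maps to
        # [{'networkid': '<uuid-of-NetA>', 'ip': '10.1.1.1'}]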
return res
def security_groups_has_changed(self):
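        # Returns True when the desired and current security group sets
        # differ in either direction (effectively a symmetric-difference
        # check); group names are compared case-insensitively.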
security_groups = self.module.params.get('security_groups')
if security_groups is None:
return False
security_groups = [s.lower() for s in security_groups]
instance_security_groups = self.instance.get('securitygroup',[])
instance_security_group_names = []
for instance_security_group in instance_security_groups:
if instance_security_group['name'].lower() not in security_groups:
return True
else:
instance_security_group_names.append(instance_security_group['name'].lower())
for security_group in security_groups:
if security_group not in instance_security_group_names:
return True
return False
def get_network_ids(self, network_names=None):
if network_names is None:
network_names = self.module.params.get('networks')
if not network_names:
return None
args = {}
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['zoneid'] = self.get_zone(key='id')
networks = self.cs.listNetworks(**args)
if not networks:
self.module.fail_json(msg="No networks available")
network_ids = []
network_displaytexts = []
for network_name in network_names:
for n in networks['network']:
if network_name in [ n['displaytext'], n['name'], n['id'] ]:
network_ids.append(n['id'])
network_displaytexts.append(n['name'])
break
if len(network_ids) != len(network_names):
self.module.fail_json(msg="Could not find all networks, networks list found: %s" % network_displaytexts)
return network_ids
def present_instance(self, start_vm=True):
instance = self.get_instance()
if not instance:
instance = self.deploy_instance(start_vm=start_vm)
else:
instance = self.recover_instance(instance=instance)
instance = self.update_instance(instance=instance, start_vm=start_vm)
        # In check mode, we do not necessarily have an instance
if instance:
instance = self.ensure_tags(resource=instance, resource_type='UserVm')
# refresh instance data
self.instance = instance
return instance
def get_user_data(self):
user_data = self.module.params.get('user_data')
if user_data is not None:
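            # CloudStack expects user data base64-encoded; note the size
            # limits described for the user_data option above (2 KB via
            # HTTP GET, up to 32 KB via HTTP POST).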
user_data = base64.b64encode(str(user_data))
return user_data
def get_details(self):
res = None
cpu = self.module.params.get('cpu')
cpu_speed = self.module.params.get('cpu_speed')
memory = self.module.params.get('memory')
if all([cpu, cpu_speed, memory]):
res = [{
'cpuNumber': cpu,
'cpuSpeed': cpu_speed,
'memory': memory,
}]
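        # Illustrative result for a custom offering:
        #   [{'cpuNumber': 2, 'cpuSpeed': 2000, 'memory': 2048}]
        # res stays None unless all three of cpu, cpu_speed and memory are set.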
return res
def deploy_instance(self, start_vm=True):
self.result['changed'] = True
networkids = self.get_network_ids()
if networkids is not None:
networkids = ','.join(networkids)
args = {}
args['templateid'] = self.get_template_or_iso(key='id')
if not args['templateid']:
self.module.fail_json(msg="Template or ISO is required.")
args['zoneid'] = self.get_zone(key='id')
args['serviceofferingid'] = self.get_service_offering_id()
args['account'] = self.get_account(key='name')
args['domainid'] = self.get_domain(key='id')
args['projectid'] = self.get_project(key='id')
args['diskofferingid'] = self.get_disk_offering_id()
args['networkids'] = networkids
args['iptonetworklist'] = self.get_iptonetwork_mappings()
args['userdata'] = self.get_user_data()
args['keyboard'] = self.module.params.get('keyboard')
args['ipaddress'] = self.module.params.get('ip_address')
args['ip6address'] = self.module.params.get('ip6_address')
args['name'] = self.module.params.get('name')
args['displayname'] = self.get_or_fallback('display_name', 'name')
args['group'] = self.module.params.get('group')
args['keypair'] = self.module.params.get('ssh_key')
args['size'] = self.module.params.get('disk_size')
args['startvm'] = start_vm
args['rootdisksize'] = self.module.params.get('root_disk_size')
args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups'))
args['details'] = self.get_details()
security_groups = self.module.params.get('security_groups')
if security_groups is not None:
args['securitygroupnames'] = ','.join(security_groups)
template_iso = self.get_template_or_iso()
if 'hypervisor' not in template_iso:
args['hypervisor'] = self.get_hypervisor()
instance = None
if not self.module.check_mode:
instance = self.cs.deployVirtualMachine(**args)
if 'errortext' in instance:
self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self._poll_job(instance, 'virtualmachine')
return instance
def update_instance(self, instance, start_vm=True):
# Service offering data
args_service_offering = {}
args_service_offering['id'] = instance['id']
if self.module.params.get('service_offering'):
args_service_offering['serviceofferingid'] = self.get_service_offering_id()
service_offering_changed = self._has_changed(args_service_offering, instance)
# Instance data
args_instance_update = {}
args_instance_update['id'] = instance['id']
args_instance_update['userdata'] = self.get_user_data()
args_instance_update['ostypeid'] = self.get_os_type(key='id')
if self.module.params.get('group'):
args_instance_update['group'] = self.module.params.get('group')
if self.module.params.get('display_name'):
args_instance_update['displayname'] = self.module.params.get('display_name')
instance_changed = self._has_changed(args_instance_update, instance)
# SSH key data
args_ssh_key = {}
args_ssh_key['id'] = instance['id']
args_ssh_key['projectid'] = self.get_project(key='id')
if self.module.params.get('ssh_key'):
args_ssh_key['keypair'] = self.module.params.get('ssh_key')
ssh_key_changed = self._has_changed(args_ssh_key, instance)
security_groups_changed = self.security_groups_has_changed()
changed = [
service_offering_changed,
instance_changed,
security_groups_changed,
ssh_key_changed,
]
if True in changed:
force = self.module.params.get('force')
instance_state = instance['state'].lower()
if instance_state == 'stopped' or force:
self.result['changed'] = True
if not self.module.check_mode:
# Ensure VM has stopped
instance = self.stop_instance()
instance = self._poll_job(instance, 'virtualmachine')
self.instance = instance
# Change service offering
if service_offering_changed:
res = self.cs.changeServiceForVirtualMachine(**args_service_offering)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
instance = res['virtualmachine']
self.instance = instance
# Update VM
if instance_changed or security_groups_changed:
if security_groups_changed:
args_instance_update['securitygroupnames'] = ','.join(self.module.params.get('security_groups'))
res = self.cs.updateVirtualMachine(**args_instance_update)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
instance = res['virtualmachine']
self.instance = instance
# Reset SSH key
if ssh_key_changed:
instance = self.cs.resetSSHKeyForVirtualMachine(**args_ssh_key)
if 'errortext' in instance:
self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
instance = self._poll_job(instance, 'virtualmachine')
self.instance = instance
# Start VM again if it was running before
if instance_state == 'running' and start_vm:
instance = self.start_instance()
return instance
def recover_instance(self, instance):
if instance['state'].lower() in [ 'destroying', 'destroyed' ]:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.recoverVirtualMachine(id=instance['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
instance = res['virtualmachine']
return instance
def absent_instance(self):
instance = self.get_instance()
if instance:
if instance['state'].lower() not in ['expunging', 'destroying', 'destroyed']:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.destroyVirtualMachine(id=instance['id'])
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self._poll_job(res, 'virtualmachine')
return instance
def expunge_instance(self):
instance = self.get_instance()
if instance:
res = {}
if instance['state'].lower() in [ 'destroying', 'destroyed' ]:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.destroyVirtualMachine(id=instance['id'], expunge=True)
elif instance['state'].lower() not in [ 'expunging' ]:
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.destroyVirtualMachine(id=instance['id'], expunge=True)
if res and 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
res = self._poll_job(res, 'virtualmachine')
return instance
def stop_instance(self):
instance = self.get_instance()
        # in check mode the instance may not be instantiated
if instance:
if instance['state'].lower() in ['stopping', 'stopped']:
return instance
if instance['state'].lower() in ['starting', 'running']:
self.result['changed'] = True
if not self.module.check_mode:
instance = self.cs.stopVirtualMachine(id=instance['id'])
if 'errortext' in instance:
self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self._poll_job(instance, 'virtualmachine')
return instance
def start_instance(self):
instance = self.get_instance()
        # in check mode the instance may not be instantiated
if instance:
if instance['state'].lower() in ['starting', 'running']:
return instance
if instance['state'].lower() in ['stopped', 'stopping']:
self.result['changed'] = True
if not self.module.check_mode:
instance = self.cs.startVirtualMachine(id=instance['id'])
if 'errortext' in instance:
self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self._poll_job(instance, 'virtualmachine')
return instance
def restart_instance(self):
instance = self.get_instance()
        # in check mode the instance may not be instantiated
if instance:
if instance['state'].lower() in [ 'running', 'starting' ]:
self.result['changed'] = True
if not self.module.check_mode:
instance = self.cs.rebootVirtualMachine(id=instance['id'])
if 'errortext' in instance:
self.module.fail_json(msg="Failed: '%s'" % instance['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self._poll_job(instance, 'virtualmachine')
elif instance['state'].lower() in [ 'stopping', 'stopped' ]:
instance = self.start_instance()
return instance
def restore_instance(self):
instance = self.get_instance()
self.result['changed'] = True
        # in check mode the instance may not be instantiated
if instance:
args = {}
args['templateid'] = self.get_template_or_iso(key='id')
args['virtualmachineid'] = instance['id']
res = self.cs.restoreVirtualMachine(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
poll_async = self.module.params.get('poll_async')
if poll_async:
instance = self._poll_job(res, 'virtualmachine')
return instance
def get_result(self, instance):
super(AnsibleCloudStackInstance, self).get_result(instance)
if instance:
if 'securitygroup' in instance:
security_groups = []
for securitygroup in instance['securitygroup']:
security_groups.append(securitygroup['name'])
self.result['security_groups'] = security_groups
if 'affinitygroup' in instance:
affinity_groups = []
for affinitygroup in instance['affinitygroup']:
affinity_groups.append(affinitygroup['name'])
self.result['affinity_groups'] = affinity_groups
if 'nic' in instance:
for nic in instance['nic']:
if nic['isdefault'] and 'ipaddress' in nic:
self.result['default_ip'] = nic['ipaddress']
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name = dict(default=None),
display_name = dict(default=None),
group = dict(default=None),
state = dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'restored', 'absent', 'destroyed', 'expunged'], default='present'),
service_offering = dict(default=None),
cpu = dict(default=None, type='int'),
cpu_speed = dict(default=None, type='int'),
memory = dict(default=None, type='int'),
template = dict(default=None),
iso = dict(default=None),
networks = dict(type='list', aliases=[ 'network' ], default=None),
ip_to_networks = dict(type='list', aliases=['ip_to_network'], default=None),
        ip_address = dict(default=None),
        ip6_address = dict(default=None),
disk_offering = dict(default=None),
disk_size = dict(type='int', default=None),
root_disk_size = dict(type='int', default=None),
keyboard = dict(choices=['de', 'de-ch', 'es', 'fi', 'fr', 'fr-be', 'fr-ch', 'is', 'it', 'jp', 'nl-be', 'no', 'pt', 'uk', 'us'], default=None),
hypervisor = dict(choices=CS_HYPERVISORS, default=None),
security_groups = dict(type='list', aliases=[ 'security_group' ], default=None),
affinity_groups = dict(type='list', aliases=[ 'affinity_group' ], default=[]),
domain = dict(default=None),
account = dict(default=None),
project = dict(default=None),
user_data = dict(default=None),
zone = dict(default=None),
ssh_key = dict(default=None),
force = dict(type='bool', default=False),
tags = dict(type='list', aliases=[ 'tag' ], default=None),
poll_async = dict(type='bool', default=True),
))
required_together = cs_required_together()
required_together.extend([
['cpu', 'cpu_speed', 'memory'],
])
module = AnsibleModule(
argument_spec=argument_spec,
required_together=required_together,
required_one_of = (
['display_name', 'name'],
),
mutually_exclusive = (
['template', 'iso'],
),
supports_check_mode=True
)
if not has_lib_cs:
module.fail_json(msg="python library cs required: pip install cs")
try:
acs_instance = AnsibleCloudStackInstance(module)
state = module.params.get('state')
if state in ['absent', 'destroyed']:
instance = acs_instance.absent_instance()
elif state in ['expunged']:
instance = acs_instance.expunge_instance()
elif state in ['restored']:
acs_instance.present_instance()
instance = acs_instance.restore_instance()
elif state in ['present', 'deployed']:
instance = acs_instance.present_instance()
elif state in ['stopped']:
acs_instance.present_instance(start_vm=False)
instance = acs_instance.stop_instance()
elif state in ['started']:
acs_instance.present_instance()
instance = acs_instance.start_instance()
elif state in ['restarted']:
acs_instance.present_instance()
instance = acs_instance.restart_instance()
if instance and 'state' in instance and instance['state'].lower() == 'error':
module.fail_json(msg="Instance named '%s' in error state." % module.params.get('name'))
result = acs_instance.get_result(instance)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
ahmetdaglarbas/e-commerce
|
refs/heads/tez
|
tests/integration/customer/test_custom_user_model.py
|
39
|
from django.test import TestCase
from oscar.apps.customer.forms import ProfileForm
from oscar.core.compat import get_user_model, existing_user_fields
class TestACustomUserModel(TestCase):
def setUp(self):
self.user_klass = get_user_model()
def test_can_be_created_without_error(self):
try:
self.user_klass.objects.create_user('_', 'a@a.com', 'pa55w0rd')
except Exception as e:
self.fail("Unable to create user model: %s" % e)
def test_extra_field_is_accessible(self):
self.assertTrue('extra_field' in existing_user_fields(['extra_field']))
self.assertTrue(hasattr(self.user_klass(), 'extra_field'))
def test_profile_form_doesnt_expose_extra_field(self):
form = ProfileForm(self.user_klass())
expected_fields = set(['first_name', 'last_name', 'email'])
self.assertTrue(expected_fields == set(form.fields))
|
WafaaT/spark-tk
|
refs/heads/master
|
python/sparktk/graph/ops/__init__.py
|
137
|
# vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparktk.loggers import log_load; log_load(__name__); del log_load
|
ashutrix03/inteygrate_flaskapp-master
|
refs/heads/master
|
yowsup/layers/logger/layer.py
|
67
|
from yowsup.layers import YowLayer
import logging
logger = logging.getLogger(__name__)
class YowLoggerLayer(YowLayer):
def send(self, data):
ldata = list(data) if type(data) is bytearray else data
logger.debug("tx:\n%s" % ldata)
self.toLower(data)
def receive(self, data):
ldata = list(data) if type(data) is bytearray else data
logger.debug("rx:\n%s" % ldata)
self.toUpper(data)
def __str__(self):
return "Logger Layer"
|
tuxfux-hlp-notes/python-batches
|
refs/heads/master
|
archieves/batch-62/files/myenv/lib/python2.7/site-packages/pip/_vendor/html5lib/_trie/__init__.py
|
456
|
from __future__ import absolute_import, division, unicode_literals
from .py import Trie as PyTrie
Trie = PyTrie
# pylint:disable=wrong-import-position
try:
from .datrie import Trie as DATrie
except ImportError:
pass
else:
Trie = DATrie
# pylint:enable=wrong-import-position
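# Illustrative note (not from the original source): importers of this package
# transparently get the C-accelerated datrie-backed Trie when the optional
# dependency is installed, and the pure-Python Trie otherwise.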
|
strands-project/strands_social
|
refs/heads/hydro-devel
|
strands_tweets/scripts/tweet_test.py
|
3
|
#!/usr/bin/env python
import roslib; roslib.load_manifest('strands_tweets')
import sys
import rospy
from strands_tweets.srv import *
def tweet_client(text):
rospy.wait_for_service('/strands_tweets/Tweet')
try:
tweet = rospy.ServiceProxy('/strands_tweets/Tweet', Tweet)
        resp1 = tweet(text, True)
return resp1.result
except rospy.ServiceException, e:
print "Service call failed: %s"%e
if __name__ == "__main__":
print sys.argv
text0 = " ".join(sys.argv[1:])
#text0 = "Hello ROS World"
print "Tweeting %s"%(text0)
resp=tweet_client(text0)
print resp
|
andrewcbennett/iris
|
refs/heads/placeholder
|
lib/iris/fileformats/cf.py
|
3
|
# (C) British Crown Copyright 2010 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Provides the capability to load netCDF files and interpret them
according to the 'NetCDF Climate and Forecast (CF) Metadata Conventions'.
References:
[CF] NetCDF Climate and Forecast (CF) Metadata conventions, Version 1.5, October, 2010.
[NUG] NetCDF User's Guide, http://www.unidata.ucar.edu/software/netcdf/docs/netcdf.html
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
import six
from abc import ABCMeta, abstractmethod
from collections import Iterable, MutableMapping
import os
import re
import warnings
import netCDF4
import numpy as np
import numpy.ma as ma
import iris.util
#
# CF parse pattern common to both formula terms and measure CF variables.
#
_CF_PARSE = re.compile(r'''
\s*
(?P<lhs>[\w_]+)
\s*:\s*
(?P<rhs>[\w_]+)
\s*
''', re.VERBOSE)
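# Illustrative example: a CF attribute string such as
# 'a: var_a b: var_b ps: surface_pressure' yields the (lhs, rhs) pairs
# ('a', 'var_a'), ('b', 'var_b'), ('ps', 'surface_pressure') when scanned
# with _CF_PARSE.finditer().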
# NetCDF variable attributes handled by the netCDF4 module and
# therefore automatically classed as "used" attributes.
_CF_ATTRS_IGNORE = set(['_FillValue', 'add_offset', 'missing_value', 'scale_factor', ])
#: Supported dimensionless vertical coordinate reference surface/phenomenon
#: formula terms. Ref: [CF] Appendix D.
reference_terms = dict(atmosphere_sigma_coordinate=['ps'],
atmosphere_hybrid_sigma_pressure_coordinate=['ps'],
atmosphere_hybrid_height_coordinate=['orog'],
atmosphere_sleve_coordinate=['zsurf1', 'zsurf2'],
ocean_sigma_coordinate=['eta', 'depth'],
ocean_s_coordinate=['eta', 'depth'],
ocean_sigma_z_coordinate=['eta', 'depth'],
ocean_s_coordinate_g1=['eta', 'depth'],
ocean_s_coordinate_g2=['eta', 'depth'])
# NetCDF returns a different type for strings depending on Python version.
def _is_str_dtype(var):
return ((six.PY2 and np.issubdtype(var.dtype, np.str)) or
(six.PY3 and np.issubdtype(var.dtype, np.bytes_)))
################################################################################
class CFVariable(six.with_metaclass(ABCMeta, object)):
"""Abstract base class wrapper for a CF-netCDF variable."""
#: Name of the netCDF variable attribute that identifies this
#: CF-netCDF variable.
cf_identity = None
def __init__(self, name, data):
# Accessing the list of netCDF attributes is surprisingly slow.
# Since it's used repeatedly, caching the list makes things
# quite a bit faster.
self._nc_attrs = data.ncattrs()
#: NetCDF variable name.
self.cf_name = name
#: NetCDF4 Variable data instance.
self.cf_data = data
#: Collection of CF-netCDF variables associated with this variable.
self.cf_group = None
        #: CF-netCDF formula terms that this variable participates in.
self.cf_terms_by_root = {}
self.cf_attrs_reset()
@staticmethod
def _identify_common(variables, ignore, target):
if ignore is None:
ignore = []
if target is None:
target = variables
elif isinstance(target, six.string_types):
if target not in variables:
raise ValueError('Cannot identify unknown target CF-netCDF variable %r' % target)
target = {target: variables[target]}
else:
raise TypeError('Expect a target CF-netCDF variable name')
return (ignore, target)
@abstractmethod
def identify(self, variables, ignore=None, target=None, warn=True):
"""
Identify all variables that match the criterion for this CF-netCDF variable class.
Args:
* variables:
Dictionary of netCDF4.Variable instance by variable name.
Kwargs:
* ignore:
List of variable names to ignore.
* target:
Name of a single variable to check.
* warn:
Issue a warning if a missing variable is referenced.
Returns:
Dictionary of CFVariable instance by variable name.
"""
pass
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
        Note that, by default, scalar variables always span the
        dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
result = set(self.dimensions).issubset(cf_variable.dimensions)
return result
def __eq__(self, other):
# CF variable names are unique.
return self.cf_name == other.cf_name
def __ne__(self, other):
# CF variable names are unique.
return self.cf_name != other.cf_name
def __hash__(self):
# CF variable names are unique.
return hash(self.cf_name)
def __getattr__(self, name):
# Accessing netCDF attributes is surprisingly slow. Since
# they're often read repeatedly, caching the values makes things
# quite a bit faster.
if name in self._nc_attrs:
self._cf_attrs.add(name)
value = getattr(self.cf_data, name)
setattr(self, name, value)
return value
def __getitem__(self, key):
return self.cf_data.__getitem__(key)
def __len__(self):
return self.cf_data.__len__()
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.cf_name, self.cf_data)
def cf_attrs(self):
"""Return a list of all attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr))
for attr in sorted(self._nc_attrs))
def cf_attrs_ignored(self):
"""Return a list of all ignored attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr)) for attr in
sorted(set(self._nc_attrs) & _CF_ATTRS_IGNORE))
def cf_attrs_used(self):
"""Return a list of all accessed attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr)) for attr in
sorted(self._cf_attrs))
def cf_attrs_unused(self):
"""Return a list of all non-accessed attribute name and value pairs of the CF-netCDF variable."""
return tuple((attr, self.getncattr(attr)) for attr in
sorted(set(self._nc_attrs) - self._cf_attrs))
def cf_attrs_reset(self):
"""Reset the history of accessed attribute names of the CF-netCDF variable."""
self._cf_attrs = set([item[0] for item in self.cf_attrs_ignored()])
def add_formula_term(self, root, term):
"""
Register the participation of this CF-netCDF variable in a CF-netCDF formula term.
Args:
* root (string):
The name of CF-netCDF variable that defines the CF-netCDF formula_terms attribute.
* term (string):
The associated term name of this variable in the formula_terms definition.
Returns:
None.
"""
self.cf_terms_by_root[root] = term
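        # Illustrative: after add_formula_term('lev', 'ps'),
        # self.cf_terms_by_root == {'lev': 'ps'}.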
def has_formula_terms(self):
"""
Determine whether this CF-netCDF variable participates in a CF-netcdf formula term.
Returns:
Boolean.
"""
return bool(self.cf_terms_by_root)
class CFAncillaryDataVariable(CFVariable):
"""
A CF-netCDF ancillary data variable is a variable that provides metadata
about the individual values of another data variable.
Identified by the CF-netCDF variable attribute 'ancillary_variables'.
Ref: [CF] Section 3.4. Ancillary Data.
"""
cf_identity = 'ancillary_variables'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF ancillary data variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for ancillary data variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for name in nc_var_att.split():
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF ancillary data variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFAncillaryDataVariable(name, variables[name])
return result
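# A minimal usage sketch (not part of the original module; `dataset` and
# 'example.nc' are hypothetical): each concrete identify() classmethod scans
# a variable map and returns matching CF wrappers keyed by variable name.
#
#     import netCDF4
#     dataset = netCDF4.Dataset('example.nc')
#     ancillary = CFAncillaryDataVariable.identify(dataset.variables)
#     for name, cf_var in ancillary.items():
#         print(name, cf_var.dimensions)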
class CFAuxiliaryCoordinateVariable(CFVariable):
"""
A CF-netCDF auxiliary coordinate variable is any netCDF variable that contains
coordinate data, but is not a CF-netCDF coordinate variable by definition.
There is no relationship between the name of a CF-netCDF auxiliary coordinate
variable and the name(s) of its dimension(s).
Identified by the CF-netCDF variable attribute 'coordinates'.
Also see :class:`iris.fileformats.cf.CFLabelVariable`.
Ref: [CF] Chapter 5. Coordinate Systems.
[CF] Section 6.2. Alternative Coordinates.
"""
cf_identity = 'coordinates'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF auxiliary coordinate variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for auxiliary coordinate variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for name in nc_var_att.split():
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF auxiliary coordinate variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
# Restrict to non-string type i.e. not a CFLabelVariable.
if not _is_str_dtype(variables[name]):
result[name] = CFAuxiliaryCoordinateVariable(name, variables[name])
return result
class CFBoundaryVariable(CFVariable):
"""
A CF-netCDF boundary variable is associated with a CF-netCDF variable that contains
coordinate data. When a data value provides information about conditions in a cell
occupying a region of space/time or some other dimension, the boundary variable
provides a description of cell extent.
A CF-netCDF boundary variable will have one more dimension than its associated
CF-netCDF coordinate variable or CF-netCDF auxiliary coordinate variable.
Identified by the CF-netCDF variable attribute 'bounds'.
Ref: [CF] Section 7.1. Cell Boundaries.
"""
cf_identity = 'bounds'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF boundary variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for a boundary variable reference.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
name = nc_var_att.strip()
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF boundary variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFBoundaryVariable(name, variables[name])
return result
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
Note that, by default, scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
# Scalar variables always span the target variable.
result = True
if self.dimensions:
source = self.dimensions
target = cf_variable.dimensions
# Ignore the bounds extent dimension.
result = set(source[:-1]).issubset(target) or \
set(source[1:]).issubset(target)
return result
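# Illustrative example of the span check above (the dimension names are
# hypothetical): a bounds variable with dimensions ('time', 'nv') spans a
# coordinate with dimensions ('time',), because dropping the trailing extent
# dimension 'nv' leaves a subset:
#
#     set(('time', 'nv')[:-1]).issubset(('time',))   # -> True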
class CFClimatologyVariable(CFVariable):
"""
A CF-netCDF climatology variable is associated with a CF-netCDF variable that contains
coordinate data. When a data value provides information about conditions in a cell
occupying a region of space/time or some other dimension, the climatology variable
provides a climatological description of cell extent.
A CF-netCDF climatology variable will have one more dimension than its associated
CF-netCDF coordinate variable.
Identified by the CF-netCDF variable attribute 'climatology'.
Ref: [CF] Section 7.4. Climatological Statistics
"""
cf_identity = 'climatology'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF climatology variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for a climatology variable reference.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
name = nc_var_att.strip()
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF climatology variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFClimatologyVariable(name, variables[name])
return result
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
Note that, by default, scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
# Scalar variables always span the target variable.
result = True
if self.dimensions:
source = self.dimensions
target = cf_variable.dimensions
# Ignore the climatology extent dimension.
result = set(source[:-1]).issubset(target) or \
set(source[1:]).issubset(target)
return result
class CFCoordinateVariable(CFVariable):
"""
A CF-netCDF coordinate variable is a one-dimensional variable with the same name
as its dimension, and it is defined as a numeric data type with values that are
ordered monotonically. Missing values are not allowed in CF-netCDF coordinate
variables. Also see [NUG] Section 2.3.1.
Identified by the above criterion, there is no associated CF-netCDF variable
attribute.
Ref: [CF] 1.2. Terminology.
"""
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True, monotonic=False):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF coordinate variables.
for nc_var_name, nc_var in six.iteritems(target):
if nc_var_name in ignore:
continue
# String variables can't be coordinates
if _is_str_dtype(nc_var):
continue
# Restrict to one-dimensional with name as dimension OR zero-dimensional scalar
if not ((nc_var.ndim == 1 and nc_var_name in nc_var.dimensions) or (nc_var.ndim == 0)):
continue
# Restrict to monotonic?
if monotonic:
data = nc_var[:]
# Gracefully fill a masked coordinate.
if ma.isMaskedArray(data):
data = ma.filled(data)
if nc_var.shape == () or nc_var.shape == (1,) or iris.util.monotonic(data):
result[nc_var_name] = CFCoordinateVariable(nc_var_name, nc_var)
else:
result[nc_var_name] = CFCoordinateVariable(nc_var_name, nc_var)
return result
class CFDataVariable(CFVariable):
"""
A CF-netCDF variable containing the data payload that maps to an Iris :class:`iris.cube.Cube`.
"""
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
raise NotImplementedError
class _CFFormulaTermsVariable(CFVariable):
"""
A CF-netCDF formula terms variable corresponds to a term in a formula that
allows dimensional vertical coordinate values to be computed from dimensionless
vertical coordinate values and associated variables at specific grid points.
Identified by the CF-netCDF variable attribute 'formula_terms'.
Ref: [CF] Section 4.3.2. Dimensional Vertical Coordinate.
[CF] Appendix D. Dimensionless Vertical Coordinates.
"""
cf_identity = 'formula_terms'
def __init__(self, name, data, formula_root, formula_term):
CFVariable.__init__(self, name, data)
# Register the formula root and term relationship.
self.add_formula_term(formula_root, formula_term)
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF formula terms variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for formula terms variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for match_item in _CF_PARSE.finditer(nc_var_att):
match_group = match_item.groupdict()
# Ensure that term name is lower case, as expected.
term_name = match_group['lhs'].lower()
variable_name = match_group['rhs']
if variable_name not in ignore:
if variable_name not in variables:
if warn:
message = 'Missing CF-netCDF formula term variable %r, referenced by netCDF variable %r'
warnings.warn(message % (variable_name, nc_var_name))
else:
if variable_name not in result:
result[variable_name] = _CFFormulaTermsVariable(variable_name,
variables[variable_name],
nc_var_name, term_name)
else:
result[variable_name].add_formula_term(nc_var_name, term_name)
return result
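# Illustrative sketch (the attribute value below is hypothetical): given a
# dimensionless vertical coordinate 'lev' carrying
#
#     lev:formula_terms = "a: var_a b: var_b ps: surface_pressure"
#
# _CF_PARSE yields the (lhs, rhs) pairs ('a', 'var_a'), ('b', 'var_b') and
# ('ps', 'surface_pressure'), so identify() would register, for example,
#
#     result['surface_pressure'].cf_terms_by_root == {'lev': 'ps'}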
def __repr__(self):
return '%s(%r, %r, %r)' % (self.__class__.__name__,
self.cf_name, self.cf_data,
self.cf_terms_by_root)
class CFGridMappingVariable(CFVariable):
"""
A CF-netCDF grid mapping variable contains a list of specific attributes that
define a particular grid mapping. A CF-netCDF grid mapping variable must contain
the attribute 'grid_mapping_name'.
Based on the value of the 'grid_mapping_name' attribute, there are associated
standard names of CF-netCDF coordinate variables that contain the mapping's
independent variables.
Identified by the CF-netCDF variable attribute 'grid_mapping'.
Ref: [CF] Section 5.6. Horizontal Coordinate Reference Systems, Grid Mappings, and Projections.
[CF] Appendix F. Grid Mappings.
"""
cf_identity = 'grid_mapping'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all grid mapping variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for a grid mapping variable reference.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
name = nc_var_att.strip()
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF grid mapping variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
result[name] = CFGridMappingVariable(name, variables[name])
return result
class CFLabelVariable(CFVariable):
"""
A CF-netCDF label variable is any netCDF variable that contains string
textual information, or labels.
Identified by the CF-netCDF variable attribute 'coordinates'.
Also see :class:`iris.fileformats.cf.CFAuxiliaryCoordinateVariable`.
Ref: [CF] Section 6.1. Labels.
"""
cf_identity = 'coordinates'
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF label variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for label variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for name in nc_var_att.split():
if name not in ignore:
if name not in variables:
if warn:
message = 'Missing CF-netCDF label variable %r, referenced by netCDF variable %r'
warnings.warn(message % (name, nc_var_name))
else:
# Register variable, but only allow string type.
var = variables[name]
if _is_str_dtype(var):
result[name] = CFLabelVariable(name, var)
return result
def cf_label_data(self, cf_data_var):
"""
Return the associated CF-netCDF label variable strings.
Args:
* cf_data_var (:class:`iris.fileformats.cf.CFDataVariable`):
The CF-netCDF data variable which the CF-netCDF label variable describes.
Returns:
String labels.
"""
if not isinstance(cf_data_var, CFDataVariable):
raise TypeError('cf_data_var argument should be of type CFDataVariable. Got %r.' % type(cf_data_var))
# Determine the name of the label string (or length) dimension by
# finding the dimension name that doesn't exist within the data dimensions.
str_dim_name = list(set(self.dimensions) - set(cf_data_var.dimensions))
if len(str_dim_name) != 1:
raise ValueError('Invalid string dimensions for CF-netCDF label variable %r' % self.cf_name)
str_dim_name = str_dim_name[0]
label_data = self[:]
if isinstance(label_data, ma.MaskedArray):
label_data = label_data.filled()
# Determine whether we have a string-valued scalar label
# i.e. a character variable that only has one dimension (the length of the string).
if self.ndim == 1:
data = np.array([''.join(label_data).strip()])
else:
# Determine the index of the string dimension.
str_dim = self.dimensions.index(str_dim_name)
# Calculate new label data shape (without string dimension) and create payload array.
new_shape = tuple(dim_len for i, dim_len in enumerate(self.shape) if i != str_dim)
string_basetype = '|S%d' if six.PY2 else '|U%d'
string_dtype = string_basetype % self.shape[str_dim]
data = np.empty(new_shape, dtype=string_dtype)
for index in np.ndindex(new_shape):
# Create the slice for the label data.
if str_dim == 0:
label_index = (slice(None, None),) + index
else:
label_index = index + (slice(None, None),)
label_string = b''.join(label_data[label_index]).strip()
if six.PY3:
label_string = label_string.decode('utf8')
data[index] = label_string
return data
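# A minimal sketch of the join-and-strip step above (hypothetical data): a
# (2, 4) character array whose trailing axis is the string-length dimension
# collapses to two stripped strings:
#
#     import numpy as np
#     label_data = np.array([list('ab  '), list('cd  ')])
#     [''.join(row).strip() for row in label_data]   # -> ['ab', 'cd']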
def cf_label_dimensions(self, cf_data_var):
"""
Return the name of the associated CF-netCDF label variable data dimensions.
Args:
* cf_data_var (:class:`iris.fileformats.cf.CFDataVariable`):
The CF-netCDF data variable which the CF-netCDF label variable describes.
Returns:
Tuple of label data dimension names.
"""
if not isinstance(cf_data_var, CFDataVariable):
raise TypeError('cf_data_var argument should be of type CFDataVariable. Got %r.' % type(cf_data_var))
return tuple([dim_name for dim_name in self.dimensions if dim_name in cf_data_var.dimensions])
def spans(self, cf_variable):
"""
Determine whether the dimensionality of this variable
is a subset of the specified target variable.
Note that, by default, scalar variables always span the
dimensionality of the target variable.
Args:
* cf_variable:
Compare dimensionality with the :class:`CFVariable`.
Returns:
Boolean.
"""
# Scalar variables always span the target variable.
result = True
if self.dimensions:
source = self.dimensions
target = cf_variable.dimensions
# Ignore label string length dimension.
result = set(source[:-1]).issubset(target) or \
set(source[1:]).issubset(target)
return result
class CFMeasureVariable(CFVariable):
"""
A CF-netCDF measure variable is a variable that contains cell areas or volumes.
Identified by the CF-netCDF variable attribute 'cell_measures'.
Ref: [CF] Section 7.2. Cell Measures.
"""
cf_identity = 'cell_measures'
def __init__(self, name, data, measure):
CFVariable.__init__(self, name, data)
#: Associated cell measure of the cell variable
self.cf_measure = measure
@classmethod
def identify(cls, variables, ignore=None, target=None, warn=True):
result = {}
ignore, target = cls._identify_common(variables, ignore, target)
# Identify all CF measure variables.
for nc_var_name, nc_var in six.iteritems(target):
# Check for measure variable references.
nc_var_att = getattr(nc_var, cls.cf_identity, None)
if nc_var_att is not None:
for match_item in _CF_PARSE.finditer(nc_var_att):
match_group = match_item.groupdict()
measure = match_group['lhs']
variable_name = match_group['rhs']
if variable_name not in ignore:
if variable_name not in variables:
if warn:
message = 'Missing CF-netCDF measure variable %r, referenced by netCDF variable %r'
warnings.warn(message % (variable_name, nc_var_name))
else:
result[variable_name] = CFMeasureVariable(variable_name, variables[variable_name], measure)
return result
################################################################################
class CFGroup(MutableMapping, object):
"""
Represents a collection of 'NetCDF Climate and Forecast (CF) Metadata
Conventions' variables and netCDF global attributes.
"""
def __init__(self):
#: Collection of CF-netCDF variables
self._cf_variables = {}
#: Collection of netCDF global attributes
self.global_attributes = {}
#: Collection of CF-netCDF variables promoted to a CFDataVariable.
self.promoted = {}
def _cf_getter(self, cls):
# Generate dictionary with dictionary comprehension.
return {cf_name: cf_var
for cf_name, cf_var in six.iteritems(self._cf_variables)
if isinstance(cf_var, cls)}
@property
def ancillary_variables(self):
"""Collection of CF-netCDF ancillary variables."""
return self._cf_getter(CFAncillaryDataVariable)
@property
def auxiliary_coordinates(self):
"""Collection of CF-netCDF auxiliary coordinate variables."""
return self._cf_getter(CFAuxiliaryCoordinateVariable)
@property
def bounds(self):
"""Collection of CF-netCDF boundary variables."""
return self._cf_getter(CFBoundaryVariable)
@property
def climatology(self):
"""Collection of CF-netCDF climatology variables."""
return self._cf_getter(CFClimatologyVariable)
@property
def coordinates(self):
"""Collection of CF-netCDF coordinate variables."""
return self._cf_getter(CFCoordinateVariable)
@property
def data_variables(self):
"""Collection of CF-netCDF data pay-load variables."""
return self._cf_getter(CFDataVariable)
@property
def formula_terms(self):
"""Collection of CF-netCDF variables that participate in a CF-netCDF formula term."""
return {cf_name: cf_var
for cf_name, cf_var in six.iteritems(self._cf_variables)
if cf_var.has_formula_terms()}
@property
def grid_mappings(self):
"""Collection of CF-netCDF grid mapping variables."""
return self._cf_getter(CFGridMappingVariable)
@property
def labels(self):
"""Collection of CF-netCDF label variables."""
return self._cf_getter(CFLabelVariable)
@property
def cell_measures(self):
"""Collection of CF-netCDF measure variables."""
return self._cf_getter(CFMeasureVariable)
def keys(self):
"""Return the names of all the CF-netCDF variables in the group."""
return self._cf_variables.keys()
def __len__(self):
return len(self._cf_variables)
def __iter__(self):
for item in self._cf_variables:
yield item
def __setitem__(self, name, variable):
if not isinstance(variable, CFVariable):
raise TypeError('Attempted to add an invalid CF-netCDF variable to the %s' % self.__class__.__name__)
if name != variable.cf_name:
raise ValueError('Mismatch between key name %r and CF-netCDF variable name %r' % (str(name), variable.cf_name))
self._cf_variables[name] = variable
def __getitem__(self, name):
if name not in self._cf_variables:
raise KeyError('Cannot get unknown CF-netCDF variable name %r' % str(name))
return self._cf_variables[name]
def __delitem__(self, name):
if name not in self._cf_variables:
raise KeyError('Cannot delete unknown CF-netCDF variable name %r' % str(name))
del self._cf_variables[name]
def __repr__(self):
result = []
result.append('variables:%d' % len(self._cf_variables))
result.append('global_attributes:%d' % len(self.global_attributes))
result.append('promoted:%d' % len(self.promoted))
return '<%s of %s>' % (self.__class__.__name__, ', '.join(result))
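# A minimal usage sketch (assuming `cf_var` is some CFVariable instance):
# CFGroup implements the mapping protocol keyed by CF variable name, with the
# category views above exposed as properties.
#
#     group = CFGroup()
#     group[cf_var.cf_name] = cf_var
#     cf_var.cf_name in group        # True, via MutableMapping
#     group.coordinates              # {name: CFCoordinateVariable, ...}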
################################################################################
class CFReader(object):
"""
This class allows the contents of a netCDF file to be interpreted according
to the 'NetCDF Climate and Forecast (CF) Metadata Conventions'.
"""
def __init__(self, filename, warn=False, monotonic=False):
self._filename = os.path.expanduser(filename)
# All CF variable types EXCEPT for the "special cases" of
# CFDataVariable, CFCoordinateVariable and _CFFormulaTermsVariable.
self._variable_types = (CFAncillaryDataVariable, CFAuxiliaryCoordinateVariable,
CFBoundaryVariable, CFClimatologyVariable,
CFGridMappingVariable, CFLabelVariable, CFMeasureVariable)
#: Collection of CF-netCDF variables associated with this netCDF file
self.cf_group = CFGroup()
self._dataset = netCDF4.Dataset(self._filename, mode='r')
# Issue load optimisation warning.
if warn and self._dataset.file_format in ['NETCDF3_CLASSIC', 'NETCDF3_64BIT']:
warnings.warn('Optimise CF-netCDF loading by converting data from NetCDF3 ' \
'to NetCDF4 file format using the "nccopy" command.')
self._check_monotonic = monotonic
self._translate()
self._build_cf_groups()
self._reset()
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self._filename)
def _translate(self):
"""Classify the netCDF variables into CF-netCDF variables."""
netcdf_variable_names = list(self._dataset.variables.keys())
# Identify all CF coordinate variables first. This must be done
# first as, by CF convention, the definition of a CF auxiliary
# coordinate variable may include a scalar CF coordinate variable,
# whereas we want these two types of variables to be mutually exclusive.
coords = CFCoordinateVariable.identify(self._dataset.variables,
monotonic=self._check_monotonic)
self.cf_group.update(coords)
coordinate_names = list(self.cf_group.coordinates.keys())
# Identify all CF variables EXCEPT for the "special cases".
for variable_type in self._variable_types:
# Prevent grid mapping variables being mis-identified as CF coordinate variables.
ignore = None if issubclass(variable_type, CFGridMappingVariable) else coordinate_names
self.cf_group.update(variable_type.identify(self._dataset.variables, ignore=ignore))
# Identify global netCDF attributes.
attr_dict = {attr_name: _getncattr(self._dataset, attr_name, '') for
attr_name in self._dataset.ncattrs()}
self.cf_group.global_attributes.update(attr_dict)
# Identify and register all CF formula terms.
formula_terms = _CFFormulaTermsVariable.identify(self._dataset.variables)
for cf_var in six.itervalues(formula_terms):
for cf_root, cf_term in six.iteritems(cf_var.cf_terms_by_root):
# Ignore formula terms owned by a bounds variable.
if cf_root not in self.cf_group.bounds:
cf_name = cf_var.cf_name
if cf_var.cf_name not in self.cf_group:
self.cf_group[cf_name] = CFAuxiliaryCoordinateVariable(cf_name, cf_var.cf_data)
self.cf_group[cf_name].add_formula_term(cf_root, cf_term)
# Determine the CF data variables.
data_variable_names = set(netcdf_variable_names) - set(self.cf_group.ancillary_variables) - \
set(self.cf_group.auxiliary_coordinates) - set(self.cf_group.bounds) - \
set(self.cf_group.climatology) - set(self.cf_group.coordinates) - \
set(self.cf_group.grid_mappings) - set(self.cf_group.labels) - \
set(self.cf_group.cell_measures)
for name in data_variable_names:
self.cf_group[name] = CFDataVariable(name, self._dataset.variables[name])
def _build_cf_groups(self):
"""Build the first order relationships between CF-netCDF variables."""
def _build(cf_variable):
coordinate_names = list(self.cf_group.coordinates.keys())
cf_group = CFGroup()
# Build CF variable relationships.
for variable_type in self._variable_types:
# Prevent grid mapping variables being mis-identified as
# CF coordinate variables.
ignore = None if issubclass(variable_type, CFGridMappingVariable) else coordinate_names
match = variable_type.identify(self._dataset.variables, ignore=ignore,
target=cf_variable.cf_name, warn=False)
# Sanity check dimensionality coverage.
for cf_name, cf_var in six.iteritems(match):
if cf_var.spans(cf_variable):
cf_group[cf_name] = self.cf_group[cf_name]
else:
# Register the ignored variable.
# N.B. 'ignored' variable from enclosing scope.
ignored.add(cf_name)
msg = 'Ignoring variable {!r} referenced ' \
'by variable {!r}: Dimensions {!r} do not ' \
'span {!r}'.format(cf_name,
cf_variable.cf_name,
cf_var.dimensions,
cf_variable.dimensions)
warnings.warn(msg)
# Build CF data variable relationships.
if isinstance(cf_variable, CFDataVariable):
# Add global netCDF attributes.
cf_group.global_attributes.update(self.cf_group.global_attributes)
# Add appropriate "dimensioned" CF coordinate variables.
cf_group.update({cf_name: self.cf_group[cf_name] for cf_name
in cf_variable.dimensions if cf_name in
self.cf_group.coordinates})
# Add appropriate "dimensionless" CF coordinate variables.
coordinates_attr = getattr(cf_variable, 'coordinates', '')
cf_group.update({cf_name: self.cf_group[cf_name] for cf_name
in coordinates_attr.split() if cf_name in
self.cf_group.coordinates})
# Add appropriate formula terms.
for cf_var in six.itervalues(self.cf_group.formula_terms):
for cf_root in cf_var.cf_terms_by_root:
if cf_root in cf_group and cf_var.cf_name not in cf_group:
# Sanity check dimensionality.
if cf_var.spans(cf_variable):
cf_group[cf_var.cf_name] = cf_var
else:
# Register the ignored variable.
# N.B. 'ignored' variable from enclosing scope.
ignored.add(cf_var.cf_name)
msg = 'Ignoring formula terms variable {!r} ' \
'referenced by data variable {!r} via ' \
'variable {!r}: Dimensions {!r} do not ' \
'span {!r}'.format(cf_var.cf_name,
cf_variable.cf_name,
cf_root,
cf_var.dimensions,
cf_variable.dimensions)
warnings.warn(msg)
# Add the CF group to the variable.
cf_variable.cf_group = cf_group
# Ignored variables are those that cannot be attached to a
# data variable as the dimensionality of that variable is not
# a subset of the dimensionality of the data variable.
ignored = set()
for cf_variable in six.itervalues(self.cf_group):
_build(cf_variable)
# Determine whether there are any formula terms that
# may be promoted to a CFDataVariable.
if iris.FUTURE.netcdf_promote:
# Restrict promotion to only those formula terms
# that represent a reference surface/phenomenon.
for cf_var in six.itervalues(self.cf_group.formula_terms):
for cf_root, cf_term in six.iteritems(cf_var.cf_terms_by_root):
cf_root_var = self.cf_group[cf_root]
name = cf_root_var.standard_name or cf_root_var.long_name
terms = reference_terms.get(name, [])
if isinstance(terms, six.string_types) or \
not isinstance(terms, Iterable):
terms = [terms]
cf_var_name = cf_var.cf_name
if cf_term in terms and \
cf_var_name not in self.cf_group.promoted:
data_var = CFDataVariable(cf_var_name, cf_var.cf_data)
self.cf_group.promoted[cf_var_name] = data_var
_build(data_var)
break
# Promote any ignored variables.
promoted = set()
not_promoted = ignored.difference(promoted)
while not_promoted:
cf_name = not_promoted.pop()
if cf_name not in self.cf_group.data_variables and \
cf_name not in self.cf_group.promoted:
data_var = CFDataVariable(cf_name,
self.cf_group[cf_name].cf_data)
self.cf_group.promoted[cf_name] = data_var
_build(data_var)
# Determine whether there are still any ignored variables
# yet to be promoted.
promoted.add(cf_name)
not_promoted = ignored.difference(promoted)
else:
_netcdf_promote_warning()
def _reset(self):
"""Reset the attribute touch history of each variable."""
for nc_var_name in six.iterkeys(self._dataset.variables):
self.cf_group[nc_var_name].cf_attrs_reset()
def __del__(self):
# Explicitly close dataset to prevent file remaining open.
self._dataset.close()
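# A minimal usage sketch (the filename is hypothetical): classification runs
# in the constructor, so the CF groups are fully populated on return.
#
#     reader = CFReader('example.nc')
#     print(list(reader.cf_group.data_variables))
#     print(list(reader.cf_group.coordinates))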
def _getncattr(dataset, attr, default=None):
"""
Simple wrapper round `netCDF4.Dataset.getncattr` to make it behave
more like `getattr`.
"""
try:
value = dataset.getncattr(attr)
except AttributeError:
value = default
return value
def _netcdf_promote_warning():
msg = ('NetCDF default loading behaviour currently does not expose '
'variables which define reference surfaces for dimensionless '
'vertical coordinates as independent Cubes. This behaviour is '
'deprecated in favour of automatic promotion to Cubes. To switch '
'to the new behaviour, set iris.FUTURE.netcdf_promote to True.')
warnings.warn(msg)
|
orekyuu/intellij-community
|
refs/heads/master
|
python/testData/resolve/pyToJava/FieldType.py
|
83
|
from java.lang import System as javasystem
javasystem.out.p<ref>rintln("Hello")
|
leopittelli/Django-on-App-Engine-Example
|
refs/heads/master
|
django_summernote/settings.py
|
5
|
import os
import uuid
from datetime import datetime
from django.conf import settings
def uploaded_filepath(instance, filename):
ext = filename.split('.')[-1]
filename = "%s.%s" % (uuid.uuid4(), ext)
today = datetime.now().strftime('%Y-%m-%d')
return os.path.join('django-summernote', today, filename)
SETTINGS_USER = getattr(settings, 'SUMMERNOTE_CONFIG', {})
SETTINGS_DEFAULT = {
'iframe': True,
'airMode': False,
'empty': ('<p><br/></p>', '<p><br></p>'),
'width': 720,
'height': 480,
'toolbar': [
['style', ['style']],
['font', ['bold', 'italic', 'underline', 'clear']],
['fontname', ['fontname']],
['fontsize', ['fontsize']],
['color', ['color']],
['para', ['ul', 'ol', 'paragraph']],
['height', ['height']],
['table', ['table']],
['insert', ['link', 'picture', 'video']],
['view', ['fullscreen', 'codeview']],
['help', ['help']],
],
'lang': 'en-US',
'attachment_upload_to': uploaded_filepath,
'attachment_storage_class': None,
'attachment_filesize_limit': 1024 * 1024,
'inplacewidget_external_css': (
'//netdna.bootstrapcdn.com/twitter-bootstrap/2.3.1/css/bootstrap.no-icons.min.css',
'//netdna.bootstrapcdn.com/font-awesome/3.2.1/css/font-awesome.min.css',
),
'inplacewidget_external_js': (
'//code.jquery.com/jquery-1.9.1.min.js',
'//netdna.bootstrapcdn.com/twitter-bootstrap/2.3.1/js/bootstrap.min.js',
),
}
summernote_config = SETTINGS_DEFAULT.copy()
summernote_config.update(SETTINGS_USER)
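# Illustrative override (placed in a project's Django settings module, not in
# this file; the values are arbitrary): any key set in SUMMERNOTE_CONFIG
# shadows the corresponding default above.
#
#     SUMMERNOTE_CONFIG = {
#         'width': 960,
#         'attachment_filesize_limit': 5 * 1024 * 1024,  # 5 MB
#     }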
|
primiano/edison-kernel
|
refs/heads/master
|
tools/perf/scripts/python/sctop.py
|
11180
|
# system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
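# For example (invocations implied by the usage string defined below):
#   perf script -s sctop.py              # all comms, default 3 second refresh
#   perf script -s sctop.py 5            # all comms, 5 second refresh
#   perf script -s sctop.py firefox 5    # only syscalls made by 'firefox'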
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
svn2github/audacity
|
refs/heads/master
|
lib-src/lv2/lv2/plugins/eg01-amp.lv2/waflib/Context.py
|
70
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,imp,sys
from waflib import Utils,Errors,Logs
import waflib.Node
HEXVERSION=0x1070b00
WAFVERSION="1.7.11"
WAFREVISION="50f631bc5e00bdda966c68094229b99be9a21084"
ABI=98
DBFILE='.wafpickle-%s-%d-%d'%(sys.platform,sys.hexversion,ABI)
APPNAME='APPNAME'
VERSION='VERSION'
TOP='top'
OUT='out'
WSCRIPT_FILE='wscript'
launch_dir=''
run_dir=''
top_dir=''
out_dir=''
waf_dir=''
local_repo=''
remote_repo='http://waf.googlecode.com/git/'
remote_locs=['waflib/extras','waflib/Tools']
g_module=None
STDOUT=1
STDERR=-1
BOTH=0
classes=[]
def create_context(cmd_name,*k,**kw):
global classes
for x in classes:
if x.cmd==cmd_name:
return x(*k,**kw)
ctx=Context(*k,**kw)
ctx.fun=cmd_name
return ctx
class store_context(type):
def __init__(cls,name,bases,dict):
super(store_context,cls).__init__(name,bases,dict)
name=cls.__name__
if name=='ctx'or name=='Context':
return
try:
cls.cmd
except AttributeError:
raise Errors.WafError('Missing command for the context class %r (cmd)'%name)
if not getattr(cls,'fun',None):
cls.fun=cls.cmd
global classes
classes.insert(0,cls)
ctx=store_context('ctx',(object,),{})
class Context(ctx):
errors=Errors
tools={}
def __init__(self,**kw):
try:
rd=kw['run_dir']
except KeyError:
global run_dir
rd=run_dir
class node_class(waflib.Node.Node):
pass
self.node_class=node_class
self.node_class.__module__="waflib.Node"
self.node_class.__name__="Nod3"
self.node_class.ctx=self
self.root=self.node_class('',None)
self.cur_script=None
self.path=self.root.find_dir(rd)
self.stack_path=[]
self.exec_dict={'ctx':self,'conf':self,'bld':self,'opt':self}
self.logger=None
def __hash__(self):
return id(self)
def load(self,tool_list,*k,**kw):
tools=Utils.to_list(tool_list)
path=Utils.to_list(kw.get('tooldir',''))
for t in tools:
module=load_tool(t,path)
fun=getattr(module,kw.get('name',self.fun),None)
if fun:
fun(self)
def execute(self):
global g_module
self.recurse([os.path.dirname(g_module.root_path)])
def pre_recurse(self,node):
self.stack_path.append(self.cur_script)
self.cur_script=node
self.path=node.parent
def post_recurse(self,node):
self.cur_script=self.stack_path.pop()
if self.cur_script:
self.path=self.cur_script.parent
def recurse(self,dirs,name=None,mandatory=True,once=True):
try:
cache=self.recurse_cache
except AttributeError:
cache=self.recurse_cache={}
for d in Utils.to_list(dirs):
if not os.path.isabs(d):
d=os.path.join(self.path.abspath(),d)
WSCRIPT=os.path.join(d,WSCRIPT_FILE)
WSCRIPT_FUN=WSCRIPT+'_'+(name or self.fun)
node=self.root.find_node(WSCRIPT_FUN)
if node and(not once or node not in cache):
cache[node]=True
self.pre_recurse(node)
try:
function_code=node.read('rU')
exec(compile(function_code,node.abspath(),'exec'),self.exec_dict)
finally:
self.post_recurse(node)
elif not node:
node=self.root.find_node(WSCRIPT)
tup=(node,name or self.fun)
if node and(not once or tup not in cache):
cache[tup]=True
self.pre_recurse(node)
try:
wscript_module=load_module(node.abspath())
user_function=getattr(wscript_module,(name or self.fun),None)
if not user_function:
if not mandatory:
continue
raise Errors.WafError('No function %s defined in %s'%(name or self.fun,node.abspath()))
user_function(self)
finally:
self.post_recurse(node)
elif not node:
if not mandatory:
continue
raise Errors.WafError('No wscript file in directory %s'%d)
def exec_command(self,cmd,**kw):
subprocess=Utils.subprocess
kw['shell']=isinstance(cmd,str)
Logs.debug('runner: %r'%cmd)
Logs.debug('runner_env: kw=%s'%kw)
if self.logger:
self.logger.info(cmd)
if'stdout'not in kw:
kw['stdout']=subprocess.PIPE
if'stderr'not in kw:
kw['stderr']=subprocess.PIPE
try:
if kw['stdout']or kw['stderr']:
p=subprocess.Popen(cmd,**kw)
(out,err)=p.communicate()
ret=p.returncode
else:
out,err=(None,None)
ret=subprocess.Popen(cmd,**kw).wait()
except Exception as e:
raise Errors.WafError('Execution failure: %s'%str(e),ex=e)
if out:
if not isinstance(out,str):
out=out.decode(sys.stdout.encoding or'iso8859-1')
if self.logger:
self.logger.debug('out: %s'%out)
else:
sys.stdout.write(out)
if err:
if not isinstance(err,str):
err=err.decode(sys.stdout.encoding or'iso8859-1')
if self.logger:
self.logger.error('err: %s'%err)
else:
sys.stderr.write(err)
return ret
def cmd_and_log(self,cmd,**kw):
subprocess=Utils.subprocess
kw['shell']=isinstance(cmd,str)
Logs.debug('runner: %r'%cmd)
if'quiet'in kw:
quiet=kw['quiet']
del kw['quiet']
else:
quiet=None
if'output'in kw:
to_ret=kw['output']
del kw['output']
else:
to_ret=STDOUT
kw['stdout']=kw['stderr']=subprocess.PIPE
if quiet is None:
self.to_log(cmd)
try:
p=subprocess.Popen(cmd,**kw)
(out,err)=p.communicate()
except Exception as e:
raise Errors.WafError('Execution failure: %s'%str(e),ex=e)
if not isinstance(out,str):
out=out.decode(sys.stdout.encoding or'iso8859-1')
if not isinstance(err,str):
err=err.decode(sys.stdout.encoding or'iso8859-1')
if out and quiet!=STDOUT and quiet!=BOTH:
self.to_log('out: %s'%out)
if err and quiet!=STDERR and quiet!=BOTH:
self.to_log('err: %s'%err)
if p.returncode:
e=Errors.WafError('Command %r returned %r'%(cmd,p.returncode))
e.returncode=p.returncode
e.stderr=err
e.stdout=out
raise e
if to_ret==BOTH:
return(out,err)
elif to_ret==STDERR:
return err
return out
def fatal(self,msg,ex=None):
if self.logger:
self.logger.info('from %s: %s'%(self.path.abspath(),msg))
try:
msg='%s\n(complete log in %s)'%(msg,self.logger.handlers[0].baseFilename)
except Exception:
pass
raise self.errors.ConfigurationError(msg,ex=ex)
def to_log(self,msg):
if not msg:
return
if self.logger:
self.logger.info(msg)
else:
sys.stderr.write(str(msg))
sys.stderr.flush()
def msg(self,msg,result,color=None):
self.start_msg(msg)
if not isinstance(color,str):
color=result and'GREEN'or'YELLOW'
self.end_msg(result,color)
def start_msg(self,msg):
try:
if self.in_msg:
self.in_msg+=1
return
except AttributeError:
self.in_msg=0
self.in_msg+=1
try:
self.line_just=max(self.line_just,len(msg))
except AttributeError:
self.line_just=max(40,len(msg))
for x in(self.line_just*'-',msg):
self.to_log(x)
Logs.pprint('NORMAL',"%s :"%msg.ljust(self.line_just),sep='')
def end_msg(self,result,color=None):
self.in_msg-=1
if self.in_msg:
return
defcolor='GREEN'
if result==True:
msg='ok'
elif result==False:
msg='not found'
defcolor='YELLOW'
else:
msg=str(result)
self.to_log(msg)
Logs.pprint(color or defcolor,msg)
def load_special_tools(self,var,ban=[]):
global waf_dir
lst=self.root.find_node(waf_dir).find_node('waflib/extras').ant_glob(var)
for x in lst:
if not x.name in ban:
load_tool(x.name.replace('.py',''))
cache_modules={}
def load_module(path):
try:
return cache_modules[path]
except KeyError:
pass
module=imp.new_module(WSCRIPT_FILE)
try:
code=Utils.readf(path,m='rU')
except(IOError,OSError):
raise Errors.WafError('Could not read the file %r'%path)
module_dir=os.path.dirname(path)
sys.path.insert(0,module_dir)
exec(compile(code,path,'exec'),module.__dict__)
sys.path.remove(module_dir)
cache_modules[path]=module
return module
def load_tool(tool,tooldir=None):
if tool=='java':
tool='javaw'
elif tool=='compiler_cc':
tool='compiler_c'
else:
tool=tool.replace('++','xx')
if tooldir:
assert isinstance(tooldir,list)
sys.path=tooldir+sys.path
try:
__import__(tool)
ret=sys.modules[tool]
Context.tools[tool]=ret
return ret
finally:
for d in tooldir:
sys.path.remove(d)
else:
global waf_dir
try:
os.stat(os.path.join(waf_dir,'waflib','extras',tool+'.py'))
except OSError:
try:
os.stat(os.path.join(waf_dir,'waflib','Tools',tool+'.py'))
except OSError:
d=tool
else:
d='waflib.Tools.%s'%tool
else:
d='waflib.extras.%s'%tool
__import__(d)
ret=sys.modules[d]
Context.tools[tool]=ret
return ret
|
almeidapaulopt/erpnext
|
refs/heads/develop
|
erpnext/patches/v4_2/update_project_milestones.py
|
121
|
from __future__ import unicode_literals
import frappe
def execute():
for project in frappe.db.sql_list("select name from tabProject"):
frappe.reload_doc("projects", "doctype", "project")
p = frappe.get_doc("Project", project)
p.update_milestones_completed()
p.db_set("percent_milestones_completed", p.percent_milestones_completed)
|
rm-hull/ssd1306
|
refs/heads/master
|
luma/__init__.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2019 Richard Hull and contributors
# See LICENSE.rst for details.
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
|
scalable-networks/gnuradio-3.7.0.1
|
refs/heads/master
|
gnuradio-runtime/examples/network/dial_tone_sink.py
|
58
|
#!/usr/bin/env python
#
# Copyright 2006,2007,2010 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, audio
from gnuradio import blocks
from gnuradio.eng_option import eng_option
from optparse import OptionParser
class dial_tone_sink(gr.top_block):
def __init__(self, host, port, pkt_size, sample_rate, eof):
gr.top_block.__init__(self, "dial_tone_sink")
udp = blocks.udp_source(gr.sizeof_float, host, port, pkt_size, eof=eof)
sink = audio.sink(sample_rate)
self.connect(udp, sink)
if __name__ == '__main__':
parser = OptionParser(option_class=eng_option)
parser.add_option("", "--host", type="string", default="0.0.0.0",
help="local host name (domain name or IP address)")
parser.add_option("", "--port", type="int", default=65500,
help="port value to listen to for connection")
parser.add_option("", "--packet-size", type="int", default=1472,
help="packet size.")
parser.add_option("-r", "--sample-rate", type="int", default=8000,
help="audio signal sample rate [default=%default]")
parser.add_option("", "--no-eof", action="store_true", default=False,
help="don't send EOF on disconnect")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
raise SystemExit(1)
# Create an instance of a hierarchical block
top_block = dial_tone_sink(options.host, options.port,
options.packet_size, options.sample_rate,
not options.no_eof)
try:
# Run forever
top_block.run()
except KeyboardInterrupt:
# Ctrl-C exits
pass
|
mattvick/phantomjs
|
refs/heads/master
|
src/breakpad/src/tools/gyp/test/subdirectory/gyptest-subdir-default.py
|
137
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a subsidiary dependent target from a .gyp file in a
subdirectory, without specifying an explicit output build directory,
and using the subdirectory's solution or project file as the entry point.
"""
import TestGyp
import errno
test = TestGyp.TestGyp()
test.run_gyp('prog1.gyp', chdir='src')
test.relocate('src', 'relocate/src')
chdir = 'relocate/src/subdir'
# Make can build sub-projects, but it's still through the top-level Makefile,
# and there is no 'default' or 'all' sub-project, so the target must be
# explicit.
# TODO(mmoss) Should make create self-contained, sub-project Makefiles,
# equivalent to the sub-project .sln/SConstruct/etc. files of other generators?
if test.format == 'make':
chdir = 'relocate/src'
test.build('prog2.gyp', 'prog2', chdir=chdir)
else:
test.build('prog2.gyp', chdir=chdir)
test.built_file_must_not_exist('prog1', type=test.EXECUTABLE, chdir=chdir)
test.run_built_executable('prog2',
chdir=chdir,
stdout="Hello from prog2.c\n")
test.pass_test()
|
int0x19/android_kernel_xiaomi_msm8992
|
refs/heads/libra_miui_v2
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
found on a parent, then create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
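# Illustrative input (a made-up but representative ftrace line): parseLine
# applied to
#     bash-1234  [000]  6153.182000: sys_read <-system_call_fastpath
# returns ('6153.182000', 'sys_read', 'system_call_fastpath'), i.e.
# (calltime, callee, caller).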
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
zombi-x/android_kernel_lge_mako
|
refs/heads/mm6.0
|
scripts/tracing/draw_functrace.py
|
14679
|
#!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more human-readable
view of the call stack by drawing a textual but hierarchical tree of
calls. Only the functions' names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait some time, but not too much; the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
found on a parent, then create it as a new child of root.
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
twitter-forks/bazel
|
refs/heads/master
|
third_party/py/abseil/absl/testing/absltest.py
|
15
|
# Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base functionality for Abseil Python tests.
This module contains base classes and high-level functions for Abseil-style
tests.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import difflib
import errno
import getpass
import inspect
import itertools
import json
import os
import random
import re
import shlex
import signal
import subprocess
import sys
import tempfile
import textwrap
import unittest
try:
import faulthandler
except ImportError:
# We use faulthandler if it is available.
faulthandler = None
from absl import app
from absl import flags
from absl import logging
from absl.testing import xml_reporter
import six
from six.moves import urllib
from six.moves import xrange # pylint: disable=redefined-builtin
FLAGS = flags.FLAGS
_TEXT_OR_BINARY_TYPES = (six.text_type, six.binary_type)
# Many of the methods in this module have names like assertSameElements.
# This kind of name does not comply with PEP8 style,
# but it is consistent with the naming of methods in unittest.py.
# pylint: disable=invalid-name
def _get_default_test_random_seed():
random_seed = 301
value = os.environ.get('TEST_RANDOM_SEED', '')
try:
random_seed = int(value)
except ValueError:
pass
return random_seed
def get_default_test_srcdir():
"""Returns default test source dir."""
return os.environ.get('TEST_SRCDIR', '')
def get_default_test_tmpdir():
"""Returns default test temp dir."""
tmpdir = os.environ.get('TEST_TMPDIR', '')
if not tmpdir:
tmpdir = os.path.join(tempfile.gettempdir(), 'absl_testing')
return tmpdir
def _get_default_randomize_ordering_seed():
"""Returns default seed to use for randomizing test order.
This function first checks the --test_randomize_ordering_seed flag, and then
the TEST_RANDOMIZE_ORDERING_SEED environment variable. If the first value
we find is:
* (not set): disable test randomization
* 0: disable test randomization
* 'random': choose a random seed in [1, 4294967295] for test order
randomization
* positive integer: use this seed for test order randomization
(The values used are patterned after
https://docs.python.org/3/using/cmdline.html#envvar-PYTHONHASHSEED).
In principle, it would be simpler to return None if no override is provided;
however, the python random module has no `get_seed()`, only `getstate()`,
which returns far more data than we want to pass via an environment variable
or flag.
Returns:
A default value for test case randomization (int). 0 means do not randomize.
Raises:
ValueError: Raised when the flag or env value is not one of the options
above.
"""
if FLAGS.test_randomize_ordering_seed is not None:
randomize = FLAGS.test_randomize_ordering_seed
else:
randomize = os.environ.get('TEST_RANDOMIZE_ORDERING_SEED')
if randomize is None:
return 0
if randomize == 'random':
return random.Random().randint(1, 4294967295)
if randomize == '0':
return 0
try:
seed = int(randomize)
if seed > 0:
return seed
except ValueError:
pass
raise ValueError(
'Unknown test randomization seed value: {}'.format(randomize))
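# Illustrative invocations (shell commands shown in comments; 'my_test.py' is
# a hypothetical test file) matching the semantics documented above:
#
#     TEST_RANDOMIZE_ORDERING_SEED=random python my_test.py
#     python my_test.py --test_randomize_ordering_seed=12345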
flags.DEFINE_integer('test_random_seed', _get_default_test_random_seed(),
'Random seed for testing. Some test frameworks may '
'change the default value of this flag between runs, so '
'it is not appropriate for seeding probabilistic tests.',
allow_override_cpp=True)
flags.DEFINE_string('test_srcdir',
get_default_test_srcdir(),
'Root of directory tree where source files live',
allow_override_cpp=True)
flags.DEFINE_string('test_tmpdir', get_default_test_tmpdir(),
'Directory for temporary testing files',
allow_override_cpp=True)
flags.DEFINE_string('test_randomize_ordering_seed', None,
'If positive, use this as a seed to randomize the '
'execution order for test cases. If "random", pick a '
'random seed to use. If 0 or not set, do not randomize '
'test case execution order. This flag also overrides '
'the TEST_RANDOMIZE_ORDERING_SEED environment variable.')
flags.DEFINE_string('xml_output_file', '',
'File to store XML test results')
# We might need to monkey-patch TestResult so that it stops considering an
# unexpected pass as a "successful result". For details, see
# http://bugs.python.org/issue20165
def _monkey_patch_test_result_for_unexpected_passes():
"""Workaround for <http://bugs.python.org/issue20165>."""
def wasSuccessful(self):
"""Tells whether or not this result was a success.
Any unexpected pass is to be counted as a non-success.
Args:
self: The TestResult instance.
Returns:
Whether or not this result was a success.
"""
return (len(self.failures) == len(self.errors) ==
len(self.unexpectedSuccesses) == 0)
test_result = unittest.result.TestResult()
test_result.addUnexpectedSuccess('test')
if test_result.wasSuccessful(): # The bug is present.
unittest.result.TestResult.wasSuccessful = wasSuccessful
if test_result.wasSuccessful(): # Warn the user if our hot-fix failed.
sys.stderr.write('unittest.result.TestResult monkey patch to report'
' unexpected passes as failures did not work.\n')
_monkey_patch_test_result_for_unexpected_passes()
class TestCase(unittest.TestCase):
"""Extension of unittest.TestCase providing more powerful assertions."""
maxDiff = 80 * 20
def shortDescription(self):
"""Formats both the test method name and the first line of its docstring.
If no docstring is given, only returns the method name.
This method overrides unittest.TestCase.shortDescription(), which
only returns the first line of the docstring, obscuring the name
of the test upon failure.
Returns:
desc: A short description of a test method.
"""
desc = str(self)
# NOTE: super() is used here instead of directly invoking
# unittest.TestCase.shortDescription(self), because of the
# following line that occurs later on:
# unittest.TestCase = TestCase
# Because of this, direct invocation of what we think is the
# superclass will actually cause infinite recursion.
doc_first_line = super(TestCase, self).shortDescription()
if doc_first_line is not None:
desc = '\n'.join((desc, doc_first_line))
return desc
def assertStartsWith(self, actual, expected_start, msg=None):
"""Asserts that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
self.fail('%r does not start with %r' % (actual, expected_start), msg)
def assertNotStartsWith(self, actual, unexpected_start, msg=None):
"""Asserts that actual.startswith(unexpected_start) is False.
Args:
actual: str
unexpected_start: str
msg: Optional message to report on failure.
"""
if actual.startswith(unexpected_start):
self.fail('%r does start with %r' % (actual, unexpected_start), msg)
def assertEndsWith(self, actual, expected_end, msg=None):
"""Asserts that actual.endswith(expected_end) is True.
Args:
actual: str
expected_end: str
msg: Optional message to report on failure.
"""
if not actual.endswith(expected_end):
self.fail('%r does not end with %r' % (actual, expected_end), msg)
def assertNotEndsWith(self, actual, unexpected_end, msg=None):
"""Asserts that actual.endswith(unexpected_end) is False.
Args:
actual: str
unexpected_end: str
msg: Optional message to report on failure.
"""
if actual.endswith(unexpected_end):
self.fail('%r does end with %r' % (actual, unexpected_end), msg)
def assertSequenceStartsWith(self, prefix, whole, msg=None):
"""An equality assertion for the beginning of ordered sequences.
If prefix is an empty sequence, it will raise an error unless whole is also
an empty sequence.
If prefix is not a sequence, it will raise an error if the first element of
whole does not match.
Args:
prefix: A sequence expected at the beginning of the whole parameter.
whole: The sequence in which to look for prefix.
msg: Optional message to report on failure.
"""
try:
prefix_len = len(prefix)
except (TypeError, NotImplementedError):
prefix = [prefix]
prefix_len = 1
try:
whole_len = len(whole)
except (TypeError, NotImplementedError):
self.fail('For whole: len(%s) is not supported, it appears to be type: '
'%s' % (whole, type(whole)), msg)
assert prefix_len <= whole_len, self._formatMessage(
msg,
'Prefix length (%d) is longer than whole length (%d).' %
(prefix_len, whole_len)
)
if not prefix_len and whole_len:
self.fail('Prefix length is 0 but whole length is %d: %s' %
(len(whole), whole), msg)
try:
self.assertSequenceEqual(prefix, whole[:prefix_len], msg)
except AssertionError:
self.fail('prefix: %s not found at start of whole: %s.' %
(prefix, whole), msg)
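# A minimal usage sketch for assertSequenceStartsWith (values are
# illustrative, not from the original source):
#   self.assertSequenceStartsWith([1, 2], [1, 2, 3])  # passes
#   self.assertSequenceStartsWith(1, [1, 2, 3])       # non-sequence prefix is wrapped; passes
#   self.assertSequenceStartsWith([2, 3], [1, 2, 3])  # fails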
def assertEmpty(self, container, msg=None):
"""Asserts that an object has zero length.
Args:
container: Anything that implements the collections.Sized interface.
msg: Optional message to report on failure.
"""
if not isinstance(container, collections.Sized):
self.fail('Expected a Sized object, got: '
'{!r}'.format(type(container).__name__), msg)
# explicitly check the length since some Sized objects (e.g. numpy.ndarray)
# have strange __nonzero__/__bool__ behavior.
if len(container): # pylint: disable=g-explicit-length-test
self.fail('{!r} has length of {}.'.format(container, len(container)), msg)
def assertNotEmpty(self, container, msg=None):
"""Asserts that an object has non-zero length.
Args:
container: Anything that implements the collections.Sized interface.
msg: Optional message to report on failure.
"""
if not isinstance(container, collections.Sized):
self.fail('Expected a Sized object, got: '
'{!r}'.format(type(container).__name__), msg)
# explicitly check the length since some Sized objects (e.g. numpy.ndarray)
# have strange __nonzero__/__bool__ behavior.
if not len(container): # pylint: disable=g-explicit-length-test
self.fail('{!r} has length of 0.'.format(container), msg)
def assertLen(self, container, expected_len, msg=None):
"""Asserts that an object has the expected length.
Args:
container: Anything that implements the collections.Sized interface.
expected_len: The expected length of the container.
msg: Optional message to report on failure.
"""
if not isinstance(container, collections.Sized):
self.fail('Expected a Sized object, got: '
'{!r}'.format(type(container).__name__), msg)
if len(container) != expected_len:
container_repr = unittest.util.safe_repr(container)
self.fail('{} has length of {}, expected {}.'.format(
container_repr, len(container), expected_len), msg)
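# Illustrative sketch of the three size assertions above (hypothetical values):
#   self.assertEmpty([])           # passes
#   self.assertNotEmpty({'k': 1})  # passes
#   self.assertLen('abc', 3)       # passes
#   self.assertLen([1, 2], 3)      # fails: has length of 2, expected 3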
def assertSequenceAlmostEqual(self, expected_seq, actual_seq, places=None,
msg=None, delta=None):
"""An approximate equality assertion for ordered sequences.
Fail if the two sequences are unequal as determined by their value
differences rounded to the given number of decimal places (default 7) and
comparing to zero, or by comparing that the difference between each value
in the two sequences is more than the given delta.
Note that decimal places (from zero) are usually not the same as significant
digits (measured from the most significant digit).
If the two sequences compare equal then they will automatically compare
almost equal.
Args:
expected_seq: A sequence containing elements we are expecting.
actual_seq: The sequence that we are testing.
places: The number of decimal places to compare.
msg: The message to be printed if the test fails.
delta: The OK difference between compared values.
"""
if len(expected_seq) != len(actual_seq):
self.fail('Sequence size mismatch: {} vs {}'.format(
len(expected_seq), len(actual_seq)), msg)
err_list = []
for idx, (exp_elem, act_elem) in enumerate(zip(expected_seq, actual_seq)):
try:
self.assertAlmostEqual(exp_elem, act_elem, places=places, msg=msg,
delta=delta)
except self.failureException as err:
err_list.append('At index {}: {}'.format(idx, err))
if err_list:
if len(err_list) > 30:
err_list = err_list[:30] + ['...']
msg = self._formatMessage(msg, '\n'.join(err_list))
self.fail(msg)
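# Illustrative sketch of assertSequenceAlmostEqual (hypothetical values):
#   self.assertSequenceAlmostEqual([1.0, 2.0], [1.00000001, 2.0])  # passes (7 places)
#   self.assertSequenceAlmostEqual([1.0], [1.4], delta=0.5)        # passes within delta
#   self.assertSequenceAlmostEqual([1.0], [1.4], places=1)         # fails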
def assertContainsSubset(self, expected_subset, actual_set, msg=None):
"""Checks whether actual iterable is a superset of expected iterable."""
missing = set(expected_subset) - set(actual_set)
if not missing:
return
self.fail('Missing elements %s\nExpected: %s\nActual: %s' % (
missing, expected_subset, actual_set), msg)
def assertNoCommonElements(self, expected_seq, actual_seq, msg=None):
"""Checks whether actual iterable and expected iterable are disjoint."""
common = set(expected_seq) & set(actual_seq)
if not common:
return
self.fail('Common elements %s\nExpected: %s\nActual: %s' % (
common, expected_seq, actual_seq), msg)
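# Illustrative sketch (hypothetical values):
#   self.assertContainsSubset({1, 2}, [1, 2, 3])  # passes: all expected present
#   self.assertNoCommonElements([1, 2], [3, 4])   # passes: disjoint
#   self.assertNoCommonElements([1, 2], [2, 3])   # fails: 2 is shared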
def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
"""An unordered sequence specific comparison.
Equivalent to assertCountEqual(). This method is a compatibility layer
for Python 3k, since 2to3 does not convert assertItemsEqual() calls into
assertCountEqual() calls.
Args:
expected_seq: A sequence containing elements we are expecting.
actual_seq: The sequence that we are testing.
msg: The message to be printed if the test fails.
"""
if not hasattr(super(TestCase, self), 'assertItemsEqual'):
# The assertItemsEqual method was renamed assertCountEqual in Python 3.2
super(TestCase, self).assertCountEqual(expected_seq, actual_seq, msg)
return
super(TestCase, self).assertItemsEqual(expected_seq, actual_seq, msg)
def assertCountEqual(self, expected_seq, actual_seq, msg=None):
"""An unordered sequence specific comparison.
It asserts that actual_seq and expected_seq have the same element counts.
Equivalent to::
self.assertEqual(Counter(iter(actual_seq)),
Counter(iter(expected_seq)))
Asserts that each element has the same count in both sequences.
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.
Args:
expected_seq: A sequence containing elements we are expecting.
actual_seq: The sequence that we are testing.
msg: The message to be printed if the test fails.
"""
self.assertItemsEqual(expected_seq, actual_seq, msg)
def assertSameElements(self, expected_seq, actual_seq, msg=None):
"""Asserts that two sequences have the same elements (in any order).
This method, unlike assertCountEqual, doesn't care about any
duplicates in the expected and actual sequences.
>>> assertSameElements([1, 1, 1, 0, 0, 0], [0, 1])
# Doesn't raise an AssertionError
If possible, you should use assertCountEqual instead of
assertSameElements.
Args:
expected_seq: A sequence containing elements we are expecting.
actual_seq: The sequence that we are testing.
msg: The message to be printed if the test fails.
"""
# `unittest2.TestCase` used to have assertSameElements, but it was
# removed in favor of assertItemsEqual. As there's a unit test
# that explicitly checks this behavior, I am leaving this method
# alone.
# Fail on strings: empirically, passing strings to this test method
# is almost always a bug. If comparing the character sets of two strings
# is desired, cast the inputs to sets or lists explicitly.
if (isinstance(expected_seq, _TEXT_OR_BINARY_TYPES) or
isinstance(actual_seq, _TEXT_OR_BINARY_TYPES)):
self.fail('Passing string/bytes to assertSameElements is usually a bug. '
'Did you mean to use assertEqual?\n'
'Expected: %s\nActual: %s' % (expected_seq, actual_seq))
try:
expected = dict([(element, None) for element in expected_seq])
actual = dict([(element, None) for element in actual_seq])
missing = [element for element in expected if element not in actual]
unexpected = [element for element in actual if element not in expected]
missing.sort()
unexpected.sort()
except TypeError:
# Fall back to slower list-compare if any of the objects are
# not hashable.
expected = list(expected_seq)
actual = list(actual_seq)
expected.sort()
actual.sort()
missing, unexpected = _sorted_list_difference(expected, actual)
errors = []
if msg:
errors.extend((msg, ':\n'))
if missing:
errors.append('Expected, but missing:\n %r\n' % missing)
if unexpected:
errors.append('Unexpected, but present:\n %r\n' % unexpected)
if missing or unexpected:
self.fail(''.join(errors))
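# Contrast of the unordered comparisons above (hypothetical values):
#   self.assertCountEqual([0, 1, 1], [1, 0, 1])  # passes: same element counts
#   self.assertCountEqual([0, 0, 1], [0, 1])     # fails: counts differ
#   self.assertSameElements([0, 0, 1], [0, 1])   # passes: duplicates ignored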
# unittest.TestCase.assertMultiLineEqual works very similarly, but it
# has a different error format. However, I find this slightly more readable.
def assertMultiLineEqual(self, first, second, msg=None, **kwargs):
"""Asserts that two multi-line strings are equal."""
assert isinstance(first, six.string_types), (
'First argument is not a string: %r' % (first,))
assert isinstance(second, six.string_types), (
'Second argument is not a string: %r' % (second,))
line_limit = kwargs.pop('line_limit', 0)
if kwargs:
raise TypeError('Unexpected keyword args {}'.format(tuple(kwargs)))
if first == second:
return
if msg:
failure_message = [msg + ':\n']
else:
failure_message = ['\n']
if line_limit:
line_limit += len(failure_message)
for line in difflib.ndiff(first.splitlines(True), second.splitlines(True)):
failure_message.append(line)
if not line.endswith('\n'):
failure_message.append('\n')
if line_limit and len(failure_message) > line_limit:
n_omitted = len(failure_message) - line_limit
failure_message = failure_message[:line_limit]
failure_message.append(
'(... and {} more delta lines omitted for brevity.)\n'.format(
n_omitted))
raise self.failureException(''.join(failure_message))
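# A minimal sketch of the line_limit extension above (strings are hypothetical;
# first_long/second_long stand for any long multi-line strings):
#   self.assertMultiLineEqual('a\nb\n', 'a\nb\n')  # passes
#   self.assertMultiLineEqual(first_long, second_long, line_limit=20)
#   # on failure, the ndiff output is truncated after roughly 20 lines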
def assertBetween(self, value, minv, maxv, msg=None):
"""Asserts that value is between minv and maxv (inclusive)."""
msg = self._formatMessage(msg,
'"%r" unexpectedly not between "%r" and "%r"' %
(value, minv, maxv))
self.assertTrue(minv <= value, msg)
self.assertTrue(maxv >= value, msg)
def assertRegexMatch(self, actual_str, regexes, message=None):
r"""Asserts that at least one regex in regexes matches str.
If possible you should use assertRegexpMatches, which is a simpler
version of this method. assertRegexpMatches takes a single regular
expression (a string or re compiled object) instead of a list.
Notes:
1. This function uses substring matching, i.e. the matching
succeeds if *any* substring of the error message matches *any*
regex in the list. This is more convenient for the user than
full-string matching.
2. If regexes is the empty list, the matching will always fail.
3. Use regexes=[''] for a regex that will always pass.
4. '.' matches any single character *except* the newline. To
match any character, use '(.|\n)'.
5. '^' matches the beginning of each line, not just the beginning
of the string. Similarly, '$' matches the end of each line.
6. An exception will be thrown if regexes contains an invalid
regex.
Args:
actual_str: The string we try to match with the items in regexes.
regexes: The regular expressions we want to match against str.
See "Notes" above for detailed notes on how this is interpreted.
message: The message to be printed if the test fails.
"""
if isinstance(regexes, _TEXT_OR_BINARY_TYPES):
self.fail('regexes is string or bytes; use assertRegexpMatches instead.',
message)
if not regexes:
self.fail('No regexes specified.', message)
regex_type = type(regexes[0])
for regex in regexes[1:]:
if type(regex) is not regex_type: # pylint: disable=unidiomatic-typecheck
self.fail('regexes list must all be the same type.', message)
if regex_type is bytes and isinstance(actual_str, six.text_type):
regexes = [regex.decode('utf-8') for regex in regexes]
regex_type = six.text_type
elif regex_type is six.text_type and isinstance(actual_str, bytes):
regexes = [regex.encode('utf-8') for regex in regexes]
regex_type = bytes
if regex_type is six.text_type:
regex = u'(?:%s)' % u')|(?:'.join(regexes)
elif regex_type is bytes:
regex = b'(?:' + (b')|(?:'.join(regexes)) + b')'
else:
self.fail('Only know how to deal with unicode str or bytes regexes.',
message)
if not re.search(regex, actual_str, re.MULTILINE):
self.fail('"%s" does not contain any of these regexes: %s.' %
(actual_str, regexes), message)
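# Illustrative sketch of assertRegexMatch (hypothetical values):
#   self.assertRegexMatch('some text', ['foo', 'text$'])  # passes: 2nd regex matches
#   self.assertRegexMatch('some text', [''])              # always passes (note 3)
#   self.assertRegexMatch('some text', [])                # always fails (note 2)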
def assertCommandSucceeds(self, command, regexes=(b'',), env=None,
close_fds=True, msg=None):
"""Asserts that a shell command succeeds (i.e. exits with code 0).
Args:
command: List or string representing the command to run.
regexes: List of regular expression byte strings that match success.
env: Dictionary of environment variable settings. If None, no environment
variables will be set for the child process. This is to make tests
more hermetic. NOTE: this behavior is different than the standard
subprocess module.
close_fds: Whether or not to close all open fd's in the child after
forking.
msg: Optional message to report on failure.
"""
(ret_code, err) = get_command_stderr(command, env, close_fds)
# We need bytes regexes here because `err` is bytes.
# Accommodate code which listed their output regexes w/o the b'' prefix by
# converting them to bytes for the user.
if isinstance(regexes[0], six.text_type):
regexes = [regex.encode('utf-8') for regex in regexes]
command_string = get_command_string(command)
self.assertEqual(
ret_code, 0,
self._formatMessage(msg,
'Running command\n'
'%s failed with error code %s and message\n'
'%s' % (_quote_long_string(command_string),
ret_code,
_quote_long_string(err)))
)
self.assertRegexMatch(
err,
regexes,
message=self._formatMessage(
msg,
'Running command\n'
'%s failed with error code %s and message\n'
'%s which matches no regex in %s' % (
_quote_long_string(command_string),
ret_code,
_quote_long_string(err),
regexes)))
def assertCommandFails(self, command, regexes, env=None, close_fds=True,
msg=None):
"""Asserts a shell command fails and the error matches a regex in a list.
Args:
command: List or string representing the command to run.
regexes: the list of regular expression strings.
env: Dictionary of environment variable settings. If None, no environment
variables will be set for the child process. This is to make tests
more hermetic. NOTE: this behavior is different than the standard
subprocess module.
close_fds: Whether or not to close all open fd's in the child after
forking.
msg: Optional message to report on failure.
"""
(ret_code, err) = get_command_stderr(command, env, close_fds)
# We need bytes regexes here because `err` is bytes.
# Accommodate code which listed their output regexes w/o the b'' prefix by
# converting them to bytes for the user.
if isinstance(regexes[0], six.text_type):
regexes = [regex.encode('utf-8') for regex in regexes]
command_string = get_command_string(command)
self.assertNotEqual(
ret_code, 0,
self._formatMessage(msg, 'The following command succeeded '
'while expected to fail:\n%s' %
_quote_long_string(command_string)))
self.assertRegexMatch(
err,
regexes,
message=self._formatMessage(
msg,
'Running command\n'
'%s failed with error code %s and message\n'
'%s which matches no regex in %s' % (
_quote_long_string(command_string),
ret_code,
_quote_long_string(err),
regexes)))
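# Hedged sketch of the two command assertions above (POSIX paths assumed;
# values are illustrative):
#   self.assertCommandSucceeds(['/bin/true'])               # exits 0
#   self.assertCommandSucceeds('echo hi', regexes=[b'hi'])  # string runs via shell
#   self.assertCommandFails(['/bin/false'], [b''])          # nonzero exit code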
class _AssertRaisesContext(object):
def __init__(self, expected_exception, test_case, test_func, msg=None):
self.expected_exception = expected_exception
self.test_case = test_case
self.test_func = test_func
self.msg = msg
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
if exc_type is None:
self.test_case.fail(self.expected_exception.__name__ + ' not raised',
self.msg)
if not issubclass(exc_type, self.expected_exception):
return False
self.test_func(exc_value)
return True
def assertRaisesWithPredicateMatch(self, expected_exception, predicate,
callable_obj=None, *args, **kwargs):
"""Asserts that exception is thrown and predicate(exception) is true.
Args:
expected_exception: Exception class expected to be raised.
predicate: Function of one argument that inspects the passed-in exception
and returns True (success) or False (please fail the test).
callable_obj: Function to be called.
*args: Extra args.
**kwargs: Extra keyword args.
Returns:
A context manager if callable_obj is None. Otherwise, None.
Raises:
self.failureException if callable_obj does not raise a matching exception.
"""
def Check(err):
self.assertTrue(predicate(err),
'%r does not match predicate %r' % (err, predicate))
context = self._AssertRaisesContext(expected_exception, self, Check)
if callable_obj is None:
return context
with context:
callable_obj(*args, **kwargs)
def assertRaisesWithLiteralMatch(self, expected_exception,
expected_exception_message,
callable_obj=None, *args, **kwargs):
"""Asserts that the message in a raised exception equals the given string.
Unlike assertRaisesRegexp, this method takes a literal string, not
a regular expression.
with self.assertRaisesWithLiteralMatch(ExType, 'message'):
DoSomething()
Args:
expected_exception: Exception class expected to be raised.
expected_exception_message: String message expected in the raised
exception. For a raised exception e, expected_exception_message must
equal str(e).
callable_obj: Function to be called, or None to return a context.
*args: Extra args.
**kwargs: Extra kwargs.
Returns:
A context manager if callable_obj is None. Otherwise, None.
Raises:
self.failureException if callable_obj does not raise a matching exception.
"""
def Check(err):
actual_exception_message = str(err)
self.assertTrue(expected_exception_message == actual_exception_message,
'Exception message does not match.\n'
'Expected: %r\n'
'Actual: %r' % (expected_exception_message,
actual_exception_message))
context = self._AssertRaisesContext(expected_exception, self, Check)
if callable_obj is None:
return context
with context:
callable_obj(*args, **kwargs)
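# Illustrative sketch of the two exception assertions above (hypothetical):
#   with self.assertRaisesWithLiteralMatch(ValueError, 'bad value'):
#     raise ValueError('bad value')
#   with self.assertRaisesWithPredicateMatch(ValueError,
#                                            lambda e: 'bad' in str(e)):
#     raise ValueError('bad value')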
def assertContainsInOrder(self, strings, target, msg=None):
"""Asserts that the strings provided are found in the target in order.
This may be useful for checking HTML output.
Args:
strings: A list of strings, such as [ 'fox', 'dog' ]
target: A target string in which to look for the strings, such as
'The quick brown fox jumped over the lazy dog'.
msg: Optional message to report on failure.
"""
if isinstance(strings, (bytes, unicode if str is bytes else str)):
strings = (strings,)
current_index = 0
last_string = None
for string in strings:
index = target.find(str(string), current_index)
if index == -1 and current_index == 0:
self.fail("Did not find '%s' in '%s'" %
(string, target), msg)
elif index == -1:
self.fail("Did not find '%s' after '%s' in '%s'" %
(string, last_string, target), msg)
last_string = string
current_index = index
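# Illustrative sketch of assertContainsInOrder, reusing the docstring's values:
#   self.assertContainsInOrder(
#       ['fox', 'dog'], 'The quick brown fox jumped over the lazy dog')  # passes
#   self.assertContainsInOrder(
#       ['dog', 'fox'], 'The quick brown fox jumped over the lazy dog')  # fails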
def assertContainsSubsequence(self, container, subsequence, msg=None):
"""Asserts that "container" contains "subsequence" as a subsequence.
Asserts that "container" contains all the elements of "subsequence", in
order, but possibly with other elements interspersed. For example, [1, 2, 3]
is a subsequence of [0, 0, 1, 2, 0, 3, 0] but not of [0, 0, 1, 3, 0, 2, 0].
Args:
container: the list we're testing for subsequence inclusion.
subsequence: the list we hope will be a subsequence of container.
msg: Optional message to report on failure.
"""
first_nonmatching = None
reversed_container = list(reversed(container))
subsequence = list(subsequence)
for e in subsequence:
if e not in reversed_container:
first_nonmatching = e
break
while e != reversed_container.pop():
pass
if first_nonmatching is not None:
self.fail('%s not a subsequence of %s. First non-matching element: %s' %
(subsequence, container, first_nonmatching), msg)
def assertContainsExactSubsequence(self, container, subsequence, msg=None):
"""Asserts that "container" contains "subsequence" as an exact subsequence.
Asserts that "container" contains all the elements of "subsequence", in
order, and without other elements interspersed. For example, [1, 2, 3] is an
exact subsequence of [0, 0, 1, 2, 3, 0] but not of [0, 0, 1, 2, 0, 3, 0].
Args:
container: the list we're testing for subsequence inclusion.
subsequence: the list we hope will be an exact subsequence of container.
msg: Optional message to report on failure.
"""
container = list(container)
subsequence = list(subsequence)
longest_match = 0
for start in xrange(1 + len(container) - len(subsequence)):
if longest_match == len(subsequence):
break
index = 0
while (index < len(subsequence) and
subsequence[index] == container[start + index]):
index += 1
longest_match = max(longest_match, index)
if longest_match < len(subsequence):
self.fail('%s not an exact subsequence of %s. '
'Longest matching prefix: %s' %
(subsequence, container, subsequence[:longest_match]), msg)
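# Illustrative sketch, reusing the values from the docstrings above:
#   self.assertContainsSubsequence([0, 0, 1, 2, 0, 3, 0], [1, 2, 3])       # passes
#   self.assertContainsSubsequence([0, 0, 1, 3, 0, 2, 0], [1, 2, 3])       # fails
#   self.assertContainsExactSubsequence([0, 0, 1, 2, 3, 0], [1, 2, 3])     # passes
#   self.assertContainsExactSubsequence([0, 0, 1, 2, 0, 3, 0], [1, 2, 3])  # fails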
def assertTotallyOrdered(self, *groups, **kwargs):
"""Asserts that total ordering has been implemented correctly.
For example, say you have a class A that compares only on its attribute x.
Comparators other than __lt__ are omitted for brevity.
class A(object):
def __init__(self, x, y):
self.x = x
self.y = y
def __hash__(self):
return hash(self.x)
def __lt__(self, other):
try:
return self.x < other.x
except AttributeError:
return NotImplemented
assertTotallyOrdered will check that instances can be ordered correctly.
For example,
self.assertTotallyOrdered(
[None], # None should come before everything else.
[1], # Integers sort earlier.
[A(1, 'a')],
[A(2, 'b')], # 2 is after 1.
[A(3, 'c'), A(3, 'd')], # The second argument is irrelevant.
[A(4, 'z')],
['foo']) # Strings sort last.
Args:
*groups: A list of groups of elements. Each group of elements is a list
of objects that are equal. The elements in each group must be less
than the elements in the group after it. For example, these groups are
totally ordered: [None], [1], [2, 2], [3].
**kwargs: optional msg keyword argument can be passed.
"""
def CheckOrder(small, big):
"""Ensures small is ordered before big."""
self.assertFalse(small == big,
self._formatMessage(msg, '%r unexpectedly equals %r' %
(small, big)))
self.assertTrue(small != big,
self._formatMessage(msg, '%r unexpectedly equals %r' %
(small, big)))
self.assertLess(small, big, msg)
self.assertFalse(big < small,
self._formatMessage(msg,
'%r unexpectedly less than %r' %
(big, small)))
self.assertLessEqual(small, big, msg)
self.assertFalse(big <= small, self._formatMessage(
msg,
'%r unexpectedly less than or equal to %r' % (big, small)))
self.assertGreater(big, small, msg)
self.assertFalse(small > big,
self._formatMessage(msg,
'%r unexpectedly greater than %r' %
(small, big)))
self.assertGreaterEqual(big, small, msg)
self.assertFalse(small >= big, self._formatMessage(
msg,
'%r unexpectedly greater than or equal to %r' % (small, big)))
def CheckEqual(a, b):
"""Ensures that a and b are equal."""
self.assertEqual(a, b, msg)
self.assertFalse(a != b,
self._formatMessage(msg, '%r unexpectedly unequals %r' %
(a, b)))
self.assertEqual(hash(a), hash(b), self._formatMessage(
msg,
'hash %d of %r unexpectedly not equal to hash %d of %r' %
(hash(a), a, hash(b), b)))
self.assertFalse(a < b,
self._formatMessage(msg,
'%r unexpectedly less than %r' %
(a, b)))
self.assertFalse(b < a,
self._formatMessage(msg,
'%r unexpectedly less than %r' %
(b, a)))
self.assertLessEqual(a, b, msg)
self.assertLessEqual(b, a, msg)
self.assertFalse(a > b,
self._formatMessage(msg,
'%r unexpectedly greater than %r' %
(a, b)))
self.assertFalse(b > a,
self._formatMessage(msg,
'%r unexpectedly greater than %r' %
(b, a)))
self.assertGreaterEqual(a, b, msg)
self.assertGreaterEqual(b, a, msg)
msg = kwargs.get('msg')
# For every combination of elements, check the order of every pair of
# elements.
for elements in itertools.product(*groups):
elements = list(elements)
for index, small in enumerate(elements[:-1]):
for big in elements[index + 1:]:
CheckOrder(small, big)
# Check that every element in each group is equal.
for group in groups:
for a in group:
CheckEqual(a, a)
for a, b in itertools.product(group, group):
CheckEqual(a, b)
def assertDictEqual(self, a, b, msg=None):
"""Raises AssertionError if a and b are not equal dictionaries.
Args:
a: A dict, the expected value.
b: A dict, the actual value.
msg: An optional str, the associated message.
Raises:
AssertionError: if the dictionaries are not equal.
"""
self.assertIsInstance(a, dict, self._formatMessage(
msg,
'First argument is not a dictionary'
))
self.assertIsInstance(b, dict, self._formatMessage(
msg,
'Second argument is not a dictionary'
))
def Sorted(list_of_items):
try:
return sorted(list_of_items)  # In Python 3, mixed-type items may be unorderable.
except TypeError:
return list_of_items
if a == b:
return
a_items = Sorted(list(six.iteritems(a)))
b_items = Sorted(list(six.iteritems(b)))
unexpected = []
missing = []
different = []
safe_repr = unittest.util.safe_repr
def Repr(dikt):
"""Deterministic repr for dict."""
# Sort the entries based on their repr, not based on their sort order,
# which will be non-deterministic across executions, for many types.
entries = sorted((safe_repr(k), safe_repr(v))
for k, v in six.iteritems(dikt))
return '{%s}' % (', '.join('%s: %s' % pair for pair in entries))
message = ['%s != %s%s' % (Repr(a), Repr(b), ' (%s)' % msg if msg else '')]
# The standard library default output confounds lexical difference with
# value difference; treat them separately.
for a_key, a_value in a_items:
if a_key not in b:
missing.append((a_key, a_value))
elif a_value != b[a_key]:
different.append((a_key, a_value, b[a_key]))
for b_key, b_value in b_items:
if b_key not in a:
unexpected.append((b_key, b_value))
if unexpected:
message.append(
'Unexpected, but present entries:\n%s' % ''.join(
'%s: %s\n' % (safe_repr(k), safe_repr(v)) for k, v in unexpected))
if different:
message.append(
'repr() of differing entries:\n%s' % ''.join(
'%s: %s != %s\n' % (safe_repr(k), safe_repr(a_value),
safe_repr(b_value))
for k, a_value, b_value in different))
if missing:
message.append(
'Missing entries:\n%s' % ''.join(
('%s: %s\n' % (safe_repr(k), safe_repr(v)) for k, v in missing)))
raise self.failureException('\n'.join(message))
def assertUrlEqual(self, a, b, msg=None):
"""Asserts that urls are equal, ignoring ordering of query params."""
parsed_a = urllib.parse.urlparse(a)
parsed_b = urllib.parse.urlparse(b)
self.assertEqual(parsed_a.scheme, parsed_b.scheme, msg)
self.assertEqual(parsed_a.netloc, parsed_b.netloc, msg)
self.assertEqual(parsed_a.path, parsed_b.path, msg)
self.assertEqual(parsed_a.fragment, parsed_b.fragment, msg)
self.assertEqual(sorted(parsed_a.params.split(';')),
sorted(parsed_b.params.split(';')), msg)
self.assertDictEqual(
urllib.parse.parse_qs(parsed_a.query, keep_blank_values=True),
urllib.parse.parse_qs(parsed_b.query, keep_blank_values=True), msg)
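# Illustrative sketch of assertUrlEqual (hypothetical URLs):
#   self.assertUrlEqual('http://a.com/p?x=1&y=2',
#                       'http://a.com/p?y=2&x=1')  # passes: query order ignored
#   self.assertUrlEqual('http://a.com/p?x=1',
#                       'http://a.com/p?x=2')      # fails: query values differ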
def assertSameStructure(self, a, b, aname='a', bname='b', msg=None):
"""Asserts that two values contain the same structural content.
The two arguments should be data trees consisting of trees of dicts and
lists. They will be deeply compared by walking into the contents of dicts
and lists; other items will be compared using the == operator.
If the two structures differ in content, the failure message will indicate
the location within the structures where the first difference is found.
This may be helpful when comparing large structures.
Args:
a: The first structure to compare.
b: The second structure to compare.
aname: Variable name to use for the first structure in assertion messages.
bname: Variable name to use for the second structure.
msg: Additional text to include in the failure message.
"""
# Accumulate all the problems found so we can report all of them at once
# rather than just stopping at the first
problems = []
_walk_structure_for_problems(a, b, aname, bname, problems)
# Avoid spamming the user too much
if self.maxDiff is not None:
max_problems_to_show = self.maxDiff // 80
if len(problems) > max_problems_to_show:
problems = problems[0:max_problems_to_show-1] + ['...']
if problems:
self.fail('; '.join(problems), msg)
def assertJsonEqual(self, first, second, msg=None):
"""Asserts that the JSON objects defined in two strings are equal.
A summary of the differences will be included in the failure message
using assertSameStructure.
Args:
first: A string containing JSON to decode and compare to second.
second: A string containing JSON to decode and compare to first.
msg: Additional text to include in the failure message.
"""
try:
first_structured = json.loads(first)
except ValueError as e:
raise ValueError(self._formatMessage(
msg,
'could not decode first JSON value %s: %s' % (first, e)))
try:
second_structured = json.loads(second)
except ValueError as e:
raise ValueError(self._formatMessage(
msg,
'could not decode second JSON value %s: %s' % (second, e)))
self.assertSameStructure(first_structured, second_structured,
aname='first', bname='second', msg=msg)
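# Illustrative sketch of the structural comparisons above (hypothetical values):
#   self.assertSameStructure({'a': [1, 2]}, {'a': [1, 2]})        # passes
#   self.assertSameStructure({'a': [1]}, {'a': [2]})              # fails: a['a'][0] differs
#   self.assertJsonEqual('{"a": 1, "b": 2}', '{"b": 2, "a": 1}')  # passes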
def _getAssertEqualityFunc(self, first, second):
try:
return super(TestCase, self)._getAssertEqualityFunc(first, second)
except AttributeError:
# This is a workaround if unittest.TestCase.__init__ was never run.
# It usually means that somebody created a subclass just for the
# assertions and has overridden __init__. "assertTrue" is a safe
# value that will not make __init__ raise a ValueError.
test_method = getattr(self, '_testMethodName', 'assertTrue')
super(TestCase, self).__init__(test_method)
return super(TestCase, self)._getAssertEqualityFunc(first, second)
def fail(self, msg=None, prefix=None):
"""Fail immediately with the given message, optionally prefixed."""
return super(TestCase, self).fail(self._formatMessage(prefix, msg))
def _sorted_list_difference(expected, actual):
"""Finds elements in only one or the other of two, sorted input lists.
Returns a two-element tuple of lists. The first list contains those
elements in the "expected" list but not in the "actual" list, and the
second contains those elements in the "actual" list but not in the
"expected" list. Duplicate elements in either input list are ignored.
Args:
expected: The list we expected.
actual: The list we actually got.
Returns:
(missing, unexpected)
missing: items in expected that are not in actual.
unexpected: items in actual that are not in expected.
"""
i = j = 0
missing = []
unexpected = []
while True:
try:
e = expected[i]
a = actual[j]
if e < a:
missing.append(e)
i += 1
while expected[i] == e:
i += 1
elif e > a:
unexpected.append(a)
j += 1
while actual[j] == a:
j += 1
else:
i += 1
try:
while expected[i] == e:
i += 1
finally:
j += 1
while actual[j] == a:
j += 1
except IndexError:
missing.extend(expected[i:])
unexpected.extend(actual[j:])
break
return missing, unexpected
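# Worked example for _sorted_list_difference (hypothetical sorted inputs):
#   _sorted_list_difference([1, 2, 4], [2, 3, 5]) -> ([1, 4], [3, 5])
#   i.e. missing = [1, 4] (expected only), unexpected = [3, 5] (actual only).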
def _walk_structure_for_problems(a, b, aname, bname, problem_list):
"""The recursive comparison behind assertSameStructure."""
if type(a) != type(b) and not ( # pylint: disable=unidiomatic-typecheck
isinstance(a, six.integer_types) and isinstance(b, six.integer_types)):
# We do not distinguish between int and long types as 99.99% of Python 2
# code should never care. They collapse into a single type in Python 3.
problem_list.append('%s is a %r but %s is a %r' %
(aname, type(a), bname, type(b)))
# If they have different types there's no point continuing
return
if isinstance(a, collections.Mapping):
for k in a:
if k in b:
_walk_structure_for_problems(
a[k], b[k], '%s[%r]' % (aname, k), '%s[%r]' % (bname, k),
problem_list)
else:
problem_list.append(
"%s has [%r] with value %r but it's missing in %s" %
(aname, k, a[k], bname))
for k in b:
if k not in a:
problem_list.append(
'%s lacks [%r] but %s has it with value %r' %
(aname, k, bname, b[k]))
# Strings/bytes are Sequences but we'll just do those with regular !=
elif (isinstance(a, collections.Sequence) and
not isinstance(a, _TEXT_OR_BINARY_TYPES)):
minlen = min(len(a), len(b))
for i in xrange(minlen):
_walk_structure_for_problems(
a[i], b[i], '%s[%d]' % (aname, i), '%s[%d]' % (bname, i),
problem_list)
for i in xrange(minlen, len(a)):
problem_list.append('%s has [%i] with value %r but %s does not' %
(aname, i, a[i], bname))
for i in xrange(minlen, len(b)):
problem_list.append('%s lacks [%i] but %s has it with value %r' %
(aname, i, bname, b[i]))
else:
if a != b:
problem_list.append('%s is %r but %s is %r' % (aname, a, bname, b))
def get_command_string(command):
"""Returns an escaped string that can be used as a shell command.
Args:
command: List or string representing the command to run.
Returns:
A string suitable for use as a shell command.
"""
if isinstance(command, six.string_types):
return command
else:
if os.name == 'nt':
return ' '.join(command)
else:
# The following is identical to Python 3's shlex.quote function.
command_string = ''
for word in command:
# Single quote word, and replace each ' in word with '"'"'
command_string += "'" + word.replace("'", "'\"'\"'") + "' "
return command_string[:-1]
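# Worked example of the POSIX branch above (hypothetical input):
#   get_command_string(['echo', "it's"])  ->  'echo' 'it'"'"'s'
# Each word is single-quoted, with embedded single quotes escaped as '"'"'.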
def get_command_stderr(command, env=None, close_fds=True):
"""Runs the given shell command and returns a tuple.
Args:
command: List or string representing the command to run.
env: Dictionary of environment variable settings. If None, no environment
variables will be set for the child process. This is to make tests
more hermetic. NOTE: this behavior is different than the standard
subprocess module.
close_fds: Whether or not to close all open fd's in the child after forking.
On Windows, this is ignored and close_fds is always False.
Returns:
Tuple of (exit status, text printed to stdout and stderr by the command).
"""
if env is None: env = {}
if os.name == 'nt':
# Windows does not support setting close_fds to True while also redirecting
# standard handles.
close_fds = False
use_shell = isinstance(command, six.string_types)
process = subprocess.Popen(
command,
close_fds=close_fds,
env=env,
shell=use_shell,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
output = process.communicate()[0]
exit_status = process.wait()
return (exit_status, output)
def _quote_long_string(s):
"""Quotes a potentially multi-line string to make the start and end obvious.
Args:
s: A string.
Returns:
The quoted string.
"""
if isinstance(s, (bytes, bytearray)):
try:
s = s.decode('utf-8')
except UnicodeDecodeError:
s = str(s)
return ('8<-----------\n' +
s + '\n' +
'----------->8\n')
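# Worked example (hypothetical input): _quote_long_string('two\nlines') returns
#   8<-----------
#   two
#   lines
#   ----------->8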
class _TestProgramManualRun(unittest.TestProgram):
"""A TestProgram which runs the tests manually."""
def runTests(self, do_run=False):
"""Runs the tests."""
if do_run:
unittest.TestProgram.runTests(self)
def print_python_version():
# Having this in the test output logs by default helps debugging when all
# you've got is the log and no other idea of which Python was used.
sys.stderr.write('Running tests under Python {0[0]}.{0[1]}.{0[2]}: '
'{1}\n'.format(
sys.version_info,
sys.executable if sys.executable else 'embedded.'))
def main(*args, **kwargs):
"""Executes a set of Python unit tests.
Usually this function is called without arguments, so the
unittest.TestProgram instance will get created with the default settings,
so it will run all test methods of all TestCase classes in the __main__
module.
Args:
*args: Positional arguments passed through to unittest.TestProgram.__init__.
**kwargs: Keyword arguments passed through to unittest.TestProgram.__init__.
"""
print_python_version()
_run_in_app(run_tests, args, kwargs)
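# A minimal sketch of typical usage from a test module (MyTest is
# hypothetical; assumes this module is imported as absltest):
#   class MyTest(absltest.TestCase):
#     def test_addition(self):
#       self.assertEqual(1 + 1, 2)
#
#   if __name__ == '__main__':
#     absltest.main()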
def _is_in_app_main():
"""Returns True iff app.run is active."""
f = sys._getframe().f_back # pylint: disable=protected-access
while f:
if f.f_code == six.get_function_code(app.run):
return True
f = f.f_back
return False
class _SavedFlag(object):
"""Helper class for saving and restoring a flag value."""
def __init__(self, flag):
self.flag = flag
self.value = flag.value
self.present = flag.present
def restore_flag(self):
self.flag.value = self.value
self.flag.present = self.present
def _register_sigterm_with_faulthandler():
"""Have faulthandler dump stacks on SIGTERM. Useful to diagnose timeouts."""
if faulthandler and getattr(faulthandler, 'register', None):
# faulthandler.register is not available on Windows.
# faulthandler.enable() is already called by app.run.
try:
faulthandler.register(signal.SIGTERM, chain=True)
except Exception as e: # pylint: disable=broad-except
sys.stderr.write('faulthandler.register(SIGTERM) failed '
'%r; ignoring.\n' % e)
def _run_in_app(function, args, kwargs):
"""Executes a set of Python unit tests, ensuring app.run.
This is a private function, users should call absltest.main().
_run_in_app calculates argv to be the command-line arguments of this program
(without the flags), sets the default of FLAGS.alsologtostderr to True,
then it calls function(argv, args, kwargs), making sure that `function'
will get called within app.run(). _run_in_app does this by checking whether
it is called by app.run(), or by calling app.run() explicitly.
The reason why app.run has to be ensured is to make sure that
flags are parsed and stripped properly, and other initializations done by
the app module are also carried out, no matter if absltest.main() is called
from within or outside app.run().
If _run_in_app is called from within app.run(), then it will reparse
sys.argv and pass the result without command-line flags into the argv
argument of `function'. The reason why this parsing is needed is that
__main__.main() calls absltest.main() without passing its argv. So the
only way _run_in_app could get to know the argv without the flags is that
it reparses sys.argv.
_run_in_app changes the default of FLAGS.alsologtostderr to True so that the
test program's stderr will contain all the log messages unless otherwise
specified on the command-line. This overrides any explicit assignment to
FLAGS.alsologtostderr by the test program prior to the call to _run_in_app()
(e.g. in __main__.main).
Please note that _run_in_app (and the function it calls) is allowed to make
changes to kwargs.
Args:
function: absltest.run_tests or a similar function. It will be called as
function(argv, args, kwargs) where argv is a list containing the
elements of sys.argv without the command-line flags.
args: Positional arguments passed through to unittest.TestProgram.__init__.
kwargs: Keyword arguments passed through to unittest.TestProgram.__init__.
"""
if _is_in_app_main():
_register_sigterm_with_faulthandler()
# Save command-line flags so the side effects of FLAGS(sys.argv) can be
# undone.
flag_objects = (FLAGS[name] for name in FLAGS)
saved_flags = dict((f.name, _SavedFlag(f)) for f in flag_objects)
# Change the default of alsologtostderr from False to True, so the test
# program's stderr will contain all the log messages.
# If --alsologtostderr=false is specified in the command-line, or user
# has called FLAGS.alsologtostderr = False before, then the value is kept
# False.
FLAGS.set_default('alsologtostderr', True)
# Remove it from saved flags so it doesn't get restored later.
del saved_flags['alsologtostderr']
# The call FLAGS(sys.argv) parses sys.argv, returns the arguments
# without the flags, and -- as a side effect -- modifies flag values in
# FLAGS. We don't want the side effect, because we don't want to
# override flag changes the program did (e.g. in __main__.main)
# after the command-line has been parsed. So we have the for loop below
# to change back flags to their old values.
argv = FLAGS(sys.argv)
for saved_flag in six.itervalues(saved_flags):
saved_flag.restore_flag()
function(argv, args, kwargs)
else:
# Send logging to stderr. Use --alsologtostderr instead of --logtostderr
# in case tests are reading their own logs.
FLAGS.set_default('alsologtostderr', True)
def main_function(argv):
_register_sigterm_with_faulthandler()
function(argv, args, kwargs)
app.run(main=main_function)
def _is_suspicious_attribute(testCaseClass, name):
"""Returns True if an attribute is a method named like a test method."""
if name.startswith('Test') and len(name) > 4 and name[4].isupper():
attr = getattr(testCaseClass, name)
if inspect.isfunction(attr) or inspect.ismethod(attr):
args = inspect.getargspec(attr)
return (len(args.args) == 1 and args.args[0] == 'self'
and args.varargs is None and args.keywords is None)
return False
class TestLoader(unittest.TestLoader):
"""A test loader which supports common test features.
Supported features include:
* Banning untested methods with test-like names: methods attached to this
testCase with names starting with `Test` are ignored by the test runner,
and often represent mistakenly-omitted test cases. This loader will raise
a TypeError when attempting to load a TestCase with such methods.
* Randomization of test case execution order (optional).
"""
_ERROR_MSG = textwrap.dedent("""Method '%s' is named like a test case but
is not one. This is often a bug. If you want it to be a test method,
name it with 'test' in lowercase. If not, rename the method to not begin
with 'Test'.""")
def __init__(self, *args, **kwds):
super(TestLoader, self).__init__(*args, **kwds)
seed = _get_default_randomize_ordering_seed()
if seed:
self._seed = seed
self._random = random.Random(self._seed)
else:
self._seed = None
self._random = None
def getTestCaseNames(self, testCaseClass): # pylint:disable=invalid-name
"""Validates and returns a (possibly randomized) list of test case names."""
for name in dir(testCaseClass):
if _is_suspicious_attribute(testCaseClass, name):
raise TypeError(TestLoader._ERROR_MSG % name)
names = super(TestLoader, self).getTestCaseNames(testCaseClass)
if self._seed is not None:
logging.info('Randomizing test order with seed: %d', self._seed)
logging.info('To reproduce this order, re-run with '
'--test_randomize_ordering_seed=%d', self._seed)
self._random.shuffle(names)
return names
def get_default_xml_output_filename():
if os.environ.get('XML_OUTPUT_FILE'):
return os.environ['XML_OUTPUT_FILE']
elif os.environ.get('RUNNING_UNDER_TEST_DAEMON'):
return os.path.join(os.path.dirname(FLAGS.test_tmpdir), 'test_detail.xml')
elif os.environ.get('TEST_XMLOUTPUTDIR'):
return os.path.join(
os.environ['TEST_XMLOUTPUTDIR'],
os.path.splitext(os.path.basename(sys.argv[0]))[0] + '.xml')
def _setup_filtering(argv):
"""Implements the bazel test filtering protocol.
The following environment variable is used in this method:
TESTBRIDGE_TEST_ONLY: string, if set, is forwarded to the unittest
framework to use as a test filter. Its value is split with shlex and
inserted into argv as positional test arguments.
Args:
argv: the argv to mutate in-place.
"""
test_filter = os.environ.get('TESTBRIDGE_TEST_ONLY')
if argv is None or not test_filter:
return
argv[1:1] = shlex.split(test_filter)
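# Illustrative behavior (hypothetical values): with
# TESTBRIDGE_TEST_ONLY='MyTest.test_a MyTest.test_b', an argv of ['prog']
# becomes ['prog', 'MyTest.test_a', 'MyTest.test_b'].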
def _setup_sharding(custom_loader=None):
"""Implements the bazel sharding protocol.
The following environment variables are used in this method:
TEST_SHARD_STATUS_FILE: string, if set, points to a file. We write a blank
file to tell the test runner that this test implements the test sharding
protocol.
TEST_TOTAL_SHARDS: int, if set, sharding is requested.
TEST_SHARD_INDEX: int, must be set if TEST_TOTAL_SHARDS is set. Specifies
the shard index for this instance of the test process. Must satisfy:
0 <= TEST_SHARD_INDEX < TEST_TOTAL_SHARDS.
Args:
custom_loader: A TestLoader to be made sharded.
Returns:
The test loader for shard-filtering or the standard test loader, depending
on the sharding environment variables.
"""
# It may be useful to write the shard file even if the other sharding
# environment variables are not set. Test runners may use this functionality
# to query whether a test binary implements the test sharding protocol.
if 'TEST_SHARD_STATUS_FILE' in os.environ:
try:
f = None
try:
f = open(os.environ['TEST_SHARD_STATUS_FILE'], 'w')
f.write('')
except IOError:
sys.stderr.write('Error opening TEST_SHARD_STATUS_FILE (%s). Exiting.'
% os.environ['TEST_SHARD_STATUS_FILE'])
sys.exit(1)
finally:
if f is not None: f.close()
base_loader = custom_loader or TestLoader()
if 'TEST_TOTAL_SHARDS' not in os.environ:
# Not using sharding, use the expected test loader.
return base_loader
total_shards = int(os.environ['TEST_TOTAL_SHARDS'])
shard_index = int(os.environ['TEST_SHARD_INDEX'])
if shard_index < 0 or shard_index >= total_shards:
sys.stderr.write('ERROR: Bad sharding values. index=%d, total=%d\n' %
(shard_index, total_shards))
sys.exit(1)
# Replace the original getTestCaseNames with one that returns
# the test case names for this shard.
delegate_get_names = base_loader.getTestCaseNames
bucket_iterator = itertools.cycle(xrange(total_shards))
def getShardedTestCaseNames(testCaseClass):
filtered_names = []
for testcase in sorted(delegate_get_names(testCaseClass)):
bucket = next(bucket_iterator)
if bucket == shard_index:
filtered_names.append(testcase)
return filtered_names
base_loader.getTestCaseNames = getShardedTestCaseNames
return base_loader
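# Worked example of the round-robin sharding above (hypothetical env values):
#   TEST_TOTAL_SHARDS=3 and TEST_SHARD_INDEX=1 with sorted test names
#   [test_a, test_b, test_c, test_d, test_e] assigns buckets 0,1,2,0,1,
#   so this shard runs [test_b, test_e].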
def _run_and_get_tests_result(argv, args, kwargs, xml_test_runner_class):
"""Executes a set of Python unit tests and returns the result."""
# Set up test filtering if requested in environment.
_setup_filtering(argv)
# Shard the (default or custom) loader if sharding is turned on.
kwargs['testLoader'] = _setup_sharding(kwargs.get('testLoader', None))
# XML file name is based upon (sorted by priority):
# --xml_output_file flag, XML_OUTPUT_FILE variable,
# RUNNING_UNDER_TEST_DAEMON variable or TEST_XMLOUTPUTDIR variable.
if not FLAGS.xml_output_file:
FLAGS.xml_output_file = get_default_xml_output_filename()
xml_output_file = FLAGS.xml_output_file
xml_output = None
if xml_output_file:
xml_output_dir = os.path.dirname(xml_output_file)
if xml_output_dir and not os.path.isdir(xml_output_dir):
try:
os.makedirs(xml_output_dir)
except OSError as e:
# File exists error can occur with concurrent tests
if e.errno != errno.EEXIST:
raise
if sys.version_info.major == 2:
xml_output = open(xml_output_file, 'w')
else:
xml_output = open(xml_output_file, 'w', encoding='utf-8')
# We can reuse testRunner if it supports XML output (e.g. by inheriting
# from xml_reporter.TextAndXMLTestRunner). Otherwise we need to use
# xml_reporter.TextAndXMLTestRunner.
if (kwargs.get('testRunner') is not None
and not hasattr(kwargs['testRunner'], 'set_default_xml_stream')):
sys.stderr.write('WARNING: XML_OUTPUT_FILE or --xml_output_file setting '
'overrides testRunner=%r setting (possibly from --pdb)'
% (kwargs['testRunner']))
# Passing a class object here allows TestProgram to initialize
# instances based on its kwargs and/or parsed command-line args.
kwargs['testRunner'] = xml_test_runner_class
if kwargs.get('testRunner') is None:
kwargs['testRunner'] = xml_test_runner_class
kwargs['testRunner'].set_default_xml_stream(xml_output)
# Make sure tmpdir exists.
if not os.path.isdir(FLAGS.test_tmpdir):
try:
os.makedirs(FLAGS.test_tmpdir)
except OSError as e:
# Concurrent test might have created the directory.
if e.errno != errno.EEXIST:
raise
# Let unittest.TestProgram.__init__ do its own argv parsing, e.g. for '-v',
# on argv, which is sys.argv without the command-line flags.
kwargs.setdefault('argv', argv)
try:
test_program = unittest.TestProgram(*args, **kwargs)
return test_program.result
finally:
if xml_output:
xml_output.close()
def run_tests(argv, args, kwargs):
"""Executes a set of Python unit tests.
Most users should call absltest.main() instead of run_tests.
Please note that run_tests should be called from app.run.
Calling absltest.main() would ensure that.
Please note that run_tests is allowed to make changes to kwargs.
Args:
argv: sys.argv with the command-line flags removed from the front, i.e. the
argv with which app.run() has called __main__.main.
args: Positional arguments passed through to unittest.TestProgram.__init__.
kwargs: Keyword arguments passed through to unittest.TestProgram.__init__.
"""
result = _run_and_get_tests_result(
argv, args, kwargs, xml_reporter.TextAndXMLTestRunner)
sys.exit(not result.wasSuccessful())