code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Project Euler 17: count the letters used when the numbers 1..1000 are
# written out in British English ("and" after "hundred"; spaces and
# hyphens are not counted).

# Letters in "one".."nine" (the units words, reused by every decade).
onetonine = len("onetwothreefourfivesixseveneightnine")
# Numbers 1-10.
onetoten = onetonine + len("ten")
# Numbers 11-19 (name is historical; "twenty" is counted with its decade).
eleventotwenty = len("eleventwelvethirteenfourteenfifteensixteenseventeeneighteennineteen")
# Each decade 20-29 .. 90-99: ten copies of the tens word plus "one".."nine".
twenties = len("twenty")*10 + onetonine
thirties = len("thirty")*10 + onetonine
forties = len("forty")*10 + onetonine
fifties = len("fifty")*10 + onetonine
sixties = len("sixty")*10 + onetonine
seventies = len("seventy")*10 + onetonine
eighties = len("eighty")*10 + onetonine
nineties = len("ninety")*10 + onetonine
# Total letters for 1-99.
hundred = onetoten + eleventotwenty + twenties + thirties + forties + fifties + sixties + seventies + eighties + nineties
# Each block 100-199 .. 900-999: "<prefix>hundredand" appears 100 times,
# minus the 3 letters of "and" for the round hundred itself, plus 1-99.
onehundreds = len("onehundredand") * 100 - 3 + hundred
twohundreds = len("twohundredand") * 100 - 3 + hundred
threehundreds = len("threehundredand") * 100 - 3 + hundred
fourhundreds = len("fourhundredand") * 100 - 3 + hundred
fivehundreds = len("fivehundredand") * 100 - 3 + hundred
sixhundreds = len("sixhundredand") * 100 - 3 + hundred
sevenhundreds = len("sevenhundredand") * 100 - 3 + hundred
eighthundreds = len("eighthundredand") * 100 - 3 + hundred
ninehundreds = len("ninehundredand") * 100 - 3 + hundred
# Grand total for 1-1000 ("onethousand" plus all nine hundreds-blocks plus 1-99).
thousands = len("onethousand") + ninehundreds + eighthundreds + sevenhundreds +\
    sixhundreds + fivehundreds + fourhundreds + threehundreds + twohundreds +\
    onehundreds + hundred
# BUG FIX: use the print() function instead of the Python-2-only print
# statement, so the script runs unchanged on both Python 2 and Python 3.
print(thousands)
| jreese/euler | python/problem17a.py | Python | mit | 1,333 |
from poliastro.twobody.thrust.change_a_inc import change_a_inc
from poliastro.twobody.thrust.change_argp import change_argp
from poliastro.twobody.thrust.change_ecc_inc import change_ecc_inc
from poliastro.twobody.thrust.change_ecc_quasioptimal import change_ecc_quasioptimal
__all__ = ["change_a_inc", "change_argp", "change_ecc_quasioptimal", "change_ecc_inc"]
| poliastro/poliastro | src/poliastro/twobody/thrust/__init__.py | Python | mit | 364 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Integration tests for tickets between 2000 and 2999.
Copyright 2010-2013 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import time
import test.integration.library as lib
import pytest
from omero.rtypes import *
class TestTickets3000(lib.ITest):
    """Integration tests for OMERO trac tickets numbered 2000-2999.

    Each test method is named after the ticket it exercises.  The suite
    talks to a live OMERO server through the session helpers inherited
    from lib.ITest (self.client, self.update, self.root, self.query, ...).

    NOTE(review): the names `omero` and `Ice` are not imported explicitly
    in this file's visible imports; presumably they arrive via
    `from omero.rtypes import *` — confirm against the omero package.
    """

    def test2396(self):
        """Ticket 2396: file annotations can be attached to an image both
        from a freshly saved OriginalFile and from an unloaded one."""
        uuid = self.uuid()
        # create image
        img = self.new_image()
        img.setName(rstring('test2396-img-%s' % (uuid)))
        img = self.update.saveAndReturnObject(img)
        img.unload()
        format = "txt"
        binary = "12345678910"
        # Build and persist the original file metadata; the hash is left
        # as "pending" until the raw bytes are uploaded below.
        oFile = omero.model.OriginalFileI()
        oFile.setName(rstring(str("txt-name")));
        oFile.setPath(rstring(str("txt-name")));
        oFile.setSize(rlong(len(binary)));
        oFile.setHash(rstring("pending"));
        oFile.setMimetype(rstring(str(format)));
        of = self.update.saveAndReturnObject(oFile);
        # Upload the file contents through the raw file store.
        store = self.client.sf.createRawFileStore()
        store.setFileId(of.id.val);
        store.write(binary, 0, 0)
        of = store.save() # See ticket:1501
        store.close()
        # Link the file to the image via a FileAnnotation.
        fa = omero.model.FileAnnotationI()
        fa.setFile(of)
        l_ia = omero.model.ImageAnnotationLinkI()
        l_ia.setParent(img)
        l_ia.setChild(fa)
        self.update.saveObject(l_ia)
        # Alternatively, unload the file
        of = self.update.saveAndReturnObject(oFile);
        of.unload()
        store = self.client.sf.createRawFileStore()
        store.setFileId(of.id.val);
        store.write(binary, 0, 0)
        # Don't capture from save, but will be saved anyway.
        store.close()
        fa = omero.model.FileAnnotationI()
        fa.setFile(of)
        l_ia = omero.model.ImageAnnotationLinkI()
        l_ia.setParent(img)
        l_ia.setChild(fa)
        self.update.saveObject(l_ia)

    # This test is no longer valid as it should not be possible to remove
    # users from their only remaining group. It would be easy to make the
    # test pass by adding extra groups but that would defeat the purpose
    # of this test. Marking as xfail until the test has been reviewed.
    @pytest.mark.xfail(reason="Is this test still valid? See #11465")
    def test2547(self):
        """Ticket 2547: remove a user from its non-"user" group."""
        admin = self.root.sf.getAdminService()
        user = self.new_user()
        grps = admin.containedGroups(user.id.val)
        assert 2 == len(grps)
        # Group id 1 is skipped here; pick the other (non-system) group.
        non_user = [x for x in grps if x.id.val != 1][0]
        grp = self.new_group()
        admin.addGroups(user, [grp])
        admin.removeGroups(user, [non_user])
        admin.lookupExperimenters()

    def test2628(self):
        """Ticket 2628: querying session uuids — only IQuery.projection
        may return non-IObject types; findAllByQuery must fail."""
        q = self.root.sf.getQueryService()
        sql = "select s.uuid "\
            "from EventLog evl join evl.event ev join ev.session s"
        # This was never supported
        with pytest.raises((Ice.UnmarshalOutOfBoundsException, Ice.UnknownUserException)):
            q.findAllByQuery(sql, None)
        p1 = omero.sys.Parameters()
        f1 = omero.sys.Filter()
        f1.limit = rint(100)
        p1.theFilter = f1
        # Nor was this
        with pytest.raises(Ice.UnknownUserException):
            q.findAllByQuery(sql, p1)
        # Only IQuery.projection can return non-IObject types
        q.projection(sql, p1)

    @pytest.mark.xfail(reason="See ticket #11539")
    def test2952(self):
        """Ticket 2952: full-text search finds a LongAnnotation by its
        numeric value after indexing."""
        la = omero.model.LongAnnotationI()
        la.longValue = rlong(123456789)
        la = self.client.sf.getUpdateService().saveAndReturnObject(la)
        self.index(la)
        search = self.client.sf.createSearchService()
        search.onlyType("LongAnnotation")
        s = "%s" % la.longValue.val
        search.byFullText(s)
        res = search.results()
        assert la.id.val in [x.id.val for x in res]

    @pytest.mark.xfail(reason="See ticket #11539")
    def test2762(self):
        """
        Test that the page (limit/offset) settings on a ParametersI
        are properly handled by IQuery.findAllByFullText
        """
        uuid = self.uuid().replace("-","")
        tas = []
        # Create and index 15 tags sharing one unique namespace.
        for x in range(15):
            ta = omero.model.TagAnnotationI()
            ta.setNs(rstring(uuid))
            ta = self.update.saveAndReturnObject(ta)
            tas.append(ta)
            self.root.sf.getUpdateService().indexObject(ta)
        results = self.query.findAllByFullText("TagAnnotation", uuid, None)
        assert len(tas) == len(results)
        # Paging to the first 10 must cap the result count at 10.
        params = omero.sys.ParametersI()
        params.page(0, 10)
        results = self.query.findAllByFullText("TagAnnotation", uuid, params)
        assert 10 == len(results)
| jballanc/openmicroscopy | components/tools/OmeroPy/test/integration/test_tickets3000.py | Python | gpl-2.0 | 4,675 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from morphforge.morphology.core import MorphologyTree
from morphforge.morphology.exporter.morphologyexporter import MorphologyExporter
from Cheetah.Template import Template
from morphforge.core import FileIO
from morphforge.morphology.visitor import SectionIndexerWithOffsetDF
from morphforge.morphology.conversion.region_to_int_bimap import AutoRegionToIntMapTable
# Cheetah template emitting one morphology in SWC format.  Placeholders:
# $morph (the morphology, iterated segment by segment), $ids (section ->
# integer index map) and $region_type_map (segment -> SWC type integer).
# The dummy root section is written first with parent id -1.
swc_templ = """
#---------------------------------
# SWC File Generated by morphforge
# --------------------------------
### Dummy Section:
#set dummy = $morph.get_dummy_section
$ids[dummy] 0 $dummy.d_x $dummy.d_y $dummy.d_z $dummy.d_r -1
#for $seg in $morph :
$ids[seg] $region_type_map[$seg] $seg.d_x $seg.d_y $seg.d_z $seg.d_r $ids[$seg.parent]
#end for
#---------------------------------
"""
class SWCTreeWriter(object):
    """Serialises morphforge MorphologyTree objects to SWC text.

    `regionname_to_int_map` translates morphforge region names into the
    integer structure-type codes of SWC column 2; when it is None an
    AutoRegionToIntMapTable allocates codes on demand.
    """

    @classmethod
    def to_str(cls, morph=None, morphs=None, regionname_to_int_map=None):
        """Return the SWC text for one morphology (`morph`) or several
        (`morphs`); exactly one of the two keywords must be given."""
        assert (morph or morphs) and not (morph and morphs)
        if morph:
            return cls._to_str_multi(morphs=[morph],
                    regionname_to_int_map=regionname_to_int_map)
        else:
            return cls._to_str_multi(morphs=morphs,
                    regionname_to_int_map=regionname_to_int_map)

    @classmethod
    def to_file(cls, filename, morph=None, morphs=None, regionname_to_int_map=None):
        """Write one morphology (`morph`) or several (`morphs`) to
        `filename` in SWC format; exactly one of the two must be given."""
        assert (morph or morphs) and not(morph and morphs)
        if morph:
            return cls._to_file_multi(
                    morphs=[morph],
                    filename=filename,
                    regionname_to_int_map=regionname_to_int_map)
        else:
            return cls._to_file_multi(morphs=morphs, filename=filename,
                    regionname_to_int_map=regionname_to_int_map)

    @classmethod
    def _to_file_multi(cls, filename, morphs, regionname_to_int_map=None):
        # Render to a string first, then delegate the write to FileIO.
        return FileIO.write_to_file(txt=cls._to_str_multi(morphs, regionname_to_int_map=regionname_to_int_map) , filename=filename)

    @classmethod
    def _to_str_multi(cls, morphs, regionname_to_int_map=None):
        """Concatenate the SWC text of all `morphs`, keeping section ids
        unique across morphologies via a running `offset`."""
        offset = 0
        output = ''
        for morph in morphs:
            offset = offset + 1
            # Add an additional section for the dummy section:
            dummy_offset = offset
            offset = offset + 1
            # Map each real section to an integer id starting at `offset`.
            id_map = SectionIndexerWithOffsetDF(morph=morph, offset=offset)()
            id_map[morph.get_dummy_section()] = dummy_offset
            if regionname_to_int_map is None:
                regionname_to_int_map = AutoRegionToIntMapTable()
            # Sections without a region get SWC type 0 ("undefined").
            region_type_map = dict((s, 0) if not s.region else (s, regionname_to_int_map.region_name_to_int(s.region.name)) for s in morph)
            context = [{'morph': morph, 'ids': id_map,
                       'region_type_map': region_type_map}]
            new_op = Template(swc_templ, context).respond()
            output += new_op
            # Advance past every id used by this morphology (incl. dummy).
            offset += len(id_map)
        return output
# Hook the SWC writer into the generic exporter interface, so that
# MorphologyTree instances gain toSWCFile()/toSWCStr() export methods.
MorphologyExporter.register("toSWCFile", lambda filename, morphology: SWCTreeWriter.to_file(filename=filename, morph=morphology), allow_override=False, from_type=MorphologyTree)
MorphologyExporter.register("toSWCStr", lambda morphology: SWCTreeWriter.to_str(morph=morphology), allow_override=False, from_type=MorphologyTree)
| mikehulluk/morphforge | src/morphforge/morphology/exporter/export_tree_swc.py | Python | bsd-2-clause | 4,788 |
import test.support, unittest
from test.support import TESTFN, unlink
import os, sys
class CodingTest(unittest.TestCase):
def test_bad_coding(self):
module_name = 'bad_coding'
self.verify_bad_module(module_name)
def test_bad_coding2(self):
module_name = 'bad_coding2'
self.verify_bad_module(module_name)
def verify_bad_module(self, module_name):
self.assertRaises(SyntaxError, __import__, 'test.' + module_name)
path = os.path.dirname(__file__)
filename = os.path.join(path, module_name + '.py')
fp = open(filename, "rb")
bytes = fp.read()
fp.close()
self.assertRaises(SyntaxError, compile, bytes, filename, 'exec')
def test_exec_valid_coding(self):
d = {}
exec('# coding: cp949\na = 5\n', d)
self.assertEqual(d['a'], 5)
def test_file_parse(self):
# issue1134: all encodings outside latin-1 and utf-8 fail on
# multiline strings and long lines (>512 columns)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
sys.path.insert(0, ".")
filename = TESTFN + ".py"
f = open(filename, "w")
try:
f.write("# -*- coding: cp1252 -*-\n")
f.write("'''A short string\n")
f.write("'''\n")
f.write("'A very long string %s'\n" % ("X" * 1000))
f.close()
__import__(TESTFN)
finally:
f.close()
unlink(TESTFN+".py")
unlink(TESTFN+".pyc")
sys.path.pop(0)
def test_error_from_string(self):
# See http://bugs.python.org/issue6289
input = "# coding: ascii\n\N{SNOWMAN}".encode('utf-8')
try:
compile(input, "<string>", "exec")
except SyntaxError as e:
expected = "'ascii' codec can't decode byte 0xe2 in position 16: " \
"ordinal not in range(128)"
self.assertTrue(str(e).startswith(expected))
else:
self.fail("didn't raise")
def test_main():
    """Entry point used by CPython's regrtest: run the CodingTest suite."""
    test.support.run_unittest(CodingTest)

if __name__ == "__main__":
    test_main()
| mancoast/CPythonPyc_test | fail/314_test_coding.py | Python | gpl-3.0 | 2,149 |
#stdlib
import traceback
from time import sleep
#local
from logger import Logger
from utils.http import Callback
def do(func_list, metadata_class, retry_limit=None, log_name='/var/log/do.log', callback=None, **kwargs):
    '''
    Logging/retry logic wrapper.

    Runs each function in order; a function that raises is retried (with
    linear backoff) up to retry_limit times before the whole run aborts.

    Inputs:
        func_list: list of tuples [(func, (args), {kwargs})] - args vs kwargs works on type identification
        metadata_class - class (it is instantiated below), e.g. metadata.MetadataBase
        retry_limit: int, maximum retries for a given function (default: 1 attempt)
        log_name: string name of log file that do should write to
        callback: string callback json from http.Callback.to_json to execute upon func_list completion
        **kwargs: extra keyword arguments merged into every function call

    Raises:
        Exception('Maximum retries exceeded') when a function keeps failing.
    '''
    logger = Logger('do', log_name).get_logger()
    # NOTE(review): metadata_class is *called* here, so it must be a class
    # (or zero-argument factory), not an instance as the docstring once said.
    md = metadata_class()
    ##iterate through the given functions
    for f_data in func_list:
        func, args, fkwargs = _get_args_kwargs(f_data)
        f_name = '.'.join((func.__module__, func.__name__))
        # Enrich the call's kwargs with stored metadata, keyed either by an
        # explicit 'ref_id' kwarg or by the first positional argument.
        if 'ref_id' in fkwargs:
            fkwargs.update(md.get_metadata(fkwargs['ref_id']))
        elif args:
            fkwargs.update(md.get_metadata(args[0]))
        fkwargs.update(kwargs)
        logger.info('%s\tBEGIN\targs: %s\tkwargs: %s', f_name, args, fkwargs)
        retry = retry_limit if retry_limit else 1
        success = False
        for i in xrange(retry):
            # Linear backoff: 0s before the first attempt, then 30s, 60s, ...
            sleep(30*i)
            logger.info('%s\tATTEMPT %s', f_name, i+1)
            try:
                result = func(*args, **fkwargs)
                logger.info('%s\tRETURNED %s', f_name, result)
                success = True
                break
            except:
                # Broad catch is deliberate: any failure is logged, recorded
                # in the metadata store, and the call is retried.
                logger.error('%s\tRAISED AN EXCEPTION', f_name)
                logger.error(traceback.format_exc())
                md.set_error(traceback.format_exc())
        if not success:
            raise Exception('Maximum retries exceeded')
    if callback is not None:
        logger.info("Executing Callback")
        cb = Callback.from_json(callback)
        cb.execute()
        logger.info('Callback complete')
    logger.info('DONE')
def _get_args_kwargs(f_data):
    '''
    Given a tuple of a length of 1-3 return a tuple of func, args, kwargs
    Inputs:
        f_data - tuple (function, (args) or {kwargs}, {kwargs} or (args)) -- id'd off of type
    Returns:
        a tuple of func, args, kwargs
    Raises:
        Exception when the trailing chunks cannot be identified as a
        positional (list/tuple) or keyword (dict) argument container.
    '''
    func = f_data[0]
    # Accept dotted-path strings as well as callables.  BUG FIX: the
    # original tested `isinstance(func, (str, unicode))`, which raises
    # NameError on Python 3; `type(u'')` is `unicode` on Python 2 and
    # `str` on Python 3, so this works on both.
    if isinstance(func, (str, type(u''))):
        func = __get_function(func)
    assert callable(func)
    arg_chunk = f_data[1:]
    args = ()
    kwargs = {}
    if len(arg_chunk) == 0:
        pass
    elif len(arg_chunk) == 1:
        chunk = arg_chunk[0]
        if isinstance(chunk, (list, tuple)):
            args = chunk
        elif isinstance(chunk, dict):
            kwargs.update(chunk)
        else:
            raise Exception('args mapping failure: %s' % str(arg_chunk))
    elif len(arg_chunk) == 2:
        # Either order is accepted: (args, kwargs) or (kwargs, args).
        for args_item in arg_chunk:
            if isinstance(args_item, (list, tuple)) and not args:
                args = args_item
            elif isinstance(args_item, dict):
                kwargs.update(args_item)
            else:
                raise Exception('args mapping failure: %s' % str(arg_chunk))
    else:
        raise Exception('args mapping failure: %s' % str(arg_chunk))
    return func, args, kwargs
def __get_function(name):
    """Return an attribute from a dotted path name (e.g. "path.to.func").

    `name` must contain at least two dots: everything before the last two
    components is treated as the module path, followed by an object in
    that module and one of its attributes.
    """
    # BUG FIX: `import_module` was used but never imported anywhere in
    # this module; bring it in from the stdlib explicitly.
    from importlib import import_module
    module_name, obj, method = name.rsplit('.', 2)
    module = import_module(module_name)
    var = getattr(module, obj)
    return getattr(var, method)
| timeartist/ufyr | ufyr/do.py | Python | unlicense | 3,735 |
"""
Search indexes for Haystack.
"""
from haystack import indexes
from sphinxdoc.models import Document
class DocumentIndex(indexes.SearchIndex, indexes.Indexable):
    """Index for :class:`~sphinxdoc.models.Document`.
    """
    # Primary search document; its content is rendered from the search
    # template (use_template=True), so no model_attr is given here.
    text = indexes.CharField(document=True, use_template=True)
    # Document title, copied straight from the model field.
    title = indexes.CharField(model_attr='title')
    # Foreign-key id of the owning project, enabling per-project filtering.
    project = indexes.IntegerField(model_attr='project_id')

    def get_model(self):
        """Return the model class this index applies to."""
        return Document
| kamni/django-sphinxdoc | sphinxdoc/search_indexes.py | Python | mit | 455 |
# -*- coding: utf-8 -*-
"""Usage:
acli account
-h, --help
"""
from __future__ import (absolute_import, print_function, unicode_literals)
from docopt import docopt
if __name__ == '__main__':
    # docopt matches sys.argv against the usage pattern in this module's
    # docstring and returns the parsed options as a dict, which is echoed.
    print(docopt(__doc__))
| jonhadfield/acli | lib/acli/commands/account.py | Python | mit | 227 |
from __future__ import print_function
from string import Template
# ignore these, they're in Hw++ already # TODO reset Hw++ settings instead
# PDG id -> Herwig++/ThePEG particle name for the Standard Model content;
# UFO particles carrying one of these ids are skipped by thepeg_particles()
# because Herwig already defines them.  Self-conjugate bosons (g, gamma,
# Z0) and the Higgs (25, handled specially) have no antiparticle entry.
SMPARTICLES = {
    1:'d',
    2:'u',
    3:'s',
    4:'c',
    5:'b',
    6:'t', # think about this one later
    11:'e-',
    12:'nu_e',
    13:'mu-',
    14:'nu_mu',
    15:'tau-',
    16:'nu_tau',
    21:'g',
    22:'gamma',
    23:'Z0',
    24:'W+',
    -1:'dbar',
    -2:'ubar',
    -3:'sbar',
    -4:'cbar',
    -5:'bbar',
    -6:'tbar',
    -11:'e+',
    -12:'nu_ebar',
    -13:'mu+',
    -14:'nu_mubar',
    -15:'tau+',
    -16:'nu_taubar',
    -24:'W-',
}
# string.Template producing one ThePEG `create` + `setup` particle stanza;
# substitution keys come from ParticleConverter.subs().
particleT = Template(
"""
create ThePEG::ParticleData $name
# values set to 999999 are recalculated later from other model parameters
setup $name $pdg_code $name $mass $width $wcut $ctau $charge $color $spin 0
"""
)
class ParticleConverter:
    'Convert a FR particle to extract the information ThePEG needs.'

    def __init__(self,p,parmsubs,modelparameters):
        """Extract name/pdg/spin/colour/charge from the UFO particle `p`
        and resolve its mass and width through `parmsubs`.

        String-valued mass/width entries are treated as parameter names:
        derived values (_ABS, _CTAU, _WCUT) are computed and written back
        into `modelparameters`, and the attribute becomes a ${...}
        placeholder.  Numeric entries are replaced by the 999999 sentinel,
        which Herwig recalculates later (see the particleT template).
        """
        self.name = p.name
        self.pdg_code = p.pdg_code
        self.spin = p.spin
        self.color = p.color
        # ThePEG encodes colour singlets as 0, not 1.
        if self.color == 1:
            self.color = 0
        self.selfconjugate = 0
        self.mass = parmsubs[str(p.mass)]
        if type(self.mass) == str:
            value = modelparameters[self.mass]
            try:
                value = value.real
            except:
                # not a complex/number-like value; keep it as-is
                pass
            newname = '%s_ABS' % self.mass
            self.mass = '${%s}' % newname
            modelparameters[newname] = abs(value)
        else:
            try:
                self.mass = self.mass.real
            except:
                pass
            self.mass = 999999. # abs(self.mass)
        hbarc = 197.3269631e-15 # GeV mm (I hope ;-) )
        self.width = parmsubs[str(p.width)]
        if type(self.width) == str:
            width = modelparameters[self.width]
            # ctau = hbar*c / Gamma; stable particles (zero width) get 0.
            ctau = (hbarc / width) if width != 0 else 0
            newname = '%s_CTAU' % self.width
            self.ctau = '${%s}' % newname
            modelparameters[newname] = ctau
            # wcut: width cut-off, ten times the width.
            wcut = 10 * width
            newname = '%s_WCUT' % self.width
            self.wcut = '${%s}' % newname
            modelparameters[newname] = wcut
            self.width = '${%s}' % self.width
        else:
            self.ctau = 999999. # (hbarc / self.width) if self.width != 0 else 0
            self.wcut = 999999. #10.0 * self.width
            self.width = 999999. # was blank line before
        # ThePEG stores charge in units of e/3.
        self.charge = int(3 * p.charge)

    def subs(self):
        """Return the attribute dict used to fill the particleT template."""
        return self.__dict__
def check_effective_vertex(FR, p, ig):
    """Return False when the model already contains an explicit three-point
    vertex coupling particle `p` to exactly two particles with pdg code
    `ig`; True otherwise (so a generic effective vertex may be added)."""
    def _is_explicit(vertex):
        # Only three-particle vertices involving `p` are relevant.
        if len(vertex.particles) != 3 or p not in vertex.particles:
            return False
        return sum(1 for q in vertex.particles if q.pdg_code == ig) == 2
    return not any(_is_explicit(v) for v in FR.all_vertices)
def thepeg_particles(FR,parameters,modelname,modelparameters,forbidden_names,hw_higgs):
    """Generate ThePEG/Herwig input text for the non-SM particles of a UFO model.

    :param FR: loaded FeynRules/UFO model (uses FR.all_particles and, via
        check_effective_vertex, FR.all_vertices)
    :param parameters: parameter substitution map handed to ParticleConverter
    :param modelname: name used in /Herwig/<modelname>/... interface paths
    :param modelparameters: dict of derived parameters, updated in place by
        ParticleConverter
    :param forbidden_names: particle names already taken inside Herwig; such
        particles are renamed with an `_UFO` suffix
    :param hw_higgs: if True, keep Herwig's built-in SM Higgs (pdg 25)
        instead of creating it from the UFO file
    :returns: (input_text, names) — the concatenated particle and splitting
        snippets, and the list of created particle names
    """
    plist = []
    antis = {}
    names = []
    splittings = []
    done_splitting_QCD=[]
    done_splitting_QED=[]
    for p in FR.all_particles:
        # spin == -1 marks ghost fields in UFO; skip them.
        if p.spin == -1:
            continue
        # Goldstone bosons are flagged under several attribute spellings.
        gsnames = ['goldstone',
                   'goldstoneboson',
                   'GoldstoneBoson']
        def gstest(name):
            try:
                return getattr(p,name)
            except AttributeError:
                return False
        if any(map(gstest, gsnames)):
            continue
        # Standard Model particles already exist inside Herwig.
        if p.pdg_code in SMPARTICLES:
            continue
        if p.pdg_code == 25 and not hw_higgs:
            # Detach Herwig's internal Higgs mass/width machinery so the
            # UFO-defined h0 takes over.
            plist.append(
"""
set /Herwig/Particles/h0:Mass_generator NULL
set /Herwig/Particles/h0:Width_generator NULL
rm /Herwig/Masses/HiggsMass
rm /Herwig/Widths/hWidth
"""
)
        if p.name in forbidden_names:
            print('RENAMING PARTICLE',p.name,'as ',p.name+'_UFO')
            p.name +="_UFO"
        subs = ParticleConverter(p,parameters,modelparameters).subs()
        if not (p.pdg_code == 25 and hw_higgs) :
            plist.append( particleT.substitute(subs) )
        pdg, name = subs['pdg_code'], subs['name']
        names.append(name)
        # If the antiparticle was already created, pair the two; otherwise
        # register this particle for decays/showering.
        if -pdg in antis:
            plist.append( 'makeanti %s %s\n' % (antis[-pdg], name) )
        elif not (p.pdg_code == 25 and hw_higgs) :
            plist.append( 'insert /Herwig/NewPhysics/NewModel:DecayParticles 0 %s\n' % name )
            plist.append( 'insert /Herwig/Shower/ShowerHandler:DecayInShower 0 %s # %s' % (abs(pdg), name) )
        antis[pdg] = name
        selfconjugate = 1
        # SkipMe aborts the splitting-function block for unsupported spins.
        class SkipMe(Exception):
            pass
        def spin_name(s):
            # UFO spin codes: 1 = scalar, 2 = fermion, 3 = vector.
            spins = { 1 : 'Zero',
                      2 : 'Half',
                      3 : 'One' }
            if s not in spins:
                raise SkipMe()
            else:
                return spins[s]
        def col_name(c):
            cols = { 3 : 'Triplet',
                     6 : 'Sextet',
                     8 : 'Octet' }
            return cols[c]
        try:
            # QCD splitting functions
            if p.color in [3,6,8] and abs(pdg) not in done_splitting_QCD: # which colors?
                done_splitting_QCD.append(abs(pdg))
                splitname = '{name}SplitFnQCD'.format(name=p.name)
                sudname = '{name}SudakovQCD'.format(name=p.name)
                splittings.append(
"""
create Herwig::{s}{s}OneSplitFn {name}
set {name}:InteractionType QCD
set {name}:ColourStructure {c}{c}Octet
cp /Herwig/Shower/SudakovCommon {sudname}
set {sudname}:SplittingFunction {name}
do /Herwig/Shower/SplittingGenerator:AddFinalSplitting {pname}->{pname},g; {sudname}
""".format(s=spin_name(p.spin), name=splitname,
           c=col_name(p.color), pname=p.name, sudname=sudname)
)
        except SkipMe:
            pass
        # QED splitting functions
        try:
            if p.charge != 0 and abs(pdg) not in done_splitting_QED:
                done_splitting_QED.append(abs(pdg))
                splitname = '{name}SplitFnQED'.format(name=p.name)
                sudname = '{name}SudakovQED'.format(name=p.name)
                splittings.append(
"""
create Herwig::{s}{s}OneSplitFn {name}
set {name}:InteractionType QED
set {name}:ColourStructure ChargedChargedNeutral
cp /Herwig/Shower/SudakovCommon {sudname}
set {sudname}:SplittingFunction {name}
set {sudname}:Alpha /Herwig/Shower/AlphaQED
do /Herwig/Shower/SplittingGenerator:AddFinalSplitting {pname}->{pname},gamma; {sudname}
""".format(s=spin_name(p.spin), name=splitname, pname=p.name, sudname=sudname)
)
        except SkipMe:
            pass
        # Neutral colourless scalars may couple to gg/gamma-gamma only
        # through loops: register them with the generic effective vertices
        # unless an explicit tree-level vertex already exists.
        if p.charge == 0 and p.color == 1 and p.spin == 1 and not (p.pdg_code == 25 and hw_higgs) :
            if(check_effective_vertex(FR,p,21)) :
                plist.append(
"""
insert /Herwig/{ModelName}/V_GenericHGG:Bosons 0 {pname}
""".format(pname=p.name, ModelName=modelname)
)
            if(check_effective_vertex(FR,p,22)) :
                plist.append(
"""
insert /Herwig/{ModelName}/V_GenericHPP:Bosons 0 {pname}
""".format(pname=p.name, ModelName=modelname)
)
    return ''.join(plist)+''.join(splittings), names
| hep-mirrors/herwig | Models/Feynrules/python/ufo2peg/particles.py | Python | gpl-3.0 | 7,132 |
#!/usr/bin/env python
"""libPoMo.main
===============
This library contains functions that are used by PoMo.
"""
import argparse
import random
from scipy.misc import comb as choose
import libPoMo as lp
import os
import pdb
import time
# define PoMo10 states
# 58 codon labels.  NOTE(review): 58 matches the number of PoMo states
# used by probability_matrix() (4 fixed nucleotides + 6 pairs x 9
# frequency classes) — presumably these serve as state labels; confirm.
codons = ["aaa", "aac", "aag", "aat", "aca", "acc", "acg", "act",
          "aga", "agc", "agg", "agt", "ata", "atc", "atg", "att",
          "caa", "cac", "cag", "cat", "cca", "ccc", "ccg", "cct",
          "cga", "cgc", "cgg", "cgt", "cta", "ctc", "ctg", "ctt",
          "gaa", "gac", "gag", "gat", "gca", "gcc", "gcg", "gct",
          "gga", "ggc", "ggg", "ggt", "gta", "gtc", "gtg", "gtt",
          "taa", "tac", "tag", "tat", "tca", "tcc", "tcg", "tct",
          "tga", "tgc"]
# The four nucleotides, in the a/c/g/t index order used throughout.
nucs = ["A", "C", "G", "T"]
# Define mutation models.
# HyPhy batch-file snippets defining the mutation rate parameters mXY for
# each supported mutation model.  F81: one rate; HKY: transitions (ag, ct)
# get kappa, transversions mu; GTR: six reversible exchangeabilities;
# NONREV: twelve independent rates.
mutmod = {}
mutmod["F81"] = ["global mu=0.01;\n", "mac:=mu;\n", "mag:=mu;\n",
                 "mat:=mu;\n", "mca:=mu;\n", "mct:=mu;\n",
                 "mcg:=mu;\n", "mgc:=mu;\n", "mga:=mu;\n",
                 "mgt:=mu;\n", "mta:=mu;\n", "mtc:=mu;\n",
                 "mtg:=mu;\n"]
mutmod["HKY"] = ["global kappa=0.01;\n", "global mu=0.01;\n",
                 "mac:=mu;\n", "mag:=kappa;\n", "mat:=mu;\n",
                 "mca:=mu;\n", "mct:=kappa;\n", "mcg:=mu;\n",
                 "mgc:=mu;\n", "mga:=kappa;\n", "mgt:=mu;\n",
                 "mta:=mu;\n", "mtc:=kappa;\n", "mtg:=mu;\n"]
mutmod["GTR"] = ["global muac=0.01;\n", "global muag=0.01;\n",
                 "global muat=0.01;\n", "global mucg=0.01;\n",
                 "global muct=0.01;\n", "global mugt=0.01;\n",
                 "mac:=muac;\n", "mag:=muag;\n", "mat:=muat;\n",
                 "mca:=muac;\n", "mct:=muct;\n", "mcg:=mucg;\n",
                 "mgc:=mucg;\n", "mga:=muag;\n", "mgt:=mugt;\n",
                 "mta:=muat;\n", "mtc:=muct;\n", "mtg:=mugt;\n"]
mutmod["NONREV"] = ["global mac=0.01;\n", "global mag=0.01;\n",
                    "global mat=0.01;\n", "global mcg=0.01;\n",
                    "global mct=0.01;\n", "global mgt=0.01;\n",
                    "global mca=0.01;\n", "global mga=0.01;\n",
                    "global mta=0.01;\n", "global mgc=0.01;\n",
                    "global mtc=0.01;\n", "global mtg=0.01;\n"]
# Define selection models.
# HyPhy snippets defining the per-nucleotide selection coefficients sX for
# each supported selection model.  NoSel: all zero; GCvsAT: one shared
# coefficient for G and C; AllNuc: independent coefficients (A as reference).
selmod = {}
selmod["NoSel"] = ["sc := 0.0;\n", "sa := 0.0;\n", "st := 0.0;\n",
                   "sg := 0.0;\n"]
selmod["GCvsAT"] = ["global Sgc=0.0001;\n", "sc := Sgc;\n", "sa := 0.0;\n",
                    "st := 0.0;\n", "sg := Sgc;\n"]
selmod["AllNuc"] = ["global sc=0.0003;\n", "global sg=0.0003;\n",
                    "sa := 0.0;\n", "global st=0.0001;\n"]
def mutModel(mm):
    """Mutation model **type** for argparse.

    Validates that `mm` names one of the supported mutation models (the
    keys of the module-level `mutmod` dictionary) and returns it.

    :raises argparse.ArgumentTypeError: if `mm` is not a known model.
    """
    value = str(mm)
    # Membership test on the dict itself; materialising .keys() is unneeded.
    if mm not in mutmod:
        msg = "%r is not a valid mutation model" % mm
        raise argparse.ArgumentTypeError(msg)
    return value
def selModel(sm):
    """Selection model **type** for argparse.

    Validates that `sm` names one of the supported selection models (the
    keys of the module-level `selmod` dictionary) and returns it.

    :raises argparse.ArgumentTypeError: if `sm` is not a known model.
    """
    value = str(sm)
    # Membership test on the dict itself; materialising .keys() is unneeded.
    if sm not in selmod:
        msg = "%r is not a valid selection model" % sm
        raise argparse.ArgumentTypeError(msg)
    return value
def dsRatio(dsR):
    """Downsampling ratio **type** for argparse.

    Converts `dsR` to float and accepts it only if it lies in (0, 1].

    :raises argparse.ArgumentTypeError: for values outside that range.
    """
    ratio = float(dsR)
    if 0 < ratio <= 1:
        return ratio
    raise argparse.ArgumentTypeError(
        "%r is not a valid downsampling ratio" % dsR)
def setGM(gm):
    """Set variable mutation rate, if `gm` is given.

    Returns the HyPhy batch-file lines that define the mutation-rate
    category: a discrete gamma with `gm` rate classes when gm > 0,
    otherwise a constant rate of 1.
    """
    if gm <= 0:
        return ["rateCatMut := 1.0;\n"]
    gamma_line = ("category rateCatMut =(%s, EQUAL, MEAN, "
                  "GammaDist(_x_,shape,shape), "
                  "CGammaDist(_x_,shape,shape),0,1e25);\n" % gm)
    return ["global shape;\n", gamma_line]
def setGS(gs):
    """Set fixation bias, if `gs` is given.

    Returns the HyPhy batch-file lines that define the selection-rate
    category: a discrete gamma with `gs` rate classes when gs > 0,
    otherwise a constant factor of 1.
    """
    if gs <= 0:
        return ["rateCatSel := 1.0;\n"]
    gamma_line = ("category rateCatSel =(%s, EQUAL, MEAN, "
                  "GammaDist(_x_,shape2,shape2), "
                  "CGammaDist(_x_,shape2,shape2),0,1e25);\n" % gs)
    return ["global shape2;\n", gamma_line]
def a(n):
    """Calculate the Watterson's Theta coefficient.

    a(n) = sum_{k=1}^{n-1} 1/k; returns 0 for n <= 1.
    """
    return sum(1.0 / k for k in range(1, n))
def is_number(s):
    """Determine if value is an integer.

    Returns True when `int(s)` succeeds and False when it raises
    ValueError.  Other failures (e.g. TypeError for unsupported types)
    propagate, matching the original behaviour.
    """
    try:
        int(s)
    except ValueError:
        return False
    return True
def binom(s, p, n):
    """Binomial Distribution

    Calculate the binomial sampling probability of `s` successes in `n`
    trials with success probability `p` (not very efficient, but not much
    efficiency is needed with small samples).
    """
    # BUG FIX: the module-level `choose` alias comes from scipy.misc.comb,
    # which was removed in SciPy 1.0; use the maintained scipy.special.comb.
    from scipy.special import comb
    prob = comb(n, s) * p**s * (1 - p)**(n - s)
    return prob
def probability_matrix(n):
    """Create probability matrices for the HyPhy batch file.

    Emits the HyPhy definition of the (sparse) matrix that maps the 58
    PoMo states (4 fixed nucleotides + 6 nucleotide pairs x 9 polymorphic
    frequency classes) to binomial sampling probabilities for a sample of
    size `n`, followed by the matching `Model` declaration.
    """
    o = n-1
    #ignore values below this threshold (keeps the matrix sparse,
    #avoiding increase in computational demands)
    lim = 0.0001
    s = ""
    # The 6 unordered nucleotide pairs, as index pairs into a/c/g/t.
    polys = [[0, 1], [0, 2], [0, 3], [1, 2], [1, 3], [2, 3]]
    #write matrix
    s += "matrixto"+str(o+1)+" ={\n"
    # Rows for the 4 fixed states: probability 1 of sampling that
    # nucleotide, 0 everywhere else (58 columns in total).
    # NOTE: the loop variable shadows the module-level `nucs` list.
    for nucs in range(4):
        s += "{"
        for l in range(58-1):
            if l == nucs:
                s += "1.0,"
            else:
                s += "0.0,"
        s += "0.0}\n"
    # Rows for the 54 polymorphic states: pair `pol` at frequency class
    # `fre` (frequency (fre+1)/10 of the second pair member).
    for pol in range(6):
        for fre in range(9):
            s += "{"
            # First 4 columns: probability that the sample is monomorphic
            # for either member of the pair.
            for nucs in range(4):
                if nucs == polys[pol][0]:
                    val = binom(o+1, float(9-fre)/10, o+1)
                    if val > lim:
                        s += str(val)+","
                    else:
                        s += "0.0,"
                elif nucs == polys[pol][1]:
                    val = binom(o+1, float(fre+1)/10, o+1)
                    if val > lim:
                        s += str(val)+","
                    else:
                        s += "0.0,"
                else:
                    s += "0.0,"
            # Remaining 54 columns: probability of sampling each
            # polymorphic state; only the same pair (pol2 == pol) can be
            # reached.  pol2*fre2 == 40 marks the last column of the row.
            for pol2 in range(6):
                for fre2 in range(9):
                    if pol == pol2:
                        if fre2 < o:
                            val = binom(fre2+1, float(fre+1)/10, o+1)
                            if val > lim:
                                s += str(val)
                            else:
                                s += "0.0"
                        else:
                            s += "0.0"
                        if pol2*fre2 != 40:
                            s += ","
                        else:
                            s += "}\n"
                    else:
                        if pol2*fre2 != 40:
                            s += "0.0,"
                        else:
                            s += "0.0}\n"
    s += "};\n\n\n\n"
    s += "Model Mto" + str(o+1) + " = (\"matrixto" + \
         str(o+1) + "\", Freqs, EXPLICIT_FORM_MATRIX_EXPONENTIAL);\n\n"
    return s
def get_species_from_cf_headerline(line):
    """Get the number of species and the names fom a counts format header line.

    :param str line: The header line.

    :raises ValueError: if fewer than two species columns are present.

    :rtype: (int n_species, [str] sp_names)
    """
    # The first two whitespace-separated columns are chromosome and position.
    sp_names = line.split()[2:]
    n_species = len(sp_names)
    if n_species < 2:
        # BUG FIX: attach the reason to the exception instead of raising a
        # bare ValueError(), so callers that catch it still see the message.
        msg = "Error: Not sufficiently many species (<2)."
        print(msg + "\n")
        raise ValueError(msg)
    return (n_species, sp_names)
def get_data_from_cf_line(cfStr):
    """Read in the data of a single counts format line.

    For every individual/species in the counts-format stream, the four
    nucleotide counts (a, c, g, t) are converted to integers; the per
    species totals give the number of samples.

    :param cfStr CFStream: The CFStream pointing to the line to be
        read in.

    :rtype: ([int] n_samples, [[int]] data) where data[species][nucleotide]
        holds the count of nucleotide 0-3 (a, c, g, t) for that species.
    """
    n_samples = []
    data = []
    for i in range(cfStr.nIndiv):
        counts = [int(cfStr.countsL[i][j]) for j in range(4)]
        data.append(counts)
        n_samples.append(sum(counts))
    return (n_samples, data)
def read_data_write_HyPhy_input(fn, N, thresh, path_bf,
muts, mutgamma,
sels, selgamma,
PoModatafile, PoModatafile_cons,
theta=None, vb=None):
"""Read the count data and write the HyPhy input file.
The provided filename has to point to a data file in counts format
(cf. :doc:`cf <cf>`). The data will be downsampled if necessary
and the HyPhy batch and input files will be written. The number
of species, the species names, the number of species samples and
the theta value (usr_def) will be returned in a tuple.
:param str fn: Counts format file name.
:param int N: Virtual population size.
:param float thresh: Trheshold of data discard for downsampling.
:param str path_bf: Path to the HyPhy batch files
:param str muts: Mutation model (:func:`mutModel`).
:param str mutgamma: Gamma of the mutation model (:func:`setGM`).
:param str sels: Selection model (:func:`selModel`).
:param str selgamma: Gamma of selection model (:func:`setGS`).
:param str PoModatafile: Path to HyPhy input file.
:param str PoModatafile_cons: Path to HyPhy input file.
:param Boolean vb: Verbosity.
:rtype: (int n_species, [str] sp_names, [str] sp_samples, Boolean all_one,
float usr_def)
"""
# define variables
# number of species
n_species = 0
# species names
sp_names = []
# sample size of each species
sp_samples = []
# actual data; it is a 3-dimensional array sp_data[species][pos][base]
sp_data = []
# Check input file format. If format is not counts file, convert
# the file to counts format. I have decided to do this because
# for large files, a lot of memory is needed to traverse fasta
# files and the counts file type seems to be better.
# Verbose HYPHY output only with -vv or more.
if (vb is None) or (vb == 1):
vbHyphy = None
if vb is not None:
print("Starting to read input file.")
try:
cfStr = lp.cf.CFStream(fn)
except lp.cf.NotACountsFormatFileError:
print(fn + " is not in counts format.")
print("Assuming fasta file format.")
print("Convert fasta to counts.")
outFN = os.path.basename(fn).split(".")[0] + ".cf"
# absOutFN = os.path.abspath(fn).split(".")[0] + ".cf"
# pdb.set_trace()
lp.cf.fasta_to_cf(fn, outFN)
print("Created counts file:", outFN)
print("""This file will not be deleted after the run. If you want to avoid
repeated file conversions, please run PoMo with counts
files. File conversion scripts are provided with PoMo in the
scripts folder.""")
print("")
fn = outFN
cfStr = lp.cf.CFStream(fn)
# Assign species names (first two columns are Chrom and Pos).
# (n_species, sp_names) = get_species_from_cf_headerline(line)
n_species = cfStr.nIndiv
sp_names = cfStr.indivL
# Initialize the number of species samples to 0.
for i in range(n_species):
sp_data.append([])
sp_samples.append(0)
# Read in the data.
leng = 0
while True:
leng += 1
(n_samples, data) = get_data_from_cf_line(cfStr)
# Update sp_data and the number of samples.
for i in range(n_species):
sp_data[i].append(data[i])
if n_samples[i] > sp_samples[i]:
sp_samples[i] = n_samples[i]
try:
cfStr.read_next_pos()
except ValueError:
break
if vb is not None:
print("Count file has been read.")
# Sites where some species have coverage 0 are removed
to_remove = []
for i in range(leng):
total = 1
for s in range(n_species):
summ = 0
for d in range(4):
summ += sp_data[s][i][d]
if summ == 0:
total = 0
break
if total == 0:
to_remove.append(i)
summ = 0
for i in range(len(to_remove)):
for s in range(n_species):
sp_data[s].pop(to_remove[i]-summ)
summ += 1
# Debugging point to improve memory.
# pdb.set_trace()
# Now, downsample if necessary
sp_samples2 = []
for i in range(n_species):
if sp_samples[i] > N:
sp_samples2.append(N)
if (vb is not None):
print("Downsampling ", cfStr.indivL[i], ".", sep="")
else:
if (vb is not None):
print(cfStr.indivL[i], "does not need to be downsampled.")
sp_samples2.append(sp_samples[i])
advantages = {}
covered = 0
for i in range(len(sp_data[0])):
summs = []
newlims = []
cov = 1
for s in range(n_species):
summs.append(0)
newlims.append(sp_samples2[s])
for d in range(4):
summs[s] += sp_data[s][i][d]
if summs[s] < sp_samples2[s]:
newlims[s] = summs[s]
cov = 0
limkey = ""
for ne in range(len(newlims)):
limkey += (str(newlims[ne])+":")
if cov == 1:
covered += 1
elif limkey in advantages.keys():
advantages[limkey] += 1
else:
advantages[limkey] = 1
ke = list(advantages)
while float(covered)/leng < thresh:
increments = []
advs = []
for s in range(n_species):
advs.append(0)
increments.append(1)
while advs[s] == 0:
for k in range(len(ke)):
kl = ke[k].split(":")
valid = 1
for s2 in range(n_species):
if s2 != s and int(kl[s2]) < sp_samples2[s2]:
valid = 0
if valid == 1 and int(kl[s]) >= \
sp_samples2[s] - increments[s] \
and int(kl[s]) < sp_samples2[s]:
advs[s] += advantages[ke[k]]
if advs[s] == 0:
if increments[s] < sp_samples2[s] - 1:
increments[s] += 1
else:
break
max_ad = 0
max_ind = -1
for s in range(n_species):
if advs[s] > max_ad:
max_ad = advs[s]
max_ind = s
if max_ad == 0:
print("Downsampling with threshold " + str(thresh) +
" reached an empasse. "
"Please lower the threshold using option "
"--DS, change downsampling strategy, "
"or ask for assistance!\n")
exit()
sp_samples2[max_ind] = sp_samples2[max_ind] - increments[max_ind]
covered += max_ad
sp_samples = sp_samples2
# Sites where some species have not sufficient coverage are removed
to_remove = []
for i in range(len(sp_data[0])):
total = 1
for s in range(n_species):
summ = 0
for d in range(4):
summ += sp_data[s][i][d]
if summ < sp_samples[s]:
total = 0
break
if total == 0:
to_remove.append(i)
summ = 0
for i in range(len(to_remove)):
for s in range(n_species):
sp_data[s].pop(to_remove[i]-summ)
summ += 1
leng = len(sp_data[0])
print("Number of species: ", str(n_species), ".", sep="")
print("Sample sizes effectively used: ", sp_samples, ".", sep="")
if (vb is not None):
print("Names of species: ", cfStr.indivL, ".", sep="")
all_one = True
for i in range(n_species):
if sp_samples[i] != 1:
all_one = False
if sp_samples[i] > N:
print("\n\n\nWarning: the number of samples " +
str(sp_samples[i]) +
" is bigger than the virtual population size " + str(N) +
". The considered species will be downsampled to " + str(N) +
". This is usually not a problem, "
"but if you want to avoid this, "
"if possible please increase the virtual population size."
"\n\n\n")
if all_one is True:
# Check if theta was given on command line and set it
# accordingly.
if (theta is None):
usr_def = float(input("""\n\n\nAll species have a sample size of
1, therefore there is no information at the population level,
which is required by PoMo. So, please enter a guessed or otherwise
estimated value for theta (population diversity):\n"""))
else:
usr_def = theta
else:
usr_def = 0.01
if (vb is not None):
print("Theta has been set to be ", usr_def, ".", sep="")
cfStr.close()
if n_species < 2:
print("Error: cannot calculate a tree with fewer than 2 species.")
exit()
# default options
# TODO Why are they not needed
# sampling = 1 # noqa
# onlysampling = 1 # noqa
# mbin = 0 # noqa
# Writing the HyPhy batch file for PoMo
newsamfile = open("PoMo10_root_only_sampling_preliminary_used.bf",
"w")
samfile = open(path_bf + "PoMo10_root_only_sampling_preliminary.bf")
line = "\n"
while line != "/*Define global parameters*/\n":
line = samfile.readline()
linelist = line.split()
newsamfile.write(line)
for i in range(23):
line = samfile.readline()
for i in range(len(muts)):
newsamfile.write(muts[i])
for i in range(len(sels)):
newsamfile.write(sels[i])
for i in range(len(mutgamma)):
newsamfile.write(mutgamma[i])
for i in range(len(selgamma)):
newsamfile.write(selgamma[i])
while line != "/*Find Root*/\n":
line = samfile.readline()
linelist = line.split()
if len(linelist) > 1 and linelist[0] == "fprintf" \
and linelist[1] == "(stdout," and vbHyphy is None:
newsamfile.write("/*"+line.replace("\n", "")+"*/\n")
else:
newsamfile.write(line)
samples_num = []
for i in range(n_species):
if not (sp_samples[i] in samples_num):
newsamfile.write(lp.main.probability_matrix(sp_samples[i]))
samples_num.append(sp_samples[i])
newsamfile.write("\n\n\n")
line = "\n"
while line != "":
line = samfile.readline()
linelist = line.split()
if line.split("=")[0] == "\tNsamples":
newsamfile.write("\tNsamples={{\"")
for i in range(n_species-1):
newsamfile.write(str(sp_samples[i])+"\"}{\"")
newsamfile.write(str(sp_samples[n_species-1])+"\"}};\n")
elif len(linelist) > 1 and linelist[0] == "fprintf" \
and linelist[1] == "(stdout," and vbHyphy is None: # noqa
newsamfile.write("/*"+line.replace("\n", "")+"*/\n")
else:
newsamfile.write(line)
samfile.close()
newsamfile.close()
# Writing the HyPhy batch file for PoMo with NNI
newsamfile = open("PoMo10_NNI_sampling_preliminary_used.bf", "w")
samfile = open(path_bf + "PoMo10_NNI_sampling.bf")
line = "\n"
while line != "/*Define global parameters*/\n":
line = samfile.readline()
linelist = line.split()
newsamfile.write(line)
for i in range(23):
line = samfile.readline()
for i in range(len(muts)):
newsamfile.write(muts[i])
for i in range(len(sels)):
newsamfile.write(sels[i])
for i in range(len(mutgamma)):
newsamfile.write(mutgamma[i])
for i in range(len(selgamma)):
newsamfile.write(selgamma[i])
while line != "/*pre-ML*/\n":
line = samfile.readline()
linelist = line.split()
if len(linelist) > 1 and linelist[0] == "fprintf" \
and linelist[1] == "(stdout," and vbHyphy is None:
newsamfile.write("/*" + line.replace("\n", "") + "*/\n")
else:
newsamfile.write(line)
samples_num = []
for i in range(n_species):
if not (sp_samples[i] in samples_num):
newsamfile.write(lp.main.probability_matrix(sp_samples[i]))
samples_num.append(sp_samples[i])
newsamfile.write("\n\n\n")
line = "\n"
while line != "":
line = samfile.readline()
linelist = line.split()
if line.split("=")[0] == "\tNsamples":
newsamfile.write("\tNsamples={{\"")
for i in range(n_species-1):
newsamfile.write(str(sp_samples[i])+"\"}{\"")
newsamfile.write(str(sp_samples[n_species-1])+"\"}};\n")
elif len(linelist) > 1 and linelist[0] == "fprintf" \
and linelist[1] == "(stdout," and vbHyphy is None: # noqa
newsamfile.write("/*"+line.replace("\n", "")+"*/\n")
else:
newsamfile.write(line)
samfile.close()
newsamfile.close()
# creating HyPhy input file
for l in range(n_species):
PoModatafile.write(">s" + str(l+1) + "\n")
PoModatafile_cons.write(">s" + str(l+1) + "\n")
for m in range(leng):
count = sp_data[l][m]
p = count
maxcount = 0
i2 = -1
for j2 in range(4):
if p[j2] > maxcount:
i1 = j2
maxcount = p[j2]
refs3 = codons[i1]
maxcount = 0
for j2 in range(4):
if j2 != i1 and p[j2] > maxcount:
i2 = j2
maxcount = p[j2]
if i2 == -1:
refs = codons[i1]
# refs2 = codons[i1]
else:
if p[i1]+p[i2] > sp_samples[l]:
count1 = p[i1]
count2 = p[i2]
newcount1 = 0
newcount2 = 0
for j5 in range(sp_samples[l]):
num = random.random()
if num < float(count1)/(count1+count2):
newcount1 += 1
count1 = count1 - 1
else:
newcount2 += 1
count2 = count2 - 1
else:
newcount1 = p[i1]
newcount2 = p[i2]
if i1 > i2:
i3 = i1
i1 = i2
i2 = i3
newcount3 = newcount1
newcount1 = newcount2
newcount2 = newcount3
if newcount1 == sp_samples[l]:
refs = codons[i1]
# refs2 = codons[i1]
elif newcount2 == sp_samples[l]:
refs = codons[i2]
# refs2 = codons[i2]
else:
pol = 0
if i1 == 1:
pol = 3
if i1 == 2:
pol = 5
pol += (i2-(i1+1))
p1 = newcount2 - 1
pos = 4+pol*(N-1)+p1
refs = codons[pos]
PoModatafile.write(refs)
PoModatafile_cons.write(refs3)
PoModatafile.write("\n")
PoModatafile_cons.write("\n")
PoModatafile.close()
PoModatafile_cons.close()
# Debugging point if necessary.
# pdb.set_trace()
return (n_species, sp_names, sp_samples, all_one, usr_def)
def timeStr():
    """Return the current UTC time formatted as ``YYYY-MM-DD HH:MM:SS``."""
    now = time.gmtime()
    return time.strftime("%Y-%m-%d %H:%M:%S", now)
| pomo-dev/PoMo | libPoMo/main.py | Python | gpl-2.0 | 23,843 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2012:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
# Gregory Starck, g.starck@gmail.com
# Hartmut Goebel, h.goebel@goebel-consult.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
"""
This class is for looking in the configuration for auth
"""
import os
#import crypt
from shinken.basemodule import BaseModule
from shinken.log import logger
# Module declaration read by Shinken's module manager:
# 'daemons' lists which daemons are allowed to load this module,
# 'type' is the identifier it registers under in the configuration.
properties = {
    'daemons': ['webui', 'skonf'],
    'type': 'cfg_password_webui'
}
# called by the plugin manager
def get_instance(plugin):
    """Entry point called by the plugin manager: build and return one
    Cfg_Password_Webui module instance for *plugin*."""
    logger.info("[Cfg Password UI] Get an CFG/Password UI module for plugin %s" % plugin.get_name())
    return Cfg_Password_Webui(plugin)
class Cfg_Password_Webui(BaseModule):
    """Authentication module that validates a user's password against the
    one stored on the matching contact in the monitoring configuration."""

    def __init__(self, modconf):
        BaseModule.__init__(self, modconf)

    def init(self):
        """Called once when the module starts; nothing to set up here."""
        logger.info("[Cfg Password UI] Trying to initialize the CFG/Password auth")

    def load(self, app):
        """Keep a reference to the webui application for later lookups."""
        self.app = app

    def check_auth(self, user, password):
        """Return True when *password* matches the one configured for *user*."""
        contact = self.app.datamgr.get_contact(user)
        if not contact:
            # Unknown user: bail out.
            return False
        logger.info("[Cfg Password UI] User %s try to init" % user)
        # Under skonf the contact is a plain dict, not a full object.
        if isinstance(contact, dict):
            stored = contact.get('password', 'NOPASSWORDSET')
        else:
            stored = contact.password
        # A contact without a configured password must never authenticate.
        return stored != 'NOPASSWORDSET' and stored == password
| wbsavage/shinken | shinken/modules/cfg_password_ui_.py | Python | agpl-3.0 | 2,226 |
#!/usr/bin/env python
__author__ = 'ilkin safarli'
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from FrequencyTransformer import *
from sklearn.datasets import load_files
from RScore import RScore
class Classifier(RScore):
    """
    Classify text into 3 reading levels (elementary, intermediate,
    advanced) using a multinomial Naive Bayes classifier.
    Features: English word frequency and tf-idf score.
    *** All methods of RScore class can be invoked too.
    """

    def __init__(self):
        self.categories = ["elementary", "intermediate", "advanced"]
        self.count_vect = CountVectorizer()
        self.tf_transformer = TfidfTransformer(use_idf=True)
        RScore.__init__(self)

    def train(self):
        """
        Fit the Naive Bayes model on the bundled 'train' corpus.
        The fitted vectorizer, transformers and classifier are kept on
        the instance for later use by :meth:`predict`.
        """
        self.train_data = load_files(container_path='train', description=None,
                                     categories=self.categories, shuffle=True)
        counts = self.count_vect.fit_transform(self.train_data.data)
        dense_counts = counts.toarray()
        self.freq_transformer = FrequencyTransformer(self.count_vect)
        union = FeatureUnion([("frequency", self.freq_transformer),
                              ("tfidf", self.tf_transformer)])
        self.combined_features = union.fit_transform(dense_counts)
        self.classifier = MultinomialNB()
        self.classifier.fit(self.combined_features, self.train_data.target)

    def predict(self, file_name, file_type="text"):
        """
        Predict the level of a single text file or a directory of files.
        :param file_name: name of directory or text file.
        :param file_type: "text" for a single file, anything else for a directory.
        :return: pairs of (document, predicted category index)
        """
        if file_type == "text":
            documents = [self.clear_text(file_name)]
        else:
            documents = load_files(container_path=file_name, description=None,
                                   categories=self.categories, encoding="utf8").data
        dense_counts = self.count_vect.transform(documents).toarray()
        union = FeatureUnion([("frequency", self.freq_transformer),
                              ("tfidf", self.tf_transformer)])
        features = union.transform(dense_counts)
        predicted = self.classifier.predict(features)
        for doc, category in zip(documents, predicted):
            print('%r => %s' % (doc, self.train_data.target_names[category]))
        return zip(documents, predicted)
from collections import namedtuple
listeners = {'*': []}
SmykowskiMessage = namedtuple('SmykowskiMessage', ['topic', 'message'])
def publish(topic, message):
smykowski_message = SmykowskiMessage(topic=topic, message=message)
for listener in listeners[topic]:
listener(smykowski_message)
for listener in listeners['*']:
listener(smykowski_message)
def listeners_for_topic(topic):
return listeners[topic]
def register_listener(topic, callback):
if topic not in listeners.keys():
listeners[topic] = []
listeners[topic].append(callback)
| mahnve/smykowski | smykowski.py | Python | mit | 589 |
from hachoir_core.field import (MissingField, BasicFieldSet, Field, ParserError,
createRawField, createNullField, createPaddingField, FakeArray)
from hachoir_core.dict import Dict, UniqKeyError
from hachoir_core.error import HACHOIR_ERRORS
from hachoir_core.tools import lowerBound
import hachoir_core.config as config
class GenericFieldSet(BasicFieldSet):
    """
    Ordered list of fields. Use operator [] to access fields using their
    name (field names are unique in a field set, but not in the whole
    document).
    Class attributes:
    - endian: Bytes order (L{BIG_ENDIAN} or L{LITTLE_ENDIAN}). Optional if the
      field set has a parent ;
    - static_size: (optional) Size of FieldSet in bits. This attribute should
      be used in parser of constant size.
    Instance attributes/methods:
    - _fields: Ordered dictionnary of all fields, may be incomplete
      because feeded when a field is requested ;
    - stream: Input stream used to feed fields' value
    - root: The root of all field sets ;
    - __len__(): Number of fields, may need to create field set ;
    - __getitem__(): Get an field by it's name or it's path.
    And attributes inherited from Field class:
    - parent: Parent field (may be None if it's the root) ;
    - name: Field name (unique in parent field set) ;
    - value: The field set ;
    - address: Field address (in bits) relative to parent ;
    - description: A string describing the content (can be None) ;
    - size: Size of field set in bits, may need to create field set.
    Event handling:
    - "connectEvent": Connect an handler to an event ;
    - "raiseEvent": Raise an event.
    To implement a new field set, you need to:
    - create a class which inherite from FieldSet ;
    - write createFields() method using lines like:
        yield Class(self, "name", ...) ;
    - and maybe set endian and static_size class attributes.
    """
    # Number of bits parsed so far; grows each time a field is appended.
    _current_size = 0
    def __init__(self, parent, name, stream, description=None, size=None):
        """
        Constructor
        @param parent: Parent field set, None for root parser
        @param name: Name of the field, have to be unique in parent. If it ends
            with "[]", end will be replaced with "[new_id]" (eg. "raw[]"
            becomes "raw[0]", next will be "raw[1]", and then "raw[2]", etc.)
        @type name: str
        @param stream: Input stream from which data are read
        @type stream: L{InputStream}
        @param description: Optional string description
        @type description: str|None
        @param size: Size in bits. If it's None, size will be computed. You
            can also set size with class attribute static_size
        """
        BasicFieldSet.__init__(self, parent, name, stream, description, size)
        self._fields = Dict()
        self._field_generator = self.createFields()
        self._array_cache = {}
        self.__is_feeding = False
    def array(self, key):
        """Return a (cached) FakeArray view over the fields named
        "key[0]", "key[1]", etc."""
        try:
            return self._array_cache[key]
        except KeyError:
            array = FakeArray(self, key)
            self._array_cache[key] = array
            return self._array_cache[key]
    def reset(self):
        """
        Reset a field set:
        * clear fields ;
        * restart field generator ;
        * set current size to zero ;
        * clear field array count.
        But keep: name, value, description and size.
        """
        BasicFieldSet.reset(self)
        self._fields = Dict()
        self._field_generator = self.createFields()
        self._current_size = 0
        self._array_cache = {}
    def __str__(self):
        """Debug representation: path, parsed size and parsed field count."""
        return '<%s path=%s, current_size=%s, current length=%s>' % \
            (self.__class__.__name__, self.path, self._current_size, len(self._fields))
    def __len__(self):
        """
        Returns number of fields, may need to create all fields
        if it's not done yet.
        """
        if self._field_generator is not None:
            self._feedAll()
        return len(self._fields)
    def _getCurrentLength(self):
        """Number of fields already parsed (never triggers more parsing)."""
        return len(self._fields)
    current_length = property(_getCurrentLength)
    def _getSize(self):
        """Total size in bits; parses all remaining fields when unknown."""
        if self._size is None:
            self._feedAll()
        return self._size
    size = property(_getSize, doc="Size in bits, may create all fields to get size")
    def _getCurrentSize(self):
        """Bits parsed so far; only meaningful while parsing is not done."""
        assert not(self.done)
        return self._current_size
    current_size = property(_getCurrentSize)
    # True when even one more bit would not fit in the available size.
    eof = property(lambda self: self._checkSize(self._current_size + 1, True) < 0)
    def _checkSize(self, size, strict):
        """Check whether *size* bits (relative to this field set) fit in the
        nearest ancestor with a known size. Returns the number of spare bits
        (negative means overflow), 0 when an unbounded stream can provide
        *size* bits, or None when not *strict* and no limit is known."""
        field = self
        while field._size is None:
            if not field._parent:
                assert self.stream.size is None
                if not strict:
                    return None
                if self.stream.sizeGe(size):
                    return 0
                break
            size += field._address
            field = field._parent
        return field._size - size
    # Autofix policy is inherited from the root parser.
    autofix = property(lambda self: self.root.autofix)
    def _addField(self, field):
        """
        Add a field to the field set:
        * add it into _fields
        * update _current_size
        May raise a StopIteration() on error
        """
        if not issubclass(field.__class__, Field):
            raise ParserError("Field type (%s) is not a subclass of 'Field'!"
                % field.__class__.__name__)
        assert isinstance(field._name, str)
        if field._name.endswith("[]"):
            self.setUniqueFieldName(field)
        if config.debug:
            self.info("[+] DBG: _addField(%s)" % field.name)
        # required for the msoffice parser
        if field._address != self._current_size:
            self.warning("Fix address of %s to %s (was %s)" %
                (field.path, self._current_size, field._address))
            field._address = self._current_size
        ask_stop = False
        # Compute field size and check that there is enough place for it
        self.__is_feeding = True
        try:
            field_size = field.size
        except HACHOIR_ERRORS, err:
            if field.is_field_set and field.current_length and field.eof:
                self.warning("Error when getting size of '%s': %s" % (field.name, err))
                field._stopFeeding()
                ask_stop = True
            else:
                self.warning("Error when getting size of '%s': delete it" % field.name)
                self.__is_feeding = False
                raise
        self.__is_feeding = False
        # No more place?
        dsize = self._checkSize(field._address + field.size, False)
        if (dsize is not None and dsize < 0) or (field.is_field_set and field.size <= 0):
            if self.autofix and self._current_size:
                self._fixFieldSize(field, field.size + dsize)
            else:
                raise ParserError("Field %s is too large!" % field.path)
        self._current_size += field.size
        try:
            self._fields.append(field._name, field)
        except UniqKeyError, err:
            self.warning("Duplicate field name " + unicode(err))
            field._name += "[]"
            self.setUniqueFieldName(field)
            self._fields.append(field._name, field)
        if ask_stop:
            raise StopIteration()
    def _fixFieldSize(self, field, new_size):
        """Shrink *field* to *new_size* bits when it is a non-empty field set;
        otherwise drop it, close this field set at the current size and stop
        the parser (raises StopIteration)."""
        if new_size > 0:
            if field.is_field_set and 0 < field.size:
                field._truncate(new_size)
                return
        # Don't add the field <=> delete item
        if self._size is None:
            self._size = self._current_size + new_size
        self.warning("[Autofix] Delete '%s' (too large)" % field.path)
        raise StopIteration()
    def _getField(self, name, const):
        """Look up a direct child by name; when not *const*, feed the
        generator until the field appears (or parsing ends)."""
        field = Field._getField(self, name, const)
        if field is None:
            if name in self._fields:
                field = self._fields[name]
            elif self._field_generator is not None and not const:
                field = self._feedUntil(name)
        return field
    def getField(self, key, const=True):
        """Get a field by integer index, or delegate to Field.getField()
        for name/path keys. Raises MissingField when the index is out of
        range after parsing."""
        if isinstance(key, (int, long)):
            if key < 0:
                raise KeyError("Key must be positive!")
            if not const:
                self.readFirstFields(key+1)
            if len(self._fields.values) <= key:
                raise MissingField(self, key)
            return self._fields.values[key]
        return Field.getField(self, key, const)
    def _truncate(self, size):
        """Force this field set to *size* bits: drop trailing fields beyond
        the limit and replace a partially-cut trailing field by a raw field
        (or truncate it recursively when it is a field set)."""
        assert size > 0
        if size < self._current_size:
            self._size = size
            while True:
                field = self._fields.values[-1]
                if field._address < size:
                    break
                del self._fields[-1]
            self._current_size = field._address
            size -= field._address
            if size < field._size:
                if field.is_field_set:
                    field._truncate(size)
                else:
                    del self._fields[-1]
                    field = createRawField(self, size, "raw[]")
                    self._fields.append(field._name, field)
            self._current_size = self._size
        else:
            assert size < self._size or self._size is None
            self._size = size
        if self._size == self._current_size:
            self._field_generator = None
    def _deleteField(self, index):
        """Remove and return the field at *index*, shrinking current size."""
        field = self._fields.values[index]
        size = field.size
        self._current_size -= size
        del self._fields[index]
        return field
    def _fixLastField(self):
        """
        Try to fix last field when we know current field set size.
        Returns new added field if any, or None.
        """
        assert self._size is not None
        # Stop parser
        message = ["stop parser"]
        self._field_generator = None
        # If last field is too big, delete it
        while self._size < self._current_size:
            field = self._deleteField(len(self._fields)-1)
            message.append("delete field %s" % field.path)
        assert self._current_size <= self._size
        # If field size current is smaller: add a raw field
        size = self._size - self._current_size
        if size:
            field = createRawField(self, size, "raw[]")
            message.append("add padding")
            self._current_size += field.size
            self._fields.append(field._name, field)
        else:
            field = None
        message = ", ".join(message)
        self.warning("[Autofix] Fix parser error: " + message)
        assert self._current_size == self._size
        return field
    def _stopFeeding(self):
        """Finalize parsing: record the final size (or autofix a size
        mismatch) and disable the field generator. Returns the fix-up
        field added by autofix, if any."""
        new_field = None
        if self._size is None:
            if self._parent:
                self._size = self._current_size
        elif self._size != self._current_size:
            if self.autofix:
                new_field = self._fixLastField()
            else:
                raise ParserError("Invalid parser \"%s\" size!" % self.path)
        self._field_generator = None
        return new_field
    def _fixFeedError(self, exception):
        """
        Try to fix a feeding error. Returns False if error can't be fixed,
        otherwise returns new field if any, or None.
        """
        if self._size is None or not self.autofix:
            return False
        self.warning(unicode(exception))
        return self._fixLastField()
    def _feedUntil(self, field_name):
        """
        Return the field if it was found, None else
        """
        if self.__is_feeding \
        or (self._field_generator and self._field_generator.gi_running):
            self.warning("Unable to get %s (and generator is already running)"
                % field_name)
            return None
        try:
            while True:
                field = self._field_generator.next()
                self._addField(field)
                if field.name == field_name:
                    return field
        except HACHOIR_ERRORS, err:
            if self._fixFeedError(err) is False:
                raise
        except StopIteration:
            self._stopFeeding()
        return None
    def readMoreFields(self, number):
        """
        Read more number fields, or do nothing if parsing is done.
        Returns number of new added fields.
        """
        if self._field_generator is None:
            return 0
        oldlen = len(self._fields)
        try:
            for index in xrange(number):
                self._addField( self._field_generator.next() )
        except HACHOIR_ERRORS, err:
            if self._fixFeedError(err) is False:
                raise
        except StopIteration:
            self._stopFeeding()
        return len(self._fields) - oldlen
    def _feedAll(self):
        """Generate and add every remaining field, handling feed errors
        the same way as readMoreFields()."""
        if self._field_generator is None:
            return
        try:
            while True:
                field = self._field_generator.next()
                self._addField(field)
        except HACHOIR_ERRORS, err:
            if self._fixFeedError(err) is False:
                raise
        except StopIteration:
            self._stopFeeding()
    def __iter__(self):
        """
        Create a generator to iterate on each field, may create new
        fields when needed
        """
        try:
            done = 0
            while True:
                if done == len(self._fields):
                    if self._field_generator is None:
                        break
                    self._addField( self._field_generator.next() )
                for field in self._fields.values[done:]:
                    yield field
                    done += 1
        except HACHOIR_ERRORS, err:
            field = self._fixFeedError(err)
            if isinstance(field, Field):
                yield field
            elif hasattr(field, '__iter__'):
                for f in field:
                    yield f
            elif field is False:
                raise
        except StopIteration:
            field = self._stopFeeding()
            if isinstance(field, Field):
                yield field
            elif hasattr(field, '__iter__'):
                for f in field:
                    yield f
    def _isDone(self):
        return (self._field_generator is None)
    done = property(_isDone, doc="Boolean to know if parsing is done or not")
    #
    # FieldSet_SeekUtility
    #
    def seekBit(self, address, name="padding[]",
    description=None, relative=True, null=False):
        """
        Create a field to seek to specified address,
        or None if it's not needed.
        May raise an (ParserError) exception if address is invalid.
        """
        if relative:
            nbits = address - self._current_size
        else:
            nbits = address - (self.absolute_address + self._current_size)
        if nbits < 0:
            raise ParserError("Seek error, unable to go back!")
        if 0 < nbits:
            if null:
                return createNullField(self, nbits, name, description)
            else:
                return createPaddingField(self, nbits, name, description)
        else:
            return None
    def seekByte(self, address, name="padding[]", description=None, relative=True, null=False):
        """
        Same as seekBit(), but with address in byte.
        """
        return self.seekBit(address * 8, name, description, relative, null=null)
    #
    # RandomAccessFieldSet
    #
    def replaceField(self, name, new_fields):
        """Replace the existing field *name* by one or more new fields whose
        total size must exactly equal the old field's size. Raises the
        "field-replaced"/"field-inserted" events."""
        # TODO: Check in self and not self.field
        # Problem is that "generator is already executing"
        if name not in self._fields:
            raise ParserError("Unable to replace %s: field doesn't exist!" % name)
        assert 1 <= len(new_fields)
        old_field = self[name]
        total_size = sum( (field.size for field in new_fields) )
        if old_field.size != total_size:
            raise ParserError("Unable to replace %s: "
                "new field(s) hasn't same size (%u bits instead of %u bits)!"
                % (name, total_size, old_field.size))
        field = new_fields[0]
        if field._name.endswith("[]"):
            self.setUniqueFieldName(field)
        field._address = old_field.address
        if field.name != name and field.name in self._fields:
            raise ParserError(
                "Unable to replace %s: name \"%s\" is already used!"
                % (name, field.name))
        self._fields.replace(name, field.name, field)
        self.raiseEvent("field-replaced", old_field, field)
        if 1 < len(new_fields):
            index = self._fields.index(new_fields[0].name)+1
            address = field.address + field.size
            for field in new_fields[1:]:
                if field._name.endswith("[]"):
                    self.setUniqueFieldName(field)
                field._address = address
                if field.name in self._fields:
                    raise ParserError(
                        "Unable to replace %s: name \"%s\" is already used!"
                        % (name, field.name))
                self._fields.insert(index, field.name, field)
                self.raiseEvent("field-inserted", index, field)
                index += 1
                address += field.size
    def getFieldByAddress(self, address, feed=True):
        """
        Only search in existing fields
        """
        if feed and self._field_generator is not None:
            self._feedAll()
        if address < self._current_size:
            i = lowerBound(self._fields.values, lambda x: x.address + x.size <= address)
            if i is not None:
                return self._fields.values[i]
        return None
    def writeFieldsIn(self, old_field, address, new_fields):
        """
        Can only write in existing fields (address < self._current_size)
        """
        # Check size
        total_size = sum( field.size for field in new_fields )
        if old_field.size < total_size:
            raise ParserError( \
                "Unable to write fields at address %s " \
                "(too big)!" % (address))
        # Need padding before?
        replace = []
        size = address - old_field.address
        assert 0 <= size
        if 0 < size:
            padding = createPaddingField(self, size)
            padding._address = old_field.address
            replace.append(padding)
        # Set fields address
        for field in new_fields:
            field._address = address
            address += field.size
            replace.append(field)
        # Need padding after?
        size = (old_field.address + old_field.size) - address
        assert 0 <= size
        if 0 < size:
            padding = createPaddingField(self, size)
            padding._address = address
            replace.append(padding)
        self.replaceField(old_field.name, replace)
    def nextFieldAddress(self):
        """Address (in bits) at which the next parsed field will start."""
        return self._current_size
    def getFieldIndex(self, field):
        """Position of *field* inside this field set."""
        return self._fields.index(field._name)
| CouchPotato/CouchPotatoV1 | library/hachoir_core/field/generic_field_set.py | Python | gpl-3.0 | 19,175 |
'''
New Integration Test for migrate between clusters
@author: Legion
'''
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.test_lib as test_lib
import apibinding.inventory as inventory
import zstackwoodpecker.operations.resource_operations as res_ops
# Module-level woodpecker test state; not referenced elsewhere in this file.
test_obj_dict = test_state.TestStateDict()
# Suite-specific helper module resolved at runtime by the framework.
test_stub = test_lib.lib_get_test_stub()
# Driver object holding the migration helpers (and .vm) used by test()/error_cleanup().
data_migration = test_stub.DataMigration()
def test():
    """Migrate an image between backup storages (Ceph expected) and clean
    up the trash entry left by the single-image migration."""
    bs_list = res_ops.query_resource(res_ops.BACKUP_STORAGE)
    for bs in bs_list:
        if bs.type == inventory.CEPH_BACKUP_STORAGE_TYPE:
            break
    else:
        # NOTE(review): this for/else branch runs only when no Ceph BS was
        # found, yet execution falls through and the migration below still
        # runs; also `bs` is unbound here when bs_list is empty (NameError).
        # Looks like a missing skip/return -- confirm intended behavior.
        test_util.test_logger('BS is type %s, skip.' % bs.type)
    data_migration.migrate_image()
    data_migration.check_origin_image_exist()
    data_migration.clean_up_single_image_trash()
    test_util.test_pass('Cleanup Single Image Trash Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
    """Best-effort teardown invoked when test() raises: destroy the test VM
    if one was created, swallowing any cleanup failure."""
    vm = data_migration.vm
    if vm:
        try:
            vm.destroy()
        except:
            pass
| zstackio/zstack-woodpecker | integrationtest/vm/multiclusters/data_migration/test_cleanup_single_image_trash.py | Python | apache-2.0 | 1,084 |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already set it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "homepage.settings")
    # Imported here, after the env var is set, so Django picks up the settings.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| Bladowicz/homepage | manage.py | Python | mit | 251 |
# Original code from Dan Bader Real Python Youtube channel
# Modified namedtuple parameters
import collections
import multiprocessing
import os
import time
from pprint import pprint
# Immutable record type: a scientist's name and birth year.
Scientist = collections.namedtuple('Scientist', [
    'name',
    'born',
])
# Demo dataset handed to pool.map below (namedtuples are picklable,
# which multiprocessing requires to ship work to child processes).
scientists = (
    Scientist(name='alpha', born=1991),
    Scientist(name='beta', born=1978),
    Scientist(name='gamma', born=1998),
    Scientist(name='delta', born=2013),
    Scientist(name='epsilon', born=1987),
)
print(scientists, '\n')
def transform(x, current_year=2019):
    """Compute a scientist's age, simulating slow work.

    :param x: record with ``name`` and ``born`` attributes.
    :param current_year: year used as "now"; defaults to 2019 so existing
        callers keep the original behaviour (the value was previously
        hard-coded in the body).
    :return: dict with the record's ``name`` and computed ``age``.
    """
    print(f'Processing {os.getpid()} working record {x.name}')
    time.sleep(2)  # simulate slow work so the parallel speed-up is visible
    result = {'name': x.name, 'age': current_year - x.born}
    print(f'Process {os.getpid()} done processing record {x.name}')
    return result
# Guard the process-spawning code: under the 'spawn' start method (Windows,
# and the macOS default) child processes re-import this module, and an
# unguarded Pool() would recursively spawn workers. The multiprocessing
# programming guidelines require the __main__ guard for this reason.
if __name__ == '__main__':
    start = time.time()
    pool = multiprocessing.Pool()  # spawns as many processes as machine's cpu cores
    # pool = multiprocessing.Pool(processes=2) # specifying set number of processes
    # pool = multiprocessing.Pool(processes=len(scientists)) # dynamic
    result = pool.map(transform, scientists)
    # Shut the pool down cleanly instead of leaking worker processes.
    pool.close()
    pool.join()
    end = time.time()
    print(f'\nTime to complete: {end - start:.2f}s\n')
    pprint(result)
| leon-lei/learning-materials | advance/multiprocessing_practice.py | Python | mit | 1,127 |
# coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
"""
Utility script for making a phase diagram plot.
Usage: python plot_phase_diagram.py /path/to/run_data/file
"""
from gasp.post_processing.plotter import Plotter
import sys
plotter = Plotter(sys.argv[1])
plotter.plot_phase_diagram()
| henniggroup/GASP-python | gasp/scripts/plot_phase_diagram.py | Python | mit | 402 |
from django.apps import AppConfig
class AccountsConfig(AppConfig):
    """Django application configuration for the ``accounts`` app."""
    name = 'accounts'
| LCOGT/valhalla | valhalla/accounts/apps.py | Python | gpl-3.0 | 91 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from typing import Any, List, Optional
from azure.batch import models as batch_models
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.microsoft.azure.hooks.azure_batch import AzureBatchHook
# pylint: disable=too-many-instance-attributes
class AzureBatchOperator(BaseOperator):
    """
    Executes a job on Azure Batch Service
    :param batch_pool_id: A string that uniquely identifies the Pool within the Account.
    :type batch_pool_id: str
    :param batch_pool_vm_size: The size of virtual machines in the Pool
    :type batch_pool_vm_size: str
    :param batch_job_id: A string that uniquely identifies the Job within the Account.
    :type batch_job_id: str
    :param batch_task_command_line: The command line of the Task
    :type batch_task_command_line: str
    :param batch_task_id: A string that uniquely identifies the task within the Job.
    :type batch_task_id: str
    :param batch_pool_display_name: The display name for the Pool.
        The display name need not be unique
    :type batch_pool_display_name: Optional[str]
    :param batch_job_display_name: The display name for the Job.
        The display name need not be unique
    :type batch_job_display_name: Optional[str]
    :param batch_job_manager_task: Details of a Job Manager Task to be launched when the Job is started.
    :type batch_job_manager_task: Optional[batch_models.JobManagerTask]
    :param batch_job_preparation_task: The Job Preparation Task. If set, the Batch service will
        run the Job Preparation Task on a Node before starting any Tasks of that
        Job on that Compute Node. Required if batch_job_release_task is set.
    :type batch_job_preparation_task: Optional[batch_models.JobPreparationTask]
    :param batch_job_release_task: The Job Release Task. Use to undo changes to Compute Nodes
        made by the Job Preparation Task
    :type batch_job_release_task: Optional[batch_models.JobReleaseTask]
    :param batch_task_display_name:  The display name for the task.
        The display name need not be unique
    :type batch_task_display_name: Optional[str]
    :param batch_task_container_settings: The settings for the container under which the Task runs
    :type batch_task_container_settings: Optional[batch_models.TaskContainerSettings]
    :param batch_start_task: A Task specified to run on each Compute Node as it joins the Pool.
        The Task runs when the Compute Node is added to the Pool or
        when the Compute Node is restarted.
    :type batch_start_task: Optional[batch_models.StartTask]
    :param batch_max_retries: The number of times to retry this batch operation before it's
        considered a failed operation. Default is 3
    :type batch_max_retries: int
    :param batch_task_resource_files: A list of files that the Batch service will
        download to the Compute Node before running the command line.
    :type batch_task_resource_files: Optional[List[batch_models.ResourceFile]]
    :param batch_task_output_files: A list of files that the Batch service will upload
        from the Compute Node after running the command line.
    :type batch_task_output_files: Optional[List[batch_models.OutputFile]]
    :param batch_task_user_identity: The user identity under which the Task runs.
        If omitted, the Task runs as a non-administrative user unique to the Task.
    :type batch_task_user_identity: Optional[batch_models.UserIdentity]
    :param target_low_priority_nodes: The desired number of low-priority Compute Nodes in the Pool.
        This property must not be specified if enable_auto_scale is set to true.
    :type target_low_priority_nodes: Optional[int]
    :param target_dedicated_nodes: The desired number of dedicated Compute Nodes in the Pool.
        This property must not be specified if enable_auto_scale is set to true.
    :type target_dedicated_nodes: Optional[int]
    :param enable_auto_scale: Whether the Pool size should automatically adjust over time. Default is false
    :type enable_auto_scale: bool
    :param auto_scale_formula: A formula for the desired number of Compute Nodes in the Pool.
        This property must not be specified if enableAutoScale is set to false.
        It is required if enableAutoScale is set to true.
    :type auto_scale_formula: Optional[str]
    :param azure_batch_conn_id: The :ref:`Azure Batch connection id<howto/connection:azure_batch>`
    :type azure_batch_conn_id: str
    :param use_latest_verified_vm_image_and_sku: Whether to use the latest verified virtual
        machine image and sku in the batch account. Default is false.
    :type use_latest_verified_vm_image_and_sku: bool
    :param vm_publisher: The publisher of the Azure Virtual Machines Marketplace Image.
        For example, Canonical or MicrosoftWindowsServer. Required if
        use_latest_image_and_sku is set to True
    :type vm_publisher: Optional[str]
    :param vm_offer: The offer type of the Azure Virtual Machines Marketplace Image.
        For example, UbuntuServer or WindowsServer. Required if
        use_latest_image_and_sku is set to True
    :type vm_offer: Optional[str]
    :param sku_starts_with: The starting string of the Virtual Machine SKU. Required if
        use_latest_image_and_sku is set to True
    :type sku_starts_with: Optional[str]
    :param vm_sku: The name of the virtual machine sku to use
    :type vm_sku: Optional[str]
    :param vm_version: The version of the virtual machine
    :param vm_version: Optional[str]
    :param vm_node_agent_sku_id: The node agent sku id of the virtual machine
    :type vm_node_agent_sku_id: Optional[str]
    :param os_family: The Azure Guest OS family to be installed on the virtual machines in the Pool.
    :type os_family: Optional[str]
    :param os_version: The OS family version
    :type os_version: Optional[str]
    :param timeout: The amount of time to wait for the job to complete in minutes. Default is 25
    :type timeout: int
    :param should_delete_job: Whether to delete job after execution. Default is False
    :type should_delete_job: bool
    :param should_delete_pool: Whether to delete pool after execution of jobs. Default is False
    :type should_delete_pool: bool
    """

    template_fields = (
        'batch_pool_id',
        'batch_pool_vm_size',
        'batch_job_id',
        'batch_task_id',
        'batch_task_command_line',
    )
    ui_color = '#f0f0e4'

    def __init__(
        self,
        *, # pylint: disable=too-many-arguments,too-many-locals
        batch_pool_id: str,
        batch_pool_vm_size: str,
        batch_job_id: str,
        batch_task_command_line: str,
        batch_task_id: str,
        vm_publisher: Optional[str] = None,
        vm_offer: Optional[str] = None,
        sku_starts_with: Optional[str] = None,
        vm_sku: Optional[str] = None,
        vm_version: Optional[str] = None,
        vm_node_agent_sku_id: Optional[str] = None,
        os_family: Optional[str] = None,
        os_version: Optional[str] = None,
        batch_pool_display_name: Optional[str] = None,
        batch_job_display_name: Optional[str] = None,
        batch_job_manager_task: Optional[batch_models.JobManagerTask] = None,
        batch_job_preparation_task: Optional[batch_models.JobPreparationTask] = None,
        batch_job_release_task: Optional[batch_models.JobReleaseTask] = None,
        batch_task_display_name: Optional[str] = None,
        batch_task_container_settings: Optional[batch_models.TaskContainerSettings] = None,
        batch_start_task: Optional[batch_models.StartTask] = None,
        batch_max_retries: int = 3,
        batch_task_resource_files: Optional[List[batch_models.ResourceFile]] = None,
        batch_task_output_files: Optional[List[batch_models.OutputFile]] = None,
        batch_task_user_identity: Optional[batch_models.UserIdentity] = None,
        target_low_priority_nodes: Optional[int] = None,
        target_dedicated_nodes: Optional[int] = None,
        enable_auto_scale: bool = False,
        auto_scale_formula: Optional[str] = None,
        azure_batch_conn_id='azure_batch_default',
        use_latest_verified_vm_image_and_sku: bool = False,
        timeout: int = 25,
        should_delete_job: bool = False,
        should_delete_pool: bool = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.batch_pool_id = batch_pool_id
        self.batch_pool_vm_size = batch_pool_vm_size
        self.batch_job_id = batch_job_id
        self.batch_task_id = batch_task_id
        self.batch_task_command_line = batch_task_command_line
        self.batch_pool_display_name = batch_pool_display_name
        self.batch_job_display_name = batch_job_display_name
        self.batch_job_manager_task = batch_job_manager_task
        self.batch_job_preparation_task = batch_job_preparation_task
        self.batch_job_release_task = batch_job_release_task
        self.batch_task_display_name = batch_task_display_name
        self.batch_task_container_settings = batch_task_container_settings
        self.batch_start_task = batch_start_task
        self.batch_max_retries = batch_max_retries
        self.batch_task_resource_files = batch_task_resource_files
        self.batch_task_output_files = batch_task_output_files
        self.batch_task_user_identity = batch_task_user_identity
        self.target_low_priority_nodes = target_low_priority_nodes
        self.target_dedicated_nodes = target_dedicated_nodes
        self.enable_auto_scale = enable_auto_scale
        self.auto_scale_formula = auto_scale_formula
        self.azure_batch_conn_id = azure_batch_conn_id
        self.use_latest_image = use_latest_verified_vm_image_and_sku
        self.vm_publisher = vm_publisher
        self.vm_offer = vm_offer
        self.sku_starts_with = sku_starts_with
        self.vm_sku = vm_sku
        self.vm_version = vm_version
        self.vm_node_agent_sku_id = vm_node_agent_sku_id
        self.os_family = os_family
        self.os_version = os_version
        self.timeout = timeout
        self.should_delete_job = should_delete_job
        self.should_delete_pool = should_delete_pool
        self.hook = self.get_hook()

    def _check_inputs(self) -> Any:
        """Validate the mutually-exclusive / co-dependent constructor arguments.

        Raises AirflowException describing the first violated constraint.
        """
        if not self.os_family and not self.vm_publisher:
            raise AirflowException("You must specify either vm_publisher or os_family")
        if self.os_family and self.vm_publisher:
            raise AirflowException(
                "Cloud service configuration and virtual machine configuration "
                "are mutually exclusive. You must specify either of os_family and"
                " vm_publisher"
            )

        if self.use_latest_image:
            if not all(elem for elem in [self.vm_publisher, self.vm_offer]):
                raise AirflowException(
                    "If use_latest_image_and_sku is"
                    " set to True then the parameters vm_publisher, vm_offer, "
                    "must all be set. Found "
                    "vm_publisher={}, vm_offer={}".format(self.vm_publisher, self.vm_offer)
                )
        if self.vm_publisher:
            if not all([self.vm_sku, self.vm_offer, self.vm_node_agent_sku_id]):
                raise AirflowException(
                    "If vm_publisher is set, then the parameters vm_sku, vm_offer,"
                    "vm_node_agent_sku_id must be set. Found "
                    f"vm_publisher={self.vm_publisher}, vm_offer={self.vm_offer} "
                    f"vm_node_agent_sku_id={self.vm_node_agent_sku_id}, "
                    f"vm_version={self.vm_version}"
                )

        if not self.target_dedicated_nodes and not self.enable_auto_scale:
            raise AirflowException(
                "Either target_dedicated_nodes or enable_auto_scale must be set. None was set"
            )
        if self.enable_auto_scale:
            if self.target_dedicated_nodes or self.target_low_priority_nodes:
                raise AirflowException(
                    "If enable_auto_scale is set, then the parameters "
                    "target_dedicated_nodes and target_low_priority_nodes must not "
                    "be set. Found target_dedicated_nodes={},"
                    " target_low_priority_nodes={}".format(
                        self.target_dedicated_nodes, self.target_low_priority_nodes
                    )
                )
            if not self.auto_scale_formula:
                raise AirflowException("The auto_scale_formula is required when enable_auto_scale is set")
        if self.batch_job_release_task and not self.batch_job_preparation_task:
            raise AirflowException(
                "A batch_job_release_task cannot be specified without also "
                " specifying a batch_job_preparation_task for the Job."
            )
        if not all(
            [
                self.batch_pool_id,
                self.batch_job_id,
                self.batch_pool_vm_size,
                self.batch_task_id,
                self.batch_task_command_line,
            ]
        ):
            # NOTE: the original message read "missing.Please you must set all
            # the required parameters. " — malformed spacing and grammar.
            raise AirflowException(
                "Some required parameters are missing. Please set all the required parameters."
            )

    def execute(self, context: dict) -> None:
        """Create the pool, job and task, wait for completion, then clean up.

        Steps: validate inputs, configure and create the pool, wait for the
        nodes to settle, create the job, add the task, wait for the task to
        finish, and optionally delete the job and/or pool.
        """
        self._check_inputs()
        self.hook.connection.config.retry_policy = self.batch_max_retries

        pool = self.hook.configure_pool(
            pool_id=self.batch_pool_id,
            vm_size=self.batch_pool_vm_size,
            display_name=self.batch_pool_display_name,
            target_dedicated_nodes=self.target_dedicated_nodes,
            use_latest_image_and_sku=self.use_latest_image,
            vm_publisher=self.vm_publisher,
            vm_offer=self.vm_offer,
            sku_starts_with=self.sku_starts_with,
            vm_sku=self.vm_sku,
            vm_version=self.vm_version,
            vm_node_agent_sku_id=self.vm_node_agent_sku_id,
            os_family=self.os_family,
            os_version=self.os_version,
            target_low_priority_nodes=self.target_low_priority_nodes,
            enable_auto_scale=self.enable_auto_scale,
            auto_scale_formula=self.auto_scale_formula,
            start_task=self.batch_start_task,
        )
        self.hook.create_pool(pool)
        # Wait for nodes to reach complete state
        self.hook.wait_for_all_node_state(
            self.batch_pool_id,
            {
                batch_models.ComputeNodeState.start_task_failed,
                batch_models.ComputeNodeState.unusable,
                batch_models.ComputeNodeState.idle,
            },
        )
        # Create job if not already exist
        job = self.hook.configure_job(
            job_id=self.batch_job_id,
            pool_id=self.batch_pool_id,
            display_name=self.batch_job_display_name,
            job_manager_task=self.batch_job_manager_task,
            job_preparation_task=self.batch_job_preparation_task,
            job_release_task=self.batch_job_release_task,
        )
        self.hook.create_job(job)
        # Create task
        task = self.hook.configure_task(
            task_id=self.batch_task_id,
            command_line=self.batch_task_command_line,
            display_name=self.batch_task_display_name,
            container_settings=self.batch_task_container_settings,
            resource_files=self.batch_task_resource_files,
            output_files=self.batch_task_output_files,
            user_identity=self.batch_task_user_identity,
        )
        # Add task to job
        self.hook.add_single_task_to_job(job_id=self.batch_job_id, task=task)
        # Wait for tasks to complete
        self.hook.wait_for_job_tasks_to_complete(job_id=self.batch_job_id, timeout=self.timeout)
        # Clean up
        if self.should_delete_job:
            # delete job first
            self.clean_up(job_id=self.batch_job_id)
        if self.should_delete_pool:
            self.clean_up(self.batch_pool_id)

    def on_kill(self) -> None:
        """Terminate the Batch job when the Airflow task is externally killed."""
        response = self.hook.connection.job.terminate(
            job_id=self.batch_job_id, terminate_reason='Job killed by user'
        )
        self.log.info("Azure Batch job (%s) terminated: %s", self.batch_job_id, response)

    def get_hook(self) -> AzureBatchHook:
        """Create and return an AzureBatchHook."""
        return AzureBatchHook(azure_batch_conn_id=self.azure_batch_conn_id)

    def clean_up(self, pool_id: Optional[str] = None, job_id: Optional[str] = None) -> None:
        """
        Delete the given pool and job in the batch account
        :param pool_id: The id of the pool to delete
        :type pool_id: str
        :param job_id: The id of the job to delete
        :type job_id: str
        """
        if job_id:
            self.log.info("Deleting job: %s", job_id)
            self.hook.connection.job.delete(job_id)
        if pool_id:
            self.log.info("Deleting pool: %s", pool_id)
            self.hook.connection.pool.delete(pool_id)
| nathanielvarona/airflow | airflow/providers/microsoft/azure/operators/azure_batch.py | Python | apache-2.0 | 17,851 |
#!/usr/bin/env python
import Tkinter as tk
import Tkconstants as Tkc
import logging
import ttk
# Emit all log records from the GUI callbacks on the root logger.
logging.getLogger().setLevel(logging.DEBUG)
class GuiMaxAge(object):
    """
    GUI Tkinter spinbox to configure a maximum age in days.
    """

    def __init__(self, parent=None):
        parent = parent or tk.Tk()
        # Spinbox state is stored as text, as Tkinter entry widgets require.
        self.cache_days_var = tk.StringVar(parent)
        self.cache_days_var.set("0")
        self.label = tk.Label(parent, text="Days to cache package information:")
        self.label.pack(side=Tkc.LEFT)
        self.cache_days = tk.Spinbox(parent, from_=0.0, to=30.0, increment=1.0, textvariable=self.cache_days_var,
                                     validate=Tkc.ALL, format="%0.1f", validatecommand=self.validate, value=0.5)
        self.cache_days.pack(side=Tkc.LEFT)

    @property
    def days(self):
        """Current spinbox value as a float; -1.0 when the text is not numeric."""
        try:
            return float(self.cache_days_var.get())
        except (ValueError, TypeError):
            # Narrowed from a blanket ``except Exception as e`` (the binding
            # was never used); only conversion failures are expected here.
            return -1.0

    def validate(self, *args):
        # A non-numeric entry maps to -1.0 via ``days`` and therefore fails.
        return 0.0 <= self.days <= 30.0
class GuiQuery(ttk.Frame):
    """Search form with a query entry, stats/backup checkboxes and a
    cache-age spinbox; widget state is exposed through read-only properties
    and kept consistent by Tk variable traces."""
    def __init__(self, parent=None):
        ttk.Frame.__init__(self, parent, padding=2)
        # Three stacked rows: query entry, checkbox options, cache-age spinner.
        self.query_frame = ttk.Frame(parent)
        self.query_frame.pack(side=Tkc.TOP, fill=Tkc.X)
        self.options_frame = ttk.Frame(parent)
        self.options_frame.pack(side=Tkc.TOP, fill=Tkc.X)
        self.days_frame = ttk.Frame(parent)
        self.days_frame.pack(side=Tkc.TOP, fill=Tkc.X)
        # Each Tk variable traces writes ("w") to a handler that keeps the
        # widget states in sync (e.g. disabling Search for empty queries).
        self.query_var = tk.StringVar(self.query_frame)
        self.query_var.trace("w", self.on_write)
        self.stats_var = tk.IntVar(self.options_frame)
        self.stats_var.trace("w", self.on_stats_checked)
        self.backup_var = tk.IntVar(self.options_frame)
        self.backup_var.trace("w", self.on_backup_checked)
        self.days_var = tk.StringVar(self.days_frame)
        self.days_var.trace("w", self.on_days_changed)
        self.search_label = tk.Label(self.query_frame, text="Search Query:")
        self.search_label.pack(side=Tkc.LEFT)
        self.entry = tk.Entry(self.query_frame, textvariable=self.query_var, bg="white")
        self.entry.bind("<Return>", self.run_query)
        self.entry.pack(side=Tkc.LEFT, expand=True, fill=Tkc.X)
        self.button = tk.Button(self.query_frame, text="Search", command=self.run_query)
        self.button.pack(side=Tkc.LEFT)
        # Disabled until the user types a non-blank query (see on_write).
        self.button.config(state=Tkc.DISABLED)
        self.stats = tk.Checkbutton(self.options_frame, text="Collect statistics?", variable=self.stats_var)
        self.stats.pack(side=Tkc.LEFT, expand=True, fill=Tkc.X)
        self.backup = tk.Checkbutton(self.options_frame, text="Enable backup search?", variable=self.backup_var)
        self.backup.pack(side=Tkc.LEFT, expand=True, fill=Tkc.X)
        self.stats.select()
        self.age_label = tk.Label(self.days_frame, text="Days to cache package information:")
        self.age_label.pack(side=Tkc.LEFT)
        self.max_age = tk.Spinbox(self.days_frame, from_=0.0, to=30.0, increment=0.5, format="%0.3f",
                                  textvariable=self.days_var)
        self.max_age.pack(side=Tkc.LEFT)
        self.max_age.config(state="readonly")
    @property
    def query(self):
        # Raw query text as typed (may contain leading/trailing blanks).
        return self.query_var.get()
    @property
    def should_do_stats(self):
        return bool(self.stats_var.get())
    @property
    def should_do_backup(self):
        return bool(self.backup_var.get())
    @property
    def max_cache_age(self):
        return float(self.days_var.get())
    def run_query(self, event=None):
        """Collect the current form state; bound to the button and <Return>."""
        query_config = {"query": self.query,
                        "stats": self.should_do_stats,
                        "backup": self.should_do_backup,
                        "max_cache_age": self.max_cache_age}
        logging.info("Query will run with %s", query_config)
    def on_write(self, name, index, mode, var=None):
        # Trace callback signature is (name, index, mode); enable Search only
        # when the query is non-blank.
        var = var or self.query_var
        query_text = self.query
        search_state = Tkc.DISABLED if not query_text.strip() else Tkc.NORMAL
        self.button.config(state=search_state)
    def on_stats_checked(self, name, index, mode, var=None):
        # Backup search only makes sense with statistics enabled, so the
        # backup checkbox follows the stats checkbox state.
        var = var or self.stats_var
        do_stats = self.should_do_stats
        if not do_stats:
            self.backup.config(state=Tkc.DISABLED)
            self.backup_var.set(False)
        else:
            self.backup.config(state=Tkc.NORMAL)
    def on_backup_checked(self, name, index, mode, var=None):
        var = var or self.backup_var
        do_backup = self.should_do_backup
        logging.info("Backup search: %s", self.backup_var.get())
    def on_days_changed(self, name, index, mode, var=None):
        # Currently a no-op placeholder for reacting to cache-age edits.
        var = var or self.days_var
    def on_max_age_change(self, old_value, new_value):
        logging.info("Changing max age from %0.3f to %0.3f", old_value, new_value)
    def on_max_age_validate(self, old_value, new_value):
        logging.info("Validating max age (%0.3f -> %0.3f)", old_value, new_value)
    def quit(self, event=None):
        """Close the application window; bound as the WM_DELETE_WINDOW handler."""
        print "Quitting... (event: {0})".format(event)
        self.master.destroy()
class GuiLogger(logging.Handler):
    """logging.Handler that appends formatted records to a scrollable
    Tk Listbox, so log output appears inside the GUI."""
    def __init__(self, parent=None):
        logging.Handler.__init__(self)
        self.setLevel(logging.DEBUG)
        parent = parent or tk.Tk()
        self.rsb = tk.Scrollbar(parent)
        self.rsb.pack(side=Tkc.RIGHT, fill=Tkc.Y)
        self.widget = tk.Listbox(parent, yscrollcommand=self.rsb.set, bg="white")
        self.widget.pack(fill=Tkc.BOTH, expand=True)
        self.rsb.config(command=self.widget.yview)
        # Read-only until a record arrives; emit() re-enables before inserting.
        self.widget.configure(state=Tkc.DISABLED)
    def emit(self, record):
        # Enable the widget, append the record and scroll to the bottom.
        self.widget.configure(state=Tkc.NORMAL)
        self.widget.insert(Tkc.END, self.format(record) + "\n")
        self.widget.see(Tkc.END)
        # self.widget.configure(state=Tkc.DISABLED)
class MyGui(object):
    """Top-level window: stacked option / log / results panels, with a
    GuiLogger wired into the log panel."""
    # Default window title.
    TITLE = "Logging GUI"
    def __init__(self, root=None):
        self.root = root or tk.Tk()
        self.root.title(self.TITLE)
        self.options_panel = ttk.Frame(self.root)
        self.options_panel.pack(side=Tkc.TOP, fill=Tkc.BOTH, expand=True)
        self.log_panel = ttk.Frame(self.root)
        self.log_panel.pack(side=Tkc.TOP, fill=Tkc.BOTH, expand=True)
        self.results_panel = ttk.Frame(self.root)
        self.results_panel.pack(side=Tkc.TOP, fill=Tkc.BOTH, expand=True)
        self.log_box = GuiLogger(parent=self.log_panel)
        self.log_box.pack(side=Tkc.BOTTOM, fill=Tkc.BOTH, expand=True)
def main():
    """Build the search window and run the Tk main loop."""
    root = tk.Tk()
    # Hide the window while widgets are constructed to avoid flicker.
    root.withdraw()
    root.title("PyPI Pip Search with Statistics (v.0.2.0)")
    root.option_add("*tearOff", False)
    gui_query = GuiQuery(root)
    # Route the window-close button through the form's quit handler.
    root.protocol("WM_DELETE_WINDOW", gui_query.quit)
    root.deiconify()
    root.mainloop()
if __name__ == "__main__":  # pragma: no cover
    main()
| achernet/pyscripts | mygui.py | Python | lgpl-2.1 | 6,809 |
import tensorflow as tf
import numpy as np
import pandas as pd
import time
def get_step_indexes(step, batch_size, n):
    """Return the (start, stop) slice bounds for mini-batch *step*.

    The start index wraps around modulo *n*, and the stop index is
    clipped to *n*, so the final batch of an epoch may be shorter than
    *batch_size*.
    """
    start = (step * batch_size) % n
    return (start, min(start + batch_size, n))
# Hyperparameters and data layout for the MNIST dense network.
ratio = 0.7
batch_size = 1000
n_features = 784
label_index = 0
# Columns 1..784 are pixels; column 0 is the digit label.
feature_index = range(0, n_features + 1)
feature_index.remove(label_index)
n_classes = 10
learning_rate = 0.001
keep_prob = 0.98
training_epochs = 2000
display_step = 100
x_all = np.loadtxt('../data/train.csv',
                   delimiter = ',',
                   skiprows = 1,
                   dtype = np.float32,
                   usecols = range(0, n_features + 1))
# Shuffle before splitting so train/test partitions are unbiased.
np.random.shuffle(x_all)
n_total = x_all.shape[0]
split = int(ratio * n_total)
n_hidden_1 = np.int32(np.floor(n_features / 2))
n_hidden_2 = np.int32(np.floor(n_features / 2))
n_hidden_3 = np.int32(np.floor(n_features / 2))
# One-hot encode the digit labels into an (n_total, n_classes) matrix.
y_all = np.zeros((n_total, n_classes), np.float32)
digits = x_all[:, label_index]
for i, b in enumerate(digits):
    y_all[i, int(b)] = 1.0
x_train = x_all[:split, feature_index]
x_test = x_all[split:, feature_index]
y_train = y_all[:split, :]
y_test = y_all[split:, :]
n = x_train.shape[0]
# Graph inputs: feature batch, one-hot labels and dropout keep-probability.
x = tf.placeholder("float", [None, n_features])
y = tf.placeholder("float", [None, n_classes])
dropout = tf.placeholder("float")
def nn(_X, _weights, _biases, _dropout):
    """Three-hidden-layer fully connected network.

    Each hidden layer is ReLU(X.W + b) followed by dropout with keep
    probability ``_dropout``; the output layer is a plain linear map
    (softmax is applied later, inside the loss).
    """
    layer_1 = tf.nn.relu(tf.add(tf.matmul(_X, _weights['h1']), _biases['b1']))
    layer_1 = tf.nn.dropout(layer_1, _dropout)
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, _weights['h2']), _biases['b2']))
    layer_2 = tf.nn.dropout(layer_2, _dropout)
    layer_3 = tf.nn.relu(tf.add(tf.matmul(layer_2, _weights['h3']), _biases['b3']))
    layer_3 = tf.nn.dropout(layer_3, _dropout)
    return tf.matmul(layer_3, _weights['out']) + _biases['out']
# Store layers weight & bias
weights = {
    'h1': tf.Variable(tf.random_uniform([n_features, n_hidden_1], -1.0, 1.0)),
    'h2': tf.Variable(tf.random_uniform([n_hidden_1, n_hidden_2], -1.0, 1.0)),
    'h3': tf.Variable(tf.random_uniform([n_hidden_2, n_hidden_3], -1.0, 1.0)),
    # The output weight matrix must match the width of the LAST hidden
    # layer (n_hidden_3).  The original used n_hidden_2, which only worked
    # because all three hidden sizes happen to be equal.
    'out': tf.Variable(tf.random_uniform([n_hidden_3, n_classes], -1.0, 1.0))
}
biases = {
    'b1': tf.Variable(tf.random_uniform([n_hidden_1], -1.0, 1.0)),
    'b2': tf.Variable(tf.random_uniform([n_hidden_2], -1.0, 1.0)),
    'b3': tf.Variable(tf.random_uniform([n_hidden_3], -1.0, 1.0)),
    'out': tf.Variable(tf.random_uniform([n_classes], -1.0, 1.0))
}
# Construct model
pred = nn(x, weights, biases, dropout)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred, y)) # Softmax loss
tf.scalar_summary('cost', cost)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer
init = tf.initialize_all_variables()
with tf.Session() as sess:
    train_writer = tf.train.SummaryWriter('logs/', sess.graph)
    sess.run(init)
    batches = int(np.ceil(n / batch_size))
    merged = tf.merge_all_summaries()
    for epoch in xrange(training_epochs + 1):
        avg_cost = 0.0
        # One pass over the training set in mini-batches.
        for step in range(batches):
            start, stop = get_step_indexes(step, batch_size, n)
            feed = {x: x_train[start:stop, :],
                    y: y_train[start:stop, :],
                    dropout: keep_prob }
            batch_cost, _ = sess.run([cost, optimizer], feed_dict = feed)
            avg_cost += batch_cost
        # Periodically record summaries and report the mean batch cost.
        if epoch % display_step == 0 or epoch == training_epochs:
            summary = sess.run(merged, feed_dict = feed)
            train_writer.add_summary(summary, epoch)
            print 'epoch: %05d' % epoch, 'cost:', '{:.9f}'.format(avg_cost / batches)
    print("Optimization Complete")
    # Evaluate held-out accuracy with dropout disabled (keep prob 1.0).
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print "Accuracy:", accuracy.eval({x: x_test, y: y_test, dropout: 1.0})
    print("loading set for submission")
    x_test = np.loadtxt('../data/test.csv',
                        delimiter = ',',
                        skiprows = 1,
                        dtype = np.float32,
                        usecols = range(0, n_features))
    print("predicting against submission set")
    test_output = nn(x_test, weights, biases, dropout)
    output = sess.run(test_output, feed_dict = {x: x_test, dropout: keep_prob})
    # Convert one-hot scores to digit labels and pair with 1-based image ids.
    labels = np.argmax(output, 1)
    image_ids = np.array(range(1, labels.shape[0] + 1))
    results = np.transpose(np.matrix([image_ids, labels]))
    print("writing submission file")
    df = pd.DataFrame(results, columns = ('ImageId', 'Label'))
    dstring = time.strftime('%Y.%m.%d.%H.%M.%S', time.gmtime())
    filename = 'output/nn.' + dstring + '.csv'
    df.to_csv(filename, index = False)
    print("wrote " + filename)
    print("done")
| thoolihan/KaggleMNIST | Python/nn.py | Python | apache-2.0 | 4,805 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
import logging.config
import raven
from dotenv import load_dotenv
# Load configuration from a local .env file into os.environ (no-op if absent).
load_dotenv('.env')
# Environment
ENV = os.environ.get('PYTHON_ENV', 'development')
if os.environ.get('CI'):
    # Continuous-integration runs always use the testing environment.
    ENV = 'testing'
SENTRY = raven.Client(os.environ.get('SENTRY_DSN'))
# Storage
if ENV == 'testing':
    # Point at throwaway test databases so tests never touch real data.
    WAREHOUSE_URL = os.environ['TEST_WAREHOUSE_URL']
    DATABASE_URL = os.environ['TEST_DATABASE_URL']
else:
    WAREHOUSE_URL = os.environ['WAREHOUSE_URL']
    DATABASE_URL = os.environ['DATABASE_URL']
EXPLORERDB_URL = os.environ['EXPLORERDB_URL']
# Logging
def setup_syslog_handler():
    """Build a SysLogHandler from the optional LOGGING_URL env var.

    LOGGING_URL is expected as ``host:port``; when unset, the handler
    falls back to the library default address (localhost:514, UDP).
    """
    # ``logging.handlers`` is a submodule; it is only reachable here as a
    # side effect of ``import logging.config`` importing it internally.
    # Import it explicitly so this function does not depend on that detail.
    import logging.handlers
    if os.environ.get('LOGGING_URL', None):
        host, port = os.environ['LOGGING_URL'].split(':')
        handler = logging.handlers.SysLogHandler(address=(host, int(port)))
    else:
        handler = logging.handlers.SysLogHandler()
    return handler
# dictConfig schema: log to stdout at DEBUG and to syslog at INFO.
LOGGING_CONFIG = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'default': {
            'format': '%(levelname)s %(name)s: %(message)s',
        },
    },
    'handlers': {
        'default_handler': {
            'class': 'logging.StreamHandler',
            'stream': 'ext://sys.stdout',
            'level': 'DEBUG',
            'formatter': 'default',
        },
        'syslog_handler': {
            # '()' tells dictConfig to call this factory to build the handler.
            '()': setup_syslog_handler,
            'level': 'INFO',
            'formatter': 'default',
        },
    },
    'root': {
        'handlers': ['default_handler', 'syslog_handler'],
        'level': os.environ.get('LOGGING_LEVEL', 'DEBUG').upper(),
    },
}
logging.config.dictConfig(LOGGING_CONFIG)
# Service credentials below default to None when the env var is unset.
# OSF
OSF_URL = os.environ.get('OSF_URL', None)
OSF_KEY = os.environ.get('OSF_KEY', None)
OSF_NAMESPACE = os.environ.get('OSF_NAMESPACE', None)
# Amazon
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID', None)
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY', None)
AWS_S3_BUCKET = os.environ.get('AWS_S3_BUCKET', None)
AWS_S3_REGION = os.environ.get('AWS_S3_REGION', None)
AWS_S3_CUSTOM_DOMAIN = os.environ.get('AWS_S3_CUSTOM_DOMAIN')
# DocumentCloud
DOCUMENTCLOUD_USERNAME = os.environ.get('DOCUMENTCLOUD_USERNAME')
DOCUMENTCLOUD_PASSWORD = os.environ.get('DOCUMENTCLOUD_PASSWORD')
DOCUMENTCLOUD_PROJECT = os.environ.get('DOCUMENTCLOUD_PROJECT')
# PyBossa
PYBOSSA_URL = os.environ.get('PYBOSSA_URL')
PYBOSSA_API_KEY = os.environ.get('PYBOSSA_API_KEY')
PYBOSSA_PROJECT_INDICATIONS = os.environ.get('PYBOSSA_PROJECT_INDICATIONS')
# Remove sources
REMOVE_SOURCE_IDS = os.environ.get('REMOVE_SOURCE_IDS')
# Contrib
# Contributions mapping to upload
CONTRIB = {
    # Contribution ID
    '9e4f1280-41bf-11e6-8971-f99af8d5a820': {
        # Contribution type
        'csr_synopsis': [
            # Regex to extract primary_id from filename
            r'(?P<primary_id>nct\d{3,})\.pdf',
            # Hard-coded mapping for primary_id
            ('some_document.pdf', 'ISRCT12345678'),
        ],
    },
}
# This provides aliases for document_categories in order to reduce the points of
# change. Please use these aliases throughout the code.
DOCUMENT_CATEGORIES = {
    'registry_entry': 19,
    'other': 20,
    'journal_article': 21,
    'clinical_study_report': 22,
    'clinical_study_report_synopsis': 23,
    'epar_document_section': 24,
    'fda_document_segment': 25,
    'press_release_results': 26,
    'conference_abstract_results': 27,
    'report_funder': 28,
    'case_report_form': 29,
    'grant_application': 30,
    'irb_hrec_approval': 31,
    'investigator_brochure': 32,
    'consent_form': 33,
    'statistical_analysis_plan': 34,
    'trial_protocol': 35,
    'analytics_code': 36,
    'trialist_webpage': 37,
    'lay_summary_design': 38,
    'lay_summary_results': 39,
    'individual_patient_data': 40,
    'systematic_review_data': 41,
    'blog_post': 42,
    'journal_article_critique': 43,
    'systematic_review': 44,
    'review_article': 45,
    'news_article': 46,
    'press_release_trial': 47,
    'report_from_sponsor': 48,
    'journal_article_reanalysis': 49,
}
| arthurSena/processors | processors/base/config.py | Python | mit | 4,282 |
import os
import sys
from owanimo.app.error import ERROR as e
from owanimo.util import define
from owanimo.util.log import LOG as L
class AllegoryBase(object):
    """Base class for scenario runners: counts steps, records per-step
    check results and aggregates them into a result map."""

    def __init__(self, runner):
        self.runner = runner
        self.checklist = []   # outcomes of check() since the last flush()
        self.resultmap = {}   # step identifier -> list of recorded outcomes
        self._step = 0

    def step(self):
        """Advance and return the 1-based step counter."""
        self._step += 1
        return self._step

    def check(self, expect, execute, error=e.UNKNOWN):
        """Compare *execute* against *expect*, log and record the outcome.

        Returns *execute* unchanged so calls can be chained inline.
        """
        result = e.UNKNOWN
        if expect == execute:
            result = e.TRUE
            L.info("Result : %s" % result)
        else:
            result = error
            L.warning("Result : %s" % result)
        self.checklist.append(result)
        return execute

    def flush(self, step):
        """Move accumulated check results into resultmap under *step*.

        Only failures are kept; a fully-green checklist collapses to
        ``[e.TRUE]``.
        """
        # ``filter(...)`` returns a lazy iterator on Python 3, so the
        # original ``len()``/``append()`` calls would raise TypeError /
        # AttributeError there.  A list comprehension behaves identically
        # on both Python 2 and Python 3.
        flush = [x for x in self.checklist if x != e.TRUE]
        if len(flush) == 0:
            flush.append(e.TRUE)
        self.resultmap[step] = flush
        self.checklist = []

    def end(self):
        """Return the accumulated step -> results mapping."""
        return self.resultmap
| setsulla/owanimo | app/allegory.py | Python | mit | 976 |
import re
# Per-smartctl-version parsing tables: section start/end markers plus the
# regexes applied to each line of that section.
SMART_CTL = {
    '6.2':
    {
        'INFO':
        {
            'START':r'.*INFORMATION SECTION.*',
            'END':'',
            'REGEX':
            [
                r'Serial Number:\s+(?P<serial>.+)',
                r'Device Model:\s+(?P<model>.+)',
                r'Firmware Version:\s+(?P<firmware>.+)',
                r'Model Family:\s+(?P<family>.+)',
                r'User Capacity:\s+(?P<gigabytes>[,\d]+)',
                r'Rotation Rate:\s+(?P<rpm>\d+).*',
            ]
        },
        'ATTRS':
        {
            'START':r'ID#\s+ATTRIBUTE_NAME\s+FLAG\s+VALUE\s+WORST\s+THRESH\s+TYPE\s+UPDATED\s+WHEN_FAILED\s+RAW_VALUE',
            'END':r'',
            'REGEX': #Last regex has priority, and will overwrite attr data!!
            [
                r'\s*(?P<id>\d+)\s+(?P<name>\w+)\s+[0-9xa-f]{6}\s+(?P<value>\d+)\s+(?P<worst>\d+)\s+(?P<thresh>\d+)\s+(?P<type>[-\w]+)\s+(?P<updated>\w+)\s+(?P<failed>[-\w]+)\s+(?P<raw_value>\d+)\s*.*'
            ]
        }
    }
}
class SmartParse(object):
    """Parser for ``smartctl`` text output.

    Populates ``info`` (drive identity fields) and ``attrs`` (the SMART
    attribute table) according to the SMART_CTL table for *version*.
    """
    def __init__(self, text, version):
        self.text = text
        self.version = version
        self.info = {}
        self.attrs = []
        self.parse()

    def parse(self):
        """Scan the output for the INFO and ATTRS sections and parse each."""
        lines = self.text.splitlines()
        for index, line in enumerate(lines):
            if re.match(SMART_CTL[self.version]['INFO']['START'], line) is not None:
                self.parse_info(lines[index + 1:])
                # Normalise the raw byte count (e.g. "2,000,000,000") to
                # integral gigabytes.  Floor division keeps the original
                # Python 2 integer-division result on Python 3 as well.
                if 'gigabytes' in self.info:  # was dict.has_key (Python 2 only)
                    self.info['gigabytes'] = int(self.info['gigabytes'].replace(',', '')) // (1000000000)
            if re.match(SMART_CTL[self.version]['ATTRS']['START'], line) is not None:
                self.parse_attrs(lines[index + 1:])

    def parse_info(self, lines):
        """Fill ``info`` from the INFORMATION section; stop at a blank line."""
        for index, line in enumerate(lines):
            if line == '':
                return index
            for regex in SMART_CTL[self.version]['INFO']['REGEX']:
                matches = re.match(regex, line)
                if matches is not None:
                    try:
                        self.info.update(matches.groupdict())
                    except Exception:  # was a bare except; dict.update should not fail
                        print('No update')

    def parse_attrs(self, lines):
        """Fill ``attrs`` from the attribute table; stop at a blank line."""
        for index, line in enumerate(lines):
            if line == '':
                return
            attr = None
            #Last regex has priority, and will overwrite attr data!!
            for regex in SMART_CTL[self.version]['ATTRS']['REGEX']:
                matches = re.match(regex, line)
                if matches is not None:
                    attr = matches.groupdict()
                    if attr['failed'] == '-':
                        # smartctl prints '-' for "never failed".
                        attr['failed'] = None
            if attr is not None:
                self.attrs.append(attr)

    def get_pk(self):
        """Return a stable 'model:serial' key with spaces replaced by '_'."""
        return '%s:%s' % (self.info['model'].replace(' ', '_'), self.info['serial'].replace(' ', '_'))
| amschaal/maestor | maestor/parsers.py | Python | mit | 2,897 |
from SCTerminal import SCTerminal
from otp.otpbase.OTPLocalizer import CustomSCStrings
SCCustomMsgEvent = 'SCCustomMsg'
def decodeSCCustomMsg(textId):
    """Return the custom SpeedChat phrase for *textId*, or None if unknown."""
    return CustomSCStrings.get(textId)
class SCCustomTerminal(SCTerminal):
    """SpeedChat terminal node for a fixed 'custom' phrase.

    ``textId`` indexes into CustomSCStrings; selecting the node broadcasts
    the id (not the text) via an SCCustomMsgEvent.
    """
    def __init__(self, textId):
        SCTerminal.__init__(self)
        self.textId = textId
        self.text = CustomSCStrings[self.textId]
    def handleSelect(self):
        SCTerminal.handleSelect(self)
        # Receivers decode the id back to text with decodeSCCustomMsg().
        messenger.send(self.getEventName(SCCustomMsgEvent), [self.textId])
| ksmit799/Toontown-Source | otp/speedchat/SCCustomTerminal.py | Python | mit | 522 |
from io import BytesIO
from django.core.servers.basehttp import ServerHandler
from django.utils.unittest import TestCase
from django.utils.six.moves import StringIO
#
# Tests for #9659: wsgi.file_wrapper in the builtin server.
# We need to mock a couple of of handlers and keep track of what
# gets called when using a couple kinds of WSGI apps.
#
class DummyHandler(object):
    """Minimal stand-in for a request handler; discards request logging."""
    def log_request(*args, **kwargs):
        # No explicit 'self': the instance simply lands in *args. Intentional no-op.
        pass
class FileWrapperHandler(ServerHandler):
    """ServerHandler that records whether sendfile() was used for the response."""
    def __init__(self, *args, **kwargs):
        ServerHandler.__init__(self, *args, **kwargs)
        self.request_handler = DummyHandler()
        # Flag inspected by WSGIFileWrapperTests below.
        self._used_sendfile = False
    def sendfile(self):
        self._used_sendfile = True
        return True
def wsgi_app(environ, start_response):
    """Minimal WSGI application returning a single plain-text body chunk."""
    headers = [('Content-Type', 'text/plain')]
    start_response('200 OK', headers)
    return [b'Hello World!']
def wsgi_app_file_wrapper(environ, start_response):
    """WSGI app that serves its body through the server-provided file_wrapper."""
    start_response('200 OK', [('Content-Type', 'text/plain')])
    wrapper = environ['wsgi.file_wrapper']
    return wrapper(StringIO('foo'))
class WSGIFileWrapperTests(TestCase):
    """
    Test that the wsgi.file_wrapper works for the built-in server.
    """
    def test_file_wrapper_uses_sendfile(self):
        # An app returning a file_wrapper result should route through sendfile().
        env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
        err = StringIO()
        handler = FileWrapperHandler(None, StringIO(), err, env)
        handler.run(wsgi_app_file_wrapper)
        self.assertTrue(handler._used_sendfile)
    def test_file_wrapper_no_sendfile(self):
        # A plain iterable response must be written normally, not via sendfile().
        env = {'SERVER_PROTOCOL': 'HTTP/1.0'}
        err = BytesIO()
        handler = FileWrapperHandler(None, BytesIO(), err, env)
        handler.run(wsgi_app)
        self.assertFalse(handler._used_sendfile)
        self.assertEqual(handler.stdout.getvalue().splitlines()[-1], b'Hello World!')
| vsajip/django | tests/regressiontests/builtin_server/tests.py | Python | bsd-3-clause | 1,777 |
# -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import unittest
from datetime import date
import holidays
# NOTE(review): removed stray module-level no-op expression `date(1950, 6, 19)`
# that was left over at import time and had no effect.
class TestChina(unittest.TestCase):
    """Spot-check Chinese public holidays across several policy eras
    (1950 rules, 2005 rules, 2008+ rules via 2010/2015 samples)."""
    # https: // en.wikipedia.org / wiki / Public_holidays_in_China
    def setUp(self):
        self.holidays = holidays.CN()
    def test_1950(self):
        self.assertIn(date(1950, 1, 1), self.holidays)  # New Year's Day
        # Chinese New Year day 1
        self.assertIn(date(1950, 2, 17), self.holidays)
        # Chinese New Year day 2
        self.assertIn(date(1950, 2, 18), self.holidays)
        # Chinese New Year day 3
        self.assertIn(date(1950, 2, 19), self.holidays)
        # Labour Day day 1
        self.assertIn(date(1950, 5, 1), self.holidays)
        # NO Labour Day day 2
        self.assertNotIn(date(1950, 5, 2), self.holidays)
        # NO Labour Day day 3
        self.assertNotIn(date(1950, 5, 3), self.holidays)
        # NO Dragon Boat Festival
        self.assertNotIn(date(1950, 6, 19), self.holidays)
        # NO Mid-Autumn Festival
        self.assertNotIn(date(1950, 9, 26), self.holidays)
        self.assertIn(date(1950, 10, 1), self.holidays)  # National Day day 1
        self.assertIn(date(1950, 10, 2), self.holidays)  # National Day day 2
        # NO National Day day 3
        self.assertNotIn(date(1950, 10, 3), self.holidays)
    def test_2005(self):
        # New Year's Day
        self.assertIn(date(2005, 1, 1), self.holidays)
        # Chinese New Year day 1
        self.assertIn(date(2005, 2, 9), self.holidays)
        # Chinese New Year day 2
        self.assertIn(date(2005, 2, 10), self.holidays)
        # Chinese New Year day 3
        self.assertIn(date(2005, 2, 11), self.holidays)
        # NO Tomb-Sweeping Day
        self.assertNotIn(date(2005, 4, 5), self.holidays)
        # Labour Day day 1
        self.assertIn(date(2005, 5, 1), self.holidays)
        # Labour Day day 2
        self.assertIn(date(2005, 5, 2), self.holidays)
        # Labour Day day 3
        self.assertIn(date(2005, 5, 3), self.holidays)
        self.assertNotIn(
            date(2005, 6, 11), self.holidays
        ) # NO Dragon Boat Festival
        # NO Mid-Autumn Festival
        self.assertNotIn(date(2005, 9, 18), self.holidays)
        # National Day day 1
        self.assertIn(date(2005, 10, 1), self.holidays)
        # National Day day 2
        self.assertIn(date(2005, 10, 2), self.holidays)
        # National Day day 3
        self.assertIn(date(2005, 10, 3), self.holidays)
    def test_chinese_new_year_2010(self):
        # Chinese New Year days 1-3 (13-15 Feb 2010).
        self.assertIn(date(2010, 2, 13), self.holidays)
        self.assertIn(date(2010, 2, 14), self.holidays)
        self.assertIn(date(2010, 2, 15), self.holidays)
    def test_2015(self):
        # New Year's Day
        self.assertIn(date(2015, 1, 1), self.holidays)
        # Chinese New Year day 1
        self.assertIn(
            date(2015, 2, 19), self.holidays
        )
        # Chinese New Year day 2
        self.assertIn(date(2015, 2, 20), self.holidays)
        # Chinese New Year day 3
        self.assertIn(date(2015, 2, 21), self.holidays)
        # Tomb-Sweeping Day
        self.assertIn(date(2015, 4, 5), self.holidays)
        # Labour Day
        self.assertIn(date(2015, 5, 1), self.holidays)
        # NO Labour Day day 2
        self.assertNotIn(date(2015, 5, 2), self.holidays)
        # NO Labour Day day 3
        self.assertNotIn(date(2015, 5, 3), self.holidays)
        self.assertIn(date(2015, 6, 20), self.holidays)  # Dragon Boat Festival
        self.assertIn(date(2015, 9, 27), self.holidays)  # Mid-Autumn Festival
        self.assertIn(date(2015, 10, 1), self.holidays)  # National Day day 1
        self.assertIn(date(2015, 10, 2), self.holidays)  # National Day day 2
        self.assertIn(date(2015, 10, 3), self.holidays)  # National Day day 3
| dr-prodigy/python-holidays | test/countries/test_china.py | Python | mit | 4,290 |
from django.conf.urls import url
from . import views
# URL namespace, e.g. reverse('revisions:revision').
app_name = 'revisions'
urlpatterns = [
    # Class-based views defined in revisions/views.py.
    url(r'^revision/$', views.RevisionView.as_view(), name = 'revision'),
    url(r'^mail/$', views.MailView.as_view(), name = 'mail'),
]
| pelgoros/kwyjibo | revisions/urls.py | Python | gpl-3.0 | 232 |
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
# Import stuff for working with dates
from datetime import datetime
from matplotlib.dates import date2num
# Hits/month, pages, and gigabytes served.
# To get the Google analytics data:
# .) Go to analytics.google.com.
# .) There should be (as of July 2017) a "Google Analytics Home" box at the top left of the dashboard.
# .) Click the "Audience Overview" link at the bottom right corner of this box.
# .) Adjust date range to previous month.
# .) Record the number of "Pageviews" in the "Hits" column below.
# The data below are from the libmesh.github.io site, which uses the
# number UA-24978333-1.
#
# Note: we do not have control over the analytics for the
# https://www.github.com/libMesh/libmesh page. If you look at the page
# source, analytics code UA-3769691-2 appears, but if I try to add
# this property in my analytics account, Google assigns me the number
# UA-24978333-{2,3,...} (where the last digit may change depending on
# how many times you tried to add/remove this property in the
# Analytics Dashboard) and there does not seem to be a straightforward
# way of inserting this code into the source. There have been some
# README.md based hacks for doing this in the past, but I don't think
# they are particularly reliable...
# Hits, pages, GB served
data = [
# 'Jan 2003', 616, 616, 0
# 'Feb 2003', 2078, 2078, 0,
# 'Mar 2003', 3157, 3157, 0,
# 'Apr 2003', 7800, 7800, 0,
# 'May 2003', 4627, 4627, 0,
# 'Jun 2003', 6156, 6156, 0,
# 'Jul 2003', 6389, 6389, 0,
# 'Aug 2003', 10136, 10136, 0,
# 'Sep 2003', 8871, 8871, 0,
# 'Oct 2003', 9703, 9703, 0,
# 'Nov 2003', 9802, 9802, 0,
# 'Dec 2003', 9123, 9123, 0,
# 'Jan 2004', 13599, 13599, 0,
# 'Feb 2004', 11018, 11018, 0,
# 'Mar 2004', 11713, 11713, 0,
# 'Apr 2004', 14995, 14995, 0,
# 'May 2004', 11285, 11285, 0,
# 'Jun 2004', 12974, 12974, 0,
# 'Jul 2004', 12939, 12939, 0,
# 'Aug 2004', 9708, 9708, 0,
# 'Sep 2004', 7994, 7994, 0,
# 'Oct 2004', 6920, 6920, 0,
# 'Nov 2004', 10261, 10261, 0,
# 'Dec 2004', 7483, 7483, 0,
# 'Jan 2005', 3184, 3184, 0,
# 'Feb 2005', 37733, 14077, .4373,
# 'Mar 2005', 43927, 16408, .5637,
# 'Apr 2005', 29792, 8518, .2890,
# 'May 2005', 51288, 17629, .5689,
# 'Jun 2005', 40617, 16599, .5379,
# 'Jul 2005', 29944, 10006, .3363,
# 'Aug 2005', 39592, 14556, .4577,
# 'Sep 2005', 57638, 14666, .4881,
# 'Oct 2005', 48336, 17976, .5749,
# 'Nov 2005', 49563, 15308, .5810,
# 'Dec 2005', 90863, 40736, .9415,
# 'Jan 2006', 46723, 13487, .5662,
# 'Feb 2006', 62285, 26567, .8229,
# 'Mar 2006', 47446, 14711, .6534,
# 'Apr 2006', 90314, 29635, .9762,
# 'May 2006', 68209, 20998, .7949,
# 'Jun 2006', 50495, 17128, .6881,
# 'Jul 2006', 42387, 10958, .6016,
# 'Aug 2006', 55658, 11793, .6174,
# 'Sep 2006', 54919, 20591, .9056,
# 'Oct 2006', 52916, 17944, .9015,
# 'Nov 2006', 55382, 19833, .9439,
# 'Dec 2006', 54265, 22688, .9162,
# 'Jan 2007', 53813, 19881, 1.0 ,
# 'Feb 2007', 52434, 17920, .9472,
# 'Mar 2007', 61530, 21172, 1.2,
# 'Apr 2007', 125578, 77539, 1.3,
# 'May 2007', 182764, 129596, 1.6,
# 'Jun 2007', 115730, 38571, 1.7,
# 'Jul 2007', 121054, 42757, 1.8,
# 'Aug 2007', 81192, 28187, 1.3,
# 'Sep 2007', 143553, 39734, 2.3,
# 'Oct 2007', 110449, 42111, 2.4,
# 'Nov 2007', 128307, 57851, 2.3,
# 'Dec 2007', 80584, 42631, 2.0,
# 'Jan 2008', 69623, 34155, 2.0,
# 'Feb 2008', 144881, 111751, 2.5,
# 'Mar 2008', 69801, 29211, 1.9,
# 'Apr 2008', 74023, 31149, 2.0,
# 'May 2008', 63123, 23277, 1.8,
# 'Jun 2008', 66055, 25418, 2.1,
# 'Jul 2008', 60046, 22082, 2.0,
# 'Aug 2008', 60206, 24543, 2.0,
# 'Sep 2008', 53057, 18635, 1.6,
# 'Oct 2008', 64828, 27042, 2.1,
# 'Nov 2008', 72406, 29767, 2.3,
# 'Dec 2008', 76248, 31690, 2.3,
# 'Jan 2009', 73002, 29744, 2.0,
# 'Feb 2009', 70801, 29156, 2.1,
# 'Mar 2009', 78200, 31139, 2.1,
# 'Apr 2009', 70888, 26182, 1.7,
# 'May 2009', 67263, 26210, 1.8,
# 'Jun 2009', 73146, 31328, 2.6,
# 'Jul 2009', 77828, 33711, 2.4,
# 'Aug 2009', 64378, 28542, 1.9,
# 'Sep 2009', 76167, 33484, 2.2,
# 'Oct 2009', 95727, 41062, 2.8,
# 'Nov 2009', 88042, 38869, 2.5,
# 'Dec 2009', 76148, 37609, 2.3,
# 'Jan 2010', 268856, 45983, 3.2,
# 'Feb 2010', 208210, 42680, 3.0,
# 'Mar 2010', 116263, 42660, 2.6,
# 'Apr 2010', 102493, 32942, 2.4,
# 'May 2010', 117023, 37107, 2.5,
# 'Jun 2010', 128589, 38019, 2.5,
# 'Jul 2010', 87183, 34026, 2.2,
# 'Aug 2010', 99161, 33199, 2.5,
# 'Sep 2010', 81657, 32305, 2.5,
# 'Oct 2010', 98236, 42091, 3.4,
# 'Nov 2010', 115603, 48695, 3.4,
# 'Dec 2010', 105030, 45570, 3.4,
# 'Jan 2011', 133476, 43549, 3.1,
# 'Feb 2011', 34483, 15002, 1.1,
# 'Mar 2011', 0, 0, 0.0,
# 'Apr 2011', 0, 0, 0.0,
# 'May 2011', 0, 0, 0.0,
# 'Jun 2011', 0, 0, 0.0,
# 'Jul 2011', 0, 0, 0.0,
'Aug 2011', 10185, 0, 0.0, # New "Pageviews" data from google analytics, does not seem comparable to sf.net pagehits data
'Sep 2011', 10305, 0, 0.0,
'Oct 2011', 14081, 0, 0.0,
'Nov 2011', 13397, 0, 0.0,
'Dec 2011', 13729, 0, 0.0,
'Jan 2012', 11050, 0, 0.0,
'Feb 2012', 12779, 0, 0.0,
'Mar 2012', 12970, 0, 0.0,
'Apr 2012', 13051, 0, 0.0,
'May 2012', 11857, 0, 0.0,
'Jun 2012', 12584, 0, 0.0,
'Jul 2012', 12995, 0, 0.0,
'Aug 2012', 13204, 0, 0.0,
'Sep 2012', 13170, 0, 0.0,
'Oct 2012', 13335, 0, 0.0,
'Nov 2012', 11337, 0, 0.0,
'Dec 2012', 10108, 0, 0.0, # libmesh switched to github on December 10, 2012
'Jan 2013', 13029, 0, 0.0,
'Feb 2013', 10420, 0, 0.0,
'Mar 2013', 13400, 0, 0.0,
'Apr 2013', 14416, 0, 0.0,
'May 2013', 13875, 0, 0.0,
'Jun 2013', 13747, 0, 0.0,
'Jul 2013', 14019, 0, 0.0,
'Aug 2013', 10828, 0, 0.0,
'Sep 2013', 9969, 0, 0.0,
'Oct 2013', 13083, 0, 0.0,
'Nov 2013', 12938, 0, 0.0,
'Dec 2013', 9079, 0, 0.0,
'Jan 2014', 9736, 0, 0.0,
'Feb 2014', 11824, 0, 0.0,
'Mar 2014', 10861, 0, 0.0,
'Apr 2014', 12711, 0, 0.0,
'May 2014', 11177, 0, 0.0,
'Jun 2014', 10738, 0, 0.0,
'Jul 2014', 10349, 0, 0.0,
'Aug 2014', 8877, 0, 0.0,
'Sep 2014', 9226, 0, 0.0,
'Oct 2014', 8052, 0, 0.0, # Google analytics number moved over to libmesh.github.io in Oct 2014
'Nov 2014', 9243, 0, 0.0,
'Dec 2014', 10714, 0, 0.0,
'Jan 2015', 11508, 0, 0.0,
'Feb 2015', 11278, 0, 0.0,
'Mar 2015', 13305, 0, 0.0,
'Apr 2015', 12347, 0, 0.0,
'May 2015', 11368, 0, 0.0,
'Jun 2015', 11203, 0, 0.0,
'Jul 2015', 10419, 0, 0.0,
'Aug 2015', 11282, 0, 0.0,
'Sep 2015', 13535, 0, 0.0,
'Oct 2015', 12912, 0, 0.0,
'Nov 2015', 13894, 0, 0.0,
'Dec 2015', 11694, 0, 0.0,
'Jan 2016', 11837, 0, 0.0,
'Feb 2016', 14102, 0, 0.0,
'Mar 2016', 13212, 0, 0.0,
'Apr 2016', 13355, 0, 0.0,
'May 2016', 12486, 0, 0.0,
'Jun 2016', 13973, 0, 0.0,
'Jul 2016', 10688, 0, 0.0,
'Aug 2016', 10048, 0, 0.0,
'Sep 2016', 10847, 0, 0.0,
'Oct 2016', 10984, 0, 0.0,
'Nov 2016', 12233, 0, 0.0,
'Dec 2016', 11430, 0, 0.0,
'Jan 2017', 10327, 0, 0.0,
'Feb 2017', 11039, 0, 0.0,
'Mar 2017', 12986, 0, 0.0,
'Apr 2017', 9773, 0, 0.0,
'May 2017', 10880, 0, 0.0,
'Jun 2017', 9179, 0, 0.0,
'Jul 2017', 8344, 0, 0.0,
'Aug 2017', 8617, 0, 0.0,
'Sep 2017', 8576, 0, 0.0,
'Oct 2017', 11255, 0, 0.0,
'Nov 2017', 10362, 0, 0.0,
]
# The flat `data` list interleaves (date string, hits, pages, GB) records,
# so every 4th entry starting at index 1 is a hit count.  Rescale to
# thousands for the y-axis.
n_hits_month = np.divide(data[1::4], 1000.)

# Date strings occupy every 4th entry starting at index 0; convert each
# "Mon YYYY" string to a matplotlib date number.
date_strings = data[0::4]
date_nums = [date2num(datetime.strptime(d, '%b %Y')) for d in date_strings]

# One bar per month; a width of ~30 days makes adjacent bars touch.
# Bar colour is from sns.color_palette("muted") — same hue order as the
# default matplotlib cycle, but more attractive.
fig = plt.figure()
ax = fig.add_subplot(111)  # equivalent to Matlab's subplot(1,1,1)
ax.bar(date_nums, n_hits_month, width=30, color=u'#4878cf')

fig.suptitle('LibMesh Page Hits/Month (in Thousands)')

# Tick at January of each year shown.
ticks_names = ['2012', '2013', '2014', '2015', '2016', '2017']
tick_nums = [date2num(datetime.strptime('Jan ' + year, '%b %Y')) for year in ticks_names]
ax.set_xticks(tick_nums)
ax.set_xticklabels(ticks_names)

# Clamp the x-range to the data (plus one bar width on the right) and
# make the ticks point outward.
plt.xlim(date_nums[0], date_nums[-1] + 30)
ax.get_xaxis().set_tick_params(direction='out')

plt.savefig('libmesh_pagehits.pdf')
# Local Variables:
# python-indent: 2
# End:
| balborian/libmesh | doc/statistics/libmesh_pagehits.py | Python | lgpl-2.1 | 9,913 |
import pygame
pygame.init()

# Window dimensions in pixels.
WIDTH = 600
HEIGHT = 480
SCREEN = pygame.display.set_mode((WIDTH, HEIGHT))

# Event loop.  Fix: the original never handled pygame.QUIT, so the window
# could not be closed; handle it so clicking the close button exits cleanly.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            raise SystemExit
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_1:
                print('this DOES work! :)')
# Playback run-mode constants for scheduled blocks.
RUN_AUTO = 0 # First item of this block is cued right after last item of previous block
RUN_MANUAL = 1 # Playback stops at the end of the last item of previous block
RUN_SOFT = 2 # First item of this block is cued if previous block is running and current_time >= scheduled_time
RUN_HARD = 3 # First item of this block starts immediately if previous block is running and current_time >= scheduled_time
RUN_SKIP = 4 # NOTE(review): presumably the block is skipped during playback — confirm against scheduler usage
| immstudios/nebula-core | nebulacore/constants/run_modes.py | Python | gpl-3.0 | 439 |
#!python -u
import os, sys
import datetime
import re
import glob
import tarfile
import subprocess
import shutil
import time
def next_build_number():
    """Return the current build number (as a string) and persist the
    incremented value to ``.build_number`` for the next run.

    A missing file is treated as build number '0'.  Fix: use ``with`` so
    the file handles are closed even if read/write raises.
    """
    try:
        with open('.build_number', 'r') as file:
            build_number = file.read()
    except IOError:
        build_number = '0'
    with open('.build_number', 'w') as file:
        file.write(str(int(build_number) + 1))
    return build_number
def make_header():
    """Generate include\\version.h from the version environment variables
    plus today's date.

    Each quantity is emitted twice: once as a bare macro and once as a
    quoted *_STR macro.
    """
    now = datetime.datetime.now()
    with open('include\\version.h', 'w') as header:
        # Version components come from the environment (set by __main__).
        for name in ('MAJOR_VERSION', 'MINOR_VERSION', 'MICRO_VERSION', 'BUILD_NUMBER'):
            value = os.environ[name]
            header.write('#define %s\t%s\n' % (name, value))
            header.write('#define %s_STR\t"%s"\n' % (name, value))
            header.write('\n')
        # Build date, with no blank separator lines between the entries.
        for name, value in (('YEAR', now.year), ('MONTH', now.month), ('DAY', now.day)):
            header.write('#define %s\t%s\n' % (name, value))
            header.write('#define %s_STR\t"%s"\n' % (name, value))
def copy_inf(name):
    """Copy src\\<name>.inf to proj\\<name>.inf, expanding @TOKEN@ markers.

    Token values come from the environment variables of the same name.
    """
    tokens = ('MAJOR_VERSION', 'MINOR_VERSION', 'MICRO_VERSION', 'BUILD_NUMBER')
    with open('src\\%s.inf' % name, 'r') as src, open('proj\\%s.inf' % name, 'w') as dst:
        for line in src:
            for token in tokens:
                line = re.sub('@%s@' % token, os.environ[token], line)
            dst.write(line)
def get_expired_symbols(name, age = 30):
    """Return symbol-store transaction ids for *name* older than *age* days.

    Parses 000Admin\\history.txt on the symbol server share: 'add'
    transactions tagged *name* and older than the threshold are collected,
    and later 'del' transactions remove the corresponding ids again.
    """
    path = os.path.join(os.environ['SYMBOL_SERVER'], '000Admin\\history.txt')
    try:
        file = open(path, 'r')
    except IOError:
        # No history file yet: nothing can have expired.
        return []
    threshold = datetime.datetime.utcnow() - datetime.timedelta(days = age)
    expired = []
    for line in file:
        item = line.split(',')
        if (re.match('add', item[1])):
            id = item[0]
            # history.txt stores MM/DD/YYYY dates and HH:MM:SS times.
            date = item[3].split('/')
            time = item[4].split(':')
            tag = item[5].strip('"')
            age = datetime.datetime(year = int(date[2]),
                                    month = int(date[0]),
                                    day = int(date[1]),
                                    hour = int(time[0]),
                                    minute = int(time[1]),
                                    second = int(time[2]))
            if (tag == name and age < threshold):
                expired.append(id)
        elif (re.match('del', item[1])):
            # A deletion cancels an earlier add; ignore ids we never collected.
            id = item[2].rstrip()
            try:
                expired.remove(id)
            except ValueError:
                pass
    file.close()
    return expired
def get_configuration(release, debug):
    """Return the msbuild configuration name for *release* in the
    requested flavour ('<release> Debug' or '<release> Release')."""
    suffix = ' Debug' if debug else ' Release'
    return release + suffix
def get_target_path(release, arch, debug):
    """Return the build output directory ('proj/<Name>/<Platform>') for
    the given release/arch/debug combination."""
    # Collapse spaces out of the configuration to form the directory name.
    name = ''.join(get_configuration(release, debug).split(' '))
    arch_dirs = { 'x86': os.sep.join([name, 'Win32']), 'x64': os.sep.join([name, 'x64']) }
    return os.sep.join(['proj', arch_dirs[arch]])
def shell(command, dir):
    """Run *command* (a list of strings) in directory *dir*, echoing its
    combined stdout/stderr line by line; return the exit code.

    NOTE(review): the list is joined into a single string before being
    passed to Popen without shell=True — this relies on Windows
    CreateProcess string semantics and would fail on POSIX.
    """
    print(dir)
    print(command)
    sys.stdout.flush()
    sub = subprocess.Popen(' '.join(command), cwd=dir,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT,
                           universal_newlines=True)
    for line in sub.stdout:
        print(line.rstrip())
    sub.wait()
    return sub.returncode
class msbuild_failure(Exception):
    """Raised when an msbuild invocation exits with a non-zero status.

    *value* is the failing configuration name.  Fix: forward *value* to
    Exception.__init__ so ``args`` is populated and the exception
    pickles/reprs correctly; ``self.value`` is kept for existing callers.
    """
    def __init__(self, value):
        Exception.__init__(self, value)
        self.value = value
    def __str__(self):
        return repr(self.value)
def msbuild(platform, configuration, target, file, args, dir):
    """Invoke msbuild.bat (from the current directory) in *dir*.

    Build parameters are passed via environment variables consumed by the
    batch file.  Raises msbuild_failure on a non-zero exit status.
    """
    os.environ['PLATFORM'] = platform
    os.environ['CONFIGURATION'] = configuration
    os.environ['TARGET'] = target
    os.environ['FILE'] = file
    os.environ['EXTRA'] = args
    bin = os.path.join(os.getcwd(), 'msbuild.bat')
    status = shell([bin], dir)
    if (status != 0):
        raise msbuild_failure(configuration)
def build_sln(name, release, arch, debug):
    """Build <name>.sln under proj\\ for the given release/arch/debug combo.

    Fix: removed the unused local ``cwd = os.getcwd()``.
    """
    configuration = get_configuration(release, debug)
    # Map the package architecture onto the msbuild platform name.
    if arch == 'x86':
        platform = 'Win32'
    elif arch == 'x64':
        platform = 'x64'
    msbuild(platform, configuration, 'Build', name + '.sln', '', 'proj')
def remove_timestamps(path):
    """Remove every line containing 'TimeStamp' from the file at *path*.

    The original content is preserved alongside as <path>.orig (any stale
    .orig from a previous run is deleted first).  Fix: use ``with`` so
    both file handles are closed even if an exception occurs mid-copy.
    """
    try:
        os.unlink(path + '.orig')
    except OSError:
        pass
    os.rename(path, path + '.orig')
    with open(path + '.orig', 'r') as src, open(path, 'w') as dst:
        for line in src:
            if line.find('TimeStamp') == -1:
                dst.write(line)
def sdv_clean(name):
    """Delete Static Driver Verifier outputs for project *name*.

    Directory trees are removed with rmtree (errors ignored); individual
    intermediate files are unlinked, tolerating their absence.  Each path
    is echoed (as its component list) before removal, as before.
    """
    for subdir in ('sdv', 'sdv.temp'):
        parts = ['proj', name, subdir]
        print(parts)
        shutil.rmtree(os.path.join(*parts), True)
    for leaf in ('staticdv.job', 'refine.sdv', 'sdv-map.h'):
        parts = ['proj', name, leaf]
        print(parts)
        try:
            os.unlink(os.path.join(*parts))
        except OSError:
            pass
def run_sdv(name, dir):
    """Run Static Driver Verifier over project *name* and copy the DVL
    report into *dir*.

    Sequence: normal build, clean SDV state, /scan, /check, scrub
    timestamps from the result, build the DVL file, copy it out, and run
    the /refine pass if refine.sdv was produced.
    """
    configuration = get_configuration('Windows 8', False)
    platform = 'x64'
    msbuild(platform, configuration, 'Build', name + '.vcxproj',
            '', os.path.join('proj', name))
    sdv_clean(name)
    msbuild(platform, configuration, 'sdv', name + '.vcxproj',
            '/p:Inputs="/scan"', os.path.join('proj', name))
    # Echo the generated role-map into the build log.
    path = ['proj', name, 'sdv-map.h']
    file = open(os.path.join(*path), 'r')
    for line in file:
        print(line)
    file.close()
    msbuild(platform, configuration, 'sdv', name + '.vcxproj',
            '/p:Inputs="/check:default.sdv"', os.path.join('proj', name))
    # Timestamps would make otherwise-identical results differ between runs.
    path = ['proj', name, 'sdv', 'SDV.DVL.xml']
    remove_timestamps(os.path.join(*path))
    msbuild(platform, configuration, 'dvl', name + '.vcxproj',
            '', os.path.join('proj', name))
    path = ['proj', name, name + '.DVL.XML']
    shutil.copy(os.path.join(*path), dir)
    path = ['proj', name, 'refine.sdv']
    if os.path.isfile(os.path.join(*path)):
        msbuild(platform, configuration, 'sdv', name + '.vcxproj',
                '/p:Inputs=/refine', os.path.join('proj', name))
def symstore_del(name, age):
    """Expire symbol-server transactions for *name* older than *age* days.

    Locates symstore.exe under the WDK Debuggers directory (matching the
    host architecture) and issues one 'del' per expired transaction id.
    """
    symstore_path = [os.environ['KIT'], 'Debuggers']
    if os.environ['PROCESSOR_ARCHITECTURE'] == 'x86':
        symstore_path.append('x86')
    else:
        symstore_path.append('x64')
    symstore_path.append('symstore.exe')
    symstore = os.path.join(*symstore_path)
    for id in get_expired_symbols(name, age):
        command=['"' + symstore + '"']
        command.append('del')
        command.append('/i')
        command.append(str(id))
        command.append('/s')
        command.append(os.environ['SYMBOL_SERVER'])
        shell(command, None)
def symstore_add(name, release, arch, debug):
    """Push all PDBs from one build flavour's output directory to the
    symbol server.

    The transaction is tagged with *name* and the full dotted version so
    symstore_del() can expire it later.
    """
    target_path = get_target_path(release, arch, debug)
    symstore_path = [os.environ['KIT'], 'Debuggers']
    if os.environ['PROCESSOR_ARCHITECTURE'] == 'x86':
        symstore_path.append('x86')
    else:
        symstore_path.append('x64')
    symstore_path.append('symstore.exe')
    symstore = os.path.join(*symstore_path)
    version = '.'.join([os.environ['MAJOR_VERSION'],
                        os.environ['MINOR_VERSION'],
                        os.environ['MICRO_VERSION'],
                        os.environ['BUILD_NUMBER']])
    command=['"' + symstore + '"']
    command.append('add')
    command.append('/s')
    command.append(os.environ['SYMBOL_SERVER'])
    command.append('/r')
    command.append('/f')
    command.append('*.pdb')
    command.append('/t')
    command.append(name)
    command.append('/v')
    command.append(version)
    shell(command, target_path)
def manifest():
    """Return the newline-separated list of files tracked by git at HEAD."""
    cmd = ['git', 'ls-tree', '-r', '--name-only', 'HEAD']
    sub = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    output, _ = sub.communicate()
    ret = sub.returncode
    if ret != 0:
        raise(Exception("Error %d in : %s" % (ret, cmd)))
    return output.decode('utf-8')
def archive(filename, files, tgz=False):
    """Create a tar archive *filename* containing *files*.

    The archive is gzip-compressed when *tgz* is true.  Entries that
    cannot be added (e.g. missing files) are skipped, preserving the
    original best-effort behaviour.  Fix: close the archive via
    try/finally so a failure mid-iteration no longer leaks the handle,
    and narrow the bare ``except:`` to ``except Exception``.
    """
    access = 'w:gz' if tgz else 'w'
    tar = tarfile.open(filename, access)
    try:
        for name in files:
            try:
                tar.add(name)
            except Exception:
                # best-effort: skip unreadable/missing entries
                pass
    finally:
        tar.close()
if __name__ == '__main__':
    # argv[1] selects the build flavour: 'checked' (debug) or 'free' (release).
    debug = { 'checked': True, 'free': False }
    # argv[2] may be 'nosdv' to skip Static Driver Verifier.
    sdv = { 'nosdv': False, None: True }
    driver = 'xennet'
    os.environ['MAJOR_VERSION'] = '7'
    os.environ['MINOR_VERSION'] = '2'
    os.environ['MICRO_VERSION'] = '0'
    # Allocate a build number unless the caller supplied one.
    if 'BUILD_NUMBER' not in os.environ.keys():
        os.environ['BUILD_NUMBER'] = next_build_number()
    print("BUILD_NUMBER=%s" % os.environ['BUILD_NUMBER'])
    # Record the source revision for inclusion in the release tarball.
    if 'GIT_REVISION' in os.environ.keys():
        revision = open('revision', 'w')
        print(os.environ['GIT_REVISION'], file=revision)
        revision.close()
    make_header()
    copy_inf(driver)
    # Expire month-old symbols before pushing fresh ones.
    symstore_del(driver, 30)
    release = 'Windows Vista'
    build_sln(driver, release, 'x86', debug[sys.argv[1]])
    build_sln(driver, release, 'x64', debug[sys.argv[1]])
    symstore_add(driver, release, 'x86', debug[sys.argv[1]])
    symstore_add(driver, release, 'x64', debug[sys.argv[1]])
    if len(sys.argv) <= 2 or sdv[sys.argv[2]]:
        run_sdv('xennet', driver)
    # Package the tracked sources and the build outputs.
    archive(driver + '\\source.tgz', manifest().splitlines(), tgz=True)
    archive(driver + '.tar', [driver,'revision'])
| xenserver/win-xennet | build.py | Python | bsd-2-clause | 10,383 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel
def _sequential_poisoning(s, V, beta=0.33, bos=2, eos=3, pad=1):
    # s: input batch
    # V: vocabulary size
    """Corrupt token batch *s* in place for denoising-autoencoder training.

    Each position is independently marked for replacement by a random
    word, repetition of itself, or a swap with its right neighbour
    (probability beta/3 each); bos/eos/pad positions are never corrupted.
    Returns the mutated tensor.  Assumes s is (batch, seq_len) of token
    ids — TODO confirm against caller.
    """
    rand_words = torch.randint(low=4, high=V, size=s.size(), device=s.device)
    choices = torch.rand(size=s.size(), device=s.device)
    # Special tokens are forced into the 'safe' (untouched) bucket.
    choices.masked_fill_((s == pad) | (s == bos) | (s == eos), 1)
    replace = choices < beta / 3
    repeat = (choices >= beta / 3) & (choices < beta * 2 / 3)
    swap = (choices >= beta * 2 / 3) & (choices < beta)
    safe = choices >= beta
    # Left-to-right pass; each step may rewrite columns i and i+1,
    # so the order of iteration matters.
    for i in range(s.size(1) - 1):
        rand_word = rand_words[:, i]
        next_word = s[:, i + 1]
        self_word = s[:, i]
        replace_i = replace[:, i]
        # Never swap/repeat across an eos at position i+1.
        swap_i = swap[:, i] & (next_word != 3)
        repeat_i = repeat[:, i] & (next_word != 3)
        safe_i = safe[:, i] | ((next_word == 3) & (~replace_i))
        s[:, i] = (
            self_word * (safe_i | repeat_i).long()
            + next_word * swap_i.long()
            + rand_word * replace_i.long()
        )
        s[:, i + 1] = (
            next_word * (safe_i | replace_i).long()
            + self_word * (swap_i | repeat_i).long()
        )
    return s
def gumbel_noise(input, TINY=1e-8):
    """Sample Gumbel(0, 1) noise with the same shape/device as *input*.

    Computes -log(TINY - log(u + TINY)) for u ~ Uniform(0, 1); the TINY
    offsets guard against taking log of zero.
    """
    uniform = input.new_zeros(*input.size()).uniform_()
    return torch.log(torch.log(uniform + TINY).neg() + TINY).neg()
@register_model("iterative_nonautoregressive_transformer")
class IterNATransformerModel(NATransformerModel):
    """Iterative non-autoregressive transformer.

    Refines the decoder output over ``train_step`` passes; between passes
    the previous prediction (optionally Gumbel-perturbed) feeds the next
    decoding step, and with probability ``dae_ratio`` a sequence is
    replaced by a poisoned target for a denoising-autoencoder loss.
    """
    @staticmethod
    def add_args(parser):
        """Register iterative-NAT-specific command-line options."""
        NATransformerModel.add_args(parser)
        parser.add_argument(
            "--train-step",
            type=int,
            help="number of refinement iterations during training",
        )
        parser.add_argument(
            "--dae-ratio",
            type=float,
            help="the probability of switching to the denoising auto-encoder loss",
        )
        parser.add_argument(
            "--stochastic-approx",
            action="store_true",
            help="sampling from the decoder as the inputs for next iteration",
        )
    @classmethod
    def build_model(cls, args, task):
        """Build the base model and attach the iteration/DAE hyperparameters."""
        model = super().build_model(args, task)
        model.train_step = getattr(args, "train_step", 4)
        model.dae_ratio = getattr(args, "dae_ratio", 0.5)
        model.stochastic_approx = getattr(args, "stochastic_approx", False)
        return model
    def forward(
        self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
    ):
        """Run train_step refinement passes; return word-insertion and
        length losses with all iterations concatenated along dim 0."""
        B, T = prev_output_tokens.size()
        # encoding
        encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
        # length prediction
        length_out = self.decoder.forward_length(
            normalize=False, encoder_out=encoder_out
        )
        length_tgt = self.decoder.forward_length_prediction(
            length_out, encoder_out, tgt_tokens
        )
        # decoding
        word_ins_outs, word_ins_tgts, word_ins_masks = [], [], []
        for t in range(self.train_step):
            word_ins_out = self.decoder(
                normalize=False,
                prev_output_tokens=prev_output_tokens,
                encoder_out=encoder_out,
                step=t,
            )
            word_ins_tgt = tgt_tokens
            # Padding positions are excluded from the loss.
            word_ins_mask = word_ins_tgt.ne(self.pad)
            word_ins_outs.append(word_ins_out)
            word_ins_tgts.append(word_ins_tgt)
            word_ins_masks.append(word_ins_mask)
            if t < (self.train_step - 1):
                # prediction for next iteration
                if self.stochastic_approx:
                    # Sample via Gumbel-max instead of taking the argmax.
                    word_ins_prediction = (
                        word_ins_out + gumbel_noise(word_ins_out)
                    ).max(-1)[1]
                else:
                    word_ins_prediction = word_ins_out.max(-1)[1]
                prev_output_tokens = prev_output_tokens.masked_scatter(
                    word_ins_mask, word_ins_prediction[word_ins_mask]
                )
                if self.dae_ratio > 0:
                    # we do not perform denoising for the first iteration
                    corrputed = (
                        torch.rand(size=(B,), device=prev_output_tokens.device)
                        < self.dae_ratio
                    )
                    corrputed_tokens = _sequential_poisoning(
                        tgt_tokens[corrputed],
                        len(self.tgt_dict),
                        0.33,
                        self.bos,
                        self.eos,
                        self.pad,
                    )
                    prev_output_tokens[corrputed] = corrputed_tokens
        # concat everything
        word_ins_out = torch.cat(word_ins_outs, 0)
        word_ins_tgt = torch.cat(word_ins_tgts, 0)
        word_ins_mask = torch.cat(word_ins_masks, 0)
        return {
            "word_ins": {
                "out": word_ins_out,
                "tgt": word_ins_tgt,
                "mask": word_ins_mask,
                "ls": self.args.label_smoothing,
                "nll_loss": True,
            },
            "length": {
                "out": length_out,
                "tgt": length_tgt,
                "factor": self.decoder.length_loss_factor,
            },
        }
@register_model_architecture(
    "iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer"
)
def inat_base_architecture(args):
    """Fill in default hyperparameters on *args* for the base architecture.

    Each getattr leaves caller-provided values untouched.  Note the order
    matters: some decoder defaults mirror the encoder values set earlier.
    """
    args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
    args.encoder_layers = getattr(args, "encoder_layers", 6)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
    args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
    args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
    args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
    # Decoder dimensions default to the encoder's.
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
    args.decoder_ffn_embed_dim = getattr(
        args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
    )
    args.decoder_layers = getattr(args, "decoder_layers", 6)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
    args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
    args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
    args.attention_dropout = getattr(args, "attention_dropout", 0.0)
    args.activation_dropout = getattr(args, "activation_dropout", 0.0)
    args.activation_fn = getattr(args, "activation_fn", "relu")
    args.dropout = getattr(args, "dropout", 0.1)
    args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
    args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
    args.share_decoder_input_output_embed = getattr(
        args, "share_decoder_input_output_embed", False
    )
    args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
    args.no_token_positional_embeddings = getattr(
        args, "no_token_positional_embeddings", False
    )
    args.adaptive_input = getattr(args, "adaptive_input", False)
    args.apply_bert_init = getattr(args, "apply_bert_init", False)
    args.decoder_output_dim = getattr(
        args, "decoder_output_dim", args.decoder_embed_dim
    )
    args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
    # --- special arguments ---
    args.sg_length_pred = getattr(args, "sg_length_pred", False)
    args.pred_length_offset = getattr(args, "pred_length_offset", False)
    args.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
    args.ngram_predictor = getattr(args, "ngram_predictor", 1)
    args.src_embedding_copy = getattr(args, "src_embedding_copy", False)
    # Iterative-NAT specific knobs (see IterNATransformerModel.add_args).
    args.train_step = getattr(args, "train_step", 4)
    args.dae_ratio = getattr(args, "dae_ratio", 0.5)
    args.stochastic_approx = getattr(args, "stochastic_approx", False)
@register_model_architecture(
    "iterative_nonautoregressive_transformer",
    "iterative_nonautoregressive_transformer_wmt_en_de",
)
def iter_nat_wmt_en_de(args):
    """WMT En-De preset; currently identical to the base architecture."""
    inat_base_architecture(args)
| pytorch/fairseq | fairseq/models/nat/iterative_nonautoregressive_transformer.py | Python | mit | 8,647 |
def Setup(Settings, DefaultModel):
    """Configure the 'OSM only' k-fold cross-validation experiment on
    dataset d4 and return the mutated Settings dict.

    Dataset key:
      d1 5556x_markable_640x640              SegmentsData_marked_R100_4Tables
      d2 5556x_markable_640x640_2x_expanded  SegmentsData_marked_R100_4Tables_expanded.dump
      d3 5556x_minlen30_640px                SegmentsData_marked_R100_4Tables.dump
      d4 5556x_minlen30_640px_2x_expanded    SegmentsData_marked_R100_4Tables_expanded.dump
    """
    Settings["experiment_name"] = "TypesOfModels_monoTest_OSM_kfold_d4"
    model = Settings["models"][0]
    model["dataset_name"] = "5556x_minlen30_640px_2x_expanded"
    model["dump_file_override"] = 'SegmentsData_marked_R100_4Tables_expanded.dump'
    model["pixels"] = 640
    model["model_type"] = 'osm_only'  # alternatives: img_only, img_osm_mix
    model["unique_id"] = 'osmmodel'
    model["top_repeat_FC_block"] = 2
    model["epochs"] = 500
    model["k_fold_crossvalidation"] = True
    model["crossvalidation_k"] = 10
    # No history graphs for this run.
    Settings["graph_histories"] = []
    return Settings
| previtus/MGR-Project-Code | Settings/set3_crossval-one-model-on-one-dataset/type_of_model_osm_kfold_d4.py | Python | mit | 1,246 |
import click
@click.group(name='pytn')
def cli():
    """
    Output a greeting to PyTennessee!
    """
@cli.command()
def prompt1():
    """
    Prompt for data.

    Reads a value from stdin via click.prompt and echoes it back.
    """
    data = click.prompt("Data")
    click.echo("data: {0}".format(data))
@cli.command()
def prompt2():
    """
    Prompt for data w/ custom text.

    abort=True makes click raise Abort (non-zero exit) when the
    user answers no; "OK" is printed only on confirmation.
    """
    click.confirm("Are you sure?", abort=True)
    click.echo("OK")
if __name__ == '__main__':
    cli()
| tylerdave/PyTN2016-Click-Tutorial | solutions/input_prompts.py | Python | mit | 435 |
## -*- coding: utf-8 -*-
## Debug template: renders every process environment variable as an
## HTML dump.  Variables whose names start with 'HTTP_EVE' are listed
## first, everything else follows after a horizontal rule.
<%
    import os
    context.write('<hr /><div class="dev_data_dump">')
    env_ks = os.environ.keys()
    for ev in env_ks:
        # first only 'HTTP_EVE' vars
        ev_s = ev[0:8]
        if ev_s == 'HTTP_EVE':
            context.write('[{0}] = [{1}]<br />'.format(ev, os.environ[ev]))
            # context.write('[{0}][{2}] = [{1}]<br />'.format(ev, os.environ[ev], ev_s))
    context.write('<hr />')
    for ev in env_ks:
        # then other vars
        ev_s = ev[0:8]
        if ev_s != 'HTTP_EVE':
            context.write('[{0}] = [{1}]<br />'.format(ev, os.environ[ev]))
    context.write('</div><hr />')
%>
"""
Score Module
This module defines the score class. It keeps track of the players
score and displays it on the screen.
"""
import pygame
from ..config import RESOLUTION
class Score(pygame.sprite.Sprite):
    """Sprite that tracks the player's score and renders it on screen."""

    def __init__(self, score=0):
        """Initialize the score counter, font, and screen position."""
        super().__init__()
        self.score = score
        self.color = (200, 200, 200)
        self.font = pygame.font.Font(None, 40)
        self._render()
        # Slightly enlarged rect used for drawing a background box.
        self.draw_rect = self.rect.inflate(40, 10)

    def update(self, *args, **kwargs):
        """Re-render the score text (called each frame by the sprite group)."""
        self._render()

    def incr(self, incr=5):
        """Add ``incr`` points to the score."""
        self.score += incr

    def _render(self):
        """Render the current score and anchor it near the bottom-right corner."""
        self.image = self.font.render(str(self.score), True, self.color)
        self.rect = self.image.get_rect()
        self.rect.center = (RESOLUTION[0] - 100, RESOLUTION[1] - 25)
| Oisota/Breakout | breakout/game/score.py | Python | gpl-3.0 | 1,020 |
#!/usr/bin/env python
#coding: utf-8
"""
https://leetcode.com/problems/spiral-matrix-ii/
Given an integer n, generate a square matrix filled with elements from 1 to n2 in spiral order.
For example,
Given n = 3,
You should return the following matrix:
[
[ 1, 2, 3 ],
[ 8, 9, 4 ],
[ 7, 6, 5 ]
]
"""
class Solution(object):
    def generateMatrix(self, n):
        """
        Fill an n x n matrix with the values 1..n*n in clockwise
        spiral order and return it.

        :type n: int
        :rtype: List[List[int]]
        """
        grid = [[0] * n for _ in range(n)]
        # Clockwise movement deltas: right, down, left, up.
        moves = ((0, 1), (1, 0), (0, -1), (-1, 0))
        heading = 0
        row = col = 0
        for value in range(1, n * n + 1):
            grid[row][col] = value
            dr, dc = moves[heading]
            nxt_r, nxt_c = row + dr, col + dc
            # Turn clockwise when the next cell is off the grid or
            # already filled.
            if not (0 <= nxt_r < n and 0 <= nxt_c < n) or grid[nxt_r][nxt_c]:
                heading = (heading + 1) % 4
                dr, dc = moves[heading]
                nxt_r, nxt_c = row + dr, col + dc
            row, col = nxt_r, nxt_c
        return grid
# Smoke test: print the 4x4 spiral when run as a script.
solution = Solution()
print(solution.generateMatrix(4))
| caoxudong/code_practice | leetcode/59_spiral_matrix_ii.py | Python | mit | 1,960 |
from test_support import TestFailed
# A test for SF bug 422177: manifest float constants varied way too much in
# precision depending on whether Python was loading a module for the first
# time, or reloading it from a precompiled .pyc. The "expected" failure
# mode is that when test_import imports this after all .pyc files have been
# erased, it passes, but when test_import imports this from
# double_const.pyc, it fails. This indicates a woeful loss of precision in
# the marshal format for doubles. It's also possible that repr() doesn't
# produce enough digits to get reasonable precision for this box.
# 17+ significant digits: enough to exercise full IEEE-754 double precision.
PI = 3.14159265358979324
TWOPI = 6.28318530717958648
PI_str = "3.14159265358979324"
TWOPI_str = "6.28318530717958648"
# Verify that the double x is within a few bits of eval(x_str).
def check_ok(x, x_str):
    """Verify that the compiled double *x* is within ~3 ULP of eval(x_str)."""
    assert x > 0.0
    expected = eval(x_str)
    assert expected > 0.0
    error = abs(x - expected)
    # If error is no larger than 3 ULP (wrt expected), then error/8 is no
    # larger than 0.375 ULP, so adding error/8 to expected must not change
    # it; if it does, too much precision was lost somewhere.
    if expected + (error / 8.) != expected:
        raise TestFailed("Manifest const %s lost too much precision " % x_str)
# Both constants must survive compile/marshal round-trips unchanged.
check_ok(PI, PI_str)
check_ok(TWOPI, TWOPI_str)
| neopoly/rubyfox-server | lib/rubyfox/server/data/lib/Lib/test/double_const.py | Python | mit | 1,212 |
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
CellState Manager
"""
import copy
import datetime
import functools
from oslo.config import cfg
from nova.cells import rpc_driver
from nova import context
from nova.db import base
from nova import exception
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
cell_state_manager_opts = [
cfg.IntOpt('db_check_interval',
default=60,
help='Seconds between getting fresh cell info from db.'),
cfg.StrOpt('cells_config',
help='Configuration file from which to read cells '
'configuration. If given, overrides reading cells '
'from the database.'),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('reserve_percent', 'nova.cells.opts', group='cells')
CONF.import_opt('mute_child_interval', 'nova.cells.opts', group='cells')
#CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_state_manager_opts, group='cells')
class CellState(object):
    """Holds information for a particular cell.

    Caches what this service knows about one cell (or about itself when
    ``is_me`` is True): DB credentials/config in ``db_info``, the last
    reported ``capabilities`` and ``capacities``, and ``last_seen``
    (timestamp of the most recent report; datetime.min means "never").
    """
    def __init__(self, cell_name, is_me=False):
        self.name = cell_name
        self.is_me = is_me
        self.last_seen = datetime.datetime.min
        self.capabilities = {}
        self.capacities = {}
        self.db_info = {}
        # TODO(comstud): The DB will specify the driver to use to talk
        # to this cell, but there's no column for this yet. The only
        # available driver is the rpc driver.
        self.driver = rpc_driver.CellsRPCDriver()
    def update_db_info(self, cell_db_info):
        """Update cell credentials from db.

        Every column except 'name' is copied; the name is already held
        in ``self.name`` and used as the lookup key by the manager.
        """
        self.db_info = dict(
            [(k, v) for k, v in cell_db_info.iteritems()
             if k != 'name'])
    def update_capabilities(self, cell_metadata):
        """Update cell capabilities for a cell (also refreshes last_seen)."""
        self.last_seen = timeutils.utcnow()
        self.capabilities = cell_metadata
    def update_capacities(self, capacities):
        """Update capacity information for a cell (also refreshes last_seen)."""
        self.last_seen = timeutils.utcnow()
        self.capacities = capacities
    def get_cell_info(self):
        """Return subset of cell information for OS API use."""
        db_fields_to_return = ['is_parent', 'weight_scale', 'weight_offset']
        # Connection details live inside the transport URL; map its parts
        # back to the legacy field names the API expects.
        url_fields_to_return = {
            'username': 'username',
            'hostname': 'rpc_host',
            'port': 'rpc_port',
        }
        cell_info = dict(name=self.name, capabilities=self.capabilities)
        if self.db_info:
            for field in db_fields_to_return:
                cell_info[field] = self.db_info[field]
            url_info = rpc_driver.parse_transport_url(
                self.db_info['transport_url'])
            for field, canonical in url_fields_to_return.items():
                cell_info[canonical] = url_info[field]
        return cell_info
    def send_message(self, message):
        """Send a message to a cell.  Just forward this to the driver,
        passing ourselves and the message as arguments.
        """
        self.driver.send_message_to_cell(self, message)
    def __repr__(self):
        me = "me" if self.is_me else "not_me"
        return "Cell '%s' (%s)" % (self.name, me)
def sync_before(f):
    """Decorator: refresh cell data from the data store (when due)
    before invoking the wrapped method.
    """
    @functools.wraps(f)
    def inner(self, *args, **kwargs):
        # Pull fresh cell info first so the method sees current state.
        self._cell_data_sync()
        return f(self, *args, **kwargs)
    return inner
def sync_after(f):
    """Decorator: force an immediate data-store sync after the wrapped
    method updates cell information.
    """
    @functools.wraps(f)
    def inner(self, *args, **kwargs):
        ret = f(self, *args, **kwargs)
        # Re-sync right away so the in-memory cache reflects the write.
        self._cell_data_sync(force=True)
        return ret
    return inner
_unset = object()
class CellStateManager(base.Base):
    """In-memory view of this cell plus its parent and child cells.

    Instantiating CellStateManager actually returns one of two concrete
    subclasses (see __new__): CellStateManagerFile when a cells config
    file is configured, otherwise CellStateManagerDB.
    """
    def __new__(cls, cell_state_cls=None, cells_config=_unset):
        # Factory dispatch happens only when the base class itself is
        # instantiated; the subclasses construct normally.
        if cls is not CellStateManager:
            return super(CellStateManager, cls).__new__(cls)
        if cells_config is _unset:
            cells_config = CONF.cells.cells_config
        if cells_config:
            config_path = CONF.find_file(cells_config)
            if not config_path:
                raise cfg.ConfigFilesNotFoundError(path=config_path)
            return CellStateManagerFile(cell_state_cls, config_path)
        return CellStateManagerDB(cell_state_cls)
    def __init__(self, cell_state_cls=None):
        """Build initial state and do a forced first sync.

        :param cell_state_cls: class used to hold per-cell state
                               (defaults to CellState; overridable for tests)
        """
        super(CellStateManager, self).__init__()
        if not cell_state_cls:
            cell_state_cls = CellState
        self.cell_state_cls = cell_state_cls
        self.my_cell_state = cell_state_cls(CONF.cells.name, is_me=True)
        self.parent_cells = {}
        self.child_cells = {}
        self.last_cell_db_check = datetime.datetime.min
        self._cell_data_sync(force=True)
        # Parse this cell's configured capabilities; each entry has the
        # form "name=value" or "name=val1;val2" (multi-valued).
        my_cell_capabs = {}
        for cap in CONF.cells.capabilities:
            name, value = cap.split('=', 1)
            if ';' in value:
                values = set(value.split(';'))
            else:
                values = set([value])
            my_cell_capabs[name] = values
        self.my_cell_state.update_capabilities(my_cell_capabs)
    def _refresh_cells_from_dict(self, db_cells_dict):
        """Make our cell info map match the db."""
        # Update current cells. Delete ones that disappeared or whose
        # parent/child role changed.  (py2: items() returns a list, so
        # deleting during iteration is safe here.)
        for cells_dict in (self.parent_cells, self.child_cells):
            for cell_name, cell_info in cells_dict.items():
                is_parent = cell_info.db_info['is_parent']
                db_dict = db_cells_dict.get(cell_name)
                if db_dict and is_parent == db_dict['is_parent']:
                    cell_info.update_db_info(db_dict)
                else:
                    del cells_dict[cell_name]
        # Add new cells
        for cell_name, db_info in db_cells_dict.items():
            if db_info['is_parent']:
                cells_dict = self.parent_cells
            else:
                cells_dict = self.child_cells
            if cell_name not in cells_dict:
                cells_dict[cell_name] = self.cell_state_cls(cell_name)
            cells_dict[cell_name].update_db_info(db_info)
    def _time_to_sync(self):
        """Is it time to sync the DB against our memory cache?"""
        # NOTE(review): uses timedelta.seconds, not total_seconds(); a gap
        # longer than a day would wrap.  Harmless in practice since syncs
        # run at least every db_check_interval seconds.
        diff = timeutils.utcnow() - self.last_cell_db_check
        return diff.seconds >= CONF.cells.db_check_interval
    def _update_our_capacity(self, ctxt=None):
        """Update our capacity in the self.my_cell_state CellState.

        This will add/update 2 entries in our CellState.capacities,
        'ram_free' and 'disk_free'.

        The values of these are both dictionaries with the following
        format:

        {'total_mb': <total_memory_free_in_the_cell>,
         'units_by_mb': <units_dictionary>}

        <units_dictionary> contains the number of units that we can
        build for every instance_type that we have.  This number is
        computed by looking at room available on every compute_node.

        Take the following instance_types as an example:

        [{'memory_mb': 1024, 'root_gb': 10, 'ephemeral_gb': 100},
         {'memory_mb': 2048, 'root_gb': 20, 'ephemeral_gb': 200}]

        capacities['ram_free']['units_by_mb'] would contain the following:

        {'1024': <number_of_instances_that_will_fit>,
         '2048': <number_of_instances_that_will_fit>}

        capacities['disk_free']['units_by_mb'] would contain the following:

        {'122880': <number_of_instances_that_will_fit>,
         '225280': <number_of_instances_that_will_fit>}

        Units are in MB, so 122880 = (10 + 100) * 1024.

        NOTE(comstud): Perhaps we should only report a single number
        available per instance_type.
        """
        if not ctxt:
            ctxt = context.get_admin_context()
        reserve_level = CONF.cells.reserve_percent / 100.0
        compute_hosts = {}
        def _get_compute_hosts():
            # Collect free/total RAM and disk per enabled compute host.
            compute_nodes = self.db.compute_node_get_all(ctxt)
            for compute in compute_nodes:
                service = compute['service']
                if not service or service['disabled']:
                    continue
                host = service['host']
                compute_hosts[host] = {
                    'free_ram_mb': compute['free_ram_mb'],
                    'free_disk_mb': compute['free_disk_gb'] * 1024,
                    'total_ram_mb': compute['memory_mb'],
                    'total_disk_mb': compute['local_gb'] * 1024}
        _get_compute_hosts()
        if not compute_hosts:
            self.my_cell_state.update_capacities({})
            return
        ram_mb_free_units = {}
        disk_mb_free_units = {}
        total_ram_mb_free = 0
        total_disk_mb_free = 0
        def _free_units(total, free, per_inst):
            # Number of per_inst-sized units that fit into 'free' after
            # holding back reserve_percent of 'total'.
            if per_inst:
                min_free = total * reserve_level
                free = max(0, free - min_free)
                return int(free / per_inst)
            else:
                return 0
        def _update_from_values(values, instance_type):
            # Accumulate per-flavor free-unit counts for one compute host.
            # NOTE(review): the 'values' parameter is unused; the closure
            # reads the loop variable 'compute_values' directly (callers
            # happen to pass the same object).
            memory_mb = instance_type['memory_mb']
            disk_mb = (instance_type['root_gb'] +
                       instance_type['ephemeral_gb']) * 1024
            ram_mb_free_units.setdefault(str(memory_mb), 0)
            disk_mb_free_units.setdefault(str(disk_mb), 0)
            ram_free_units = _free_units(compute_values['total_ram_mb'],
                                         compute_values['free_ram_mb'], memory_mb)
            disk_free_units = _free_units(compute_values['total_disk_mb'],
                                          compute_values['free_disk_mb'], disk_mb)
            ram_mb_free_units[str(memory_mb)] += ram_free_units
            disk_mb_free_units[str(disk_mb)] += disk_free_units
        instance_types = self.db.flavor_get_all(ctxt)
        for compute_values in compute_hosts.values():
            total_ram_mb_free += compute_values['free_ram_mb']
            total_disk_mb_free += compute_values['free_disk_mb']
            for instance_type in instance_types:
                _update_from_values(compute_values, instance_type)
        capacities = {'ram_free': {'total_mb': total_ram_mb_free,
                                   'units_by_mb': ram_mb_free_units},
                      'disk_free': {'total_mb': total_disk_mb_free,
                                    'units_by_mb': disk_mb_free_units}}
        self.my_cell_state.update_capacities(capacities)
    @sync_before
    def get_cell_info_for_neighbors(self):
        """Return cell information for all neighbor cells."""
        cell_list = [cell.get_cell_info()
                     for cell in self.child_cells.itervalues()]
        cell_list.extend([cell.get_cell_info()
                          for cell in self.parent_cells.itervalues()])
        return cell_list
    @sync_before
    def get_my_state(self):
        """Return information for my (this) cell."""
        return self.my_cell_state
    @sync_before
    def get_child_cells(self):
        """Return list of child cell_infos."""
        return self.child_cells.values()
    @sync_before
    def get_parent_cells(self):
        """Return list of parent cell_infos."""
        return self.parent_cells.values()
    @sync_before
    def get_parent_cell(self, cell_name):
        """Return the named parent CellState, or None if unknown."""
        return self.parent_cells.get(cell_name)
    @sync_before
    def get_child_cell(self, cell_name):
        """Return the named child CellState, or None if unknown."""
        return self.child_cells.get(cell_name)
    @sync_before
    def update_cell_capabilities(self, cell_name, capabilities):
        """Update capabilities for a cell."""
        cell = (self.child_cells.get(cell_name) or
                self.parent_cells.get(cell_name))
        if not cell:
            LOG.error(_("Unknown cell '%(cell_name)s' when trying to "
                        "update capabilities"),
                      {'cell_name': cell_name})
            return
        # Make sure capabilities are sets.
        for capab_name, values in capabilities.items():
            capabilities[capab_name] = set(values)
        cell.update_capabilities(capabilities)
    @sync_before
    def update_cell_capacities(self, cell_name, capacities):
        """Update capacities for a cell."""
        cell = (self.child_cells.get(cell_name) or
                self.parent_cells.get(cell_name))
        if not cell:
            LOG.error(_("Unknown cell '%(cell_name)s' when trying to "
                        "update capacities"),
                      {'cell_name': cell_name})
            return
        cell.update_capacities(capacities)
    @sync_before
    def get_our_capabilities(self, include_children=True):
        """Return this cell's capabilities, optionally unioned with the
        capabilities of all recently-seen child cells.
        """
        capabs = copy.deepcopy(self.my_cell_state.capabilities)
        if include_children:
            for cell in self.child_cells.values():
                # Skip "muted" children we haven't heard from recently.
                if timeutils.is_older_than(cell.last_seen,
                                           CONF.cells.mute_child_interval):
                    continue
                for capab_name, values in cell.capabilities.items():
                    if capab_name not in capabs:
                        capabs[capab_name] = set([])
                    capabs[capab_name] |= values
        return capabs
    def _add_to_dict(self, target, src):
        """Recursively merge src into target, summing the leaf values."""
        for key, value in src.items():
            if isinstance(value, dict):
                target.setdefault(key, {})
                self._add_to_dict(target[key], value)
                continue
            target.setdefault(key, 0)
            target[key] += value
    @sync_before
    def get_our_capacities(self, include_children=True):
        """Return this cell's capacities, optionally summed with the
        capacities of all child cells.
        """
        capacities = copy.deepcopy(self.my_cell_state.capacities)
        if include_children:
            for cell in self.child_cells.values():
                self._add_to_dict(capacities, cell.capacities)
        return capacities
    @sync_before
    def get_capacities(self, cell_name=None):
        """Return capacities for this cell (default) or a child cell."""
        if not cell_name or cell_name == self.my_cell_state.name:
            return self.get_our_capacities()
        if cell_name in self.child_cells:
            return self.child_cells[cell_name].capacities
        raise exception.CellNotFound(cell_name=cell_name)
    @sync_before
    def cell_get(self, ctxt, cell_name):
        """Return the CellState for cell_name (parent or child), or
        raise CellNotFound.
        """
        for cells_dict in (self.parent_cells, self.child_cells):
            if cell_name in cells_dict:
                return cells_dict[cell_name]
        raise exception.CellNotFound(cell_name=cell_name)
class CellStateManagerDB(CellStateManager):
    """Cell state manager that reads/writes cell records in the database."""
    @utils.synchronized('cell-db-sync')
    def _cell_data_sync(self, force=False):
        """
        Update cell status for all cells from the backing data store
        when necessary.

        :param force: If True, cell status will be updated regardless
                      of whether it's time to do so.
        """
        if force or self._time_to_sync():
            LOG.debug(_("Updating cell cache from db."))
            self.last_cell_db_check = timeutils.utcnow()
            ctxt = context.get_admin_context()
            db_cells = self.db.cell_get_all(ctxt)
            db_cells_dict = dict((cell['name'], cell) for cell in db_cells)
            self._refresh_cells_from_dict(db_cells_dict)
            self._update_our_capacity(ctxt)
    # The write operations below hit the DB and then (via @sync_after)
    # force an immediate refresh of the in-memory cache.
    @sync_after
    def cell_create(self, ctxt, values):
        """Create a cell record and re-sync the cache."""
        return self.db.cell_create(ctxt, values)
    @sync_after
    def cell_update(self, ctxt, cell_name, values):
        """Update a cell record and re-sync the cache."""
        return self.db.cell_update(ctxt, cell_name, values)
    @sync_after
    def cell_delete(self, ctxt, cell_name):
        """Delete a cell record and re-sync the cache."""
        return self.db.cell_delete(ctxt, cell_name)
class CellStateManagerFile(CellStateManager):
    """Cell state manager backed by a JSON cells config file (read-only)."""
    def __init__(self, cell_state_cls, cells_config_path):
        self.cells_config_path = cells_config_path
        super(CellStateManagerFile, self).__init__(cell_state_cls)
    def _cell_data_sync(self, force=False):
        """
        Update cell status for all cells from the backing data store
        when necessary.

        :param force: If True, cell status will be updated regardless
                      of whether it's time to do so.
        """
        # 'reloaded' is True when the helper (re-)read the file contents.
        reloaded, data = fileutils.read_cached_file(self.cells_config_path,
                                                    force_reload=force)
        if reloaded:
            LOG.debug(_("Updating cell cache from config file."))
            self.cells_config_data = jsonutils.loads(data)
            self._refresh_cells_from_dict(self.cells_config_data)
        if force or self._time_to_sync():
            self.last_cell_db_check = timeutils.utcnow()
            self._update_our_capacity()
    # Cells are defined statically in the config file, so runtime
    # create/update/delete operations are rejected.
    def cell_create(self, ctxt, values):
        raise exception.CellsUpdateProhibited()
    def cell_update(self, ctxt, cell_name, values):
        raise exception.CellsUpdateProhibited()
    def cell_delete(self, ctxt, cell_name):
        raise exception.CellsUpdateProhibited()
| pombredanne/MOG | nova/cells/state.py | Python | apache-2.0 | 17,886 |
"""
This test file tests the lib.tokens.certificatetoken
"""
from .base import MyTestCase
from privacyidea.lib.tokens.certificatetoken import CertificateTokenClass
from privacyidea.models import Token
from privacyidea.lib.caconnector import save_caconnector
from privacyidea.lib.token import get_tokens
import os
from OpenSSL import crypto
CERT = """-----BEGIN CERTIFICATE-----
MIIGXDCCBUSgAwIBAgITYwAAAA27DqXl0fVdOAAAAAAADTANBgkqhkiG9w0BAQsF
ADBCMRMwEQYKCZImiZPyLGQBGRYDb3JnMRkwFwYKCZImiZPyLGQBGRYJYXV0aC10
ZXN0MRAwDgYDVQQDEwdDQTJGMDAxMB4XDTE1MDIxMTE2NDE1M1oXDTE2MDIxMTE2
NDE1M1owgYExEzARBgoJkiaJk/IsZAEZFgNvcmcxGTAXBgoJkiaJk/IsZAEZFglh
dXRoLXRlc3QxDjAMBgNVBAMTBVVzZXJzMRowGAYDVQQDExFDb3JuZWxpdXMgS29l
bGJlbDEjMCEGCSqGSIb3DQEJARYUY29ybmVsaXVzQGJhbGZvby5uZXQwggEiMA0G
CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCN5xYqSoKhxKgywdWOjZTgOobPN5lN
DbSKQktdiG7asH0/Bzg8DIyd+k6wj5yncNhHKBhDJC/cAz3YAYY+KJj/tECLyt5V
AqZLuf3sTA/Ak/neHzXwrlo9PB67JxY4tgJcaR0Cml5oSx4ofRowOCrXv60Asfkl
+3lMRaNyEpQiSVdqIzGZAM1FIy0chwknMB8PfQhlC3v60rGiWoG65Rl5zuGl9lJC
nR990FGSIUW2GLCtI57QCCdBVHIBL+M0WNbdonk9qYSHm8ArFeoftsw2UxHQazM9
KftS7osJnQWOeNw+iIQIgZxLlyC9CBeKBCj3gIwLMEZRz6y951A9nngbAgMBAAGj
ggMJMIIDBTAOBgNVHQ8BAf8EBAMCBLAwHQYDVR0OBBYEFGFYluib3gs1BQQNB25A
FyEvQuoxMB8GA1UdIwQYMBaAFO9tVOusjflOf/y1lJuQ0YZej3vuMIHHBgNVHR8E
gb8wgbwwgbmggbaggbOGgbBsZGFwOi8vL0NOPUNBMkYwMDEsQ049Z2FuZGFsZixD
Tj1DRFAsQ049UHVibGljJTIwS2V5JTIwU2VydmljZXMsQ049U2VydmljZXMsQ049
Q29uZmlndXJhdGlvbixEQz1hdXRoLXRlc3QsREM9b3JnP2NlcnRpZmljYXRlUmV2
b2NhdGlvbkxpc3Q/YmFzZT9vYmplY3RDbGFzcz1jUkxEaXN0cmlidXRpb25Qb2lu
dDCBuwYIKwYBBQUHAQEEga4wgaswgagGCCsGAQUFBzAChoGbbGRhcDovLy9DTj1D
QTJGMDAxLENOPUFJQSxDTj1QdWJsaWMlMjBLZXklMjBTZXJ2aWNlcyxDTj1TZXJ2
aWNlcyxDTj1Db25maWd1cmF0aW9uLERDPWF1dGgtdGVzdCxEQz1vcmc/Y0FDZXJ0
aWZpY2F0ZT9iYXNlP29iamVjdENsYXNzPWNlcnRpZmljYXRpb25BdXRob3JpdHkw
PQYJKwYBBAGCNxUHBDAwLgYmKwYBBAGCNxUIheyJB4SuoT6EjYcBh+WGHoXd8y83
g7DpBYPZgFwCAWQCAQgwKQYDVR0lBCIwIAYKKwYBBAGCNxQCAgYIKwYBBQUHAwIG
CCsGAQUFBwMEMDUGCSsGAQQBgjcVCgQoMCYwDAYKKwYBBAGCNxQCAjAKBggrBgEF
BQcDAjAKBggrBgEFBQcDBDBEBgNVHREEPTA7oCMGCisGAQQBgjcUAgOgFQwTY29y
bnlAYXV0aC10ZXN0Lm9yZ4EUY29ybmVsaXVzQGJhbGZvby5uZXQwRAYJKoZIhvcN
AQkPBDcwNTAOBggqhkiG9w0DAgICAIAwDgYIKoZIhvcNAwQCAgCAMAcGBSsOAwIH
MAoGCCqGSIb3DQMHMA0GCSqGSIb3DQEBCwUAA4IBAQCVI9ULYQgLxOcDWAlWPE4g
ZRcbg65oCNdB0MBzTFhQZC/YFlSTNAGU2gUhnW+LoQ4N4sVnwxPbCRpsiA0ImqFU
hh/qcIV4JYthUGYdYkGjsc1YQjdLpYsg0GRUXTQHYjMQo6gvg1z/iMhzCCU8DbjT
DkTm/0JYVCt+vpvpigX/XWLWeHLHzPHFYAdBVAYgnwbTV4hgNIO98YRiMWsXOAIR
S/IreZ58alclwJJRIGTuOTKSCd+uE7QMALztDty7cjtpMANGrz1k/uUWg9T+UgQs
czZ68tF258iaWLPbsdRWqO160iy7eDSKWFFMR4HnfLHX/UPRSpBNGSHmvT1hbkUr
-----END CERTIFICATE-----"""
# Fixtures for the local CA connector; paths are relative to WORKINGDIR.
CAKEY = "cakey.pem"
CACERT = "cacert.pem"
OPENSSLCNF = "openssl.cnf"
WORKINGDIR = "tests/testdata/ca"
REQUEST = """-----BEGIN CERTIFICATE REQUEST-----
MIICmTCCAYECAQAwVDELMAkGA1UEBhMCREUxDzANBgNVBAgMBkhlc3NlbjEUMBIG
A1UECgwLcHJpdmFjeWlkZWExHjAcBgNVBAMMFXJlcXVlc3Rlci5sb2NhbGRvbWFp
bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM2+FE/6zgE/QiIbHZyv
3ZLSf9tstz45Q0NrEwPxBfQHdLx2aSgLrxmO1/zjzcZY8sp/CG1T/AcCRCTGtDRM
jAT+Mw5A4iC6AnNa9/VPY27MxrbfVB03OX1RNiZfvdw/qItroq62ndYh599BuHoo
KmhIyqgt7eHpRl5acm20hDiHkf2UEQsohMbCLyr7Afk2egl10TOIPHNBW8i/lIlw
ofDAuS5QUx6xF2Rp9C2B4KkNDjLpulWKhfEbb0l5tH+Iww0+VIibPR84jATz7mpj
K/XG27SDqsR4QTp9S+HIPnHKG2FZ6sbEyjJeyem/EinmxsNj/qBV2nrxYJhNJu36
cC0CAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4IBAQB7uJC6I1By0T29IZ0B1ue5YNxM
NDPbqCytRPMQ9awJ6niMMIQRS1YPhSFPWyEWrGKWAUvbn/lV0XHH7L/tvHg6HbC0
AjLc8qPH4Xqkb1WYV1GVJYr5qyEFS9QLZQLQDC2wk018B40MSwZWtsv14832mPu8
gP5WP+mj9LRgWCP1MdAR9pcNGd9pZMcCHQLxT76mc/eol4kb/6/U6yxBmzaff8eB
oysLynYXZkm0wFudTV04K0aKlMJTp/G96sJOtw1yqrkZSe0rNVcDs9vo+HAoMWO/
XZp8nprZvJuk6/QIRpadjRkv4NElZ2oNu6a8mtaO38xxnfQm4FEMbm5p+4tM
-----END CERTIFICATE REQUEST-----"""
class CertificateTokenTestCase(MyTestCase):
    """Tests for the certificate token type: PEM upload, CSR enrollment
    via the local CA connector, and class metadata.
    """
    serial1 = "CRT0001"
    serial2 = "CRT0002"
    def test_01_create_token_from_certificate(self):
        # Enrollment variant 1: attach a ready-made PEM certificate.
        db_token = Token(self.serial1, tokentype="certificate")
        db_token.save()
        token = CertificateTokenClass(db_token)
        # just upload a ready certificate
        token.update({"certificate": CERT})
        self.assertTrue(token.token.serial == self.serial1, token)
        self.assertTrue(token.token.tokentype == "certificate",
                        token.token.tokentype)
        self.assertTrue(token.type == "certificate", token)
        class_prefix = token.get_class_prefix()
        self.assertTrue(class_prefix == "CRT", class_prefix)
        self.assertEqual(token.get_class_type(), "certificate")
        detail = token.get_init_detail()
        # The uploaded PEM must be returned unchanged in the init details.
        self.assertEqual(detail.get("certificate"), CERT)
    def test_02_create_token_from_request(self):
        cwd = os.getcwd()
        # setup ca connector
        r = save_caconnector({"cakey": CAKEY,
                              "cacert": CACERT,
                              "type": "local",
                              "caconnector": "localCA",
                              "openssl.cnf": OPENSSLCNF,
                              "CSRDir": "",
                              "CertificateDir": "",
                              "WorkingDir": cwd + "/" + WORKINGDIR})
        db_token = Token(self.serial2, tokentype="certificate")
        db_token.save()
        token = CertificateTokenClass(db_token)
        # Enrollment variant 2: submit a CSR to the local CA connector.
        token.update({"ca": "localCA",
                      "request": REQUEST})
        self.assertTrue(token.token.serial == self.serial2, token)
        self.assertTrue(token.token.tokentype == "certificate",
                        token.token.tokentype)
        self.assertTrue(token.type == "certificate", token)
        class_prefix = token.get_class_prefix()
        self.assertTrue(class_prefix == "CRT", class_prefix)
        self.assertTrue(token.get_class_type() == "certificate", token)
        detail = token.get_init_detail()
        certificate = detail.get("certificate")
        # At each testrun, the certificate might get another serial number!
        # So only the issuer and subject are checked, not the full PEM.
        x509obj = crypto.load_certificate(crypto.FILETYPE_PEM, certificate)
        self.assertEqual("%r" % x509obj.get_issuer(),
                         "<X509Name object '/C=DE/ST=Hessen"
                         "/O=privacyidea/CN=CA001'>")
        self.assertEqual("%r" % x509obj.get_subject(),
                         "<X509Name object '/C=DE/ST=Hessen"
                         "/O=privacyidea/CN=requester.localdomain'>")
        # Test, if the certificate is also completely stored in the tokeninfo
        # and if we can retrieve it from the tokeninfo
        token = get_tokens(serial=self.serial2)[0]
        certificate = token.get_tokeninfo("certificate")
        x509obj = crypto.load_certificate(crypto.FILETYPE_PEM, certificate)
        self.assertEqual("%r" % x509obj.get_issuer(),
                         "<X509Name object '/C=DE/ST=Hessen"
                         "/O=privacyidea/CN=CA001'>")
        self.assertEqual("%r" % x509obj.get_subject(),
                         "<X509Name object '/C=DE/ST=Hessen"
                         "/O=privacyidea/CN=requester.localdomain'>")
    def test_03_class_methods(self):
        # Class metadata must be retrievable both as a dict and by key.
        db_token = Token.query.filter(Token.serial == self.serial1).first()
        token = CertificateTokenClass(db_token)
        info = token.get_class_info()
        self.assertTrue(info.get("title") == "Certificate Token", info)
        info = token.get_class_info("title")
        self.assertTrue(info == "Certificate Token", info)
| woddx/privacyidea | tests/test_lib_tokens_certificate.py | Python | agpl-3.0 | 7,454 |
"""
This module implements the quicksort algorithm.
"""
from random import choice
def quick_sort(arr, key=lambda x: x):
    """
    Perform a quicksort on a given list. Return the sorted list.

    The input list is left unmodified; a new list is returned.  ``key``
    extracts the comparison value from each element, as in the built-in
    ``sorted``.

    Example:
    >>> quick_sort([1, 5, 7, 2, 3, 4, 1])
    [1, 1, 2, 3, 4, 5, 7]

    >>> quick_sort([
    ...     {"name": "Fox", "age": 13},
    ...     {"name": "John Doe", "age": 12},
    ...     {"name": "Wumpus", "age": 15}], key=lambda x: x["age"])
    [{'age': 12, 'name': 'John Doe'}, {'age': 13, 'name': 'Fox'}, {'age': 15, 'name': 'Wumpus'}]
    """
    if len(arr) <= 1:
        return list(arr)
    # Random pivot avoids the quadratic worst case on already-sorted input.
    pivot = key(choice(arr))
    smaller = [item for item in arr if key(item) < pivot]
    equal = [item for item in arr if key(item) == pivot]
    larger = [item for item in arr if key(item) > pivot]
    return quick_sort(smaller, key) + equal + quick_sort(larger, key)
| jcotton42/libuseful | stubs/sorts/quicksort.py | Python | gpl-3.0 | 550 |
#!/usr/bin/env python
# -*- coding: ascii -*-
r"""
ConfigFile wraps a configparser file as an in-memory container with a few
more options.
such as:
mycfg['BadNews','IQ'] = 'very low'
x = mycfg['BadNews','IQ']
del mycfg['BadNews','IQ']
mycfg.save_file()
myDict = mycfg.get_dictionary()
Save to file can be triggered by a "has_changes" flag.
ConfigFile
Copyright (C) 2015 Charlie Taylor
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
-----------------------
"""
# for multi-file projects see LICENSE file for authorship info
# for single file projects, insert following information
__author__ = 'Charlie Taylor'
__copyright__ = 'Copyright (c) 2013 Charlie Taylor'
__license__ = 'GPL-3'
__version__ = '0.1.1' #Versioning: http://www.python.org/dev/peps/pep-0386/
__email__ = "cet@appliedpython.com"
__status__ = "Development" # "Prototype", "Development", or "Production"
#
# import statements here. (built-in first, then 3rd party, then yours)
import sys
import os
import configparser
from configparser import NoOptionError, NoSectionError
class ConfigInterface(object):
    """ConfigFile wraps a configparser file as an in-memory container.

    Supports mapping-style access keyed by (section, option) tuples:

        mycfg['BadNews', 'IQ'] = 'very low'
        x = mycfg['BadNews', 'IQ']
        del mycfg['BadNews', 'IQ']

    ``has_changes`` is set by any mutation and cleared by save_file();
    ``state_id_number`` increments on every change.
    """
    def __init__(self, config_filename='myconfig.cfg', sectionL=None):
        """Inits ConfigInterface with a ConfigParser and a config file name.

        :param config_filename: name of config file (data files also in config format)
        :param sectionL: a list of section headings in config file
        :type config_filename: string
        :type sectionL: list of strings
        """
        self.has_changes = False # If True, then a save_file should be done
        self.state_id_number = 0 # is incremented each time a change takes place
        self.config_filename= os.path.abspath( config_filename )
        if config_filename.lower().endswith('.cfg'):
            print 'Config File:', self.config_filename
        else:
            print 'Data File:', self.config_filename
        #self.config = configparser.SafeConfigParser()
        # RawConfigParser: no '%' interpolation of values.
        self.config = configparser.RawConfigParser()
        # Preserve option-name case (the default optionxform lower-cases).
        self.config.optionxform = str
        if os.path.isfile(self.config_filename):
            self.config.read(self.config_filename)
        # Ensure requested sections exist; creating one counts as a change.
        if sectionL:
            for section in sectionL:
                if not self.config.has_section( section ):
                    self.config.add_section(section)
                    self.has_changes = True
                    self.state_id_number += 1
    def get_sectionL(self):
        """Return a list of all sections in config file"""
        return self.config.sections()
    def get_optionL(self, section):
        """Return a list of options for section in config file"""
        return self.config.options(section)
    def get_dictionary(self):
        """Make a dictionary representation of config file"""
        D = {} # build from empty dictionary
        for section in self.config.sections():
            sectD = {} # section dictionary
            for option in self.config.options(section):
                sectD[option] = self.config.get(section, option)
            D[section] = sectD
        return D
    def set(self, section, option, value):
        """Calls configparser set method and sets self.has_changes=True"""
        self.config.set(section, option, value)
        self.has_changes = True
        self.state_id_number += 1
    def has_section(self, section):
        """Calls configparser has_section method"""
        return self.config.has_section(section)
    def has_option(self, section, option):
        """Calls configparser has_option method"""
        return self.config.has_option(section, option)
    def save_file(self):
        """Saves self.config to self.config_filename
        Also sets self.has_changes=False
        """
        with open(self.config_filename, 'w') as configfile:
            self.config.write( configfile )
        # if has_changes is reset to True, then another save_file should be done
        self.has_changes = False
    def delete_file(self):
        """Deletes self.config_filename (the in-memory config is kept)"""
        if os.path.isfile( self.config_filename ):
            os.remove( self.config_filename )
    def __getitem__(self, key_tup):
        """Allows data access such as:
        mycfg['BadNews','IQ']

        Returns None when the section/option is missing (and implicitly
        when key_tup is not a 2-tuple).
        """
        if len(key_tup)==2:
            section, option = key_tup
            try:
                return self.config.get(section, option)
            except (NoOptionError, NoSectionError):
                return None
    def __setitem__(self, key_tup, value):
        """Allows assignments such as:
        mycfg['BadNews','IQ'] = 'very low'
        (changes self.has_changes to True as a side-effect)
        """
        if len(key_tup)==2:
            section, option = key_tup
            self.set(section, option, value)
    def __delitem__(self, key_tup):
        """Allows removal of item as:
        del mycfg['BadNews','IQ']
        (changes self.has_changes to True as a side-effect)
        """
        if len(key_tup)==2:
            section, option = key_tup
            try:
                # NOTE(review): state_id_number is bumped before we know the
                # removal succeeded; a missing section raises NoSectionError,
                # which is swallowed below.
                self.state_id_number += 1
                self.config.remove_option(section, option)
                self.has_changes = True
            except (NoOptionError, NoSectionError):
                pass # if not deletable, don't worry about it
# Smoke test: constructing the interface prints the resolved config path.
if __name__ == '__main__':
    C = ConfigInterface()
| sonofeft/Qt4_GenGUI | qt4_gengui/config_file.py | Python | lgpl-3.0 | 6,187 |
import requests
import terminatorlib.plugin as plugin
from gi.repository import Gtk
from terminatorlib.config import ConfigBase
from terminatorlib.translation import _
from terminatorlib.util import get_config_dir, err, dbg, gerr
AVAILABLE = ['TerminatorThemes']
class TerminatorThemes(plugin.Plugin):
    """Terminator plugin: browse, install and remove terminal colour themes
    published in the EliverLara/terminator-themes GitHub repository.
    Themes are fetched as JSON and turned into Terminator profiles.
    """
    capabilities = ['terminal_menu']
    config_base = ConfigBase()
    # Raw themes.json via the GitHub contents API (Accept: raw).
    base_url = 'https://api.github.com/repos/EliverLara/terminator-themes/contents/themes.json'
    # Name of the profile from which a newly installed theme inherits settings.
    inherits_config_from = "default"
    def callback(self, menuitems, menu, terminal):
        """Add our item to the menu"""
        self.terminal = terminal
        item = Gtk.ImageMenuItem(Gtk.STOCK_FIND)
        item.connect('activate',self.configure)
        item.set_label("Themes")
        item.set_sensitive(True)
        menuitems.append(item)
    def configure(self, widget, data = None):
        """Fetch the theme list from GitHub and run the modal chooser dialog."""
        ui = {}
        dbox = Gtk.Dialog( _("Terminator themes"), None, Gtk.DialogFlags.MODAL)
        headers = { "Accept": "application/vnd.github.v3.raw" }
        response = requests.get(self.base_url, headers=headers)
        if response.status_code != 200:
            gerr(_("Failed to get list of available themes"))
            return
        self.themes_from_repo = response.json()["themes"]
        self.profiles = self.terminal.config.list_profiles()
        main_container = Gtk.HBox(spacing=7)
        main_container.pack_start(self._create_themes_list(ui), True, True, 0)
        main_container.pack_start(self._create_settings_grid(ui), True, True, 0)
        dbox.vbox.pack_start(main_container, True, True, 0)
        self.dbox = dbox
        dbox.show_all()
        res = dbox.run()
        if res == Gtk.ResponseType.ACCEPT:
            self.terminal.config.save()
        del(self.dbox)
        dbox.destroy()
        return
    def _create_themes_list(self, ui):
        """Build the scrollable theme list; column 1 marks 'not yet installed'."""
        profiles_list_model = Gtk.ListStore(str, bool, object)
        # Set add/remove buttons availability
        for theme in self.themes_from_repo:
            if theme["name"] in self.profiles:
                profiles_list_model.append([theme["name"], False, theme])
            else:
                profiles_list_model.append([theme["name"], True, theme])
        treeview = Gtk.TreeView(profiles_list_model)
        selection = treeview.get_selection()
        selection.set_mode(Gtk.SelectionMode.SINGLE)
        selection.connect("changed", self.on_selection_changed, ui)
        ui['treeview'] = treeview
        renderer_text = Gtk.CellRendererText()
        column_text = Gtk.TreeViewColumn("Theme", renderer_text, text=0)
        treeview.append_column(column_text)
        scroll_window = Gtk.ScrolledWindow()
        scroll_window.set_size_request(300, 250)
        scroll_window.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
        scroll_window.add_with_viewport(treeview)
        return scroll_window
    def _create_settings_grid(self, ui):
        """Build the right-hand pane: inherit options plus install/remove buttons."""
        grid = Gtk.Grid()
        grid.set_column_spacing(5)
        grid.set_row_spacing(7)
        grid.attach(self._create_default_inherits_check(ui), 0, 0, 2, 1)
        grid.attach(Gtk.Label("Available profiles: "), 0, 1, 1, 1)
        grid.attach(self._create_inherits_from_combo(ui), 1, 1, 1, 1)
        grid.attach(self._create_main_action_button(ui, "install", self.on_install), 0, 4, 1, 1)
        grid.attach(self._create_main_action_button(ui, "remove", self.on_uninstall), 1, 4, 1, 1)
        return grid
    def _create_default_inherits_check(self, ui):
        """Checkbox: inherit new theme settings from the 'default' profile."""
        check = Gtk.CheckButton("Inherit preferences from default profile")
        check.set_active(True)
        check.connect("toggled", self.on_inheritsfromdefaultcheck_toggled, ui)
        ui['check_inherits_from_default'] = check
        return check
    def _create_inherits_from_combo(self, ui):
        """Combo listing existing profiles to inherit from (disabled by default)."""
        combo = Gtk.ComboBoxText()
        combo.set_entry_text_column(0)
        combo.set_sensitive(False)
        combo.connect("changed", self.on_inheritsfromcombo_changed, ui)
        ui['inherits_from_combo'] = combo
        for profile in self.profiles:
            combo.append_text(profile)
        combo.set_active(self.profiles.index(self.terminal.config.get_profile()))
        return combo
    def _create_main_action_button(self, ui, label, action):
        """Build an initially-disabled action button; stored as ui['button_<label>']."""
        btn = Gtk.Button(_(label.capitalize()))
        btn.connect("clicked", action, ui)
        btn.set_sensitive(False)
        ui['button_' + label] = btn
        return btn
    def on_inheritsfromdefaultcheck_toggled(self, check, data=None):
        """Toggle between inheriting from 'default' and the combo's selection."""
        if check.get_active() is not True:
            data["inherits_from_combo"].set_sensitive(True)
            self.inherits_config_from = self.profiles[data['inherits_from_combo'].get_active()]
        else:
            data["inherits_from_combo"].set_sensitive(False)
            self.inherits_config_from = 'default'
    def on_inheritsfromcombo_changed(self, combo, data):
        """Track the combo selection while the combo is enabled."""
        if combo.get_sensitive():
            self.inherits_config_from = self.profiles[combo.get_active()]
        else:
            self.inherits_config_from = 'default'
    def on_selection_changed(self, selection, data=None):
        """Enable exactly one of install/remove depending on installed state."""
        (model, iter) = selection.get_selected()
        data['button_install'].set_sensitive(model[iter][1])
        data['button_remove'].set_sensitive(model[iter][1] is not True)
    def on_uninstall(self, button, data):
        """Remove the selected theme's profile from Terminator."""
        treeview = data['treeview']
        selection = treeview.get_selection()
        (store, iter) = selection.get_selected()
        target = store[iter][0]
        # If selected theme is active, sets terminal profile to default before uninstalling
        # NOTE(review): other methods use self.terminal.config.get_profile();
        # confirm Terminal itself exposes get_profile() here.
        if self.terminal.get_profile() == target:
            widget = self.terminal.get_vte()
            self.terminal.force_set_profile(widget, 'default')
        self.terminal.config.del_profile(target)
        self.terminal.config.save()
        self.update_comboInheritsFrom(data)
        #'Add' button available again
        data['treeview'].get_model().set_value(iter, 1, True)
        self.on_selection_changed(selection, data)
    def on_install(self, button, data):
        """Create a new profile from the selected theme and activate it."""
        treeview = data['treeview']
        selection = treeview.get_selection()
        (store, iter) = selection.get_selected()
        target = store[iter][2]
        widget = self.terminal.get_vte()
        treeview.set_enable_tree_lines(False)
        if not iter:
            return
        self.terminal.config.add_profile(target["name"])
        # Start from the chosen base profile, then overlay the theme's keys.
        template_data = self.config_base.profiles[self.inherits_config_from].copy()
        for k, v in target.items():
            if k != 'background_image' and k != 'name' and k != 'type':
                if k == 'background_darkness':
                    template_data[k] = float(v)
                else:
                    template_data[k] = v
        for k, v in template_data.items():
            self.config_base.set_item(k, v, target["name"])
        self.terminal.force_set_profile(widget, target["name"])
        self.terminal.config.save()
        self.update_comboInheritsFrom(data)
        # "Remove" button available again
        data['treeview'].get_model().set_value(iter, 1, False)
        self.on_selection_changed(selection, data)
        treeview.set_enable_tree_lines(True)
    def update_comboInheritsFrom(self, data):
        """Refresh the inherit-from combo after the profile list changed."""
        data['inherits_from_combo'].remove_all()
        profiles = self.terminal.config.list_profiles()
        self.profiles = profiles
        for profile in profiles:
            data['inherits_from_combo'].append_text(profile)
        data['inherits_from_combo'].set_active(profiles.index(self.terminal.config.get_profile()))
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Intentionally empty migration, kept so migration history stays linear."""
    dependencies = [
        ('reviewers', '0003_auto_20150727_1017'),
    ]
    # An incorrect constraint was added in this migration, and was removed due
    # to bug 1195292.
    operations = []
| ingenioustechie/zamboni | mkt/reviewers/migrations/0004_fix_unique.py | Python | bsd-3-clause | 347 |
import meta
import sage
from sage import player, triggers, aliases
from sage.signals import pre_shutdown
from sage.signals.gmcp import skills, vitals
import time
import MySQLdb as mysql
import MySQLdb.cursors
class HealthTracker(object):
    """Tracks absolute health/mana levels plus smoothed gain/loss rates.

    ``update_health_stats`` receives raw vitals samples and accumulates the
    per-interval deltas; ``update_health_gain`` periodically folds those
    accumulators into exponentially smoothed rates and resets them.
    """

    def __init__(self):
        # Current absolute levels as last reported.
        self.health = 100
        self.mana = 100
        # Previously seen levels; 0 means "no sample received yet".
        self.last_health = 0
        self.last_mana = 0
        # Smoothed rates and current-interval accumulators for health.
        self.ema_health_gain = 0
        self.ema_health_loss = 0
        self.cur_health_gain = 0
        self.cur_health_loss = 0
        # Smoothed rates and current-interval accumulators for mana.
        self.ema_mana_gain = 0
        self.ema_mana_loss = 0
        self.cur_mana_gain = 0
        self.cur_mana_loss = 0

    def update_health_stats(self, **kwargs):
        """Record one vitals sample; accumulate deltas since the previous one."""
        health_now = kwargs['health'].value
        mana_now = kwargs['mana'].value
        self.health = health_now
        self.mana = mana_now
        if not self.last_health:
            # First sample: seed the baselines so no spurious delta appears.
            self.last_health = health_now
            self.last_mana = mana_now
        delta_health = health_now - self.last_health
        delta_mana = mana_now - self.last_mana
        if delta_health > 0:
            self.cur_health_gain += delta_health
        elif delta_health < 0:
            self.cur_health_loss -= delta_health
        if delta_mana > 0:
            self.cur_mana_gain += delta_mana
        elif delta_mana < 0:
            self.cur_mana_loss -= delta_mana
        self.last_health = health_now
        self.last_mana = mana_now

    def update_health_gain(self, trigger):
        """Fold the interval accumulators into the smoothed rates, then reset."""
        self.ema_health_gain = self.ema_health_gain * .1 + self.cur_health_gain
        # Health loss is clamped so a burst is never under-reported.
        self.ema_health_loss = max(
            self.ema_health_loss * .1 + .9 * self.cur_health_loss,
            self.cur_health_loss)
        self.cur_health_gain = 0
        self.cur_health_loss = 0
        self.ema_mana_gain = self.ema_mana_gain * .5 + self.cur_mana_gain
        self.ema_mana_loss = self.ema_mana_loss * .5 + self.cur_mana_loss
        self.cur_mana_gain = 0
        self.cur_mana_loss = 0
# Module-level tracker instance, wired to GMCP vitals updates below.
tracker = HealthTracker()
# Trigger group for health-related game lines.
health_trigs = triggers.create_group('health', app='explorer')
health_trigs.enable()
@health_trigs.exact("You may drink another health or mana elixir.")
def snap_health(trigger):
    # Elixir-balance tick: fold the interval accumulators into smoothed rates.
    tracker.update_health_gain(trigger)
vitals.connect(tracker.update_health_stats)
| danielja/mudpy | health_tracker/health_tracker.py | Python | gpl-2.0 | 2,521 |
#!/usr/bin/env python
"""This module contains the Image class, for image processing operations.
The operations implemented in the class methods are focused to the
images obtained from the external FPGAs in the UviSpace project. Thus,
once obtained a grey scale image, this module provide functions for
getting the shapes (triangles) in the image, and then their vertices.
Prior to segment the image, a binarization has to be applied, as it
eases the segmentation process.
Important note
--------------
A point array is written by convention in the form
*[row, column]*. In a cartesian system, the points are expressed
as *(x,y)*. Finally, in an image representation (viewer), the typical is
to display points coordinates as *(x', y')*. They equivalences are the
following:
.. math::
x = x' = column
y = -y' = -row
Thus, special care has to be taken when dealing with operations in
different scopes e.g. trigonometric operations will be handled with the
cartesian values, while image operations are normally performed with the
array convention. Finally, when sending the array values to a viewer or
to an external device, the image representation mentioned above is
the typical used system.
"""
# Standard libraries
import logging
import sys
# Third party libraries
import cv2
import numpy as np
import skimage.measure
import skimage.morphology
# Local libraries
import geometry
try:
# Logging setup.
import settings
except ImportError:
# Exit program if the settings module can't be found.
sys.exit("Can't find settings module. Maybe environment variables are not"
"set. Run the environment .sh script at the project root folder.")
logger = logging.getLogger("sensor")
class Image(object):
    """Class with image processing methods oriented to UGV detection.

    :param np.array image: original grey scale image.
    :param list contours: each element is an Mx2 array containing M
     points defining a closed contour.
    """
    def __init__(self, image, contours=None):
        """Image class constructor. Set image and contours attributes.

        BUGFIX: ``contours`` previously defaulted to a mutable ``[]``.
        Because default arguments are evaluated once, every instance built
        without an explicit contour list aliased the *same* list object,
        and in-place updates (e.g. from correct_distortion) leaked between
        instances.  ``None`` is now the sentinel; an empty list is created
        per instance.
        """
        self.image = image
        self._binarized = None
        self.triangles = []
        self.contours = [] if contours is None else contours
    def binarize(self, thresholds):
        """Get a binarized image from a grey image given the thresholds.

        The input image can only have one dimension. This method is
        intended to work with 3-component threshold values stored in a
        single 30-bit register:

        * *register[0 to 10]* : red component thresholds
        * *register[10 to 20]* : green component thresholds
        * *register[10 to 30]* : blue component thresholds

        The raw binary image contains a lot of noise. As it is very low
        around the triangles, masks around them are used to get rid of
        the noise in the rest of the image.

        :param [int or float, int or float] thresholds : minimum and
         maximum values between whom the image intensity values will be
         accepted as 1 (rescaled to 255). Values greater than the
         maximum and smaller than the minimum will be truncated to 0.
        :return bin_image: Image of the same size as the input image
         with only 255 or 0 values (Equivalent to 1 and 0), according
         to the input threshold values.
        :rtype: binary numpy.array(shape=MxN)
        """
        # Obtain the thresholds in base 2 and get the red component slice.
        th_min = bin(thresholds[0])
        th_max = bin(thresholds[1])
        red_c = (th_min[-30:-20], th_max[-30:-20])
        # NOTE(review): the /4 scaling is unexplained upstream -- presumably
        # the FPGA registers hold 10-bit values while the image is 8-bit.
        # TODO confirm against the FPGA register documentation.
        thr_min = int(red_c[0], 2) / 4
        thr_max = int(red_c[1], 2) / 4
        logger.debug("Thresholding between {} and {}"
                     .format(thr_min, thr_max))
        # The first binary approach is obtained evaluating 2 thresholds
        raw_binarized = cv2.inRange(self.image, thr_min, thr_max)
        # A simple erosion gets rid of the whole noise. Dilating the eroded
        # image several times provides an acceptable ROI for the binary mask.
        kernel = np.ones((5, 5), np.uint8)
        erosion = cv2.erode(raw_binarized, kernel, iterations=1)
        kernel = np.ones((5, 5), np.uint8)
        dilate = cv2.dilate(erosion, kernel, iterations=5)
        mask = dilate / 255
        filtered = raw_binarized * mask
        # Eliminate holes inside the detected shapes
        labels = skimage.morphology.label(filtered)
        label_count = np.bincount(labels.ravel())
        # Detect the background pixels, assuming they are majority in the image.
        background = np.argmax(label_count)
        self._binarized = filtered
        # Every connected component except the background is filled solid.
        self._binarized[labels != background] = 255
        logger.debug("Image binarization finished")
        return self._binarized
    def correct_distortion(self, kx=0.035, ky=0.035, only_contours=True):
        """Correct barrel distortion on contours or on the whole image.

        The distortion is corrected using a 2nd polynomial equation for
        every pixel with coordinates :math:`(X_d, Y_d)`. The resulting
        corrected coordinates :math:`(X_u, Y_u)` are obtained with the
        following equations:

        .. math::
            X_u &=(X_d - C_x) * (1 + k_x * r^2) + C_x \\
            Y_u &= (Y_d - C_y) * (1 + k_y * r^2) + C_y \\
            r &= [(X_d - C_x)^2 + (Y_d - C_y)^2] / [(C_x^2 + C_y^2) * 2]

        :param float kx: X-Axe Distortion coefficient of the lens.
        :param float ky: Y-Axe Distortion coefficient of the lens.
        :param bool only_contours: Specify if the correction is to be
         applied to the whole image or only to the contours.
        """
        # Calculate the image center as the middle point of the width and height
        center = np.array(self.image.shape) / 2
        # If contours is an empty list, algorithm is not outperformed.
        if only_contours and self.contours:
            for index, cnt in enumerate(self.contours):
                distance = cnt - center
                # Calculate the r distance. First numerator and then denominator
                r = (distance ** 2).sum(axis=1).astype(np.float)
                r /= (center ** 2).sum() * 2
                coeffs = np.array([r*ky, r*kx]).transpose() + 1
                corrected = distance * coeffs + center
                self.contours[index] = corrected
        elif not only_contours:
            # Whole-image correction is not implemented; intentional no-op.
            pass
    def get_shapes(self, tolerance=8, get_contours=True):
        """Get the shapes' vertices in the binarized image.

        Update the *self.triangles* attribute.

        The shape is obtained using the *Marching Cubes Algorithm*.
        Once obtained, the vertices are calculated using the
        *Ramer-Douglas-Peucker Algorithm*. Both are implemented on the
        *skimage* library, and there is more information on its docs.

        if the kwarg *get_contours* if False, it is assumed that the
        contours are already known (stored in variable *self.contours*).
        If this is the case, the marching cubes algorithm is omitted.

        :param float tolerance: minimum distance between an observed
         pixel and the previous vertices pixels required to add the
         first one to the vertices list.
        :param bool get_contours: specify if the *Marching Cubes
         Algorithm* is applied to the binarized image. Specifically set
         to False when the binarization algorithm is implemented in the
         external device (i.e. the FPGA).
        :return: vertices of the N shapes detected on the
         image. each element contains an Mx2 *np.rray* with the
         coordinates of the M vertices of the shape.
        :rtype: list
        """
        logger.debug("Getting the shapes' vertices in the image")
        # Obtain a list with all the contours in the image, separating each
        # shape in a different element of the list
        if get_contours:
            self.contours = skimage.measure.find_contours(self._binarized, 200)
        self.triangles = []
        # Get the vertices of each shape in the image.
        for cnt in self.contours:
            coords = skimage.measure.approximate_polygon(cnt, tolerance)
            max_coords = np.array(self.image.shape) - 1
            # Sometimes, the initial vertex is repeatead at the end.
            # Thus, if len is 3 and vertex is NOT repeated, it is a triangle
            if len(coords) == 3 and (not np.array_equal(coords[0], coords[-1])):
                triangle = geometry.Triangle(
                        np.clip(coords, [0,0], max_coords))
                self.triangles.append(triangle)
            # If len is 4 and vertex IS repeated, it is a triangle
            if len(coords) == 4 and np.array_equal(coords[0], coords[-1]):
                triangle = geometry.Triangle(np.clip(coords[1:],
                                             [0,0], max_coords))
                self.triangles.append(triangle)
            logger.debug("A {}-vertices shape was found".format(len(coords)))
        return self.triangles
| UviDTE-UviSpace/UviSpace | uvispace/uvisensor/imgprocessing.py | Python | gpl-3.0 | 9,235 |
import _plotly_utils.basevalidators
class ValuesValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``values`` data array of ``parcoords.dimension``."""
    def __init__(
        self, plotly_name="values", parent_name="parcoords.dimension", **kwargs
    ):
        super(ValuesValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Changing this property triggers a full recalculation ("calc").
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/parcoords/dimension/_values.py | Python | mit | 422 |
#!/usr/bin/env python
# -*- encoding: UTF-8 -*-
# This file is part of Addison Arches.
#
# Addison Arches is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Addison Arches is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Addison Arches. If not, see <http://www.gnu.org/licenses/>.
import asyncio
import cmd
from collections import Counter
from collections import defaultdict
from collections import namedtuple
import concurrent.futures
import datetime
from decimal import Decimal
import getpass
import itertools
import operator
import os.path
import random
import sys
import uuid
from turberfield.ipc.message import Alert
from turberfield.ipc.message import parcel
from addisonarches.business import CashBusiness
from addisonarches.business import Buying
from addisonarches.business import Selling
from addisonarches.business import Trader
from addisonarches.cli import parsers
import addisonarches.game
from addisonarches.game import Clock
from addisonarches.game import Game
from addisonarches.game import Persistent
from addisonarches.scenario.types import Location
from addisonarches.scenario.types import Character
from addisonarches.utils import get_objects
from addisonarches.utils import group_by_type
from addisonarches.utils import query_object_chain
from addisonarches.valuation import Ask
from addisonarches.valuation import Bid
def create_local_console(progress, down, up, loop=None):
    """Build a Console and schedule its routines on *loop*.

    NOTE(review): although *loop* defaults to None, loop.create_task is
    called unconditionally, so callers must supply a running event loop.
    """
    console = Console(progress, down, up, loop=loop)
    # One worker per routine plus headroom; blocking stdin reads run here.
    executor = concurrent.futures.ThreadPoolExecutor(
        max(4, len(console.routines) + 1)
    )
    for coro in console.routines:
        loop.create_task(coro(executor, loop=loop))
    return console
class Console(cmd.Cmd):
    """Interactive text console for the Addison Arches game.
    Bridges blocking stdin reads (via an executor) with the game's asyncio
    message queues: parsed commands become parcels pushed on ``up``;
    replies arrive on ``down``; world state is re-read from ``progress``
    after every command.
    """
    def __init__(self, progress, down, up, *args, loop=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.progress = progress  # handle used by get_objects() to read state
        self.down = down  # queue: game -> console
        self.up = up  # queue: console -> game
        self.commands = asyncio.Queue(loop=loop)
        self.prompt = "Type 'help' for commands > "
        # Timestamp of the latest Clock.Tick; attached to Ask/Bid offers.
        self.ts = None
    @staticmethod
    def get_command(prompt):
        """Blocking stdin read; returns 'EOF' at end of stream.
        NOTE(review): *prompt* is accepted but not printed here; the prompt
        is written by command_loop.
        """
        line = sys.stdin.readline()
        if not len(line):
            line = "EOF"
        else:
            line = line.rstrip("\r\n")
        return line
    @property
    def routines(self):
        """Coroutines to schedule on the event loop at start-up."""
        return [self.command_loop, self.input_loop]
    @asyncio.coroutine
    def input_loop(self, executor, loop=None):
        """Read stdin lines in *executor* threads and feed self.commands."""
        line = ""
        while not line.lower().startswith("quit"):
            try:
                line = yield from asyncio.wait_for(
                    loop.run_in_executor(
                        executor,
                        self.get_command,
                        self.prompt
                    ),
                    timeout=None,
                    loop=loop)
            except asyncio.TimeoutError:
                pass
            yield from self.commands.put(line)
    @asyncio.coroutine
    def command_loop(self, executor, loop=None):
        """Main loop: run each queued command, then print the world state.
        Stops the event loop (cancelling all tasks) after 'quit'.
        """
        line = ""
        locn = None
        self.preloop()
        while not line.lower().startswith("quit"):
            sys.stdout.write(self.prompt)
            sys.stdout.flush()
            line = yield from self.commands.get()
            try:
                line = self.precmd(line)
                msg = self.onecmd(line)
                if msg is not None:
                    # Command produced a parcel: send it and await the reply.
                    yield from self.up.put(msg)
                    reply = yield from self.down.get()
                stop = self.postcmd(msg, line)
                if stop:
                    # TODO: Send 'stop' msg to game (up)
                    break
            except Exception as e:
                print(e)
            # Re-read and narrate the current game state.
            data = get_objects(self.progress)
            objs = group_by_type(data)
            #print(*list(objs.items()), sep="\n")
            locn = next(iter(objs[Location]), None)
            print("You're at {}.".format(getattr(locn, "name", "?")))
            for bystander in objs[Character]:
                print("{0.name} is nearby.".format(bystander))
            drama = next(iter(objs[Game.Drama]), None)
            print("You're in a {0.mood} mood.".format(drama))
            tally = query_object_chain(data, "name", "cash")
            print("You've got {0.units}{0.value} in the kitty.".format(tally))
            for actor, patter in itertools.groupby(objs[Trader.Patter], operator.attrgetter("actor")):
                print("{0[1]} says, ".format(actor))
                phrases = list(patter)
                for n, phrase in enumerate(phrases):
                    if n == len(phrases) - 1:
                        print("'{0}'.".format(phrase.text.rstrip('.')))
                    else:
                        print("'{0.text}'".format(phrase))
            for alert in objs[Alert]:
                print("{0.text}".format(alert))
        self.postloop()
        sys.stdout.flush()
        for task in asyncio.Task.all_tasks(loop):
            task.cancel()
        loop.stop()
    def precmd(self, line):
        """cmd.Cmd hook; lines are passed through unchanged."""
        return line
    def postcmd(self, msg, line):
        "Potential 'game over' decisions."
        # Refresh the clock: update self.ts and show the in-game time.
        # NOTE(review): assumes a Clock.Tick is always present; a missing
        # tick would raise AttributeError here -- confirm with game module.
        data = get_objects(self.progress)
        objs = group_by_type(data)
        tick = next(iter(objs[Clock.Tick]), None)
        self.ts = tick.ts
        t = datetime.datetime.strptime(tick.value, "%Y-%m-%d %H:%M:%S")
        self.prompt = "{:%A %H:%M} > ".format(t)
        # TODO: Send 'stop' to game (down)
        return line.startswith("quit") and msg is None
    def do_buy(self, arg):
        """
        'Buy' lists items you can buy. Supply a number from
        that menu to buy a specific item, eg::

            > buy
            (a list will be shown)
            > buy 3
        """
        line = arg.strip()
        #view = self.game.here.inventories[self.game.location].contents.items()
        data = get_objects(self.progress)
        progress = group_by_type(data)
        totals = Counter(progress[Game.Item])
        # NOTE(review): menu order comes from a set() and is therefore not
        # guaranteed stable between the listing call and the numbered
        # selection call -- confirm intended behaviour.
        menu = list(set(progress[Game.Item]))
        if not line:
            print("Here's what you can buy:")
            print(
                *["{0:01}: {1.label} ({2})".format(n, i, totals[i])
                  for n, i in enumerate(menu) if totals[i]],
                sep="\n")
            sys.stdout.write("\n")
        elif line.isdigit():
            item = menu[int(line)]
            drama = Buying(memory=[item])
            msg = parcel(None, drama)
            return msg
    def do_ask(self, arg):
        """
        'Ask' demands money for an item, eg::

            > ask 50
        """
        line = arg.strip()
        if line.isdigit():
            offer = Ask(self.ts, int(line), "£")
            msg = parcel(None, offer)
            return msg
    def do_bid(self, arg):
        """
        'Bid' offers money for an item, eg::

            > bid 35
        """
        line = arg.strip()
        if line.isdigit():
            offer = Bid(self.ts, int(line), "£")
            msg = parcel(None, offer)
            return msg
    def do_sell(self, arg):
        """
        'Sell' lists items you can sell. Supply a number from
        that menu to sell a specific item, eg::

            > sell
            (a list will be shown)
            > sell 3
        """
        line = arg.strip()
        # Player inventory lives in a separate progress file.
        data = get_objects(self.progress._replace(file="inventory.rson"))
        view = Counter(data).items()
        if not line:
            print("Here's what you can sell:")
            print(
                *["{0:01}: {1.label} ({2})".format(n, k, v)
                  for n, (k, v) in enumerate(view)],
                sep="\n")
            sys.stdout.write("\n")
        elif line.isdigit():
            k, v = list(view)[int(line)]
            drama = Selling(memory=[k])
            msg = parcel(None, drama)
            return msg
    def do_go(self, arg):
        """
        'Go' lists places you can go. Supply a number from
        that menu to travel to a specific location, eg::

            > go
            (a list will be shown)
            > go 3
        """
        line = arg.strip()
        data = get_objects(self.progress)
        progress = group_by_type(data)
        if not line:
            print("Here's where you can go:")
            print(*["{0:01}: {1}".format(i.id, i.name) for i in progress[Game.Via]],
                  sep="\n")
            sys.stdout.write("\n")
        elif line.isdigit():
            via = progress[Game.Via][int(line)]
            msg = parcel(None, via)
            return msg
    def do_look(self, arg):
        """
        'Look' tells you where you are and what you can see.
        Add a number from that menu to get specific details, eg::

            > look
            (a list will be shown)
            > look 2
            (more details may follow)
        """
        line = arg.strip()
        data = get_objects(self.progress)
        progress = group_by_type(data)
        totals = Counter(progress[Game.Item])
        menu = list(set(progress[Game.Item]))
        if not line:
            print("Here's what you can see:")
            if menu:
                print(
                    *["{0:01}: {1.label} ({2})".format(n, i, totals[i])
                      for n, i in enumerate(menu) if totals[i]],
                    sep="\n")
        elif line.isdigit():
            # Fall back to a stock phrase when the item has no description.
            prefix = random.choice([
                "Dunno about the", "No details on the", "Just",
            ])
            item = menu[int(line)]
            print(item.description or "{prefix} {0}{1}.".format(
                item.label.lower(), ("s" if totals[item] > 1 else ""), prefix=prefix
            ))
        sys.stdout.write("\n")
    def do_split(self, arg):
        """
        'Split' tells you what you have that can be taken apart.
        Add a number from that menu to split that item up, eg::

            > split
            (a list will be shown)
            > split 2
            (more details may follow)
        """
        line = arg.strip()
        # Only Compound items from the inventory can be split.
        data = [i
                for i in get_objects(self.progress._replace(file="inventory.rson"))
                if getattr(i, "type", None) == "Compound"
               ]
        view = Counter(data).items()
        if not line:
            print("Here's what you can split:")
            print(
                *["{0:01}: {1.label} ({2})".format(n, k, v)
                  for n, (k, v) in enumerate(view)],
                sep="\n")
            sys.stdout.write("\n")
        elif line.isdigit():
            k, v = list(view)[int(line)]
            msg = parcel(None, k)
            return msg
    def do_wait(self, arg):
        """
        Pass the time quietly.
        """
        return None
    def do_quit(self, arg):
        """
        End the game.
        """
        return None
def main(args):
    """Entry point: create the game, attach a local console, run the loop.

    Always returns 0 (the loop runs until the console stops it).
    """
    user = getpass.getuser()
    name = input("Please enter your name: ")
    path = Persistent.Path(args.output, user, None, None)
    Persistent.make_path(path)
    loop = asyncio.SelectorEventLoop()
    asyncio.set_event_loop(loop)
    # Remnants of an earlier networked (UDP node) transport, kept for reference.
    #down = asyncio.Queue(loop=loop)
    #up = asyncio.Queue(loop=loop)
    #tok = token(args.connect, APP_NAME)
    #node = create_udp_node(loop, tok, down, up)
    #loop.create_task(node(token=tok))
    progress, down, up = addisonarches.game.create(
        args.output, user, name, loop=loop
    )
    console = create_local_console(progress, down, up, loop=loop)
    try:
        loop.run_forever()
    except concurrent.futures.CancelledError:
        pass
    finally:
        loop.close()
    return 0
def run():
    """Parse CLI arguments; print the version or launch the game."""
    p, subs = parsers()
    args = p.parse_args()
    rv = 0
    if args.version:
        sys.stdout.write(addisonarches.__version__ + "\n")
    else:
        rv = main(args)
    # NOTE(review): main() currently always returns 0, so this branch looks
    # unreachable -- confirm whether other entry points can yield rv == 2.
    if rv == 2:
        sys.stderr.write("\n Missing command.\n\n")
        p.print_help()
    sys.exit(rv)
if __name__ == "__main__":
    # Script entry point.
    run()
| tundish/addisonarches | addisonarches/console.py | Python | agpl-3.0 | 12,414 |
#!/usr/bin/python
# (c) 2017, Alberto Murillo <alberto.murillo.silva@intel.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: swupd
short_description: Manages updates and bundles in ClearLinux systems.
description:
- Manages updates and bundles with the swupd bundle manager, which is used by the
Clear Linux Project for Intel Architecture.
version_added: "2.3"
author: Alberto Murillo (@albertomurillo)
options:
contenturl:
description:
- URL pointing to the contents of available bundles.
If not specified, the contents are retrieved from clearlinux.org.
required: false
default: null
format:
description:
- The format suffix for version file downloads. For example [1,2,3,staging,etc].
If not specified, the default format is used.
required: false
default: null
manifest:
description:
      - The manifest contains information about the bundles at a certain version of the OS.
Specify a Manifest version to verify against that version or leave unspecified to
verify against the current version.
required: false
default: null
aliases: [release, version]
name:
description:
- Name of the (I)bundle to install or remove.
required: false
default: null
aliases: [bundle]
state:
description:
- Indicates the desired (I)bundle state. C(present) ensures the bundle
is installed while C(absent) ensures the (I)bundle is not installed.
required: false
default: present
choices: [present, absent]
update:
description:
- Updates the OS to the latest version.
required: false
default: no
url:
description:
- Overrides both I(contenturl) and I(versionurl).
required: false
default: null
verify:
description:
- Verify content for OS version.
required: false
default: null
versionurl:
description:
- URL for version string download.
required: false
default: null
'''
EXAMPLES = '''
- name: Update the OS to the latest version
swupd:
update: yes
- name: Installs the "foo" bundle
swupd:
name: foo
state: present
- name: Removes the "foo" bundle
swupd:
name: foo
state: absent
- name: Check integrity of filesystem
swupd:
verify: yes
- name: Downgrade OS to release 12920
swupd:
verify: yes
manifest: 12920
'''
RETURN = '''
stdout:
description: stdout of swupd
returned: always
type: string
stderr:
description: stderr of swupd
returned: always
type: string
'''
import os
from ansible.module_utils.basic import AnsibleModule
class Swupd(object):
FILES_NOT_MATCH = "files did not match"
FILES_REPLACED = "missing files were replaced"
FILES_FIXED = "files were fixed"
FILES_DELETED = "files were deleted"
def __init__(self, module):
# Fail if swupd is not found
self.module = module
self.swupd_cmd = module.get_bin_path("swupd", False)
if not self.swupd_cmd:
module.fail_json(msg="Could not find swupd.")
# Initialize parameters
for key in module.params.keys():
setattr(self, key, module.params[key])
# Initialize return values
self.changed = False
self.failed = False
self.msg = None
self.rc = None
self.stderr = ""
self.stdout = ""
def _run_cmd(self, cmd):
self.rc, self.stdout, self.stderr = self.module.run_command(cmd, check_rc=False)
def _get_cmd(self, command):
cmd = "%s %s" % (self.swupd_cmd, command)
if self.format:
cmd += " --format=%s" % self.format
if self.manifest:
cmd += " --manifest=%s" % self.manifest
if self.url:
cmd += " --url=%s" % self.url
else:
if self.contenturl and command != "check-update":
cmd += " --contenturl=%s" % self.contenturl
if self.versionurl:
cmd += " --versionurl=%s" % self.versionurl
return cmd
def _is_bundle_installed(self, bundle):
try:
os.stat("/usr/share/clear/bundles/%s" % bundle)
except OSError:
return False
return True
def _needs_update(self):
cmd = self._get_cmd("check-update")
self._run_cmd(cmd)
if self.rc == 0:
return True
if self.rc == 1:
return False
self.failed = True
self.msg = "Failed to check for updates"
def _needs_verify(self):
cmd = self._get_cmd("verify")
self._run_cmd(cmd)
if self.rc != 0:
self.failed = True
self.msg = "Failed to check for filesystem inconsistencies."
if self.FILES_NOT_MATCH in self.stdout:
return True
return False
def install_bundle(self, bundle):
"""Installs a bundle with `swupd bundle-add bundle`"""
if self.module.check_mode:
self.module.exit_json(changed=not self._is_bundle_installed(bundle))
if self._is_bundle_installed(bundle):
self.msg = "Bundle %s is already installed" % bundle
return
cmd = self._get_cmd("bundle-add %s" % bundle)
self._run_cmd(cmd)
if self.rc == 0:
self.changed = True
self.msg = "Bundle %s installed" % bundle
return
if self.rc == 18:
self.msg = "Bundle name %s is invalid" % bundle
return
self.failed = True
self.msg = "Failed to install bundle %s" % bundle
def remove_bundle(self, bundle):
    """Remove a bundle with ``swupd bundle-remove <bundle>``.

    Sets ``self.changed``/``self.msg`` on success, ``self.failed`` on
    error.  In check mode only reports whether a change would occur.
    """
    if self.module.check_mode:
        self.module.exit_json(changed=self._is_bundle_installed(bundle))
    if not self._is_bundle_installed(bundle):
        # Bug fix: the bundle name was never interpolated into the
        # message (the "%s" placeholder was left verbatim).
        self.msg = "Bundle %s not installed" % bundle
        return
    cmd = self._get_cmd("bundle-remove %s" % bundle)
    self._run_cmd(cmd)
    if self.rc == 0:
        self.changed = True
        self.msg = "Bundle %s removed" % bundle
        return
    self.failed = True
    self.msg = "Failed to remove bundle %s" % bundle
def update_os(self):
    """Update the OS with ``swupd update``.

    Sets ``self.changed``/``self.msg`` on success, ``self.failed`` on
    error.  In check mode only reports whether an update is available.
    """
    if self.module.check_mode:
        self.module.exit_json(changed=self._needs_update())
    if not self._needs_update():
        # Bug fix: if the check itself failed, _needs_update() already
        # recorded a failure message -- do not clobber it with the
        # "no updates" text.
        if not self.failed:
            self.msg = "There are no updates available"
        return
    cmd = self._get_cmd("update")
    self._run_cmd(cmd)
    if self.rc == 0:
        self.changed = True
        self.msg = "Update successful"
        return
    self.failed = True
    # Bug fix: the failure message previously said "Failed to check for
    # updates" although the update itself failed.
    self.msg = "Failed to update the OS"
def verify_os(self):
    """Verify the filesystem against the specified or current version.

    Runs ``swupd verify --fix`` only when a read-only verify pass found
    mismatching files; success requires swupd to report files replaced,
    fixed or deleted.
    """
    if self.module.check_mode:
        self.module.exit_json(changed=self._needs_verify())
    if not self._needs_verify():
        # Bug fix: typo "where" -> "were" in the user-facing message.
        self.msg = "No files were changed"
        return
    cmd = self._get_cmd("verify --fix")
    self._run_cmd(cmd)
    # Only count the fix as a change when swupd's output confirms that
    # files were actually replaced, fixed or deleted.
    if self.rc == 0 and (self.FILES_REPLACED in self.stdout or self.FILES_FIXED in self.stdout or self.FILES_DELETED in self.stdout):
        self.changed = True
        self.msg = "Fix successful"
        return
    self.failed = True
    self.msg = "Failed to verify the OS"
def main():
    """Entry point: parse module arguments and dispatch one swupd action.

    Exactly one of `name`, `update` or `verify` selects the operation;
    they are declared both required_one_of and mutually_exclusive.
    """
    module = AnsibleModule(
        argument_spec=dict(
            contenturl=dict(type="str"),
            format=dict(type="str"),
            manifest=dict(aliases=["release", "version"], type="int"),
            name=dict(aliases=["bundle"], type="str"),
            state=dict(default="present", choices=["present", "absent"], type="str"),
            update=dict(default=False, type="bool"),
            url=dict(type="str"),
            verify=dict(default=False, type="bool"),
            versionurl=dict(type="str"),
        ),
        required_one_of=[["name", "update", "verify"]],
        mutually_exclusive=[["name", "update", "verify"]],
        supports_check_mode=True
    )
    swupd = Swupd(module)
    name = module.params["name"]
    state = module.params["state"]
    update = module.params["update"]
    verify = module.params["verify"]
    # Dispatch to the requested operation.
    if update:
        swupd.update_os()
    elif verify:
        swupd.verify_os()
    elif state == "present":
        swupd.install_bundle(name)
    elif state == "absent":
        swupd.remove_bundle(name)
    else:
        # Unreachable with the validated `choices` above; kept as a safety net.
        swupd.failed = True
    # Report the accumulated outcome back to Ansible.
    if swupd.failed:
        module.fail_json(msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
    else:
        module.exit_json(changed=swupd.changed, msg=swupd.msg, stdout=swupd.stdout, stderr=swupd.stderr)
if __name__ == '__main__':
    main()
| Tatsh-ansible/ansible | lib/ansible/modules/packaging/os/swupd.py | Python | gpl-3.0 | 9,590 |
# Copyright 2014 the Secure GAE Scaffold Project Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import webapp2
class RootHandler(webapp2.RequestHandler):
    """Serves the application root by redirecting to the static index page."""
    def get(self):
        # GET / -> redirect to the bundled static landing page.
        self.redirect('/static/index.html')
| google/crisis-info-hub | src/handlers.py | Python | apache-2.0 | 763 |
# -*- coding: utf-8 -*-
# Mapping from a coarse crime category to the list of raw offense labels
# (as they appear in the Seattle crime data feed) that fall under it.
CRIME_DICT = {
    "ASSAULT": ["ASSAULT"],
    "THEFT": [
        "BIKE THEFT", "BURGLARY", "CAR PROWL", "MAIL THEFT", "PICKPOCKET",
        "PURSE SNATCH", "SHOPLIFTING", "THEFT OF SERVICES", "VEHICLE THEFT",
        "BURGLARY-SECURE PARKING-RES"
    ],
    "SIN": ["GAMBLE", "PORNOGRAPHY", "PROSTITUTION"],
    "DRUGS": [
        "DUI", "LIQUOR VIOLATION", "NARCOTICS", "STAY OUT OF AREA OF DRUGS"
    ],
    "FRAUD": ["COUNTERFEIT", "EMBEZZLE", "FORGERY", "FRAUD"],
    "HOMICIDE": ["HOMICIDE"],
    "PROPERTY": [
        "ILLEGAL DUMPING", "LOST PROPERTY", "OTHER PROPERTY",
        "PROPERTY DAMAGE", "RECKLESS BURNING", "RECOVERED PROPERTY",
        "STOLEN PROPERTY",
    ],
    "ROBBERY": ["ROBBERY"],
    "WEAPON": ["WEAPON"],
    "DISTURBANCE": [
        # Bug fix: "THREATS" was listed twice in this category.
        "DISORDERLY CONDUCT", "DISPUTE", "FIREWORK", "PUBLIC NUISANCE",
        "THREATS", "TRESPASS", "DISTURBANCE",
    ],
    "OTHER": [
        "ANIMAL COMPLAINT", "BIAS INCIDENT", "ELUDING", "ESCAPE", "EXTORTION",
        "HARBOR CALLS", "INJURY", "LOITERING", "METRO", "OBSTRUCT", "TRAFFIC",
        "WARRANT ARREST", "VIOLATION OF COURT ORDER",
        "[INC - CASE DC USE ONLY]", "FALSE REPORT",
    ],
}
| MurderSheWrote/MurderSheWroteSeattle | crimemapper/crimedict.py | Python | mit | 1,205 |
import unittest, time, sys
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_import as h2i, h2o_jobs, h2o_gbm
DO_CLASSIFICATION = True  # run the GBM grids as classification (True) or regression (False)
class Basic(unittest.TestCase):
    """GBM grid-search smoke tests on two small logistic-regression datasets."""
    def tearDown(self):
        # Fail the test if the H2O cloud logged any errors during the run.
        h2o.check_sandbox_for_errors()
    @classmethod
    def setUpClass(cls):
        # One-node H2O cloud shared by every test in this class.
        h2o.init(1)
    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()
    def test_GBMGrid_basic_benign(self):
        """Grid over learn_rate/ntrees/max_depth/min_rows on benign.csv."""
        csvFilename = "benign.csv"
        print "\nStarting", csvFilename
        csvPathname = 'logreg/' + csvFilename
        parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put')
        # columns start at 0
        # cols 0-13. 3 is output
        # no member id in this one
        # check the first in the models list. It should be the best
        colNames = [ 'STR','OBS','AGMT','FNDX','HIGD','DEG','CHK', 'AGP1','AGMN','NLV','LIV','WT','AGLP','MST' ]
        modelKey = 'GBMGrid_benign'
        # 'cols', 'ignored_cols_by_name', and 'ignored_cols' have to be exclusive
        # Grid syntax: comma lists enumerate values; 'a:b:c' is a range with step.
        params = {
            'destination_key': modelKey,
            'ignored_cols_by_name': 'STR',
            'learn_rate': '.1,.2,.25',
            'ntrees': '3:5:1',
            'max_depth': '5,7',
            'min_rows': '1,2',
            'response': 'FNDX',
            'classification': 1 if DO_CLASSIFICATION else 0,
        }
        kwargs = params.copy()
        timeoutSecs = 1800  # NOTE(review): unused -- never passed to runGBM
        start = time.time()
        GBMResult = h2o_cmd.runGBM(parseResult=parseResult, **kwargs)
        elapsed = time.time() - start
        print "GBM training completed in", elapsed, "seconds."
        # Second arg 0: print only the grid summary, no per-model detail.
        h2o_gbm.showGBMGridResults(GBMResult, 0)
    def test_GBMGrid_basic_prostate(self):
        """Grid over learn_rate/ntrees/max_depth/min_rows on prostate.csv."""
        csvFilename = "prostate.csv"
        print "\nStarting", csvFilename
        # columns start at 0
        csvPathname = 'logreg/' + csvFilename
        parseResult = h2i.import_parse(bucket='smalldata', path=csvPathname, hex_key=csvFilename + ".hex", schema='put')
        colNames = ['ID','CAPSULE','AGE','RACE','DPROS','DCAPS','PSA','VOL','GLEASON']
        modelKey = 'GBMGrid_prostate'
        # 'cols', 'ignored_cols_by_name', and 'ignored_cols' have to be exclusive
        params = {
            'destination_key': modelKey,
            'ignored_cols_by_name': 'ID',
            'learn_rate': '.1,.2',
            'ntrees': '1:3:1',
            'max_depth': '8,9',
            'min_rows': '1:5:2',
            'response': 'CAPSULE',
            'classification': 1 if DO_CLASSIFICATION else 0,
        }
        kwargs = params.copy()
        timeoutSecs = 1800  # NOTE(review): unused -- never passed to runGBM
        start = time.time()
        GBMResult = h2o_cmd.runGBM(parseResult=parseResult, **kwargs)
        elapsed = time.time() - start
        print "GBM training completed in", elapsed, "seconds."
        # Second arg 15: print up to 15 grid models with the summary.
        h2o_gbm.showGBMGridResults(GBMResult, 15)
if __name__ == '__main__':
    h2o.unit_main()
| rowhit/h2o-2 | py/testdir_single_jvm/test_GBMGrid_basic_class.py | Python | apache-2.0 | 2,962 |
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP S.A. (<http://www.openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP addon manifest: glue module wiring the fetchmail gateway into
# CRM claims.  Pure metadata, evaluated as a single dict literal.
{
    "name" : "eMail Gateway for CRM Claim",
    "version" : "1.0",
    # Needs both the mail-fetching framework and the claims module it feeds.
    "depends" : ["fetchmail", "crm_claim"],
    "author" : "OpenERP SA",
    'category': 'Hidden',
    "description": """
""",
    'website': 'http://www.openerp.com',
    'init_xml': [],
    'update_xml': [
        "installer.xml",
    ],
    'demo_xml': [
    ],
    'installable': True,
    # auto_install: activated automatically once all dependencies are installed.
    'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Johnzero/erp | openerp/addons/fetchmail_crm_claim/__openerp__.py | Python | agpl-3.0 | 1,461 |
#!/usr/bin/env python3
import os
import sys
import subprocess
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from lutris.util.wineregistry import WineRegistry
# Root directory holding the user's Wine prefixes.
PREFIXES_PATH = os.path.expanduser("~/Games/wine/prefixes")
def get_registries():
    """Return paths of all *.reg files in every known Wine prefix.

    Scans each directory under PREFIXES_PATH plus the default ~/.wine
    prefix.  Robustness fix: the original crashed with OSError when
    PREFIXES_PATH or ~/.wine did not exist; missing or non-directory
    entries are now skipped.
    """
    registries = []
    directories = os.listdir(PREFIXES_PATH) if os.path.isdir(PREFIXES_PATH) else []
    # ~/.wine is absolute, so os.path.join below ignores PREFIXES_PATH
    # for this entry (POSIX join semantics).
    directories.append(os.path.expanduser("~/.wine"))
    for prefix in directories:
        prefix_path = os.path.join(PREFIXES_PATH, prefix)
        if not os.path.isdir(prefix_path):
            # Skip stray files and missing prefixes (e.g. no ~/.wine).
            continue
        for path in os.listdir(prefix_path):
            if path.endswith(".reg"):
                registries.append(os.path.join(prefix_path, path))
    return registries
def check_registry(registry_path):
    """Round-trip *registry_path* through WineRegistry and compare output.

    Parses the registry file and re-renders it; if the rendered content
    differs from the original, the bad output is written to error.reg
    next to this script, a diff viewer (meld) is launched, and the
    process exits with status 2.
    """
    with open(registry_path, 'r') as registry_file:
        original_content = registry_file.read()
    try:
        registry = WineRegistry(registry_path)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # propagate untouched; parse errors are still reported and re-raised.
        sys.stderr.write("Error parsing {}\n".format(registry_path))
        raise
    content = registry.render()
    if content != original_content:
        wrong_path = os.path.join(os.path.dirname(__file__), 'error.reg')
        with open(wrong_path, 'w') as wrong_reg:
            wrong_reg.write(content)
        print("Content of parsed registry doesn't match: {}".format(registry_path))
        subprocess.call(["meld", registry_path, wrong_path])
        sys.exit(2)
# Validate every discovered registry file; check_registry() aborts the
# process on the first mismatch, so reaching the print means all passed.
registries = get_registries()
for registry in registries:
    check_registry(registry)
print("All {} registry files validated!".format(len(registries)))
| daniel-j/lutris | tests/check_prefixes.py | Python | gpl-3.0 | 1,469 |
#
# Copyright (c) 2015 Red Hat
# Licensed under The MIT License (MIT)
# http://opensource.org/licenses/MIT
#
from django.test import TestCase
from rest_framework.test import APITestCase
from rest_framework import status
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from pdc.apps.bindings import models as binding_models
from pdc.apps.common.test_utils import TestCaseWithChangeSetMixin
from pdc.apps.component import models as component_models
from pdc.apps.release import models as release_models
from . import models
class RPMSortKeyTestCase(TestCase):
    """Ordering guarantees of RPM.sort_key across epoch/version/release."""
    def test_sort_key_precedence(self):
        # Each pair is (smaller, larger): epoch outranks version, which
        # outranks release; numeric strings compare by value ("10" > "1"),
        # not lexically.
        data = [((0, "10", "10"), (1, "1", "1")),
                ((0, "1", "10"), (0, "10", "1")),
                ((0, "1", "1"), (0, "1", "10"))]
        for v1, v2 in data:
            p1 = models.RPM(epoch=v1[0], version=v1[1], release=v1[2])
            p2 = models.RPM(epoch=v2[0], version=v2[1], release=v2[2])
            self.assertTrue(p1.sort_key < p2.sort_key)
    def test_complex_version_sort(self):
        # Dotted versions compare segment by segment and numerically
        # (1.11.x < 1.100.x, release "11" < "101").
        data = [((0, "1.0.1", "10"), (1, "1.0.2", "1")),
                ((0, "1.11.1", "10"), (0, "1.100.1", "1")),
                ((0, "1", "1.0.1"), (0, "1", "1.1")),
                ((0, "1", "11"), (0, "1", "101"))]
        for v1, v2 in data:
            p1 = models.RPM(epoch=v1[0], version=v1[1], release=v1[2])
            p2 = models.RPM(epoch=v2[0], version=v2[1], release=v2[2])
            self.assertTrue(p1.sort_key < p2.sort_key, msg="%s < %s" % (v1, v2))
    def test_handle_non_numbers(self):
        # Alphanumeric segments (svn revisions, "3.2.5d", "2.1a15") must
        # still order deterministically.  Note the last pair expects
        # "2.1a15" < "2.1" -- an intentional quirk of the sort key;
        # confirm against its implementation if this looks surprising.
        data = [((0, "svn24104.0.92", "1"), (1, "svn24104.0.93", "1")),
                ((0, "3.2.5d", "1"), (0, "3.2.5e", "1")),
                ((0, "3.2.5d", "1"), (0, "3.2.6a", "1")),
                ((0, "2.1a15", "1"), (0, "2.1a20", "1")),
                ((0, "2.1a15", "1"), (0, "2.2", "1")),
                ((0, "2.1a15", "1"), (0, "2.1", "1"))]
        for v1, v2 in data:
            p1 = models.RPM(epoch=v1[0], version=v1[1], release=v1[2])
            p2 = models.RPM(epoch=v2[0], version=v2[1], release=v2[2])
            self.assertTrue(p1.sort_key < p2.sort_key, msg="%s < %s" % (v1, v2))
class RPMSaveValidationTestCase(TestCase):
    """RPM model validation: srpm_nevra must be set iff arch is not 'src'."""
    def test_empty_srpm_nevra_with_arch_is_src(self):
        # Source packages carry no srpm_nevra -> valid, row is persisted.
        rpm = models.RPM.objects.create(name='kernel', epoch=0, version='3.19.3', release='100',
                                        arch='src', srpm_name='kernel', filename='kernel-3.19.3-100.src.rpm')
        self.assertIsNotNone(rpm)
        self.assertEqual(1, models.RPM.objects.count())
    def test_non_empty_srpm_nevra_with_arch_is_not_src(self):
        # Binary packages must carry the NEVRA of their source package -> valid.
        rpm = models.RPM.objects.create(name='kernel', epoch=0, version='3.19.3', release='100',
                                        arch='x86_64', srpm_name='kernel', filename='kernel-3.19.3-100.x86_64.rpm',
                                        srpm_nevra='kernel-0:3.19.3-100.x86_64')
        self.assertIsNotNone(rpm)
        self.assertEqual(1, models.RPM.objects.count())
    def test_non_empty_srpm_nevra_with_arch_is_src(self):
        # Invalid: a src package claiming a srpm_nevra must be rejected
        # and nothing persisted.
        with self.assertRaises(ValidationError):
            models.RPM.objects.create(name='kernel', epoch=0, version='3.19.3', release='100',
                                      arch='src', srpm_name='kernel', filename='kernel-3.19.3-100.src.rpm',
                                      srpm_nevra='kernel-0:3.19.3-100.src')
        self.assertEqual(0, models.RPM.objects.count())
    def test_empty_srpm_nevra_with_arch_is_not_src(self):
        # Invalid: a binary package without srpm_nevra must be rejected.
        with self.assertRaises(ValidationError):
            models.RPM.objects.create(name='kernel', epoch=0, version='3.19.3', release='100',
                                      arch='x86_64', srpm_name='kernel', filename='kernel-3.19.3-100.x86_64.rpm')
        self.assertEqual(0, models.RPM.objects.count())
class RPMDepsFilterAPITestCase(APITestCase):
    """Filtering the RPM list API by dependency constraints of every type."""
    # Expected matches (indices into the test-<i> fixtures) for each query
    # constraint, derived from the fixture table built in setUpTestData.
    _MATCH_EQ_2_0 = [2, 4, 5, 8, 9, 11, 13]
    _MATCH_GT_2_0 = [2, 4, 7, 9, 10, 11, 12, 13, 14]
    _MATCH_LT_2_0 = [0, 1, 2, 3, 4, 6, 8, 11, 13]
    _MATCH_GE_2_0 = [2, 4, 5, 7, 8, 9, 10, 11, 12, 13, 14]
    _MATCH_LE_2_0 = [0, 1, 2, 3, 4, 5, 6, 8, 9, 11, 13]
    @classmethod
    def setUpTestData(cls):
        """
        15 packages are created. They all have name test-X, where X is a
        number. Each package has a dependency of each type with the same
        constraint. They are summarized in the table below.
        0 (=1.0)    1 (<1.0)   2 (>1.0)    3 (<=1.0)   4 (>=1.0)
        5 (=2.0)    6 (<2.0)   7 (>2.0)    8 (<=2.0)   9 (>=2.0)
        10 (=3.0)   11 (<3.0)  12 (>3.0)   13 (<=3.0)  14 (>=3.0)
        """
        idx = 0
        for version in ['1.0', '2.0', '3.0']:
            for op in ['=', '<', '>', '<=', '>=']:
                rpm = models.RPM.objects.create(
                    name='test-{}'.format(idx), epoch=0, version='1.0',
                    release='1', arch='x86_64', srpm_name='test-pkg',
                    srpm_nevra='test-pkg-1.0.1.x86_64', filename='dummy')
                idx += 1
                for choice in models.Dependency.DEPENDENCY_TYPE_CHOICES:
                    rpm.dependency_set.create(name='pkg', version=version,
                                              type=choice[0], comparison=op)
    def _get_filtered(self, field, constraint):
        """GET the RPM list filtered by *field*=*constraint*; assert HTTP 200."""
        response = self.client.get(reverse('rpms-list'), {field: constraint})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        return response
    def _check_count(self, field, constraint, expected):
        """The filter must report exactly *expected* matching packages."""
        response = self._get_filtered(field, constraint)
        self.assertEqual(response.data.get('count'), expected)
    def _check_names(self, field, constraint, indices):
        """The filter must return exactly the fixtures test-<i> for i in *indices*."""
        response = self._get_filtered(field, constraint)
        self.assertItemsEqual([pkg['name'] for pkg in response.data['results']],
                              ['test-{}'.format(i) for i in indices])
    #
    # No constraint tests
    #
    def test_filter_without_version_requires(self):
        self._check_count('requires', 'pkg', 15)
    def test_filter_without_version_suggests(self):
        self._check_count('suggests', 'pkg', 15)
    def test_filter_without_version_obsoletes(self):
        self._check_count('obsoletes', 'pkg', 15)
    def test_filter_without_version_recommends(self):
        self._check_count('recommends', 'pkg', 15)
    def test_filter_without_version_provides(self):
        self._check_count('provides', 'pkg', 15)
    def test_filter_without_version_conflicts(self):
        self._check_count('conflicts', 'pkg', 15)
    #
    # Equality constraint tests
    #
    def test_filter_with_version_equality_requires(self):
        self._check_names('requires', 'pkg=2.0', self._MATCH_EQ_2_0)
    def test_filter_with_version_equality_suggests(self):
        self._check_names('suggests', 'pkg=2.0', self._MATCH_EQ_2_0)
    def test_filter_with_version_equality_obsoletes(self):
        self._check_names('obsoletes', 'pkg=2.0', self._MATCH_EQ_2_0)
    def test_filter_with_version_equality_recommends(self):
        self._check_names('recommends', 'pkg=2.0', self._MATCH_EQ_2_0)
    def test_filter_with_version_equality_provides(self):
        self._check_names('provides', 'pkg=2.0', self._MATCH_EQ_2_0)
    def test_filter_with_version_equality_conflicts(self):
        self._check_names('conflicts', 'pkg=2.0', self._MATCH_EQ_2_0)
    #
    # Greater than constraint tests
    #
    def test_filter_with_greater_version_requires(self):
        self._check_names('requires', 'pkg>2.0', self._MATCH_GT_2_0)
    def test_filter_with_greater_version_suggests(self):
        self._check_names('suggests', 'pkg>2.0', self._MATCH_GT_2_0)
    def test_filter_with_greater_version_obsoletes(self):
        self._check_names('obsoletes', 'pkg>2.0', self._MATCH_GT_2_0)
    def test_filter_with_greater_version_recommends(self):
        self._check_names('recommends', 'pkg>2.0', self._MATCH_GT_2_0)
    def test_filter_with_greater_version_provides(self):
        self._check_names('provides', 'pkg>2.0', self._MATCH_GT_2_0)
    def test_filter_with_greater_version_conflicts(self):
        self._check_names('conflicts', 'pkg>2.0', self._MATCH_GT_2_0)
    #
    # Lesser than constraint tests
    #
    def test_filter_with_lesser_version_requires(self):
        self._check_names('requires', 'pkg<2.0', self._MATCH_LT_2_0)
    def test_filter_with_lesser_version_suggests(self):
        self._check_names('suggests', 'pkg<2.0', self._MATCH_LT_2_0)
    def test_filter_with_lesser_version_obsoletes(self):
        self._check_names('obsoletes', 'pkg<2.0', self._MATCH_LT_2_0)
    def test_filter_with_lesser_version_recommends(self):
        self._check_names('recommends', 'pkg<2.0', self._MATCH_LT_2_0)
    def test_filter_with_lesser_version_provides(self):
        self._check_names('provides', 'pkg<2.0', self._MATCH_LT_2_0)
    def test_filter_with_lesser_version_conflicts(self):
        self._check_names('conflicts', 'pkg<2.0', self._MATCH_LT_2_0)
    #
    # Greater than or equal constraint tests
    #
    def test_filter_with_greater_or_equal_version_requires(self):
        self._check_names('requires', 'pkg>=2.0', self._MATCH_GE_2_0)
    def test_filter_with_greater_or_equal_version_suggests(self):
        self._check_names('suggests', 'pkg>=2.0', self._MATCH_GE_2_0)
    def test_filter_with_greater_or_equal_version_recommends(self):
        self._check_names('recommends', 'pkg>=2.0', self._MATCH_GE_2_0)
    def test_filter_with_greater_or_equal_version_obsoletes(self):
        self._check_names('obsoletes', 'pkg>=2.0', self._MATCH_GE_2_0)
    def test_filter_with_greater_or_equal_version_provides(self):
        self._check_names('provides', 'pkg>=2.0', self._MATCH_GE_2_0)
    def test_filter_with_greater_or_equal_version_conflicts(self):
        self._check_names('conflicts', 'pkg>=2.0', self._MATCH_GE_2_0)
    #
    # Lesser than or equal constraint tests
    #
    def test_filter_with_lesser_or_equal_version_requires(self):
        self._check_names('requires', 'pkg<=2.0', self._MATCH_LE_2_0)
    def test_filter_with_lesser_or_equal_version_suggests(self):
        self._check_names('suggests', 'pkg<=2.0', self._MATCH_LE_2_0)
    def test_filter_with_lesser_or_equal_version_recommends(self):
        self._check_names('recommends', 'pkg<=2.0', self._MATCH_LE_2_0)
    def test_filter_with_lesser_or_equal_version_provides(self):
        self._check_names('provides', 'pkg<=2.0', self._MATCH_LE_2_0)
    def test_filter_with_lesser_or_equal_version_conflicts(self):
        self._check_names('conflicts', 'pkg<=2.0', self._MATCH_LE_2_0)
    def test_filter_with_lesser_or_equal_version_obsoletes(self):
        self._check_names('obsoletes', 'pkg<=2.0', self._MATCH_LE_2_0)
class RPMDepsFilterWithReleaseTestCase(APITestCase):
    """Dependency filtering when both sides carry a version-release pair."""
    @classmethod
    def setUpTestData(cls):
        cls.rpm = models.RPM.objects.create(name='test-pkg', epoch=0, version='1.0',
                                            release='1', arch='x86_64', srpm_name='test-pkg',
                                            srpm_nevra='test-pkg-1.0.1.x86_64',
                                            filename='dummy')
        # Single "Requires: pkg = 3.0-1.fc22" dependency on the fixture.
        cls.rpm.dependency_set.create(name='pkg', version='3.0-1.fc22',
                                      type=models.Dependency.REQUIRES, comparison='=')
    def _check_requires(self, constraint, expected):
        """Filter by requires=*constraint* and assert the match count."""
        response = self.client.get(reverse('rpms-list'), {'requires': constraint})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), expected)
    def test_filter_with_same_release_equal(self):
        self._check_requires('pkg=3.0-1.fc22', 1)
    def test_filter_with_same_release_lesser(self):
        self._check_requires('pkg<3.0-1.fc22', 0)
    def test_filter_with_same_release_greater(self):
        self._check_requires('pkg>3.0-1.fc22', 0)
    def test_filter_with_same_release_lesser_equal(self):
        self._check_requires('pkg<=3.0-1.fc22', 1)
    def test_filter_with_same_release_greater_equal(self):
        self._check_requires('pkg>=3.0-1.fc22', 1)
    def test_filter_with_different_release_equal(self):
        self._check_requires('pkg=3.0-2.fc22', 0)
    def test_filter_with_different_release_lesser(self):
        self._check_requires('pkg<3.0-2.fc22', 1)
    def test_filter_with_different_release_greater(self):
        self._check_requires('pkg>3.0-2.fc22', 0)
    def test_filter_with_different_release_lesser_equal(self):
        self._check_requires('pkg<=3.0-2.fc22', 1)
    def test_filter_with_different_release_greater_equal(self):
        self._check_requires('pkg>=3.0-2.fc22', 0)
class RPMDepsFilterWithEpochTestCase(APITestCase):
    """Dependency filtering when the query constraint carries an epoch."""
    @classmethod
    def setUpTestData(cls):
        cls.rpm = models.RPM.objects.create(name='test-pkg', epoch=0, version='1.0',
                                            release='1', arch='x86_64', srpm_name='test-pkg',
                                            srpm_nevra='test-pkg-1.0.1.x86_64',
                                            filename='dummy')
        # Single "Requires: pkg = 3.0" dependency (no epoch on the stored side).
        cls.rpm.dependency_set.create(name='pkg', version='3.0',
                                      type=models.Dependency.REQUIRES, comparison='=')
    def _check_requires(self, constraint, expected):
        """Filter by requires=*constraint* and assert the match count."""
        response = self.client.get(reverse('rpms-list'), {'requires': constraint})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), expected)
    def test_filter_with_same_epoch_equal(self):
        self._check_requires('pkg=0:3.0', 1)
    def test_filter_with_same_epoch_lesser(self):
        self._check_requires('pkg<0:4.0', 1)
    def test_filter_with_same_epoch_greater(self):
        self._check_requires('pkg>0:2.0', 1)
    def test_filter_with_same_epoch_lesser_equal(self):
        self._check_requires('pkg<=0:3.0', 1)
    def test_filter_with_same_epoch_greater_equal(self):
        self._check_requires('pkg>=0:3.0', 1)
    def test_filter_with_different_epoch_equal(self):
        self._check_requires('pkg=1:3.0', 0)
    def test_filter_with_different_epoch_lesser(self):
        self._check_requires('pkg<1:3.0', 1)
    def test_filter_with_different_epoch_greater(self):
        self._check_requires('pkg>1:2.0', 0)
    def test_filter_with_different_epoch_lesser_equal(self):
        self._check_requires('pkg<=1:3.0', 1)
    def test_filter_with_different_epoch_greater_equal(self):
        self._check_requires('pkg>=1:3.0', 0)
class RPMDepsFilterRangeAPITestCase(APITestCase):
    """Filtering RPMs by a dependency that forms a version range.

    The single fixture RPM carries, for every dependency type, the pair of
    constraints ``pkg >= 1.0`` and ``pkg < 3.0``.  A queried version inside
    the half-open range [1.0, 3.0) must therefore match (count 1) and a
    version outside it must not (count 0).
    """

    @classmethod
    def setUpTestData(cls):
        rpm = models.RPM.objects.create(name='test-pkg', epoch=0, version='1.0',
                                        release='1', arch='x86_64', srpm_name='test-pkg',
                                        srpm_nevra='test-pkg-1.0.1.x86_64',
                                        filename='dummy')
        # Create the >=1.0 / <3.0 range pair for each dependency type.
        # (Renamed loop variable so the builtin `type` is not shadowed; the
        # `type=` keyword below is the model field name and stays as is.)
        for dep_type in [t[0] for t in models.Dependency.DEPENDENCY_TYPE_CHOICES]:
            rpm.dependency_set.create(name='pkg', version='1.0',
                                      type=dep_type, comparison='>=')
            rpm.dependency_set.create(name='pkg', version='3.0',
                                      type=dep_type, comparison='<')

    def _assert_count(self, filter_name, query, expected):
        """Filter the RPM list by one dependency query and check the hit count."""
        response = self.client.get(reverse('rpms-list'), {filter_name: query})
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data['count'], expected)

    def test_filter_with_range_match_requires(self):
        self._assert_count('requires', 'pkg=2.0', 1)

    def test_filter_with_range_no_match_requires(self):
        self._assert_count('requires', 'pkg=4.0', 0)

    def test_filter_with_range_match_obsoletes(self):
        self._assert_count('obsoletes', 'pkg=2.0', 1)

    def test_filter_with_range_no_match_obsoletes(self):
        self._assert_count('obsoletes', 'pkg=4.0', 0)

    def test_filter_with_range_match_provides(self):
        self._assert_count('provides', 'pkg=2.0', 1)

    def test_filter_with_range_no_match_provides(self):
        self._assert_count('provides', 'pkg=4.0', 0)

    def test_filter_with_range_match_suggests(self):
        self._assert_count('suggests', 'pkg=2.0', 1)

    def test_filter_with_range_no_match_suggests(self):
        self._assert_count('suggests', 'pkg=4.0', 0)

    def test_filter_with_range_match_recommends(self):
        self._assert_count('recommends', 'pkg=2.0', 1)

    def test_filter_with_range_no_match_recommends(self):
        self._assert_count('recommends', 'pkg=4.0', 0)

    def test_filter_with_range_match_conflicts(self):
        self._assert_count('conflicts', 'pkg=2.0', 1)

    def test_filter_with_range_no_match_conflicts(self):
        self._assert_count('conflicts', 'pkg=4.0', 0)
class RPMDepsAPITestCase(TestCaseWithChangeSetMixin, APITestCase):
    """CRUD behaviour of the ``dependencies`` mapping on the RPM resource.

    Covers creating RPMs with dependencies, replacing or removing them via
    PUT and PATCH, rejection of malformed dependency input, and the
    ``has_no_deps`` filter.
    """

    fixtures = [
        'pdc/apps/common/fixtures/test/sigkey.json',
        'pdc/apps/release/fixtures/tests/release.json',
        'pdc/apps/package/fixtures/test/rpm.json',
        'pdc/apps/compose/fixtures/tests/compose.json',
        'pdc/apps/compose/fixtures/tests/compose_composerpm.json',
        'pdc/apps/compose/fixtures/tests/variant_arch.json',
        'pdc/apps/compose/fixtures/tests/variant.json'
    ]

    def setUp(self):
        self.maxDiff = None

    def _create_deps(self):
        """Attach one SUGGESTS and one CONFLICTS dependency to RPM #1."""
        models.Dependency.objects.create(type=models.Dependency.SUGGESTS,
                                         name='suggested', rpm_id=1)
        models.Dependency.objects.create(type=models.Dependency.CONFLICTS,
                                         name='conflicting', rpm_id=1)

    def test_create_rpm_with_deps(self):
        data = {'name': 'fake_bash', 'version': '1.2.3', 'epoch': 0,
                'release': '4.b1', 'arch': 'x86_64', 'srpm_name': 'bash',
                'filename': 'bash-1.2.3-4.b1.x86_64.rpm',
                'linked_releases': [], 'srpm_nevra': 'fake_bash-0:1.2.3-4.b1.src',
                'dependencies': {'requires': ['required-package'],
                                 'obsoletes': ['obsolete-package'],
                                 'suggests': ['suggested-package >= 1.0.0'],
                                 'recommends': ['recommended = 0.1.0'],
                                 'provides': ['/bin/bash', '/usr/bin/whatever'],
                                 'conflicts': ['nothing']}}
        response = self.client.post(reverse('rpms-list'), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        response.data.pop('id')
        data.update({'linked_composes': []})
        self.assertDictEqual(dict(response.data), data)
        self.assertEqual(7, models.Dependency.objects.count())
        # Version/comparison must be parsed out of the "name OP version" string.
        with_version = models.Dependency.objects.get(name='recommended')
        self.assertEqual(with_version.comparison, '=')
        self.assertEqual(with_version.version, '0.1.0')
        self.assertNumChanges([1])

    def test_create_rpm_with_duplicate_deps(self):
        data = {'name': 'fake_bash', 'version': '1.2.3', 'epoch': 0,
                'release': '4.b1', 'arch': 'x86_64', 'srpm_name': 'bash',
                'filename': 'bash-1.2.3-4.b1.x86_64.rpm',
                'linked_releases': [], 'srpm_nevra': 'fake_bash-0:1.2.3-4.b1.src',
                'dependencies': {'requires': ['required-package', 'required-package'],
                                 'obsoletes': ['obsolete-package'],
                                 'suggests': ['suggested-package >= 1.0.0', 'suggested-package >= 1.0.0'],
                                 'recommends': ['recommended = 0.1.0'],
                                 'provides': ['/bin/bash', '/usr/bin/whatever'],
                                 'conflicts': ['nothing']}}
        response = self.client.post(reverse('rpms-list'), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertNumChanges([1])

    def test_put_to_rpm_with_none(self):
        data = {
            'name': 'bash',
            'epoch': 0,
            'version': '1.2.3',
            'release': '4.b1',
            'arch': 'x86_64',
            'srpm_name': 'bash',
            'srpm_nevra': 'bash-0:1.2.3-4.b1.src',
            'filename': 'bash-1.2.3-4.b1.x86_64.rpm',
            'dependencies': {
                'requires': ['required-package']
            }
        }
        response = self.client.put(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(1, models.Dependency.objects.count())
        dep = models.Dependency.objects.first()
        # A bare name has no version constraint.
        self.assertIsNone(dep.comparison)
        self.assertIsNone(dep.version)
        self.assertEqual(dep.rpm.pk, 1)
        self.assertNumChanges([1])

    def test_put_to_overwrite_existing(self):
        self._create_deps()
        data = {'name': 'bash',
                'epoch': 0,
                'version': '1.2.3',
                'release': '4.b1',
                'arch': 'x86_64',
                'srpm_name': 'bash',
                'srpm_nevra': 'bash-0:1.2.3-4.b1.src',
                'filename': 'bash-1.2.3-4.b1.x86_64.rpm',
                'dependencies': {'requires': ['required-package']}}
        response = self.client.put(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(1, models.Dependency.objects.count())
        dep = models.Dependency.objects.first()
        self.assertIsNone(dep.comparison)
        self.assertIsNone(dep.version)
        self.assertEqual(dep.rpm.pk, 1)
        self.assertEqual(dep.name, 'required-package')
        self.assertEqual(dep.type, models.Dependency.REQUIRES)
        self.assertNumChanges([1])

    def test_patch_to_rpm_with_none(self):
        data = {'dependencies': {'requires': ['required-package']}}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(1, models.Dependency.objects.count())
        dep = models.Dependency.objects.first()
        self.assertIsNone(dep.comparison)
        self.assertIsNone(dep.version)
        self.assertEqual(dep.rpm.pk, 1)
        self.assertEqual(dep.name, 'required-package')
        self.assertEqual(dep.type, models.Dependency.REQUIRES)
        self.assertNumChanges([1])

    def test_patch_to_overwrite_existing(self):
        self._create_deps()
        data = {'dependencies': {'requires': ['required-package']}}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(1, models.Dependency.objects.count())
        dep = models.Dependency.objects.first()
        self.assertIsNone(dep.comparison)
        self.assertIsNone(dep.version)
        self.assertEqual(dep.rpm.pk, 1)
        self.assertEqual(dep.name, 'required-package')
        self.assertEqual(dep.type, models.Dependency.REQUIRES)
        self.assertNumChanges([1])

    def test_put_to_remove(self):
        self._create_deps()
        data = {'name': 'bash',
                'epoch': 0,
                'version': '1.2.3',
                'release': '4.b1',
                'arch': 'x86_64',
                'srpm_name': 'bash',
                'srpm_nevra': 'bash-0:1.2.3-4.b1.src',
                'filename': 'bash-1.2.3-4.b1.x86_64.rpm',
                'dependencies': {}}
        # Fixed: this test is about PUT semantics (the full payload above is
        # exactly what PUT requires); it previously issued PATCH, duplicating
        # test_patch_to_remove instead of covering PUT.
        response = self.client.put(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])
        self.assertEqual(0, models.Dependency.objects.count())

    def test_patch_to_remove(self):
        self._create_deps()
        data = {'dependencies': {}}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])
        self.assertEqual(0, models.Dependency.objects.count())

    def test_bad_dependency_format(self):
        # Two whitespace-separated tokens without an operator are invalid.
        data = {'dependencies': {'recommends': ['foo bar']}}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])

    def test_bad_dependency_type(self):
        data = {'dependencies': {'wants': ['icecream']}}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])

    def test_deps_are_not_list(self):
        data = {'dependencies': {'suggests': 'pony'}}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])

    def test_deps_with_too_many_lists(self):
        data = {'dependencies': {'suggests': [['pony']]}}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])

    def test_patch_without_deps_does_not_delete_existing(self):
        self._create_deps()
        data = {'name': 'new_name'}
        response = self.client.patch(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])
        self.assertEqual(2, models.Dependency.objects.count())

    def test_put_without_deps_deletes_existing(self):
        self._create_deps()
        data = {'name': 'new-name',
                'epoch': 0,
                'version': '1.2.3',
                'release': '4.b1',
                'arch': 'x86_64',
                'srpm_name': 'bash',
                'srpm_nevra': 'bash-0:1.2.3-4.b1.src',
                'filename': 'bash-1.2.3-4.b1.x86_64.rpm'}
        response = self.client.put(reverse('rpms-detail', args=[1]), data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([1])
        self.assertEqual(0, models.Dependency.objects.count())

    def test_has_no_deps_filter(self):
        self._create_deps()
        response = self.client.get(reverse('rpms-list'), {'has_no_deps': 'true'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
        response = self.client.get(reverse('rpms-list'), {'has_no_deps': 'false'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
class RPMAPIRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """General REST behaviour of the RPM endpoint.

    Covers list filtering (single and repeated query parameters), retrieval,
    create, full and partial update (including default filename generation),
    read-only fields, bulk PATCH, and the rejection of DELETE.
    """
    fixtures = [
        'pdc/apps/common/fixtures/test/sigkey.json',
        'pdc/apps/release/fixtures/tests/release.json',
        'pdc/apps/package/fixtures/test/rpm.json',
        'pdc/apps/compose/fixtures/tests/compose.json',
        'pdc/apps/compose/fixtures/tests/compose_composerpm.json',
        'pdc/apps/compose/fixtures/tests/variant_arch.json',
        'pdc/apps/compose/fixtures/tests/variant.json'
    ]

    def setUp(self):
        # Expected `dependencies` value for an RPM with no dependencies.
        self.empty_deps = {'conflicts': [], 'obsoletes': [], 'provides': [],
                           'recommends': [], 'requires': [], 'suggests': []}

    def test_query_all_rpms(self):
        url = reverse('rpms-list')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 3)

    def test_query_with_params(self):
        """Each supported filter narrows the list to the expected fixture rows."""
        url = reverse('rpms-list')
        response = self.client.get(url + '?name=bash', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        response = self.client.get(url + '?epoch=0', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 3)
        response = self.client.get(url + '?version=1.2.3', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 3)
        response = self.client.get(url + '?release=4.b1', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        response = self.client.get(url + '?arch=x86_64', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
        response = self.client.get(url + '?srpm_name=bash', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 3)
        response = self.client.get(url + '?srpm_nevra=bash-0:1.2.3-4.b1.src', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        # The literal string "null" matches rows whose srpm_nevra is NULL.
        response = self.client.get(url + '?srpm_nevra=null', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        response = self.client.get(url + '?compose=compose-1', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
        results = response.data.get('results', [])
        ids = []
        for result in results:
            ids.append(result['id'])
        self.assertTrue(1 in ids)

    def test_query_with_multi_value_against_same_key(self):
        """Repeating a query key ORs names but ANDs here only one nevra matches."""
        url = reverse('rpms-list')
        response = self.client.get(url + '?name=bash&name=bash-doc', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
        response = self.client.get(url + '?srpm_nevra=bash-0:1.2.3-4.b1.src&srpm_nevra=bash-0:1.2.3-4.b2.src',
                                   format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)

    def test_query_with_different_key(self):
        url = reverse('rpms-list')
        response = self.client.get(url + '?name=bash&version=1.2.3', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)

    def test_query_with_wrong_params(self):
        url = reverse('rpms-list')
        response = self.client.get(url + 'wrong_param/', format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)

    def test_query_with_bad_epoch(self):
        url = reverse('rpms-list')
        response = self.client.get(url, {'epoch': 'foo'}, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('epoch', response.data['detail'][0])

    def test_query_with_only_key(self):
        """A filter key with no value (or empty value) matches nothing rather than erroring."""
        url = reverse('rpms-list')
        response = self.client.get(url + '?name', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 0)
        response = self.client.get(url + '?name=', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 0)
        response = self.client.get(url + '?epoch', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 0)
        response = self.client.get(url + '?epoch=', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 0)

    def test_retrieve_rpm(self):
        url = reverse('rpms-detail', args=[1])
        response = self.client.get(url, format='json')
        expect_data = {"id": 1, "name": "bash", "version": "1.2.3", "epoch": 0, "release": "4.b1",
                       "arch": "x86_64",
                       "srpm_name": "bash", "srpm_nevra": "bash-0:1.2.3-4.b1.src",
                       "filename": "bash-1.2.3-4.b1.x86_64.rpm", "linked_releases": [],
                       "linked_composes": ["compose-1"], "dependencies": self.empty_deps}
        self.assertEqual(response.data, expect_data)

    def test_retrieve_rpm_should_not_have_duplicated_composes(self):
        url = reverse('rpms-detail', args=[2])
        response = self.client.get(url, format='json')
        self.assertEqual(response.data.get("linked_composes"), ['compose-1'])

    def test_create_rpm(self):
        url = reverse('rpms-list')
        data = {"name": "fake_bash", "version": "1.2.3", "epoch": 0, "release": "4.b1", "arch": "x86_64",
                "srpm_name": "bash", "filename": "bash-1.2.3-4.b1.x86_64.rpm", "linked_releases": ['release-1.0'],
                "srpm_nevra": "fake_bash-0:1.2.3-4.b1.src"}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        expected_response_data = {"id": 4, 'linked_composes': [],
                                  "name": "fake_bash", "version": "1.2.3", "epoch": 0, "release": "4.b1",
                                  "arch": "x86_64", "srpm_name": "bash", "filename": "bash-1.2.3-4.b1.x86_64.rpm",
                                  "linked_releases": ['release-1.0'], "srpm_nevra": "fake_bash-0:1.2.3-4.b1.src",
                                  "dependencies": self.empty_deps}
        self.assertEqual(response.data, expected_response_data)
        self.assertNumChanges([1])

    def test_create_rpm_with_wrong_release(self):
        url = reverse('rpms-list')
        data = {"name": "fake_bash", "version": "1.2.3", "epoch": 0, "release": "4.b1", "arch": "x86_64",
                "srpm_name": "bash", "filename": "bash-1.2.3-4.b1.x86_64.rpm", "linked_releases": ['release-1.0-wrong'],
                "srpm_nevra": "fake_bash-0:1.2.3-4.b1.src"}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_partial_update_rpm_with_assign_release(self):
        url = reverse('rpms-detail', args=[1])
        data = {"linked_releases": ['release-1.0']}
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('linked_releases'), ['release-1.0'])
        self.assertNumChanges([1])

    def test_partial_update_does_not_break_filename(self):
        url = reverse('rpms-detail', args=[1])
        data = {'linked_releases': ['release-1.0']}
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.data.get('filename'), 'bash-1.2.3-4.b1.x86_64.rpm')

    def test_full_update_uses_default_filename(self):
        """PUT without `filename` derives it from name-version-release.arch.rpm."""
        url = reverse('rpms-detail', args=[1])
        data = {'name': 'fake_bash', 'version': '1.2.3', 'epoch': 0, 'release': '4.b1', 'arch': 'x86_64',
                'srpm_name': 'bash', 'linked_releases': ['release-1.0'],
                'srpm_nevra': 'fake_bash-0:1.2.3-4.b1.src'}
        response = self.client.put(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('filename'), 'fake_bash-1.2.3-4.b1.x86_64.rpm')
        self.assertNumChanges([1])

    def test_full_update_with_missing_fields_does_not_crash_on_default_filename(self):
        url = reverse('rpms-detail', args=[1])
        data = {'epoch': 0,
                'srpm_name': 'bash', 'linked_releases': ['release-1.0'],
                'srpm_nevra': 'fake_bash-0:1.2.3-4.b1.src'}
        response = self.client.put(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])

    def test_partial_update_rpm_with_assign_wrong_release(self):
        url = reverse('rpms-detail', args=[1])
        data = {"linked_releases": ['release-1.0-fake']}
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_update_rpm(self):
        data = {"name": "fake_bash", "version": "1.2.3", "epoch": 0, "release": "4.b1", "arch": "x86_64",
                "srpm_name": "bash", "filename": "bash-1.2.3-4.b1.x86_64.rpm", "linked_releases": ['release-1.0'],
                "srpm_nevra": "fake_bash-0:1.2.3-4.b1.src"}
        url = reverse('rpms-detail', args=[1])
        response = self.client.put(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        data.update({'id': 1, 'linked_composes': [u'compose-1'], 'dependencies': self.empty_deps})
        self.assertDictEqual(dict(response.data), data)
        self.assertNumChanges([1])

    def test_update_rpm_with_linked_compose_should_read_only(self):
        """`linked_composes` is read-only; writing it must be rejected."""
        url = reverse('rpms-detail', args=[3])
        data = {'linked_composes': [u'compose-1']}
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertNumChanges([])

    def test_bulk_update_patch(self):
        """Bulk PATCH on the list endpoint keys the payload by object id."""
        self.client.patch(reverse('rpms-list'),
                          {1: {"linked_releases": ['release-1.0']}}, format='json')
        url = reverse('rpms-detail', args=[1])
        response = self.client.get(url, format='json')
        self.assertEqual(response.data.get("linked_releases"), ['release-1.0'])
        self.assertNumChanges([1])

    def test_delete_rpm_should_not_be_allowed(self):
        url = reverse('rpms-detail', args=[1])
        response = self.client.delete(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_bulk_delete_rpms_should_not_be_allowed(self):
        url = reverse('rpms-list')
        response = self.client.delete(url, [1, 2], format='json')
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class ImageRESTTestCase(APITestCase):
    """Read-only filtering tests for the image list endpoint.

    Every test filters the fixture images by one field — first with a single
    value, then (where applicable) with a repeated value that ORs multiple
    matches — and checks the resulting count.  Integer-typed filters reject
    non-numeric input with a 400 error.
    """
    fixtures = [
        'pdc/apps/release/fixtures/tests/release.json',
        'pdc/apps/compose/fixtures/tests/compose.json',
        'pdc/apps/compose/fixtures/tests/variant_arch.json',
        'pdc/apps/compose/fixtures/tests/variant.json',
        'pdc/apps/package/fixtures/test/image.json',
        'pdc/apps/compose/fixtures/tests/compose_composeimage.json',
    ]

    def _assert_count(self, params, expected):
        """GET the image list with `params` and check the number of matches."""
        response = self.client.get(reverse('image-list'), params)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), expected)

    def _assert_bad_int(self, key):
        """A non-numeric value for an integer filter yields a descriptive 400."""
        value = 'wrongvalue'
        response = self.client.get(reverse('image-list'), {key: value})
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data,
                         {"detail": [u'Value [%s] of %s is not an integer' % (value, key)]})

    def test_list_all(self):
        self._assert_count({}, 3)

    def test_query_file_name(self):
        self._assert_count({'file_name': 'image-1'}, 1)
        self._assert_count({'file_name': ['image-1', 'image-2']}, 2)

    def test_query_image_format(self):
        self._assert_count({'image_format': 'iso'}, 1)
        self._assert_count({'image_format': ['iso', 'qcow']}, 2)

    def test_query_image_type(self):
        self._assert_count({'image_type': 'dvd'}, 1)
        self._assert_count({'image_type': ['dvd', 'boot']}, 2)

    def test_query_disc_number(self):
        self._assert_count({'disc_number': 1}, 1)
        self._assert_count({'disc_number': [1, 2]}, 2)

    def test_query_disc_count(self):
        self._assert_count({'disc_count': 1}, 1)
        self._assert_count({'disc_count': [1, 2]}, 2)

    def test_query_arch(self):
        self._assert_count({'arch': 'src'}, 1)
        self._assert_count({'arch': ['src', 'x86_64']}, 2)

    def test_query_mtime(self):
        self._assert_count({'mtime': 111111111}, 1)
        self._assert_count({'mtime': [111111111, 222222222]}, 2)

    def test_query_size(self):
        self._assert_count({'size': 444444444}, 1)
        self._assert_count({'size': [444444444, 555555555]}, 2)

    def test_query_bootable(self):
        self._assert_count({'bootable': True}, 1)

    def test_negative_bootable(self):
        self._assert_count({'bootable': 'false'}, 2)

    def test_active_bootable(self):
        self._assert_count({'bootable': 'true'}, 1)

    def test_query_implant_md5(self):
        self._assert_count({'implant_md5': 'a' * 32}, 1)
        self._assert_count({'implant_md5': ['a' * 32, 'b' * 32]}, 2)

    def test_query_volume_id(self):
        self._assert_count({'volume_id': 'image-1-volume_id'}, 1)
        self._assert_count({'volume_id': ['image-1-volume_id', 'image-2-volume_id']}, 2)

    def test_query_md5(self):
        self._assert_count({'md5': '1' * 32}, 1)
        self._assert_count({'md5': ['1' * 32, '2' * 32]}, 2)

    def test_query_sha1(self):
        self._assert_count({'sha1': '1' * 40}, 1)
        self._assert_count({'sha1': ['1' * 40, '2' * 40]}, 2)

    def test_query_sha256(self):
        self._assert_count({'sha256': '1' * 64}, 1)
        self._assert_count({'sha256': ['1' * 64, '2' * 64]}, 2)

    def test_query_compose(self):
        # Unknown compose matches nothing; mixing a known compose in matches all.
        self._assert_count({'compose': 'foo'}, 0)
        self._assert_count({'compose': ['compose-1', 'foo']}, 3)

    def test_query_disc_number_with_wrong_value(self):
        self._assert_bad_int('disc_number')

    def test_query_disc_count_with_wrong_value(self):
        self._assert_bad_int('disc_count')

    def test_query_mtime_with_wrong_value(self):
        self._assert_bad_int('mtime')

    def test_query_size_with_wrong_value(self):
        self._assert_bad_int('size')
class BuildImageRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
fixtures = [
'pdc/apps/package/fixtures/test/rpm.json',
'pdc/apps/package/fixtures/test/archive.json',
'pdc/apps/package/fixtures/test/release.json',
'pdc/apps/package/fixtures/test/build_image.json',
]
def test_create_with_new_rpms(self):
url = reverse('buildimage-list')
data = {'image_id': 'new_build',
'image_format': 'docker',
'md5': "0123456789abcdef0123456789abcdef",
'rpms': [{'name': 'new_rpm', 'epoch': 0, 'version': '1.0.0',
'release': '1', 'arch': 'src', 'srpm_name': 'new_srpm'}]
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_with_new_incorrect_rpms_1(self):
url = reverse('buildimage-list')
# rpm's arch is not src but srpm_nevra is empty
data = {'image_id': 'new_build',
'image_format': 'docker',
'md5': "0123456789abcdef0123456789abcdef",
'rpms': [{'name': 'new_rpm', 'epoch': 0, 'version': '1.0.0',
'release': '1', 'arch': 'x86-64', 'srpm_name': 'new_srpm'}]
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('rpms'), ["RPM's srpm_nevra should be empty if and only if arch is src"])
def test_create_with_new_incorrect_rpms_2(self):
url = reverse('buildimage-list')
# rpm's arch is src but srpm_nevra is not empty
data = {'image_id': 'new_build',
'image_format': 'docker',
'md5': "0123456789abcdef0123456789abcdef",
'rpms': [{'name': 'new_rpm', 'epoch': 0, 'version': '1.0.0',
'release': '1', 'arch': 'src', 'srpm_name': 'new_srpm', 'srpm_nevra': 'fake_srpm_nevra'}]
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertEqual(response.data.get('rpms'), ["RPM's srpm_nevra should be empty if and only if arch is src"])
def test_create_with_exist_rpms(self):
url = reverse('buildimage-list')
data = {'image_id': 'new_build',
'image_format': 'docker',
'md5': "0123456789abcdef0123456789abcdef",
'rpms': [{
"name": "bash-doc",
"epoch": 0,
"version": "1.2.3",
"release": "4.b2",
"arch": "x86_64",
"srpm_name": "bash",
"srpm_nevra": "bash-0:1.2.3-4.b2.src"}]
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertNumChanges([1])
self.assertIn('bash-doc', response.content)
def test_create_with_exist_rpm_nevra(self):
url = reverse('buildimage-list')
data = {'image_id': 'new_build',
'image_format': 'docker',
'md5': "0123456789abcdef0123456789abcdef",
'rpms': [{
"name": "bash-doc",
"epoch": 0,
"version": "1.2.3",
"release": "4.b2",
"arch": "x86_64",
"srpm_name": "bash",
"srpm_nevra": "new_bash-0:1.2.3-4.b2.src"}]
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertNumChanges([1])
self.assertIn('bash-doc', response.content)
def test_create_with_new_archives(self):
url = reverse('buildimage-list')
data = {'image_id': 'new_build',
'image_format': 'docker',
'md5': "0123456789abcdef0123456789abcdef",
'rpms': [],
'archives': [{'build_nvr': 'new_build', 'name': 'new_name',
'size': 123, 'md5': '1111222233334444aaaabbbbccccdddd'}]
}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertNumChanges([2])
def test_create_with_exist_release_id(self):
url = reverse('buildimage-list')
data = {'image_id': 'new_build',
'image_format': 'docker',
'md5': "0123456789abcdef0123456789abcdef",
'releases': ["release-1.0", "release-2.0"]}
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    def test_create_with_non_exist_release_id(self):
        """An unknown release id is rejected with 400."""
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'releases': ["release-1.0-fake-name"]}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_create_with_exist_archives(self):
        """Existing RPM and existing archive are reused; only the image counts as a change."""
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{
                    "name": "bash-doc",
                    "epoch": 0,
                    "version": "1.2.3",
                    "release": "4.b2",
                    "arch": "x86_64",
                    "srpm_name": "bash",
                    "srpm_nevra": "bash-0:1.2.3-4.b2.src"}],
                'archives': [{'build_nvr': 'my-server-docker-1.0-27', 'name': 'tdl-x86_64.xml',
                              'size': 641, 'md5': '22222222222222222222222222222222'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertNumChanges([1])
        self.assertIn('bash-doc', response.content)
    def test_create_with_wrong_field(self):
        """A misnamed archive field ('build_name') yields 400 mentioning the required key."""
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [],
                'archives': [{'build_name': 'new_build', 'name': 'new_name',
                              'size': 123, 'md5': '1111222233334444aaaabbbbccccdddd'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('archives', response.content)
        self.assertIn('build_nvr', response.content)
    def test_create_with_exist_rpms_missing_fields(self):
        """Partial RPM data (existing name) is rejected; the error names every missing field."""
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{'name': 'bash-doc'}],
                'archives': []}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('rpms', response.content)
        self.assertIn('epoch', response.content)
        self.assertIn('version', response.content)
        self.assertIn('release', response.content)
        self.assertIn('arch', response.content)
        self.assertIn('srpm_name', response.content)
    def test_create_with_new_rpms_missing_fields(self):
        """Partial RPM data (new name) is rejected the same way as an existing one."""
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{'name': 'new_rpm'}],
                'archives': []}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('rpms', response.content)
        self.assertIn('epoch', response.content)
        self.assertIn('version', response.content)
        self.assertIn('release', response.content)
        self.assertIn('arch', response.content)
        self.assertIn('srpm_name', response.content)
    def test_create_with_exist_archives_missing_fields(self):
        """Archive with only build_nvr (existing build) is rejected; error lists missing fields."""
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [],
                'archives': [{'build_nvr': 'my-server-docker-1.0-27'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('archives', response.content)
        self.assertIn('name', response.content)
        self.assertIn('size', response.content)
        self.assertIn('md5', response.content)
    def test_create_with_new_archives_missing_fields(self):
        """Archive with only build_nvr (new build) is rejected; error lists missing fields."""
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [],
                'archives': [{'build_nvr': 'new_build'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertIn('archives', response.content)
        self.assertIn('name', response.content)
        self.assertIn('size', response.content)
        self.assertIn('md5', response.content)
    def test_get(self):
        """Detail endpoint returns 200 for a fixture image (pk=1)."""
        url = reverse('buildimage-detail', args=[1])
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_list(self):
        """List endpoint returns both fixture images."""
        url = reverse('buildimage-list')
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
    def test_query_with_component_name(self):
        """Filtering by component_name matches one fixture image."""
        url = reverse('buildimage-list')
        response = self.client.get(url + '?component_name=bash', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_query_component_name_with_srpm_name_mapping(self):
        """component_name filter follows a ReleaseComponent->srpm_name mapping (bash -> kernel)."""
        rpm = models.RPM.objects.create(
            name='kernel', epoch=0, version='3.19.3', release='100',
            arch='src', srpm_name='kernel', filename='kernel-3.19.3-100.src.rpm')
        build_image = models.BuildImage.objects.first()
        build_image.rpms.add(rpm)
        global_component = component_models.GlobalComponent.objects.create(name='bash')
        release = release_models.Release.objects.create(
            release_type=release_models.ReleaseType.objects.get(short='ga'),
            short='release',
            version='1.1',
            name='Awesome Release')
        release_component = component_models.ReleaseComponent.objects.create(
            global_component=global_component,
            release=release,
            name='bash')
        # the mapping makes querying for 'bash' also match images built
        # from the 'kernel' SRPM
        binding_models.ReleaseComponentSRPMNameMapping.objects.create(
            srpm_name='kernel',
            release_component=release_component)
        url = reverse('buildimage-list')
        response = self.client.get(url + '?component_name=bash', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        self.assertIn('kernel', response.content)
    def test_query_component_name_without_srpm_name_mapping(self):
        """Without a mapping, component_name falls back to matching the srpm_name directly."""
        rpm = models.RPM.objects.create(
            name='kernel', epoch=0, version='3.19.3', release='100',
            arch='src', srpm_name='kernel', filename='kernel-3.19.3-100.src.rpm')
        build_image = models.BuildImage.objects.first()
        build_image.rpms.add(rpm)
        global_component = component_models.GlobalComponent.objects.create(name='kernel')
        release = release_models.Release.objects.create(
            release_type=release_models.ReleaseType.objects.get(short='ga'),
            short='release',
            version='7.1',
            name='Awesome Release')
        component_models.ReleaseComponent.objects.create(
            global_component=global_component,
            release=release,
            name='kernel')
        url = reverse('buildimage-list')
        response = self.client.get(url + '?component_name=kernel', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        self.assertIn('kernel', response.content)
    def test_query_with_rpm_version(self):
        """Filtering by rpm_version matches one fixture image."""
        url = reverse('buildimage-list')
        response = self.client.get(url + '?rpm_version=1.2.3', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_query_with_rpm_release(self):
        """Filtering by rpm_release matches one fixture image."""
        url = reverse('buildimage-list')
        response = self.client.get(url + '?rpm_release=4.b1', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_query_with_image_id(self):
        """Filtering by image_id returns exactly that image."""
        url = reverse('buildimage-list')
        response = self.client.get(url + '?image_id=my-server-docker-1.0-27', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
        self.assertEqual(response.data.get('results')[0].get('image_id'), 'my-server-docker-1.0-27')
    def test_query_with_archive_build_nvr(self):
        """Filtering by archive_build_nvr matches both fixture images (shared archive build)."""
        url = reverse('buildimage-list')
        response = self.client.get(url + '?archive_build_nvr=my-server-docker-1.0-27', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
    def test_query_with_image_format(self):
        """Filtering by image_format=docker matches both fixture images."""
        url = reverse('buildimage-list')
        response = self.client.get(url + '?image_format=docker', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
    def test_query_with_md5(self):
        """Filtering by the image md5 matches one fixture image."""
        url = reverse('buildimage-list')
        response = self.client.get(url + '?md5=0123456789abcdef0123456789abcdef',
                                   format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_query_with_archive_name(self):
        """Filtering by archive_name matches one fixture image."""
        url = reverse('buildimage-list')
        response = self.client.get(url + '?archive_name=archive_1', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_query_with_archive_size(self):
        """Filtering by archive_size matches one fixture image."""
        url = reverse('buildimage-list')
        response = self.client.get(url + '?archive_size=666', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_query_with_archive_md5(self):
        """Filtering by archive_md5 matches both fixture images (shared archive)."""
        url = reverse('buildimage-list')
        response = self.client.get(url + '?archive_md5=22222222222222222222222222222222', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
    def test_query_with_release_id(self):
        """release-1.0 maps to both images, release-2.0 to only one."""
        url = reverse('buildimage-list')
        response = self.client.get(url + '?release_id=release-1.0', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 2)
        response = self.client.get(url + '?release_id=release-2.0', format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('count'), 1)
    def test_update_image_with_release_id(self):
        """PATCHing the releases list replaces it and logs one change."""
        url = reverse('buildimage-detail', args=[1])
        data = {"releases": ["release-1.0", "release-2.0"]}
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('releases'), ["release-1.0", "release-2.0"])
        self.assertNumChanges([1])
    def test_patch_update(self):
        """PATCHing a single scalar field (image_id) succeeds."""
        url = reverse('buildimage-detail', args=[1])
        data = {'image_id': 'new_build'}
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data.get('image_id'), 'new_build')
    def test_partial_update_empty(self):
        """An empty PATCH body is rejected with 400."""
        response = self.client.patch(reverse('buildimage-detail', args=[1]), {}, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_patch_update_failed(self):
        """PATCHing an unknown image_format fails with the related-object error message."""
        url = reverse('buildimage-detail', args=[1])
        data = {'image_format': 'new_format'}
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertEqual(response.data.get('image_format'), ["Object with name=new_format does not exist."])
    def test_put_update(self):
        """Full PUT with new rpm + new archive succeeds and logs three changes."""
        url = reverse('buildimage-detail', args=[1])
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{
                    "name": "new_rpm",
                    "epoch": 0,
                    "version": "0.1.0",
                    "release": "1",
                    "arch": "x86_64",
                    "srpm_name": "new_srpm",
                    "srpm_nevra": "new_srpm_nevra"}],
                'archives': [{'build_nvr': 'new_build', 'name': 'new_name',
                              'size': 123, 'md5': '1111222233334444aaaabbbbccccdddd'}]
                }
        response = self.client.put(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertNumChanges([3])
        self.assertIn('new_rpm', response.content)
    def test_delete(self):
        """DELETE removes the image (subsequent GET is 404) and logs one change."""
        url = reverse('buildimage-detail', args=[1])
        response = self.client.delete(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
        response = self.client.get(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.assertNumChanges([1])
    def test_create_same_image_id_with_different_format(self):
        """The same image_id may exist once per image_format (docker and iso both 201)."""
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{'name': 'new_rpm', 'epoch': 0, 'version': '1.0.0',
                          'release': '1', 'arch': 'src', 'srpm_name': 'new_srpm'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'iso',
                'md5': "0123456789abcdef0123456789abcabc",
                'rpms': [{'name': 'new_rpm', 'epoch': 0, 'version': '1.0.0',
                          'release': '1', 'arch': 'src', 'srpm_name': 'new_srpm'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
class BuildImageRTTTestsRESTTestCase(TestCaseWithChangeSetMixin, APITestCase):
    """Tests for the build-image RTT test-result endpoint.

    The endpoint is list/detail/patch only: results are created
    implicitly (as 'untested') when a build image is created, and may
    never be POSTed or DELETEd directly.
    """

    fixtures = [
        'pdc/apps/package/fixtures/test/rpm.json',
        'pdc/apps/package/fixtures/test/archive.json',
        'pdc/apps/package/fixtures/test/release.json',
        'pdc/apps/package/fixtures/test/build_image.json',
    ]

    def test_build_image_default_test_result_should_be_untested(self):
        """Every image, including a freshly created one, starts as 'untested'."""
        url = reverse('buildimage-list')
        response = self.client.get(url, format='json')
        total_count = response.data['count']
        url = reverse('buildimagertttests-list')
        response = self.client.get(url + '?test_result=untested', format='json')
        untested_count = response.data['count']
        self.assertEqual(total_count, untested_count)
        url = reverse('buildimage-list')
        data = {'image_id': 'new_build',
                'image_format': 'docker',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{'name': 'new_rpm', 'epoch': 0, 'version': '1.0.0',
                          'release': '1', 'arch': 'src', 'srpm_name': 'new_srpm'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        total_count += 1
        url = reverse('buildimagertttests-list')
        response = self.client.get(url + '?test_result=untested', format='json')
        untested_count = response.data['count']
        self.assertEqual(total_count, untested_count)

    def test_build_image_test_result_should_not_be_created(self):
        """POST on the RTT-test list is not allowed (405)."""
        url = reverse('buildimagertttests-list')
        data = {'build_nvr': 'fake_nvr', 'format': 'iso', 'test_result': 'untested'}
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.data, {u'detail': u'Method "POST" not allowed.'})
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_build_image_test_result_should_not_be_deleted(self):
        """DELETE on an RTT-test detail is not allowed (405)."""
        url = reverse('buildimagertttests-detail', args=[1])
        response = self.client.delete(url, format='json')
        self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)

    def test_filter_build_image_test_results_with_test_result(self):
        """Only 'untested' matches fixtures; 'passed'/'failed' match nothing."""
        url = reverse('buildimagertttests-list')
        response = self.client.get(url + '?test_result=untested', format='json')
        untested_count = response.data['count']
        self.assertGreater(untested_count, 0)
        url = reverse('buildimagertttests-list')
        response = self.client.get(url + '?test_result=passed', format='json')
        untested_count = response.data['count']
        self.assertEqual(0, untested_count)
        url = reverse('buildimagertttests-list')
        response = self.client.get(url + '?test_result=failed', format='json')
        untested_count = response.data['count']
        self.assertEqual(0, untested_count)

    def test_filter_build_image_test_results_with_format(self):
        """image_format filter: docker matches both fixtures, iso none."""
        url = reverse('buildimagertttests-list')
        response = self.client.get(url + '?image_format=docker', format='json')
        untested_count = response.data['count']
        self.assertEqual(untested_count, 2)
        response = self.client.get(url + '?image_format=iso', format='json')
        untested_count = response.data['count']
        self.assertEqual(0, untested_count)

    def test_filter_build_image_test_results_with_combinations(self):
        """build_nvr / test_result / image_format filters compose correctly."""
        url = reverse('buildimagertttests-list')
        response = self.client.get(url + '?build_nvr=my-server-docker-1.0-27', format='json')
        count = response.data['count']
        self.assertEqual(count, 1)
        response = self.client.get(url + '?build_nvr=fake_nvr', format='json')
        count = response.data['count']
        self.assertEqual(count, 0)
        # creating a second image with the same id but a different format
        # produces a second RTT test result for that build_nvr
        url = reverse('buildimage-list')
        data = {'image_id': 'my-server-docker-1.0-27',
                'image_format': 'iso',
                'md5': "0123456789abcdef0123456789abcdef",
                'rpms': [{'name': 'new_rpm', 'epoch': 0, 'version': '1.0.0',
                          'release': '1', 'arch': 'src', 'srpm_name': 'new_srpm'}]
                }
        response = self.client.post(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        url = reverse('buildimagertttests-list')
        response = self.client.get(url + '?build_nvr=my-server-docker-1.0-27', format='json')
        count = response.data['count']
        self.assertEqual(count, 2)
        response = self.client.get(url + '?build_nvr=my-server-docker-1.0-27&test_result=untested', format='json')
        count = response.data['count']
        self.assertEqual(count, 2)
        response = self.client.get(url + '?image_format=docker&build_nvr=my-client-docker', format='json')
        untested_count = response.data['count']
        self.assertEqual(1, untested_count)
        response = self.client.get(url + '?build_nvr=my-server-docker-1.0-27&test_result=untested&image_format=iso',
                                   format='json')
        count = response.data['count']
        self.assertEqual(count, 1)
        response = self.client.get(url + '?build_nvr=my-server-docker-1.0-27&test_result=passed', format='json')
        count = response.data['count']
        self.assertEqual(count, 0)

    def test_patch_build_image_test_results(self):
        """PATCHing test_result moves the record from 'untested' to 'passed'."""
        url = reverse('buildimagertttests-list')
        response = self.client.get(url + '?test_result=untested', format='json')
        ori_untested_count = response.data['count']
        url = reverse('buildimagertttests-detail', args=[1])
        data = {'test_result': 'passed'}
        response = self.client.patch(url, data, format='json')
        url = reverse('buildimagertttests-list')
        response = self.client.get(url + '?test_result=passed', format='json')
        untested_count = response.data['count']
        self.assertEqual(1, untested_count)
        url = reverse('buildimagertttests-list')
        response = self.client.get(url + '?test_result=untested', format='json')
        new_untested_count = response.data['count']
        self.assertEqual(new_untested_count, ori_untested_count - 1)

    def test_update_patch_build_image_test_results_not_allowed_fields(self):
        """Only test_result may be updated; build_nvr/format in the body give 400."""
        data = {'build_nvr': 'fake_nvr', 'format': 'iso', 'test_result': 'untested'}
        url = reverse('buildimagertttests-detail', args=[1])
        response = self.client.put(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        data = {'format': 'iso', 'test_result': 'untested'}
        url = reverse('buildimagertttests-detail', args=[1])
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        data = {'build_nvr': 'fake_nvr', 'test_result': 'untested'}
        url = reverse('buildimagertttests-detail', args=[1])
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        data = {'format': 'iso'}
        url = reverse('buildimagertttests-detail', args=[1])
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        data = {'build_nvr': 'fake_nvr'}
        url = reverse('buildimagertttests-detail', args=[1])
        response = self.client.patch(url, data, format='json')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
| tzhaoredhat/automation | pdc/apps/package/tests.py | Python | mit | 88,111 |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import json
from sys import exit
import os
from .base import SERVICES
from .base import BaseConfig
from .decorators import (
ansible_play_var,
default,
)
from ..util import get_ephemeral_devices, get_arch
class Ec2DeployConfig(BaseConfig):
    """Cluster deployment configuration for EC2.

    Extends BaseConfig with EC2 specifics: security-group naming,
    ephemeral-device discovery per instance type, and optional cluster
    launch templates loaded from ``templates_path``.
    """

    def __init__(
        self,
        deploy_path,
        config_path,
        hosts_path,
        checksums_path,
        templates_path,
        cluster_name,
    ):
        super(Ec2DeployConfig, self).__init__(
            deploy_path,
            config_path,
            hosts_path,
            checksums_path,
            templates_path,
            cluster_name,
        )
        # security group is named after the cluster
        self.sg_name = cluster_name + "-group"
        self.ephemeral_root = "ephemeral"
        # populated by init_template() when [ec2] cluster_template is set
        self.cluster_template_d = None
        self.metrics_drive_root = "media-" + self.ephemeral_root
        self.init_template(templates_path)

    def verify_config(self, action):
        self._verify_config(action)

    def verify_launch(self):
        """Validate both configured instance types before launching."""
        self.verify_instance_type(self.get("ec2", "default_instance_type"))
        self.verify_instance_type(self.get("ec2", "worker_instance_type"))

    def init_nodes(self):
        """Build self.node_d (hostname -> service list); exit on bad config."""
        self.node_d = {}
        for (hostname, value) in self.items("nodes"):
            if hostname in self.node_d:
                exit(
                    "Hostname {0} already exists twice in nodes".format(
                        hostname
                    )
                )
            service_list = []
            for service in value.split(","):
                if service in SERVICES:
                    service_list.append(service)
                else:
                    exit(
                        "Unknown service '{}' declared for node {}".format(
                            service, hostname
                        )
                    )
            self.node_d[hostname] = service_list

    def default_ephemeral_devices(self):
        """Ephemeral device names for the default instance type."""
        return get_ephemeral_devices(self.get("ec2", "default_instance_type"))

    def worker_ephemeral_devices(self):
        """Ephemeral device names for the worker instance type."""
        return get_ephemeral_devices(self.get("ec2", "worker_instance_type"))

    def max_ephemeral(self):
        """Largest ephemeral-device count across the two node types."""
        return max(
            (
                len(self.default_ephemeral_devices()),
                len(self.worker_ephemeral_devices()),
            )
        )

    def node_type_map(self):
        """Return {node_type: {'mounts': [...], 'devices': [...]}}.

        When a cluster template is active, its 'devices' map takes
        precedence over the instance-type-derived one.
        """
        if self.cluster_template_d:
            return self.cluster_template_d["devices"]

        node_types = {}
        node_list = [
            ("default", self.default_ephemeral_devices()),
            ("worker", self.worker_ephemeral_devices()),
        ]
        for (ntype, devices) in node_list:
            # self.mounts() presumably comes from BaseConfig -- confirm
            node_types[ntype] = {
                "mounts": self.mounts(len(devices)),
                "devices": devices,
            }
        return node_types

    def mount_root(self):
        """Root directory under which ephemeral devices are mounted."""
        return "/media/" + self.ephemeral_root

    @ansible_play_var
    @default("ext3")
    def fstype(self):
        # filesystem type used for ephemeral devices (default ext3)
        return self.get("ec2", "fstype")

    @ansible_play_var
    @default("no")
    def force_format(self):
        # whether to force (re)formatting of devices (default "no")
        return self.get("ec2", "force_format")

    def data_dirs_common(self, nodeType):
        """Mount points used as data dirs for the given node type."""
        return self.node_type_map()[nodeType]["mounts"]

    def metrics_drive_ids(self):
        """One metrics drive id per possible ephemeral device."""
        drive_ids = []
        for i in range(0, self.max_ephemeral()):
            drive_ids.append(self.metrics_drive_root + str(i))
        return drive_ids

    def shutdown_delay_minutes(self):
        return self.get("ec2", "shutdown_delay_minutes")

    def verify_instance_type(self, instance_type):
        """Exit unless the instance type uses hvm; pvm is unsupported.

        Skipped entirely when a cluster template is in use.
        """
        if not self.cluster_template_d:
            if get_arch(instance_type) == "pvm":
                exit(
                    "ERROR - Configuration contains instance type '{0}' "
                    "that uses pvm architecture."
                    "Only hvm architecture is supported!".format(instance_type)
                )

    def instance_tags(self):
        """Parse [ec2] instance_tags ('k1:v1,k2:v2') into a dict."""
        retd = {}
        if self.has_option("ec2", "instance_tags"):
            value = self.get("ec2", "instance_tags")
            if value:
                for kv in value.split(","):
                    (key, val) = kv.split(":")
                    retd[key] = val
        return retd

    def init_template(self, templates_path):
        """Load and validate the configured cluster template, if any."""
        if self.has_option("ec2", "cluster_template"):
            template_id = self.get("ec2", "cluster_template")
            template_path = os.path.join(templates_path, template_id)
            if os.path.exists(template_path):
                self.cluster_template_d = {"id": template_id}
                self.load_template_ec2_requests(template_path)
                self.load_template_device_map(template_path)
                self.validate_template()

    def load_template_ec2_requests(self, template_dir):
        """Load per-service EC2 launch requests from <service>.json files."""
        for json_path in glob.glob(os.path.join(template_dir, "*.json")):
            service = os.path.basename(json_path).rsplit(".", 1)[0]
            if service not in SERVICES:
                exit(
                    "ERROR - Template '{0}' has unrecognized option '{1}'. "
                    "Must be one of {2}".format(
                        self.cluster_template_d["id"], service, str(SERVICES)
                    )
                )
            with open(json_path, "r") as json_file:
                # load as string, so we can use string.Template
                # to inject config values
                self.cluster_template_d[service] = json_file.read()

    def load_template_device_map(self, template_dir):
        """Load the template's mandatory 'devices' JSON file."""
        device_map_path = os.path.join(template_dir, "devices")
        if not os.path.isfile(device_map_path):
            exit(
                "ERROR - template '{0}' is missing 'devices' config".format(
                    self.cluster_template_d["id"]
                )
            )
        with open(device_map_path, "r") as json_file:
            self.cluster_template_d["devices"] = json.load(json_file)

    def validate_template(self):
        """Exit with a descriptive error unless the loaded template is usable.

        Requires a 'worker' launch request, 'worker'/'default' device
        maps, and at least one node whose first listed service selects
        the 'worker' template.
        """
        if not self.cluster_template_d:
            exit(
                "ERROR - Template '{0}' is not defined!".format(
                    self.get("ec2", "cluster_template")
                )
            )
        if "worker" not in self.cluster_template_d:
            exit(
                "ERROR - '{0}' template config is invalid. No 'worker' "
                "launch request is defined".format(
                    self.cluster_template_d["id"]
                )
            )
        if "worker" not in self.cluster_template_d["devices"]:
            exit(
                "ERROR - '{0}' template is invalid. The devices file must "
                "have a 'worker' device map".format(
                    self.cluster_template_d["id"]
                )
            )
        if "default" not in self.cluster_template_d["devices"]:
            exit(
                "ERROR - '{0}' template is invalid. The devices file must "
                "have a 'default' device map".format(
                    self.cluster_template_d["id"]
                )
            )

        # Validate the selected launch template for each host
        worker_count = 0
        for hostname in self.node_d:
            # first service listed denotes the selected template
            selected_ec2_request = self.node_d[hostname][0]
            if "worker" == selected_ec2_request:
                worker_count = worker_count + 1
            else:
                if "worker" in self.node_d[hostname]:
                    exit(
                        "ERROR - '{0}' node config is invalid. The 'worker'"
                        " service should be listed first".format(hostname)
                    )
                if selected_ec2_request not in self.cluster_template_d:
                    if len(self.node_d[hostname]) > 1:
                        print(
                            "Hint: In template mode, the first service listed"
                            " for a host denotes its EC2 template"
                        )
                    exit(
                        "ERROR - '{0}' node config is invalid. No EC2 "
                        "template defined for the '{1}' service".format(
                            hostname, selected_ec2_request
                        )
                    )
        if worker_count == 0:
            exit(
                "ERROR - No worker instances are defined "
                "for template '{0}'".format(self.cluster_template_d["id"])
            )
| fluo-io/fluo-deploy | lib/muchos/config/ec2.py | Python | apache-2.0 | 9,189 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9c1 on 2015-11-24 13:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: drop 'related_items' and recreate 'related' on Item."""

    dependencies = [
        ('wiki', '0003_item_related_items'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='item',
            name='related_items',
        ),
        # 'related' is removed and re-added so its definition matches the
        # current model state (self-referencing M2M, blank allowed).
        migrations.RemoveField(
            model_name='item',
            name='related',
        ),
        migrations.AddField(
            model_name='item',
            name='related',
            field=models.ManyToManyField(blank=True, related_name='_item_related_+', to='wiki.Item'),
        ),
    ]
| n2o/labbook | wiki/migrations/0004_auto_20151124_1316.py | Python | mit | 705 |
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import
import functools
from argparse import ArgumentParser, _SubParsersAction
from contextlib import contextmanager
from pex.commands.command import Command
from pex.result import Result
from pex.typing import TYPE_CHECKING, Generic, cast
if TYPE_CHECKING:
from typing import Callable, ClassVar, Iterator, Optional, Type, TypeVar
_C = TypeVar("_C", bound="BuildTimeCommand")
class BuildTimeCommand(Command):
    """Base class for Pex CLI commands that execute at PEX build time.

    Subclasses either add flat arguments via ``add_extra_arguments`` or
    register subcommands via ``create_subcommands``; ``run`` dispatches
    to whichever subcommand function the parser selected.
    """

    class Subcommands(Generic["_C"]):
        """Helper that registers subcommand parsers for a command class."""

        def __init__(
            self,
            subparsers,  # type: _SubParsersAction
            include_verbosity,  # type: bool
        ):
            # type: (...) -> None
            self._subparsers = subparsers
            self._include_verbosity = include_verbosity

        @contextmanager
        def parser(
            self,
            name,  # type: str
            help,  # type: str
            func=None,  # type: Optional[Callable[[_C], Result]]
            include_verbosity=None,  # type: Optional[bool]
        ):
            # type: (...) -> Iterator[ArgumentParser]
            """Yield a fresh subparser; after the caller populates it,
            bind ``func`` as its handler and add the global arguments."""
            subcommand_parser = self._subparsers.add_parser(name=name, help=help)
            yield subcommand_parser
            if func:
                subcommand_parser.set_defaults(subcommand_func=func)
            # include_verbosity=None falls back to the instance-wide setting
            Command.register_global_arguments(
                subcommand_parser,
                include_verbosity=include_verbosity
                if include_verbosity is not None
                else self._include_verbosity,
            )

    # Whether --verbose style global options are offered by default.
    include_global_verbosity_option = True  # type: ClassVar[bool]

    @classmethod
    def add_arguments(cls, parser):
        # type: (ArgumentParser) -> None
        """Add subclass arguments, then global ones unless a subcommand
        handler is already registered (subparsers add their own)."""
        cls.add_extra_arguments(parser)
        if not parser.get_default("subcommand_func"):
            cls.register_global_arguments(
                parser, include_verbosity=cls.include_global_verbosity_option
            )

    @classmethod
    def add_extra_arguments(cls, parser):
        # type: (ArgumentParser) -> None
        # Hook for subclasses; intentionally a no-op here.
        pass

    @classmethod
    def create_subcommands(
        cls,  # type: Type[_C]
        parser,  # type: ArgumentParser
        description=None,  # type: Optional[str]
    ):
        # type: (...) -> Subcommands[_C]
        """Attach a subparser group whose default action shows help."""
        parser.set_defaults(subcommand_func=functools.partial(cls.show_help, parser))
        subparsers = parser.add_subparsers(description=description)
        return cls.Subcommands(subparsers, include_verbosity=cls.include_global_verbosity_option)

    def run(self):
        # type: (_C) -> Result
        """Dispatch to the parsed subcommand function if one was chosen."""
        subcommand_func = cast(
            "Optional[Callable[[_C], Result]]", getattr(self.options, "subcommand_func", None)
        )
        if subcommand_func is not None:
            return subcommand_func(self)
        raise NotImplementedError()
| pantsbuild/pex | pex/cli/command.py | Python | apache-2.0 | 2,971 |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from django.conf import settings
from django.http import HttpResponseRedirect # noqa
from horizon import exceptions
from horizon import middleware
from horizon.test import helpers as test
class MiddlewareTests(test.TestCase):
    """Tests for HorizonMiddleware exception, request and response hooks."""

    def test_redirect_login_fail_to_login(self):
        """A NotAuthorized exception redirects the user to the login page."""
        url = settings.LOGIN_URL
        request = self.factory.post(url)
        mw = middleware.HorizonMiddleware()
        resp = mw.process_exception(request, exceptions.NotAuthorized())
        resp.client = self.client
        self.assertRedirects(resp, url)

    def test_redirect_session_timeout(self):
        """A session idle longer than SESSION_TIMEOUT is redirected to logout."""
        requested_url = '/project/instances/'
        response_url = '%s?next=%s' % (settings.LOGOUT_URL, requested_url)
        request = self.factory.get(requested_url)
        try:
            timeout = settings.SESSION_TIMEOUT
        except AttributeError:
            timeout = 1800  # fallback used when SESSION_TIMEOUT is unset
        request.session['last_activity'] = int(time.time()) - (timeout + 10)
        mw = middleware.HorizonMiddleware()
        resp = mw.process_request(request)
        self.assertEqual(302, resp.status_code)
        self.assertEqual(response_url, resp.get('Location'))

    def test_process_response_redirect_on_ajax_request(self):
        """For AJAX requests a redirect becomes 200 + X-Horizon-Location header."""
        url = settings.LOGIN_URL
        mw = middleware.HorizonMiddleware()
        request = self.factory.post(url,
                                    HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        request.META['HTTP_X_REQUESTED_WITH'] = 'XMLHttpRequest'
        request.horizon = {'async_messages':
                           [('error', 'error_msg', 'extra_tag')]}
        response = HttpResponseRedirect(url)
        response.client = self.client
        resp = mw.process_response(request, response)
        self.assertEqual(200, resp.status_code)
        self.assertEqual(url, resp['X-Horizon-Location'])
| jumpstarter-io/horizon | horizon/test/tests/middleware.py | Python | apache-2.0 | 2,488 |
# flake8: noqa
# The MAML implementation has extra dependencies; keep the import optional
# so the rest of the package remains usable when they are absent.
try:
  from deepchem.metalearning.maml import MAML, MetaLearner
except ModuleNotFoundError:
  pass
| deepchem/deepchem | deepchem/metalearning/__init__.py | Python | mit | 114 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2010, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import os
import time
import string
import gettext
# Translation helper; the gettext domain is still 'pisi'.
# NOTE(review): ugettext is Python 2 only -- confirm this module is not
# expected to run under Python 3.
__trans = gettext.translation('pisi', fallback=True)
_ = __trans.ugettext
import pisilinux
import pisilinux.context as ctx
class Error(pisilinux.Error):
    """Module-level error for COMAR/D-Bus failures.

    Fix: the module imports ``pisilinux`` (not ``pisi``), so the original
    base class ``pisi.Error`` raised NameError at import time -- a leftover
    from the pisi -> pisilinux rename.
    """
    pass
# comar and dbus come from the comar-api package; surface a friendly
# module Error instead of a bare ImportError when it is missing.
try:
    import comar
    import dbus
except ImportError:
    raise Error(_("comar-api package is not fully installed"))
def is_char_valid(char):
    """Return True if *char* may appear in a D-Bus object path.

    Valid characters are ASCII letters, ASCII digits and underscore.
    """
    allowed = string.ascii_letters + string.digits + "_"
    return char in allowed
def is_method_missing(exception):
    """Tell whether a DBus exception reports a method missing from a COMAR script."""
    return exception._dbus_error_name in ("tr.org.pardus.comar.python.missing",
                                          "tr.org.pardus.comar.Missing")
def safe_script_name(package):
    """Generate a DBus-safe object name from a package name.

    Every character outside [A-Za-z0-9_] is mapped to '_', and an
    underscore is prepended when the result would start with a digit
    (DBus element names may not begin with a digit — the original code
    applied the same rule).

    Improvements over the previous version: no longer shadows the
    builtin `object`, builds the result in a single pass instead of one
    full-string str.replace() scan per character, and returns '' for an
    empty input instead of raising IndexError.
    """
    allowed = set(string.ascii_letters + string.digits + "_")
    name = ''.join(c if c in allowed else '_' for c in package)
    if name and name[0].isdigit():
        name = '_%s' % name
    return name
def get_link():
    """Connect to the COMAR daemon and return the link.

    Retries the connection for roughly 7 seconds (0.2s between attempts)
    while COMAR starts up or restarts after an update.  Raises Error with
    all collected failure messages if no attempt succeeds.
    """
    sockname = "/var/run/dbus/system_bus_socket"
    # YALI starts comar chrooted in the install target, but uses PiSi
    # outside of the chroot environment, so Pisi needs to use a different
    # socket path to be able to connect true dbus (and comar).
    # (usually /var/run/dbus/system_bus_socket)
    if ctx.dbus_sockname:
        sockname = ctx.dbus_sockname
    # If COMAR package is updated, all new configuration requests should be
    # made through new COMAR service. Passing alternate=True to Link() class
    # will ensure this.
    alternate = bool(ctx.comar_updated)
    # This function is sometimes called when comar has recently started
    # or restarting after an update. So we give comar a chance to become
    # active in a reasonable time.
    timeout = 7
    exceptions = []
    while timeout > 0:
        try:
            link = comar.Link(socket=sockname, alternate=alternate)
            link.setLocale()
            return link
        except Exception as e:
            # The previous version had two byte-identical except clauses
            # (dbus.DBusException and Exception); a single handler covers
            # both with the same behavior.
            exceptions.append(str(e))
        time.sleep(0.2)
        timeout -= 0.2
    raise Error(_("Cannot connect to COMAR: \n %s\n")
                % "\n ".join(exceptions))
def post_install(package_name, provided_scripts,
                 scriptpath, metapath, filepath,
                 fromVersion, fromRelease, toVersion, toRelease):
    """Run a package's post-install configuration through COMAR.

    package_name     - name of the freshly installed package.
    provided_scripts - COMAR script descriptors shipped by the package;
                       each exposes .name, .om (object model) and .script.
    scriptpath       - directory holding the package's COMAR scripts.
    metapath,
    filepath         - metadata / file-list paths handed to every
                       registered System.PackageHandler script.
    fromVersion, fromRelease,
    toVersion, toRelease
                     - upgrade bookkeeping forwarded to the package's own
                       System.Package.postInstall method.

    Raises Error on any DBus failure, except when a script merely lacks
    the invoked method (that case is silently ignored).
    """
    ctx.ui.info(_("Configuring %s package") % package_name)
    self_post = False
    package_name = safe_script_name(package_name)
    if package_name == 'comar':
        ctx.ui.debug(_("COMAR package updated. From now on,"
                       " using new COMAR daemon."))
        # NOTE(review): this module imports `pisilinux`, not `pisi`; this
        # line would raise NameError as written — confirm the intended
        # package name.
        pisi.api.set_comar_updated(True)
    link = get_link()
    for script in provided_scripts:
        ctx.ui.debug(_("Registering %s comar script") % script.om)
        # Unnamed scripts are registered under the package's own name.
        script_name = safe_script_name(script.name) \
                if script.name else package_name
        if script.om == "System.Package":
            # Remember to run the package's own postInstall at the end.
            self_post = True
        try:
            link.register(script_name, script.om,
                          os.path.join(scriptpath, script.script))
        except dbus.DBusException as exception:
            raise Error(_("Script error: %s") % exception)
        if script.om == "System.Service":
            try:
                link.System.Service[script_name].registerState()
            except dbus.DBusException as exception:
                raise Error(_("Script error: %s") % exception)
    ctx.ui.debug(_("Calling post install handlers"))
    for handler in link.System.PackageHandler:
        try:
            link.System.PackageHandler[handler].setupPackage(
                metapath,
                filepath,
                timeout=ctx.dbus_timeout)
        except dbus.DBusException as exception:
            # Do nothing if setupPackage method is not defined
            # in package script
            if not is_method_missing(exception):
                raise Error(_("Script error: %s") % exception)
    if self_post:
        # postInstall expects strings, not None, for the "from" fields.
        if not fromVersion:
            fromVersion = ""
        if not fromRelease:
            fromRelease = ""
        ctx.ui.debug(_("Running package's post install script"))
        try:
            link.System.Package[package_name].postInstall(
                fromVersion, fromRelease, toVersion, toRelease,
                timeout=ctx.dbus_timeout)
        except dbus.DBusException as exception:
            # Do nothing if postInstall method is not defined in package script
            if not is_method_missing(exception):
                raise Error(_("Script error: %s") % exception)
def pre_remove(package_name, metapath, filepath):
    """Run a package's pre-removal hooks via COMAR.

    Invokes the package's own System.Package.preRemove script (when one
    is registered) and then every registered package handler's
    cleanupPackage method.  A script that simply lacks the invoked
    method is ignored; any other DBus error is re-raised as Error.
    """
    ctx.ui.info(_("Running pre removal operations for %s") % package_name)
    link = get_link()
    script_name = safe_script_name(package_name)
    if script_name in list(link.System.Package):
        ctx.ui.debug(_("Running package's pre remove script"))
        try:
            link.System.Package[script_name].preRemove(
                timeout=ctx.dbus_timeout)
        except dbus.DBusException as exc:
            # Missing preRemove in the script is fine; anything else is a
            # real failure.
            if not is_method_missing(exc):
                raise Error(_("Script error: %s") % exc)
    ctx.ui.debug(_("Calling pre remove handlers"))
    for handler_name in list(link.System.PackageHandler):
        handler = link.System.PackageHandler[handler_name]
        try:
            handler.cleanupPackage(metapath, filepath,
                                   timeout=ctx.dbus_timeout)
        except dbus.DBusException as exc:
            if not is_method_missing(exc):
                raise Error(_("Script error: %s") % exc)
def post_remove(package_name, metapath, filepath, provided_scripts=None):
    """Run a package's post-removal hooks via COMAR.

    Invokes the package's own System.Package.postRemove script (if
    registered), every handler's postCleanupPackage method, and finally
    unregisters all of the package's COMAR scripts.

    provided_scripts defaults to an empty sequence; the previous mutable
    default argument ([]) was replaced with the None sentinel to avoid
    the shared-default pitfall.  Missing script methods are ignored; any
    other DBus error is re-raised as Error.
    """
    if provided_scripts is None:
        provided_scripts = []
    ctx.ui.info(_("Running post removal operations for %s") % package_name)
    link = get_link()
    package_name = safe_script_name(package_name)
    # Unregister every script the package provided, plus its own name.
    scripts = set(safe_script_name(s.name)
                  for s in provided_scripts if s.name)
    scripts.add(package_name)
    if package_name in list(link.System.Package):
        ctx.ui.debug(_("Running package's postremove script"))
        try:
            link.System.Package[package_name].postRemove(
                timeout=ctx.dbus_timeout)
        except dbus.DBusException as exception:
            # Do nothing if postRemove method is not defined in package script
            if not is_method_missing(exception):
                raise Error(_("Script error: %s") % exception)
    ctx.ui.debug(_("Calling post remove handlers"))
    for handler in list(link.System.PackageHandler):
        try:
            link.System.PackageHandler[handler].postCleanupPackage(
                metapath, filepath, timeout=ctx.dbus_timeout)
        except dbus.DBusException as exception:
            # Do nothing if postCleanupPackage method is not defined
            # in package script
            if not is_method_missing(exception):
                raise Error(_("Script error: %s") % exception)
    ctx.ui.debug(_("Unregistering comar scripts"))
    for scr in scripts:
        try:
            link.remove(scr, timeout=ctx.dbus_timeout)
        except dbus.DBusException as exception:
            raise Error(_("Script error: %s") % exception)
| hknyldz/pisitools | pisilinux/pisilinux/comariface.py | Python | gpl-3.0 | 8,110 |
from __future__ import unicode_literals, division, absolute_import
import re
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from unicodedata import normalize
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.requests import RequestException
from flexget.utils.soup import get_soup
from flexget.components.sites.utils import torrent_availability
from flexget.utils.tools import parse_filesize
log = logging.getLogger('limetorrents')
def clean_symbols(text):
    """Replace common separator symbols with spaces and lowercase the text.

    Unicode strings are first put into NFKD (decomposed) form; any
    remaining character that is not an ASCII letter, digit, or space is
    then dropped.
    """
    cleaned = normalize('NFKD', text) if isinstance(text, str) else text
    cleaned = re.sub(r'[ \(\)\-_\[\]\.]+', ' ', cleaned).lower()
    # Strip whatever punctuation the first pass left behind.
    return re.sub(r"[^a-zA-Z0-9 ]", "", cleaned)
class Limetorrents(object):
    """
    Limetorrents search plugin.

    The config may be given either as a bare value (used as the search
    category) or as a mapping with:
      category: one of all/anime/applications/games/movies/music/tv/other
                (default: 'all')
      order_by: 'date' or 'seeds' (default: 'date')
    """

    # Config schema validated by FlexGet before search() is invoked.
    schema = {
        'oneOf': [
            {'type': 'boolean'},
            {
                'type': 'object',
                'properties': {
                    'category': {
                        'type': 'string',
                        'enum': [
                            'all',
                            'anime',
                            'applications',
                            'games',
                            'movies',
                            'music',
                            'tv',
                            'other',
                        ],
                        'default': 'all',
                    },
                    'order_by': {'type': 'string', 'enum': ['date', 'seeds'], 'default': 'date'},
                },
                'additionalProperties': False,
            },
        ]
    }

    # Site root; the search path is appended to this URL.
    base_url = 'https://www.limetorrents.cc/'
    errors = False

    @plugin.internet(log)
    def search(self, task, entry, config):
        """
        Search for entries on Limetorrents.

        Scrapes the search result page and returns a set of Entry objects
        with url/title/seeds/leeches/availability/size fields.
        """
        if not isinstance(config, dict):
            config = {'category': config}
        order_by = ''
        if isinstance(config.get('order_by'), str):
            if config['order_by'] != 'date':
                # 'date' is the site default ordering and needs no URL suffix.
                order_by = '{0}/1'.format(config['order_by'])
        category = 'all'
        if isinstance(config.get('category'), str):
            category = '{0}'.format(config['category'])
        entries = set()
        for search_string in entry.get('search_strings', [entry['title']]):
            # No special characters - use dashes instead of %20
            cleaned_search_string = clean_symbols(search_string).replace(' ', '-')
            query = 'search/{0}/{1}/{2}'.format(
                category, cleaned_search_string.encode('utf8'), order_by
            )
            log.debug(
                'Using search: %s; category: %s; ordering: %s',
                cleaned_search_string,
                category,
                order_by or 'default',
            )
            try:
                page = task.requests.get(self.base_url + query)
                log.debug('requesting: %s', page.url)
            except RequestException as e:
                # Skip this search string but keep trying the others.
                log.error('Limetorrents request failed: %s', e)
                continue
            soup = get_soup(page.content)
            # Anchors with class 'csprite_dl14' are the per-torrent
            # download links; one table row per torrent.
            if soup.find('a', attrs={'class': 'csprite_dl14'}) is not None:
                for link in soup.findAll('a', attrs={'class': 'csprite_dl14'}):
                    row = link.find_parent('tr')
                    info_url = str(link.get('href'))
                    # Get the title from the URL as it's complete versus the actual Title text which gets cut off
                    title = str(link.next_sibling.get('href'))
                    title = title[: title.rfind('-torrent')].replace('-', ' ')
                    # Drop the leading character (presumably the '/' of the
                    # URL path — TODO confirm against the live page markup).
                    title = title[1:]
                    data = row.findAll('td', attrs={'class': 'tdnormal'})
                    size = str(data[1].text).replace(',', '')
                    seeds = int(row.find('td', attrs={'class': 'tdseed'}).text.replace(',', ''))
                    leeches = int(row.find('td', attrs={'class': 'tdleech'}).text.replace(',', ''))
                    size = parse_filesize(size)
                    e = Entry()
                    e['url'] = info_url
                    e['title'] = title
                    e['torrent_seeds'] = seeds
                    e['torrent_leeches'] = leeches
                    e['torrent_availability'] = torrent_availability(
                        e['torrent_seeds'], e['torrent_leeches']
                    )
                    e['content_size'] = size
                    entries.add(e)
        return entries
@event('plugin.register')
def register_plugin():
    # Register Limetorrents as a search-interface plugin (API v2) under
    # the plugin name 'limetorrents'.
    plugin.register(Limetorrents, 'limetorrents', interfaces=['search'], api_ver=2)
| gazpachoking/Flexget | flexget/components/sites/sites/limetorrents.py | Python | mit | 4,889 |
# -*- coding: utf-8 -*-
# ============================================================================ #
# SMB2_Header.py
#
# Copyright:
# Copyright (C) 2016 by Christopher R. Hertel
#
# $Id: SMB2_Header.py; 2019-06-18 17:56:20 -0500; crh$
#
# ---------------------------------------------------------------------------- #
#
# Description:
# Carnaval Toolkit: SMB2+ message header parsing and composition.
#
# ---------------------------------------------------------------------------- #
#
# License:
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# See Also:
# The 0.README file included with the distribution.
#
# ---------------------------------------------------------------------------- #
# This code was developed in participation with the
# Protocol Freedom Information Foundation.
# <www.protocolfreedom.org>
# ---------------------------------------------------------------------------- #
#
# Notes:
#
# - This module provides the basic tools used to compose and decompose
# SMB2/3 message headers. This module can be used by both client and
# server implementations.
#
# - The specific dialects considered by this module are:
# Common Name | Official Name | Dialect ID
# ============|===============|===========
# SMB2.0 | SMB 2.0.2 | 0x0202
# SMB2.1 | SMB 2.1 | 0x0210
# SMB3.0 | SMB 3.0 | 0x0300
# SMB3.02 | SMB 3.0.2 | 0x0302
# SMB3.11 | SMB 3.1.1 | 0x0311
#
# Others can be added as they are conjured up from the underworld.
#
# - The Python <int> type is "at least" 32 bits, but it's signed, so to
# be safe we use the <long> type to handle ULONG field values. That
# ensures that unsigned 32-bit field values are handled correctly.
# The <long> type can be used to store UINT32 and UINT64 values, as
# well as shorter integer types.
# See: https://docs.python.org/2/library/stdtypes.html#typesnumeric
#
# - This project, overall, is designed to protect against sending invalid
# field values. It also, to some extent, protects against invalid values
# in received messages. However, to make it easy to do protocol testing,
# these protections can be easily bypassed.
#
# References:
#
# [MS-SMB2] Microsoft Corporation, "Server Message Block (SMB)
# Protocol Versions 2 and 3",
# http://msdn.microsoft.com/en-us/library/cc246482.aspx
#
# ToDo:
# - Add more unit tests.
# - Add support for "related commands" (NextCommand chaining).
# - Add support for transform headers (\xfdSMB).
# - Extend the context information to include more connection-related
# data, including GUID, flags, etc.
# - Check the assert() calls in setters when decomposing a message header.
# We want consistent error handling, and asserts() can be compiled out.
# - Allow (and keep) invalid values where docs say "must ignore".
#
# FIX:
# - Use exceptions from SMB_Core.
#
# Moose:
#
# \_\_ _/_/
# \__/
# (oo)
# (..)
# --
#
# ============================================================================ #
#
"""Carnaval Toolkit: SMB2+ message header packing and parsing.
Common classes, functions, etc., for packing and unpacking SMB2+ Headers.
This module deals with structures common to both the client and server.
CONSTANTS:
Protocol constants:
SMB2_MSG_PROTOCOL : \\xFESMB; SMB2 message prefix (protocol ID).
4 bytes.
SMB2_HDR_SIZE : The fixed length of an SMB2+ message header
(64 bytes).
Supported SMB2+ dialect revision codes:
SMB2_DIALECT_202 : SMB 2.0.2 dialect revision (Vista, W2K8 Server)
SMB2_DIALECT_210 : SMB 2.1 dialect revision (Win7, W2K8r2 Server)
SMB2_DIALECT_300 : SMB 3.0 dialect revision (Win8, W2K12 Server)
SMB2_DIALECT_302 : SMB 3.0.2 dialect revision (Win8.1, W2K12r2 Server)
SMB2_DIALECT_311 : SMB 3.1.1 dialect revision (Win10, 2016 Server)
SMB2_DIALECT_LIST : A list of all supported dialects, ordered from
lowest to highest.
SMB2_DIALECT_MIN : The lowest supported dialect.
SMB2_DIALECT_MAX : The highest supported dialect.
SMB2+ command codes:
SMB2_COM_NEGOTIATE : Dialect and feature support negotiation.
SMB2_COM_SESSION_SETUP : Authentication and session establishment.
SMB2_COM_LOGOFF : Close a session; log out.
SMB2_COM_TREE_CONNECT : Connect to a remote share; mount.
SMB2_COM_TREE_DISCONNECT : Disconnect a connected share; umount.
SMB2_COM_CREATE : Create/open a filesystem object (file).
SMB2_COM_CLOSE : Close a previously opened handle.
SMB2_COM_FLUSH : Push data to disk (or thereabouts).
SMB2_COM_READ : Get some data.
SMB2_COM_WRITE : Put some data.
SMB2_COM_LOCK : Byte-range locks.
SMB2_COM_IOCTL : Do fiddly stuff.
SMB2_COM_CANCEL : Don't do whatever you're waiting to do.
SMB2_COM_ECHO : Ping!
SMB2_COM_QUERY_DIRECTORY : Find things in the Object Store.
SMB2_COM_CHANGE_NOTIFY : Let me know if something happens.
SMB2_COM_QUERY_INFO : Get some metadata.
SMB2_COM_SET_INFO : Put some metadata.
SMB2_COM_OPLOCK_BREAK : Server->client lease/oplock break.
SMB2+ header flags:
SMB2_FLAGS_SERVER_TO_REDIR : Response
SMB2_FLAGS_ASYNC_COMMAND : Async
SMB2_FLAGS_RELATED_OPERATIONS : Chained command
SMB2_FLAGS_SIGNED : Signed packet
SMB2_FLAGS_DFS_OPERATIONS : Distributed File System
SMB2_FLAGS_REPLAY_OPERATION : SMB3 Replay
SMB2_FLAGS_MASK : Flags Bitmask
"""
# Imports -------------------------------------------------------------------- #
#
import struct # Binary data handling.
from SMB_Status import * # Windows NT Status Codes.
from common.HexDump import hexstr # Convert binary data to readable output.
from common.HexDump import hexstrchop # Ditto, but with linewrap.
from common.HexDump import hexdump # Formatted hex dump à la hexdump(1).
# Constants ------------------------------------------------------------------ #
#
# Protocol constants
SMB2_MSG_PROTOCOL = '\xFESMB' # Standard SMB2 message prefix (protocol ID).
SMB2_HDR_SIZE = 64 # Fixed SMB2+ header size.
# Known SMB2+ dialect revision codes.
# An unknown or undefined dialect is indicated using <None>.
SMB2_DIALECT_202 = 0x0202 # SMB 2.0.2 dialect revision (Vista/W2K8 Server)
SMB2_DIALECT_210 = 0x0210 # SMB 2.1 dialect revision (Win7/W2K8r2 Server)
SMB2_DIALECT_300 = 0x0300 # SMB 3.0 dialect revision (Win8/W2K12 Server)
SMB2_DIALECT_302 = 0x0302 # SMB 3.0.2 dialect revision (Win8.1/W2K12r2 Server)
SMB2_DIALECT_311 = 0x0311 # SMB 3.1.1 dialect revision (Win10/W2K16 Server)
# List of supported dialects, in order from oldest to newest.
SMB2_DIALECT_LIST = [ SMB2_DIALECT_202,
SMB2_DIALECT_210,
SMB2_DIALECT_300,
SMB2_DIALECT_302,
SMB2_DIALECT_311 ]
SMB2_DIALECT_MIN = SMB2_DIALECT_LIST[0] # Oldest supported revision.
SMB2_DIALECT_MAX = SMB2_DIALECT_LIST[-1] # Newest supported revision.
# SMB2/3 command codes (there are, currently, 19 SMB2+ command codes).
SMB2_COM_NEGOTIATE = 0x0000 # 0
SMB2_COM_SESSION_SETUP = 0x0001 # 1
SMB2_COM_LOGOFF = 0x0002 # 2
SMB2_COM_TREE_CONNECT = 0x0003 # 3
SMB2_COM_TREE_DISCONNECT = 0x0004 # 4
SMB2_COM_CREATE = 0x0005 # 5
SMB2_COM_CLOSE = 0x0006 # 6
SMB2_COM_FLUSH = 0x0007 # 7
SMB2_COM_READ = 0x0008 # 8
SMB2_COM_WRITE = 0x0009 # 9
SMB2_COM_LOCK = 0x000A # 10
SMB2_COM_IOCTL = 0x000B # 11
SMB2_COM_CANCEL = 0x000C # 12
SMB2_COM_ECHO = 0x000D # 13
SMB2_COM_QUERY_DIRECTORY = 0x000E # 14
SMB2_COM_CHANGE_NOTIFY = 0x000F # 15
SMB2_COM_QUERY_INFO = 0x0010 # 16
SMB2_COM_SET_INFO = 0x0011 # 17
SMB2_COM_OPLOCK_BREAK = 0x0012 # 18
# SMB2/3 header flags
SMB2_FLAGS_SERVER_TO_REDIR = 0x00000001 # Response
SMB2_FLAGS_ASYNC_COMMAND = 0x00000002 # Async
SMB2_FLAGS_RELATED_OPERATIONS = 0x00000004 # ANDX
SMB2_FLAGS_SIGNED = 0x00000008 # Signed packet
SMB2_FLAGS_DFS_OPERATIONS = 0x10000000 # Distributed File System (DFS)
SMB2_FLAGS_REPLAY_OPERATION = 0x20000000 # SMB3 Replay
SMB2_FLAGS_PRIORITY_MASK = 0x00000070 # SMB311 priority bits
SMB2_FLAGS_MASK = 0x3000007F # Bitmask
# Max Size values
_UCHAR_MAX = 0xFF # Bitmask for Unsigned 8-bit (UCHAR) values.
_USHORT_MAX = 0xFFFF # Bitmask for Unsigned 16-bit (USHORT) values.
_ULONG_MAX = 0xFFFFFFFF # Bitmask for Unsigned 32-bit (ULONG) values.
_UINT64_MAX = (2**64) - 1 # Bitmask for Unsigned 64-bit (UINT64) values.
# Classes -------------------------------------------------------------------- #
#
class _SMB2_Header( object ):
# SMB2/SMB3 Message Header; [MS-SMB; 2.2.1].
#
# This class is used to format both Sync and Async SMB2 headers.
#
# Reminder: SMB2 and SMB3 are names for different sets of dialects of the
# same protocol; SMB3.0 was originally SMB2.2. Can you say
# "Marketing Upgrade"?
#
# Class values:
  #   Values instantiated once for the class (so that all instances can use them).
#
# These represent the four possible header formats defined for the
# supported SMB2 dialects. It's basically a 2x2 matrix.
#
# _format_SMB2_StatAsync - Async header, with <status> and <asyncId>.
# _format_SMB2_StatTreeId - Sync header, with <status> and <treeId>.
# _format_SMB2_cSeqAsync - Async header, with <channelSeq> and <asyncId>.
# _format_SMB2_cSeqTreeId - Sync header, with <channelSeq> and <treeId>.
#
# In general, Async headers are sent in server responses that are used to
# tell the client to wait for a pending operation to complete. That is,
# they are "hang on a bit" messages, telling the client not to time out.
#
# A client uses an async header when it is sending a CANCEL request for
# a command for which the server has already sent an Async response.
# That is:
# Command --> (sync)
# <-- Hang on a bit (async)
# Nevermind --> (async)
# <-- Command canceled (sync)
# The middle two are sent using Async headers.
#
# These two additional patterns are used for decoding header variants.
# _format_2H - Two unsigned 16-bit integers.
# _format_Q - One unsigned 64-bit integer.
#
# [MS-SMB2; 2.2.1] also mystically says that the Async header "MAY be used
# for any request", but doesn't explain when or why a client would do such
# a confusing thing.
#
# _cmd_LookupDict - A dictionary that maps command codes to strings.
# This is used for composing error messages, and when
# providing a header dump.
#
_format_SMB2_StatAsync = struct.Struct( '<4s H H L H H L L Q Q Q 16s' )
_format_SMB2_StatTreeId = struct.Struct( '<4s H H L H H L L Q L L Q 16s' )
_format_SMB2_cSeqAsync = struct.Struct( '<4s H H H H H H L L Q Q Q 16s' )
_format_SMB2_cSeqTreeId = struct.Struct( '<4s H H H H H H L L Q L L Q 16s' )
_format_2H = struct.Struct( "<H H" )
_format_Q = struct.Struct( "<Q" )
_cmd_LookupDict = \
{
SMB2_COM_NEGOTIATE : "NEGOTIATE",
SMB2_COM_SESSION_SETUP : "SESSION_SETUP",
SMB2_COM_LOGOFF : "LOGOFF",
SMB2_COM_TREE_CONNECT : "TREE_CONNECT",
SMB2_COM_TREE_DISCONNECT: "TREE_DISCONNECT",
SMB2_COM_CREATE : "CREATE",
SMB2_COM_CLOSE : "CLOSE",
SMB2_COM_FLUSH : "FLUSH",
SMB2_COM_READ : "READ",
SMB2_COM_WRITE : "WRITE",
SMB2_COM_LOCK : "LOCK",
SMB2_COM_IOCTL : "IOCTL",
SMB2_COM_CANCEL : "CANCEL",
SMB2_COM_ECHO : "ECHO",
SMB2_COM_QUERY_DIRECTORY: "QUERY_DIRECTORY",
SMB2_COM_CHANGE_NOTIFY : "CHANGE_NOTIFY",
SMB2_COM_QUERY_INFO : "QUERY_INFO",
SMB2_COM_SET_INFO : "SET_INFO",
SMB2_COM_OPLOCK_BREAK : "OPLOCK_BREAK"
}
# _SMB2_Header class methods:
#
@classmethod
def parseMsg( cls, msgBlob=None, dialect=SMB2_DIALECT_MIN ):
"""Decompose wire data and return an _SMB2_Header object.
Input:
cls - This class.
msgBlob - An array of at least 64 bytes, representing an SMB2+
message in wire format.
dialect - The minimum dialect under which to parse the header.
Output:
An <_SMB2_Header> object.
Errors:
AssertionError - Thrown if:
+ The length of <msgBlob> is less than the
minimum of 64 bytes.
+ The command code parsed from the message is
not a valid command code.
+ The given dialect is not known.
ValueError - Thrown if the packet cannot possibly contain a
valid SMB2+ message header. This exception is
raised if either the ProtocolId field doesn't
contain the correct string, or if the
StructureSize value is incorrect.
Notes:
- This function does not parse SMB3 Transform Headers. An SMB3
Transform header will be rejected with a ValueError.
- Beyond the basics of verifying that ProtocolId and StructureSize
are correct, this function does _no_ validation of the input.
"""
# Fundamental sanity check.
assert( SMB2_HDR_SIZE <= len( msgBlob ) ), "Incomplete message header."
# Parse it. Use the simple sync response format.
tup = cls._format_SMB2_StatTreeId.unpack( msgBlob[:SMB2_HDR_SIZE] )
# Look for trouble.
if( SMB2_MSG_PROTOCOL != tup[0] ):
raise ValueError( "Malformed SMB2 ProtocolId: [%s]." % repr( tup[0] ) )
elif( SMB2_HDR_SIZE != tup[1] ):
s = "The SMB2 Header StructureSize must be 64, not %d." % tup[1]
raise ValueError( s )
# Create and populate a header record instance.
hdr = cls( tup[4], dialect )
hdr._creditCharge = tup[2]
# 3: Status/ChannelSeq/Reserved1; see below
hdr.command = tup[4]
hdr._creditReqResp = tup[5]
hdr._flags = tup[6]
hdr._nextCommand = tup[7]
hdr._messageId = tup[8]
# 9, 10: Reserved2/TreeId/AsyncId; see below
hdr._sessionId = tup[11]
hdr._signature = tup[12]
# Handle the overloaded fields.
if( hdr.flagReply or (dialect < SMB2_DIALECT_300) ):
hdr._status = tup[3]
else:
hdr._channelSeq, hdr._reserved1 = cls._format_2H.unpack( msgBlob[8:12] )
if( hdr.flagAsync ):
hdr._asyncId = cls._format_Q.unpack( msgBlob[32:40] )
else:
hdr._reserved2 = tup[9]
hdr._treeId = tup[10]
# All done.
return( hdr )
@classmethod
def commandName( self, CmdId=0xFF ):
"""Given an SMB2 command code, return the name of the command.
Input:
CmdId - An SMB2/3 command code.
Output: A string.
If <CmdId> is a known SMB2/3 command code, the string
will be the command name. Otherwise, the empty string
is returned.
"""
if( CmdId in self._cmd_LookupDict ):
return( self._cmd_LookupDict[CmdId] )
return( '' )
def __init__( self, command=None, dialect=SMB2_DIALECT_MIN ):
# Create an SMB2 message header object.
#
# Input:
# command - The command code; one of the SMB2_COM_* values.
# dialect - The dialect version under which this header is being
# created. This is contextual information; in future
# revisions we may need to expand the context data to
# include things like negotiated flag settings, etc.
# Errors:
# AssertionError - Thrown if the given command code is not a
# known code, or if the given dialect is not
# in the list of supported dialects.
# [ TypeError, - Either of these may be thrown if an input value
# ValueError ] cannot be converted into the expected type.
#
# Notes:
# Several SMB2 Header fields are overloaded. For example, the
# <Status> field is a four byte field at offset 8.
# * In the 2.0 and 2.1 dialects, this field MUST be zero in
# Request messages.
# * In the 3.x dalects, in a request message only, the same
# bytes are used for a 2-byte <ChannelSequence> field,
# followed by a 2-byte Reserved-must-be-zero field.
# * In SMB2/3 Response messages, the field is always the 4-byte
# <Status> field.
#
# Similarly, in an Async header the 8 bytes at offset 32 are used
# for the <AsyncId>. In a Sync header, the first four bytes are
# Reserved-must-be-zero, and the next four bytes are the TreeID.
#
self._protocolId = SMB2_MSG_PROTOCOL # 4 bytes
self._headerSize = SMB2_HDR_SIZE # 2 bytes
self._creditCharge = 0 # 2 bytes
self._status = 0 # 4 bytes -- <status> --
self._channelSeq = 0 # 2 bytes \ Same bytes
self._reserved1 = 0 # 2 bytes / as <status>
self.command = command # 2 bytes
self._creditReqResp = 0 # 2 bytes
self._flags = 0 # 4 bytes
self._nextCommand = 0 # 4 bytes
self._messageId = 0 # 8 bytes
self._reserved2 = 0 # 4 bytes \ Same bytes
self._treeId = 0 # 4 bytes / as <asyncId>
self._asyncId = 0 # 8 bytes -- <asyncId> --
self._sessionId = 0 # 8 bytes
self._signature = (16 * '\0') # 16 bytes
# 64 bytes total.
# Context information:
#
assert( dialect in SMB2_DIALECT_LIST ), "Unknown Dialect: %0x04X" % dialect
self._dialect = int( dialect )
  @property
  def creditCharge( self ):
    """Get/set the SMB2_Header.CreditCharge field value (USHORT).
    Errors:
      AssertionError  - Thrown if the assigned value (after conversion
                        to an <int>) is either negative or greater
                        than 0xFFFF.
                      - Thrown if the assigned value is non-zero and
                        the current dialect is SMBv2.0.2.
      [ TypeError,    - Either of these may be thrown if the assigned
        ValueError ]    value cannot be converted into an <int>.
    Notes:
      It is out of character to throw an exception based on the given
      dialect level.  This layer does minimal enforcement of
      per-dialect syntax rules, generally allowing the caller to make
      their own mess.  You can, of course, still bypass the assertion
      by setting <instance>._creditCharge directly.
    """
    return( self._creditCharge )
  @creditCharge.setter
  def creditCharge( self, cc ):
    cc = int( cc )
    assert( 0 <= cc <= _USHORT_MAX ), "Assigned value (%d) out of range." % cc
    # CreditCharge is reserved (must be zero) in the 2.0.2 dialect.
    assert( (cc == 0) or (self._dialect > SMB2_DIALECT_202) ), \
      "Reserved; Value must be zero in SMBv2.0.2."
    self._creditCharge = cc
  @property
  def status( self ):
    """Get/set the SMB2_Header.status field (ULONG).
    Errors:
      AssertionError  - Thrown if the assigned value (after conversion
                        to a <long>) is either negative or greater
                        than 0xFFFFFFFF.
      [ TypeError,    - Either of these may be thrown if the assigned
        ValueError ]    value cannot be converted into a <long>.
    Notes:
      This field should only be set in response messages, and should
      be considered "reserved; must be zero" in all requests.
      Starting with SMBv3.0.0, this field is superseded in request
      messages by the 16-bit ChannelSequence field (plus an additional
      16-bit Reserved field).
      It is probably easiest to think of it this way:
      - There is no <Status> field in request messages; it only exists
        in response messages.
      - If the dialect is less than 0x0300, then there is a 32-bit
        "Reserved Must Be Zero" field where the <Status> field might
        otherwise exist.
      - If the dialect is 0x0300 or greater, then there is a 16-bit
        <ChannelSequence> field followed by a 16-bit "Reserved Must Be
        Zero" field where the <Status> might otherwise exist.
    """
    return( self._status )
  @status.setter
  def status( self, st ):
    # <None> (or any falsy value) is accepted as a synonym for zero.
    st = 0L if( not st ) else long( st )
    assert( 0 <= st <= _ULONG_MAX ), \
      "Assigned value (0x%08X) out of range." % st
    self._status = st
  @property
  def channelSeq( self ):
    """Get/set the Channel Sequence value (USHORT).
    Errors:
      AssertionError  - Thrown if the assigned value (after conversion
                        to an <int>) is either negative or greater
                        than 0xFFFF.
      [ TypeError,    - Either of these may be thrown if the assigned
        ValueError ]    value cannot be converted into an <int>.
    Notes:
      The ChannelSequence value is only recognized in request messages,
      and only if the dialect is 0x0300 or greater.  That is, this
      field does not exist in SMB2.x, only in SMB3.x.  In all
      responses, and in dialects prior to 0x0300, the bytes of this
      field are always seen as part of the Status field.
    """
    return( self._channelSeq )
  @channelSeq.setter
  def channelSeq( self, cs ):
    cs = int( cs )
    assert( 0 <= cs <= _USHORT_MAX ), "Assigned value (%d) out of range." % cs
    self._channelSeq = cs
@property
def command( self ):
"""Get/set the SMB2_Header.Command (UCHAR).
Errors: [ AssertionError, TypeError, ValueError ]
Thrown if the assigned value cannot be converted into a valid
SMB2 command code.
"""
return( self._command )
@command.setter
def command( self, cmd ):
cmd = int( cmd )
assert( 0 <= cmd <= 0x12 ), "Unknown command code: 0x%04X." % cmd
self._command = cmd
  @property
  def creditReqResp( self ):
    """Get/set the Credit Request / Credit Response value (USHORT).
    Errors:
      AssertionError  - Thrown if the assigned value (after conversion
                        to an <int>) is either negative or greater
                        than 0xFFFF.
      [ TypeError,    - Either of these may be thrown if the assigned
        ValueError ]    value cannot be converted into an <int>.
    ToDo:  Document how and when this is used; references.
           The credit management subsystem needs study.
    """
    return( self._creditReqResp )
  @creditReqResp.setter
  def creditReqResp( self, crr ):
    crr = int( crr )
    assert( 0 <= crr <= _USHORT_MAX ), \
      "Assigned value (%d) out of range." % crr
    self._creditReqResp = crr
  @property
  def flags( self ):
    """Get/set the Flags field (ULONG).
    Errors:
      AssertionError  - Thrown if the assigned value (after conversion
                        to a <long>) has bits that are set which do not
                        represent a known SMB2+ flag.
      [ TypeError,    - Either of these may be thrown if the assigned
        ValueError ]    value cannot be converted into a <long>.
    """
    return( self._flags )
  @flags.setter
  def flags( self, flags ):
    flgs = long( flags )
    # Only bits covered by SMB2_FLAGS_MASK are legal.
    assert( flgs == (flgs & SMB2_FLAGS_MASK) ), "Unrecognized flag bit(s)."
    self._flags = flgs
  # Note: See below for per-flag get/set properties.
  @property
  def nextCommand( self ):
    """Get/set the Next Command offset value (ULONG).
    Errors:
      AssertionError  - Thrown if the assigned value (after conversion
                        to a <long>) is either negative, or greater
                        than (2^32)-1.
      [ TypeError,    - Either of these may be thrown if the assigned
        ValueError ]    value cannot be converted into a <long>.
    """
    return( self._nextCommand )
  @nextCommand.setter
  def nextCommand( self, nextOffset ):
    nc = long( nextOffset )
    assert( 0 <= nc <= _ULONG_MAX ), \
      "Invalid Related Command Offset: %d." % nc
    self._nextCommand = nc
  @property
  def messageId( self ):
    """Get/set the Message ID value (UINT64).
    Errors:
      AssertionError  - Thrown if the assigned value (after conversion
                        to a <long>) is either negative, or greater
                        than (2^64)-1.
      [ TypeError,    - Either of these may be thrown if the assigned
        ValueError ]    value cannot be converted into a <long>.
    """
    return( self._messageId )
  @messageId.setter
  def messageId( self, messageId ):
    mi = long( messageId )
    assert( 0 <= mi <= _UINT64_MAX ), \
      "Assigned value (%d) out of range." % mi
    self._messageId = mi
  @property
  def treeId( self ):
    """Get/set the Tree Connect ID (ULONG).
    Errors:
      AssertionError  - Thrown if the assigned value (after conversion
                        to a <long>) is either negative or greater
                        than 0xFFFFFFFF.
    [ TypeError,      - Either of these may be thrown if the assigned
      ValueError ]      value cannot be converted into a <long>.
    Notes:
      Only meaningful in Sync headers; in an Async header these bytes
      are part of the <asyncId> field (see the class notes above
      __init__).
    """
    return( self._treeId )
  @treeId.setter
  def treeId( self, treeId ):
    tid = long( treeId )
    assert( 0 <= tid <= _ULONG_MAX ), \
      "Assigned value (%d) out of range." % tid
    self._treeId = tid
@property
def asyncId( self ):
"""Get/set the Async Id (UINT64).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) is either negative or greater
than (2^64)-1.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
"""
return( self._asyncId )
@asyncId.setter
def asyncId( selfd, asyncId ):
ai = long( asyncId )
assert( 0 <= ai <= _UINT64_MAX ), \
"Assigned value (%d) out of range." % ai
self._asyncId = ai
@property
def sessionId( self ):
"""Get/set the Session Id (UINT64).
Errors:
AssertionError - Thrown if the assigned value (after conversion
to a <long>) is either negative or greater
than (2^64)-1.
[ TypeError, - Either of these may be thrown if the assigned
ValueError ] value cannot be converted into a <long>.
"""
return( self._sessionId )
@sessionId.setter
def sessionId( self, sessionId ):
si = long( sessionId )
assert( 0 <= si <= _UINT64_MAX ), \
"Assigned value (%d) out of range." % si
self._sessionId = si
@property
def signature( self ):
"""Get/set the packet signature.
Errors:
AssertionError - Thrown if the string representation of the
assigned value is not exactly 16 bytes.
SyntaxError - Thrown if the assigned value is not of type
<str> and cannot be converted to type <str>.
"""
return( self._signature )
@signature.setter
def signature( self, signature ):
sig = str( signature )
assert( 16 == len( sig ) ), "Exactly 16 bytes required."
self._signature = sig
# Flag bitfield properties.
# _flag[S|G]et() generically handles getting and setting of
# individual flag bits.
def _flagGet( self, flag ):
return( bool( flag & self._flags ) )
def _flagSet( self, flag, bitState ):
if( bitState ):
self._flags |= flag
else:
self._flags &= ~flag
@property
def flagReply( self ):
"""Get/set the SMB2_FLAGS_SERVER_TO_REDIR (Reply) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_SERVER_TO_REDIR ) )
@flagReply.setter
def flagReply( self, bitState ):
self._flagSet( SMB2_FLAGS_SERVER_TO_REDIR, bitState )
@property
def flagAsync( self ):
"""Get/set the SMB2_FLAGS_ASYNC_COMMAND (Async) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_ASYNC_COMMAND ) )
@flagAsync.setter
def flagAsync( self, bitState ):
self._flagSet( SMB2_FLAGS_ASYNC_COMMAND, bitState )
@property
def flagNext( self ):
"""Get/set the SMB2_FLAGS_RELATED_OPERATIONS (Next) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_RELATED_OPERATIONS ) )
@flagNext.setter
def flagNext( self, bitState ):
self._flagSet( SMB2_FLAGS_RELATED_OPERATIONS, bitState )
@property
def flagSigned( self ):
"""Get/set the SMB2_FLAGS_SIGNED (Signed) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_SIGNED ) )
@flagSigned.setter
def flagSigned( self, bitState ):
self._flagSet( SMB2_FLAGS_SIGNED, bitState )
@property
def flagDFS( self ):
"""Get/set the SMB2_FLAGS_DFS_OPERATIONS (DFS) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_DFS_OPERATIONS ) )
@flagDFS.setter
def flagDFS( self, bitState ):
self._flagSet( SMB2_FLAGS_DFS_OPERATIONS, bitState )
@property
def flagReplay( self ):
"""Get/set the SMB2_FLAGS_REPLAY_OPERATION (Replay) bit.
The assigned value is evaluated as a boolean:
True = set the bit; False = clear it.
"""
return( self._flagGet( SMB2_FLAGS_REPLAY_OPERATION ) )
@flagReplay.setter
def flagReplay( self, bitState ):
self._flagSet( SMB2_FLAGS_REPLAY_OPERATION, bitState )
@property
def flagPriority( self ):
"""Get/set the SMBv3.1.1+ Priority subfield.
This value is actually a 3-bit integer (in the range 0..7).
Errors:
ValueError - Thrown if the assigned value is outside of the
valid range.
"""
return( (self._flags & SMB2_FLAGS_PRIORITY_MASK) >> 4 )
@flagPriority.setter
def flagPriority( self, prioVal ):
if( prioVal not in range( 8 ) ):
raise ValueError( "Assigned value (%d) out of range." % prioVal )
self._flags &= ~SMB2_FLAGS_PRIORITY_MASK
self._flags |= (prioVal << 4)
def dump( self, indent=0 ):
# Produce a nicely formatted dump of the SMB2 header.
#
# Input:
# indent - Number of spaces to indent the formatted output.
#
# Output: A string, presentng the formatted SMB2 header fields.
#
# Notes: If the message is a request and the dialect is at least
# 0x0300, the ChannelSequence (and a Reserved field) will
# replace the Status field (which would otherwise go unused
# in a request). This is a protocol modification introduced
# with the 3.0 dialect.
#
ind = ' ' * indent
cmdName = self.commandName( self._command )
cmdName = "<unknown>" if( not cmdName ) else cmdName
statName = NTStatus( self._status )
statName = "\n" if( statName is None ) else " [%s]\n" % statName.name
# Stuff...
s = ind + "ProtocolId...: %s\n" % hexstr( self._protocolId[:4] )
s += ind + "StructureSize: 0x{0:04X} ({0:d})\n".format( self._headerSize )
s += ind + "CreditCharge.: 0x{0:04X} ({0:d})\n".format( self._creditCharge )
# Status/Reserved1
if( self.flagReply or self._dialect < SMB2_DIALECT_300 ):
s += ind + "Status.......: 0x{0:08X}".format( self._status ) + statName
else:
s += ind + "ChannelSeq...: 0x{0:04X} ({0:d})\n".format( self._channelSeq )
s += ind + "Reserved1....: 0x{0:04X} ({0:d})\n".format( self._reserved1 )
# More stuff...
s += ind + "Command......: 0x{0:02X} ({0:d})".format( self._command ) \
+ " [{0:s}]\n".format( self.commandName( self._command ) )
s += ind + "CreditReqResp: 0x{0:04X} ({0:d})\n".format( self.creditReqResp )
s += ind + "Flags........: 0x{0:08X} ({0:d})\n".format( self._flags )
# Flag subfields.
s += ind + " Response.....: %s\n" % self.flagReply
s += ind + " Async........: %s\n" % self.flagAsync
s += ind + " Related Op...: %s\n" % self.flagNext
s += ind + " Signed.......: %s\n" % self.flagSigned
if( self._dialect >= SMB2_DIALECT_311 ):
s += ind + " Priority.....: {0:d}\n".format( self.flagPriority )
s += ind + " DFS Operation: %s\n" % self.flagDFS
s += ind + " SMB3.x Replay: %s\n" % self.flagReplay
# Yet more stuff...
s += ind + "NextCommand..: 0x{0:08X} ({0:d})\n".format( self._nextCommand )
s += ind + "MessageId....: 0x{0:016X} ({0:d})\n".format( self._messageId )
# AsyncId/Reserved2+TreeId
if( self.flagAsync ):
s += ind + "AsyncId......: 0x{0:016X} ({0:d})\n".format( self._asyncId )
else:
s += ind + "Reserved2....: 0x{0:08X} ({0:d})\n".format( self._reserved2 )
s += ind + "TreeId.......: 0x{0:08X} ({0:d})\n".format( self._treeId )
# SessionId and Signature
s += ind + "SessionId....: 0x{0:016X} ({0:d})\n".format( self._sessionId )
s += ind + "Signature....: ["
tmp = (16 + indent)
s += ('\n' + (' ' * tmp)).join( hexstrchop( self._signature, 32 ) ) + "]\n"
return( s )
def compose( self ):
# Marshall the SMB2 header fields into a stream of bytes.
#
# Output: A string of bytes; the wire format of the SMB2 header.
#
# Notes: It's probably okay if the dialect version isn't
# specified. The default values of <channelSeq> and
# <reserved1> are zero, so the encoded format would be
# zero for either interpretation.
#
if( self.flagReply or (self._dialect < 0x0300) ):
# Bytes 8..11 are <status>
if( self.flagAsync ):
# Bytes 32..39 are <async>
msg = self._format_SMB2_StatAsync.pack( self._protocolId,
self._headerSize,
self._creditCharge,
self._status,
self._command,
self._creditReqResp,
self._flags,
self._nextCommand,
self._messageId,
self._asyncId,
self._sessionId,
self._signature )
else:
# Bytes 32..39 are <reserved2>/<treeId>
msg = self._format_SMB2_StatTreeId.pack( self._protocolId,
self._headerSize,
self._creditCharge,
self._status,
self._command,
self._creditReqResp,
self._flags,
self._nextCommand,
self._messageId,
self._reserved2,
self._treeId,
self._sessionId,
self._signature )
else:
# Bytes 8..11 are <channelSeq>/<reserved1>
if( self.flagAsync ):
# Bytes 32..39 are <async>
msg = self._format_SMB2_cSeqAsync.pack( self._protocolId,
self._headerSize,
self._creditCharge,
self._channelSeq,
self._reserved1,
self._command,
self._creditReqResp,
self._flags,
self._nextCommand,
self._messageId,
self._asyncId,
self._sessionId,
self._signature )
else:
# Bytes 32..39 are <reserved2>/<treeId>
msg = self._format_SMB2_cSeqTreeId.pack( self._protocolId,
self._headerSize,
self._creditCharge,
self._channelSeq,
self._reserved1,
self._command,
self._creditReqResp,
self._flags,
self._nextCommand,
self._messageId,
self._reserved2,
self._treeId,
self._sessionId,
self._signature )
return( msg )
# Unit Tests ----------------------------------------------------------------- #
#
def _unit_test():
  # Module unit tests.
  #
  # Round-trips an _SMB2_Header through dump()/compose()/parseMsg() and
  # verifies that the reparsed header dumps identically.
  #
  """
  Doctest:
  >>> _unit_test()
  Success
  """
  if( __debug__ ):
    # 1.Baseline test.
    #   Just verify that we can store and retrieve the basic attributes
    #   of an _SMB2_Header object.
    #
    hdr = _SMB2_Header( SMB2_COM_LOGOFF, SMB2_DIALECT_302 )
    hdr.creditCharge = 213
    hdr.channelSeq = 42607
    hdr.creditReqResp = 42
    hdr.flagReply = False
    hdr.flagAsync = False
    hdr.flagNext = False
    hdr.flagSigned = False
    hdr.flagPriority = 5
    hdr.flagDFS = True
    hdr.flagReplay = False
    hdr.nextCommand = 0x87654321
    # Arbitrary 8-byte strings unpacked as UINT64 test values.
    hdr.messageId = _SMB2_Header._format_Q.unpack( "Fooberry" )[0]
    hdr.treeId = 0xBEADED
    hdr.sessionId = _SMB2_Header._format_Q.unpack( "Icecream" )[0]
    hdr.signature = "Reginald".center( 16 )
    # Create a header dump, compose a message, then parse the message.
    dmp0 = hdr.dump()
    msg = hdr.compose()
    hdr = _SMB2_Header.parseMsg( msg, SMB2_DIALECT_302 )
    # Dump the newly reparsed header, and compare against the original.
    dmp1 = hdr.dump()
    if( dmp0 != dmp1 ):
      print "Failure: Reparsing a composed header resulted in differences."
      print "As composed:\n", dmp0
      print "As parsed:\n", dmp1
      return
    # 2.Add additional tests hereafter.
    # Bottom line.
    print "Success"
# ============================================================================ #
# Reginald fidgeted uneasily in his seat. "I realize", he said, pensively,
# "that I do have unusually large dorsal fins, for a carrot".
# ============================================================================ #
| ubiqx-org/Carnaval | carnaval/smb/SMB2_Header.py | Python | agpl-3.0 | 40,520 |
import numpy
import os
from PyML.utils import misc
from baseClassifiers import Classifier
from composite import CompositeClassifier
from PyML.evaluators import assess
from PyML.containers.labels import oneAgainstRest
"""classes for multi-class classification"""
__docformat__ = "restructuredtext en"
class OneAgainstOne (CompositeClassifier) :

    '''One-against-one Multi-class classification
    using a two class classifier.

    For a k class problem k(k-1)/2 binary classifiers are trained, one
    for each unordered pair of classes; an instance is classified to the
    class that receives the highest number of votes.  The binary
    classifiers are built from a classifier instance that is used as a
    template for constructing the actual classifiers.
    '''

    def train(self, data, **args) :
        '''train k(k-1)/2 classifiers, one per pair of classes'''

        Classifier.train(self, data, **args)
        numClasses = self.labels.numClasses
        if numClasses <= 2:
            # FIX: py3-compatible raise (was the py2-only form
            # "raise ValueError, '...'"); behavior is unchanged.
            raise ValueError('Not a multi class problem')
        self.classifiers = misc.matrix((numClasses, numClasses))
        for i in range(numClasses - 1) :
            for j in range(i+1, numClasses) :
                self.classifiers[i][j] = self.classifier.__class__(self.classifier)
                # restrict the training data to the two classes at hand:
                dataij = data.__class__(data, deepcopy = self.classifier.deepcopy,
                                        classID = [i, j])
                self.classifiers[i][j].train(dataij)
        self.log.trainingTime = self.getTrainingTime()

    def classify(self, data, p):
        '''Classify pattern p by majority vote.

        Suppose that x is classified to class c, then the margin is
        defined as the minimum margin found against the k-1 other classes.
        Returns (class, margin).
        '''

        numClasses = self.labels.numClasses
        r = numpy.zeros((numClasses, numClasses), numpy.float_)
        vote = numpy.zeros(numClasses)
        for i in range(numClasses - 1) :
            for j in range(i+1, numClasses) :
                r[i][j] = self.classifiers[i][j].decisionFunc(data, p)
                # afterwards the minimum of |r[maxvote]| is taken, so
                # mirror the decision value:
                r[j][i] = r[i][j]
                if r[i][j] > 0 :
                    vote[j] += 1
                else:
                    vote[i] += 1
        maxvote = numpy.argmax(vote)
        return maxvote, numpy.min(numpy.absolute(r[maxvote]))

    def preproject(self, data) :
        '''propagate preprojection to each pairwise classifier'''

        for i in range(self.labels.numClasses-1):
            for j in range(i+1, self.labels.numClasses):
                self.classifiers[i][j].preproject(data)

    test = assess.test
class OneAgainstRest (CompositeClassifier) :

    '''A one-against-the-rest multi-class classifier'''

    def train(self, data, **args) :
        '''train k classifiers, one separating each class from the rest'''

        Classifier.train(self, data, **args)
        numClasses = self.labels.numClasses
        if numClasses <= 2:
            # FIX: py3-compatible raise (was the py2-only form
            # "raise ValueError, '...'"); behavior is unchanged.
            raise ValueError('Not a multi class problem')
        self.classifiers = [self.classifier.__class__(self.classifier)
                            for i in range(numClasses)]
        for i in range(numClasses) :
            # make a copy of the data; this is done in case the classifier modifies the data
            datai = data.__class__(data, deepcopy = self.classifier.deepcopy)
            datai = oneAgainstRest(datai, data.labels.classLabels[i])
            self.classifiers[i].train(datai)
        self.log.trainingTime = self.getTrainingTime()

    def classify(self, data, i) :
        '''classify pattern i to the class with the largest decision
        value; returns (class, decision value)'''

        r = numpy.zeros(self.labels.numClasses, numpy.float_)
        for j in range(self.labels.numClasses) :
            r[j] = self.classifiers[j].decisionFunc(data, i)
        return numpy.argmax(r), numpy.max(r)

    def preproject(self, data) :
        '''propagate preprojection to the k underlying classifiers'''

        for i in range(self.labels.numClasses) :
            self.classifiers[i].preproject(data)

    test = assess.test

    def save(self, fileName) :
        """save the trained classifier to a file.
        assumes the classifier is an SVM"""

        file_handle = open(fileName, 'w')
        try :
            for classifier in self.classifiers :
                classifier.save(file_handle)
        finally :
            # FIX: close the handle so output is flushed (it was leaked).
            file_handle.close()

    def load(self, fileName, data) :
        """load a trained classifier from a file. Also provide the data on which
        the classifier was trained. It assumes the underlying binary classifier is
        an SVM"""

        from PyML import svm
        Classifier.train(self, data)
        file_handle = open(fileName)
        try :
            numClasses = self.labels.numClasses
            self.classifiers = [self.classifier.__class__(self.classifier)
                                for i in range(numClasses)]
            for i in range(numClasses) :
                datai = data.__class__(data, deepcopy = self.classifier.deepcopy)
                datai = oneAgainstRest(datai, data.labels.classLabels[i])
                self.classifiers[i] = svm.loadSVM(file_handle, datai)
        finally :
            # FIX: release the file handle (it was leaked).
            file_handle.close()
def allOneAgainstRest(classifier, data, resultsFile, numFolds = 5, minClassSize = 8) :
    """Run stratified CV of <classifier> on a one-against-the-rest problem
    for each class of <data>, checkpointing results into <resultsFile> so
    an interrupted sweep can be resumed.

    Classes with fewer than <minClassSize> members are skipped; pass
    minClassSize = None to disable the size filter.
    """

    import myio
    labels = data.labels
    results = {}
    if os.path.exists(resultsFile) :
        results = assess.loadResults(resultsFile)
    for label in labels.classLabels :
        if label in results : continue
        if (minClassSize is not None and
            labels.classSize[labels.classDict[label]] < minClassSize) : continue
        myio.log('class: ' + label + '\n')
        # NOTE(review): <data> is rebound to the relabeled dataset here and
        # only restored after the loop -- confirm oneAgainstRest() keeps the
        # original label information available across iterations.
        data = oneAgainstRest(data, label)
        try :
            results[label] = classifier.stratifiedCV(data, numFolds)
        except Exception :
            # FIX: was a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit.  Record the failure and
            # continue with the remaining classes.
            results[label] = None
        assess.saveResultObjects(results, resultsFile)
    data.attachLabels(labels)
| cathywu/Sentiment-Analysis | PyML-0.7.9/PyML/classifiers/multi.py | Python | gpl-2.0 | 5,608 |
# (c) 2014, James Tanner <tanner.jc@gmail.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# ansible-vault is a script that encrypts/decrypts YAML files. See
# http://docs.ansible.com/playbooks_vault.html for more details.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import traceback
import textwrap
from ansible.compat.six import iteritems, string_types
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleOptionsError
from ansible.plugins import module_loader, action_loader
from ansible.cli import CLI
from ansible.utils import module_docs
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class DocCLI(CLI):
""" Vault command line class """
    def __init__(self, args):
        """Initialize the doc CLI; module names discovered by
        find_modules() accumulate in self.module_list."""

        super(DocCLI, self).__init__(args)
        self.module_list = []
    def parse(self):
        """Build the ansible-doc option parser (adding --list and
        --snippet on top of the shared base parser) and process argv."""

        self.parser = CLI.base_parser(
            usage='usage: %prog [options] [module...]',
            epilog='Show Ansible module documentation',
            module_opts=True,
        )

        self.parser.add_option("-l", "--list", action="store_true", default=False, dest='list_dir',
                help='List available modules')
        self.parser.add_option("-s", "--snippet", action="store_true", default=False, dest='show_snippet',
                help='Show playbook snippet for specified module(s)')

        super(DocCLI, self).parse()

        # Honor -v/-vv/... for all subsequent display output.
        display.verbosity = self.options.verbosity
    def run(self):
        """Entry point for ansible-doc.

        Either lists all available modules (--list) or renders man-style
        documentation (or a playbook snippet, with --snippet) for each
        module named on the command line.  Returns the exit code.
        """

        super(DocCLI, self).run()

        # Extend the module search path with any user-supplied directories.
        if self.options.module_path is not None:
            for i in self.options.module_path.split(os.pathsep):
                module_loader.add_directory(i)

        # list modules
        if self.options.list_dir:
            paths = module_loader._get_paths()
            for path in paths:
                self.find_modules(path)

            self.pager(self.get_module_list_text())
            return 0

        if len(self.args) == 0:
            raise AnsibleOptionsError("Incorrect options passed")

        # process command line module list
        text = ''
        for module in self.args:

            try:
                # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
                filename = module_loader.find_plugin(module, mod_type='.py')
                if filename is None:
                    display.warning("module %s not found in %s\n" % (module, DocCLI.print_paths(module_loader)))
                    continue

                if any(filename.endswith(x) for x in C.BLACKLIST_EXTS):
                    continue

                try:
                    doc, plainexamples, returndocs = module_docs.get_docstring(filename, verbose=(self.options.verbosity > 0))
                except:
                    # Malformed module documentation: report it and move on.
                    display.vvv(traceback.format_exc())
                    display.error("module %s has a documentation error formatting or is missing documentation\nTo see exact traceback use -vvv" % module)
                    continue

                if doc is not None:

                    # is there corresponding action plugin?
                    if module in action_loader:
                        doc['action'] = True
                    else:
                        doc['action'] = False

                    all_keys = []
                    for (k,v) in iteritems(doc['options']):
                        all_keys.append(k)
                    all_keys = sorted(all_keys)
                    doc['option_keys'] = all_keys

                    doc['filename'] = filename
                    doc['docuri'] = doc['module'].replace('_', '-')
                    doc['now_date'] = datetime.date.today().strftime('%Y-%m-%d')
                    doc['plainexamples'] = plainexamples
                    doc['returndocs'] = returndocs

                    if self.options.show_snippet:
                        text += self.get_snippet_text(doc)
                    else:
                        text += self.get_man_text(doc)
                else:
                    # this typically means we couldn't even parse the docstring, not just that the YAML is busted,
                    # probably a quoting issue.
                    raise AnsibleError("Parsing produced an empty object.")
            except Exception as e:
                display.vvv(traceback.format_exc())
                raise AnsibleError("module %s missing documentation (or could not parse documentation): %s\n" % (module, str(e)))

        if text:
            self.pager(text)

        return 0
def find_modules(self, path):
if os.path.isdir(path):
for module in os.listdir(path):
if module.startswith('.'):
continue
elif os.path.isdir(module):
self.find_modules(module)
elif any(module.endswith(x) for x in C.BLACKLIST_EXTS):
continue
elif module.startswith('__'):
continue
elif module in C.IGNORE_FILES:
continue
elif module.startswith('_'):
fullpath = '/'.join([path,module])
if os.path.islink(fullpath): # avoids aliases
continue
module = os.path.splitext(module)[0] # removes the extension
self.module_list.append(module)
    def get_module_list_text(self):
        """Return a columnar listing of every discovered module with its
        short description; deprecated modules (leading '_') are grouped
        under a trailing DEPRECATED section."""

        columns = display.columns
        displace = max(len(x) for x in self.module_list)
        # Room left for the description after the name column.
        linelimit = columns - displace - 5
        text = []
        deprecated = []
        for module in sorted(set(self.module_list)):

            if module in module_docs.BLACKLIST_MODULES:
                continue

            # if the module lives in a non-python file (eg, win_X.ps1), require the corresponding python file for docs
            filename = module_loader.find_plugin(module, mod_type='.py')

            if filename is None:
                continue
            if filename.endswith(".ps1"):
                continue
            if os.path.isdir(filename):
                continue

            try:
                doc, plainexamples, returndocs = module_docs.get_docstring(filename)
                desc = self.tty_ify(doc.get('short_description', '?')).strip()
                # Truncate descriptions that would overflow the column.
                if len(desc) > linelimit:
                    desc = desc[:linelimit] + '...'

                if module.startswith('_'): # Handle deprecated
                    deprecated.append("%-*s %-*.*s" % (displace, module[1:], linelimit, len(desc), desc))
                else:
                    text.append("%-*s %-*.*s" % (displace, module, linelimit, len(desc), desc))
            except:
                raise AnsibleError("module %s has a documentation error formatting or is missing documentation\n" % module)

        if len(deprecated) > 0:
            text.append("\nDEPRECATED:")
            text.extend(deprecated)

        return "\n".join(text)
@staticmethod
def print_paths(finder):
''' Returns a string suitable for printing of the search path '''
# Uses a list to get the order right
ret = []
for i in finder._get_paths():
if i not in ret:
ret.append(i)
return os.pathsep.join(ret)
def get_snippet_text(self, doc):
text = []
desc = CLI.tty_ify(doc['short_description'])
text.append("- name: %s" % (desc))
text.append(" action: %s" % (doc['module']))
pad = 31
subdent = " " * pad
limit = display.columns - pad
for o in sorted(doc['options'].keys()):
opt = doc['options'][o]
desc = CLI.tty_ify(" ".join(opt['description']))
required = opt.get('required', False)
if not isinstance(required, bool):
raise("Incorrect value for 'Required', a boolean is needed.: %s" % required)
if required:
s = o + "="
else:
s = o
text.append(" %-20s # %s" % (s, textwrap.fill(desc, limit, subsequent_indent=subdent)))
text.append('')
return "\n".join(text)
def get_man_text(self, doc):
opt_indent=" "
text = []
text.append("> %s\n" % doc['module'].upper())
pad = display.columns * 0.20
limit = max(display.columns - int(pad), 70)
if isinstance(doc['description'], list):
desc = " ".join(doc['description'])
else:
desc = doc['description']
text.append("%s\n" % textwrap.fill(CLI.tty_ify(desc), limit, initial_indent=" ", subsequent_indent=" "))
if 'deprecated' in doc and doc['deprecated'] is not None and len(doc['deprecated']) > 0:
text.append("DEPRECATED: \n%s\n" % doc['deprecated'])
if 'action' in doc and doc['action']:
text.append(" * note: %s\n" % "This module has a corresponding action plugin.")
if 'option_keys' in doc and len(doc['option_keys']) > 0:
text.append("Options (= is mandatory):\n")
for o in sorted(doc['option_keys']):
opt = doc['options'][o]
required = opt.get('required', False)
if not isinstance(required, bool):
raise("Incorrect value for 'Required', a boolean is needed.: %s" % required)
if required:
opt_leadin = "="
else:
opt_leadin = "-"
text.append("%s %s" % (opt_leadin, o))
if isinstance(opt['description'], list):
for entry in opt['description']:
text.append(textwrap.fill(CLI.tty_ify(entry), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
else:
text.append(textwrap.fill(CLI.tty_ify(opt['description']), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
choices = ''
if 'choices' in opt:
choices = "(Choices: " + ", ".join(str(i) for i in opt['choices']) + ")"
default = ''
if 'default' in opt or not required:
default = "[Default: " + str(opt.get('default', '(null)')) + "]"
text.append(textwrap.fill(CLI.tty_ify(choices + default), limit, initial_indent=opt_indent, subsequent_indent=opt_indent))
if 'notes' in doc and doc['notes'] and len(doc['notes']) > 0:
text.append("Notes:")
for note in doc['notes']:
text.append(textwrap.fill(CLI.tty_ify(note), limit-6, initial_indent=" * ", subsequent_indent=opt_indent))
if 'requirements' in doc and doc['requirements'] is not None and len(doc['requirements']) > 0:
req = ", ".join(doc['requirements'])
text.append("Requirements:%s\n" % textwrap.fill(CLI.tty_ify(req), limit-16, initial_indent=" ", subsequent_indent=opt_indent))
if 'examples' in doc and len(doc['examples']) > 0:
text.append("Example%s:\n" % ('' if len(doc['examples']) < 2 else 's'))
for ex in doc['examples']:
text.append("%s\n" % (ex['code']))
if 'plainexamples' in doc and doc['plainexamples'] is not None:
text.append("EXAMPLES:")
text.append(doc['plainexamples'])
if 'returndocs' in doc and doc['returndocs'] is not None:
text.append("RETURN VALUES:")
text.append(doc['returndocs'])
text.append('')
maintainers = set()
if 'author' in doc:
if isinstance(doc['author'], string_types):
maintainers.add(doc['author'])
else:
maintainers.update(doc['author'])
if 'maintainers' in doc:
if isinstance(doc['maintainers'], string_types):
maintainers.add(doc['author'])
else:
maintainers.update(doc['author'])
text.append('MAINTAINERS: ' + ', '.join(maintainers))
text.append('')
return "\n".join(text)
| wenottingham/ansible | lib/ansible/cli/doc.py | Python | gpl-3.0 | 12,788 |
# -*- coding: utf-8 -*-
#
# Sphinx build configuration for the Frank documentation.  Generated from
# the sphinx-quickstart template; commented-out assignments show the
# Sphinx defaults.
import sys
import os
from recommonmark.parser import CommonMarkParser
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# Parse Markdown sources with recommonmark, alongside reStructuredText.
source_parsers = {
    '.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Frank'
copyright = u'2017, Kyle Fuller'
author = u'Kyle Fuller'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar layout: intro/search everywhere, plus a local TOC and
# relations on inner pages.
html_sidebars = {
    'index': ['sidebar_intro.html', 'searchbox.html'],
    '**': ['sidebar_intro.html', 'localtoc.html', 'relations.html', 'searchbox.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
html_show_sourcelink = True
html_show_sphinx = False
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Frankdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  (master_doc, 'Frank.tex', u'Frank Documentation',
   u'Kyle Fuller', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'frank', u'Frank Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  (master_doc, 'Frank', u'Frank Documentation',
   author, 'Frank', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| nestproject/Frank | docs/conf.py | Python | bsd-2-clause | 8,306 |
# -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# Standard library imports
from __future__ import print_function
from collections import OrderedDict
import time
# Third party imports
from qtpy.QtCore import QObject, QTimer, Signal
from qtpy.QtWidgets import QApplication
# Local imports
from spyder import dependencies
from spyder.config.base import _, DEBUG, debug_print, get_conf_path
from spyder.utils import sourcecode
from spyder.utils.introspection.plugin_client import PluginClient
from spyder.utils.introspection.utils import CodeInfo
# Introspection plugins, in priority order: the plugin manager starts one
# client per name and prefers answers from earlier entries; 'fallback' is
# the last-resort token-based plugin.
PLUGINS = ['rope', 'jedi', 'fallback']
LOG_FILENAME = get_conf_path('introspection.log')
# Editor-level introspection debugging is only enabled at DEBUG level 3+.
DEBUG_EDITOR = DEBUG >= 3
# Seconds to wait for the highest-priority plugin before falling back to
# the best pending answer (see PluginManager._handle_timeout).
LEAD_TIME_SEC = 0.25
ROPE_REQVER = '>=0.9.4'
dependencies.add('rope',
                 _("Editor's code completion, go-to-definition and help"),
                 required_version=ROPE_REQVER)
JEDI_REQVER = '=0.9.0'
dependencies.add('jedi',
                 _("Editor's code completion, go-to-definition and help"),
                 required_version=JEDI_REQVER)
class PluginManager(QObject):
    """Dispatch introspection requests to a set of plugin clients.

    One client per entry in ``PLUGINS`` is started, in priority order.
    A request is fanned out to the applicable clients; the answer from the
    highest-priority one wins, and if it has not replied within
    ``LEAD_TIME_SEC`` seconds the best pending answer is used instead.
    """

    # Emitted with the winning response dict when a request completes.
    introspection_complete = Signal(object)

    def __init__(self, executable):
        super(PluginManager, self).__init__()
        plugins = OrderedDict()
        for name in PLUGINS:
            try:
                plugin = PluginClient(name, executable)
                plugin.run()
            except Exception as e:
                debug_print('Introspection Plugin Failed: %s' % name)
                debug_print(str(e))
                continue
            debug_print('Introspection Plugin Loaded: %s' % name)
            plugins[name] = plugin
            plugin.received.connect(self.handle_response)
        self.plugins = plugins
        self.timer = QTimer()
        self.desired = []            # names of the plugins we will accept
        self.ids = dict()            # request_id -> plugin name
        self.info = None             # CodeInfo of the request in flight
        self.request = None
        self.pending = None          # best response received so far
        self.pending_request = None  # request queued while one is in flight
        self.waiting = False         # True while a request is in flight

    def send_request(self, info):
        """Handle an incoming request from the user."""
        if self.waiting:
            # Busy: queue the new request unless it duplicates the current one.
            if info.serialize() != self.info.serialize():
                self.pending_request = info
            else:
                debug_print('skipping duplicate request')
            return
        debug_print('%s request' % info.name)
        desired = None
        self.info = info
        editor = info.editor
        # Import-statement completions without jedi are best served by the
        # fallback (token-based) plugin.
        if (info.name == 'completion' and 'jedi' not in self.plugins and
                info.line.lstrip().startswith(('import ', 'from '))):
            desired = 'fallback'
        # Non-Python buffers, keywords, and comments/strings (except for
        # 'info' requests) also go straight to the fallback plugin.
        if ((not editor.is_python_like()) or
                sourcecode.is_keyword(info.obj) or
                (editor.in_comment_or_string() and info.name != 'info')):
            desired = 'fallback'
        plugins = self.plugins.values()
        if desired:
            plugins = [self.plugins[desired]]
            self.desired = [desired]
        elif ((info.name == 'definition' and not info.editor.is_python()) or
                info.name == 'info'):
            # Accept an answer from any plugin, in priority order.
            self.desired = list(self.plugins.keys())
        else:
            # Use all but the fallback
            plugins = list(self.plugins.values())[:-1]
            self.desired = list(self.plugins.keys())[:-1]
        self._start_time = time.time()
        self.waiting = True
        method = 'get_%s' % info.name
        value = info.serialize()
        self.ids = dict()
        for plugin in plugins:
            request_id = plugin.request(method, value)
            self.ids[request_id] = plugin.name
        # NOTE(review): QTimer.singleShot is a static method, so the stop()
        # call does not cancel previously scheduled single-shots -- confirm
        # this is the intended behavior.
        self.timer.stop()
        self.timer.singleShot(LEAD_TIME_SEC * 1000, self._handle_timeout)

    def validate(self):
        """Ask every plugin client to validate its state."""
        for plugin in self.plugins.values():
            plugin.request('validate')

    def handle_response(self, response):
        """Handle a response emitted by one of the plugin clients."""
        name = self.ids.get(response['request_id'], None)
        if not name:
            # Stale response from a superseded request.
            return
        if response.get('error', None):
            debug_print('Response error:', response['error'])
            return
        if name == self.desired[0] or not self.waiting:
            if response.get('result', None):
                self._finalize(response)
        else:
            # A lower-priority plugin answered first; keep it in reserve.
            self.pending = response

    def close(self):
        """Close all plugin clients.

        Iterate over the *values* of the plugin dict: iterating the
        OrderedDict directly would yield the plugin names (strings),
        which have no close() method.
        """
        for plugin in self.plugins.values():
            plugin.close()

    def _finalize(self, response):
        """Emit the winning response and start any queued request."""
        self.waiting = False
        self.pending = None
        if self.info:
            delta = time.time() - self._start_time
            debug_print('%s request from %s finished: "%s" in %.1f sec'
                        % (self.info.name, response['name'],
                           str(response['result'])[:100], delta))
            response['info'] = self.info
            self.introspection_complete.emit(response)
            self.info = None
        if self.pending_request:
            info = self.pending_request
            self.pending_request = None
            self.send_request(info)

    def _handle_timeout(self):
        """Fall back to the best pending answer when the lead plugin is late."""
        self.waiting = False
        if self.pending:
            self._finalize(self.pending)
        else:
            debug_print('No valid responses acquired')
class IntrospectionManager(QObject):
    """Coordinate code introspection between the editor and the plugins.

    Forwards completion/definition/info requests from the current editor
    to a `PluginManager` and routes each response back to the editor
    (completion list, calltip) or out through the signals below.
    """
    # Emitted towards the Help plugin: (name, argspec, note, docstring,
    # force_visible).
    send_to_help = Signal(str, str, str, str, bool)
    # Emitted to open a file at a location: (filename, line_num, word).
    edit_goto = Signal(str, int, str)
    def __init__(self, executable=None):
        """Create the manager, spawning plugin clients on `executable`."""
        super(IntrospectionManager, self).__init__()
        self.editor_widget = None
        self.pending = None
        self.plugin_manager = PluginManager(executable)
        self.plugin_manager.introspection_complete.connect(
            self._introspection_complete)
    def change_executable(self, executable):
        """Restart the plugin clients on a different Python executable."""
        self.plugin_manager.close()
        self.plugin_manager = PluginManager(executable)
        self.plugin_manager.introspection_complete.connect(
            self._introspection_complete)
    def set_editor_widget(self, editor_widget):
        """Set the editor stack whose current editor will be introspected."""
        self.editor_widget = editor_widget
    def _get_code_info(self, name, position=None, **kwargs):
        """Build a `CodeInfo` snapshot of the current editor state.

        `name` is the request kind ('completions', 'definition', 'info');
        `position` defaults to the current cursor position.
        """
        editor = self.editor_widget.get_current_editor()
        finfo = self.editor_widget.get_current_finfo()
        in_comment_or_string = editor.in_comment_or_string()
        if position is None:
            position = editor.get_position('cursor')
        kwargs['editor'] = editor
        kwargs['finfo'] = finfo
        kwargs['editor_widget'] = self.editor_widget
        return CodeInfo(name, finfo.get_source_code(), position,
            finfo.filename, editor.is_python_like, in_comment_or_string,
            **kwargs)
    def get_completions(self, automatic):
        """Get code completion"""
        info = self._get_code_info('completions', automatic=automatic)
        self.plugin_manager.send_request(info)
    def go_to_definition(self, position):
        """Go to definition"""
        info = self._get_code_info('definition', position)
        self.plugin_manager.send_request(info)
    def show_object_info(self, position, auto=True):
        """Show signature calltip and/or docstring in the Help plugin"""
        # auto is True means that this method was called automatically,
        # i.e. the user has just entered an opening parenthesis -- in that
        # case, we don't want to force Help to be visible, to avoid polluting
        # the window layout
        info = self._get_code_info('info', position, auto=auto)
        self.plugin_manager.send_request(info)
    def validate(self):
        """Validate the plugins"""
        self.plugin_manager.validate()
    def is_editor_ready(self):
        """Check if the main app is starting up"""
        # Returns True once the main window reports startup is finished;
        # implicitly returns None otherwise.
        if self.editor_widget:
            window = self.editor_widget.window()
            if hasattr(window, 'is_starting_up') and not window.is_starting_up:
                return True
    def _introspection_complete(self, response):
        """
        Handle an introspection response completion.
        Route the response to the correct handler.
        """
        result = response.get('result', None)
        if result is None:
            return
        info = response['info']
        # Re-snapshot the editor: only act if the user is still in the
        # same file the request was made for.
        current = self._get_code_info(response['info']['name'])
        if result and current.filename == info.filename:
            func = getattr(self, '_handle_%s_result' % info.name)
            try:
                func(result, current, info)
            except Exception as e:
                debug_print(e)
    def _handle_completions_result(self, comp_list, info, prev_info):
        """
        Handle a `completions` result.
        Only handle the response if we are on the same line of text and
        on the same `obj` as the original request.
        """
        # Note the argument order: `info` is the *current* editor state,
        # `prev_info` is the state at the time the request was sent.
        if info.line_num != prev_info.line_num:
            return
        completion_text = info.obj
        prev_text = prev_info.obj
        if prev_info.obj is None:
            completion_text = ''
            prev_text = ''
        if not completion_text.startswith(prev_text):
            return
        # If the user kept typing past the original word, narrow the list
        # and advance the cursor to the end of the full object.
        if info.full_obj and len(info.full_obj) > len(info.obj):
            new_list = [(c, t) for (c, t) in comp_list
                        if c.startswith(info.full_obj)]
            if new_list:
                pos = info.editor.get_position('cursor')
                new_pos = pos + len(info.full_obj) - len(info.obj)
                info.editor.set_cursor_position(new_pos)
                completion_text = info.full_obj
                comp_list = new_list
        # For dotted names, complete only on the last attribute segment.
        if '.' in completion_text:
            completion_text = completion_text.split('.')[-1]
            comp_list = [(c.split('.')[-1], t) for (c, t) in comp_list]
        comp_list = [(c, t) for (c, t) in comp_list
                     if c.startswith(completion_text)]
        info.editor.show_completion_list(comp_list, completion_text,
                                         prev_info.automatic)
    def _handle_info_result(self, resp, info, prev_info):
        """
        Handle an `info` result, triggering a calltip and/or docstring.
        Only handle the response if we are on the same line of text as
        when the request was initiated.
        """
        if info.line_num != prev_info.line_num:
            return
        if resp['calltip']:
            info.editor.show_calltip('Arguments', resp['calltip'],
                                     signature=True,
                                     at_position=prev_info.position)
        if resp['name']:
            self.send_to_help.emit(
                resp['name'], resp['argspec'],
                resp['note'], resp['docstring'],
                not prev_info.auto)
    def _handle_definition_result(self, resp, info, prev_info):
        """Handle a `definition` result"""
        # resp is a (filename, line_number) pair.
        fname, lineno = resp
        self.edit_goto.emit(fname, lineno, "")
    def _post_message(self, message, timeout=60000):
        """
        Post a message to the main window status bar with a timeout in ms
        """
        if self.editor_widget:
            try:
                statusbar = self.editor_widget.window().statusBar()
                statusbar.showMessage(message, timeout)
                QApplication.processEvents()
            except AttributeError:
                pass
class IntrospectionPlugin(object):
    """Interface that concrete introspection plugins implement.

    All hooks are optional no-ops here; subclasses override the ones
    they support (completions, object info, go-to-definition).
    """
    def load_plugin(self):
        """Initialize the plugin"""
        pass
    def get_completions(self, info):
        """Get a list of completions"""
        pass
    def get_info(self, info):
        """
        Find the calltip and docs
        Returns a dict like the following:
           {'note': 'Function of numpy.core.numeric...',
            'argspec': "(shape, dtype=None, order='C')'
            'docstring': 'Return an array of given...'
            'name': 'ones',
            'calltip': 'ones(shape, dtype=None, order='C')'}
        """
        pass
    def get_definition(self, info):
        """Get a (filename, line_num) location for a definition"""
        pass
    def validate(self):
        """Validate the plugin"""
        pass
| bgris/ODL_bgris | lib/python3.5/site-packages/spyder/utils/introspection/manager.py | Python | gpl-3.0 | 12,359 |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUL2DomainsFetcher
from .fetchers import NUMACFilterProfilesFetcher
from .fetchers import NUSAPEgressQoSProfilesFetcher
from .fetchers import NUSAPIngressQoSProfilesFetcher
from .fetchers import NUGatewaySecuritiesFetcher
from .fetchers import NUPATNATPoolsFetcher
from .fetchers import NUDeploymentFailuresFetcher
from .fetchers import NUPermissionsFetcher
from .fetchers import NUWANServicesFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUEgressProfilesFetcher
from .fetchers import NUAlarmsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUInfrastructureConfigsFetcher
from .fetchers import NUIngressProfilesFetcher
from .fetchers import NUEnterprisePermissionsFetcher
from .fetchers import NUJobsFetcher
from .fetchers import NULocationsFetcher
from .fetchers import NUDomainsFetcher
from .fetchers import NUBootstrapsFetcher
from .fetchers import NUBootstrapActivationsFetcher
from .fetchers import NUPortsFetcher
from .fetchers import NUIPFilterProfilesFetcher
from .fetchers import NUIPv6FilterProfilesFetcher
from .fetchers import NUSubnetsFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUGateway(NURESTObject):
    """ Represents a Gateway in the VSD

        Notes:
            Represents Gateway object.
    """
    # NOTE(review): presumably the entity/collection names used to build
    # REST URIs for this object -- verify against the bambou base class.
    __rest_name__ = "gateway"
    __resource_name__ = "gateways"
    ## Constants
    # Allowed values for the choice attributes exposed in __init__.
    # The prefixes map to attributes as follows:
    #   CONST_FAMILY_*              -> family
    #   CONST_PERSONALITY_*         -> personality
    #   CONST_PERMITTED_ACTION_*    -> permitted_action
    #   CONST_ZFB_MATCH_ATTRIBUTE_* -> zfb_match_attribute
    #   CONST_BOOTSTRAP_STATUS_*    -> bootstrap_status
    #   CONST_ENTITY_SCOPE_*        -> entity_scope
    #   CONST_VENDOR_*              -> vendor
    CONST_FAMILY_NSG_C = "NSG_C"
    CONST_PERMITTED_ACTION_ALL = "ALL"
    CONST_FAMILY_NSG_E = "NSG_E"
    CONST_PERSONALITY_EVDF = "EVDF"
    CONST_PERSONALITY_NUAGE_210_WBX_32_Q = "NUAGE_210_WBX_32_Q"
    CONST_ZFB_MATCH_ATTRIBUTE_MAC_ADDRESS = "MAC_ADDRESS"
    CONST_FAMILY_NSG_V = "NSG_V"
    CONST_VENDOR_CISCO = "CISCO"
    CONST_BOOTSTRAP_STATUS_ACTIVE = "ACTIVE"
    CONST_FAMILY_NSG_X = "NSG_X"
    CONST_ZFB_MATCH_ATTRIBUTE_IP_ADDRESS = "IP_ADDRESS"
    CONST_FAMILY_VRS = "VRS"
    CONST_FAMILY_NSG_E200 = "NSG_E200"
    CONST_BOOTSTRAP_STATUS_NOTIFICATION_APP_REQ_SENT = "NOTIFICATION_APP_REQ_SENT"
    CONST_PERSONALITY_EVDFB = "EVDFB"
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_PERSONALITY_OTHER = "OTHER"
    CONST_ZFB_MATCH_ATTRIBUTE_HOSTNAME = "HOSTNAME"
    CONST_PERSONALITY_VDFG = "VDFG"
    CONST_BOOTSTRAP_STATUS_NOTIFICATION_APP_REQ_ACK = "NOTIFICATION_APP_REQ_ACK"
    CONST_PERMITTED_ACTION_EXTEND = "EXTEND"
    CONST_PERMITTED_ACTION_INSTANTIATE = "INSTANTIATE"
    CONST_PERSONALITY_DC7X50 = "DC7X50"
    CONST_BOOTSTRAP_STATUS_CERTIFICATE_SIGNED = "CERTIFICATE_SIGNED"
    CONST_FAMILY_NSG_AZ = "NSG_AZ"
    CONST_FAMILY_ANY = "ANY"
    CONST_ZFB_MATCH_ATTRIBUTE_NONE = "NONE"
    CONST_PERSONALITY_VSA = "VSA"
    CONST_PERSONALITY_VSG = "VSG"
    CONST_PERMITTED_ACTION_READ = "READ"
    CONST_PERSONALITY_VRSB = "VRSB"
    CONST_PERMITTED_ACTION_USE = "USE"
    CONST_PERSONALITY_NETCONF_7X50 = "NETCONF_7X50"
    CONST_PERSONALITY_NUAGE_210_WBX_48_S = "NUAGE_210_WBX_48_S"
    CONST_FAMILY_NSG_X200 = "NSG_X200"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    CONST_FAMILY_NSG_E300 = "NSG_E300"
    CONST_PERSONALITY_VRSG = "VRSG"
    CONST_ZFB_MATCH_ATTRIBUTE_SERIAL_NUMBER = "SERIAL_NUMBER"
    CONST_ZFB_MATCH_ATTRIBUTE_UUID = "UUID"
    CONST_PERSONALITY_HARDWARE_VTEP = "HARDWARE_VTEP"
    CONST_PERSONALITY_NETCONF_THIRDPARTY_HW_VTEP = "NETCONF_THIRDPARTY_HW_VTEP"
    CONST_FAMILY_NSG_AMI = "NSG_AMI"
    CONST_PERMITTED_ACTION_DEPLOY = "DEPLOY"
    CONST_BOOTSTRAP_STATUS_INACTIVE = "INACTIVE"
def __init__(self, **kwargs):
""" Initializes a Gateway instance
Notes:
You can specify all parameters while calling this methods.
A special argument named `data` will enable you to load the
object from a Python dictionary
Examples:
>>> gateway = NUGateway(id=u'xxxx-xxx-xxx-xxx', name=u'Gateway')
>>> gateway = NUGateway(data=my_dict)
"""
super(NUGateway, self).__init__()
# Read/Write Attributes
self._mac_address = None
self._zfb_match_attribute = None
self._zfb_match_value = None
self._bios_release_date = None
self._bios_version = None
self._cpu_type = None
self._uuid = None
self._name = None
self._family = None
self._management_id = None
self._last_updated_by = None
self._datapath_id = None
self._patches = None
self._gateway_connected = None
self._gateway_model = None
self._gateway_version = None
self._redundancy_group_id = None
self._peer = None
self._template_id = None
self._pending = None
self._vendor = None
self._serial_number = None
self._permitted_action = None
self._personality = None
self._description = None
self._libraries = None
self._enterprise_id = None
self._entity_scope = None
self._location_id = None
self._bootstrap_id = None
self._bootstrap_status = None
self._product_name = None
self._use_gateway_vlanvnid = None
self._associated_gateway_security_id = None
self._associated_gateway_security_profile_id = None
self._associated_nsg_info_id = None
self._associated_netconf_profile_id = None
self._vtep = None
self._auto_disc_gateway_id = None
self._external_id = None
self._system_id = None
self.expose_attribute(local_name="mac_address", remote_name="MACAddress", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="zfb_match_attribute", remote_name="ZFBMatchAttribute", attribute_type=str, is_required=False, is_unique=False, choices=[u'HOSTNAME', u'IP_ADDRESS', u'MAC_ADDRESS', u'NONE', u'SERIAL_NUMBER', u'UUID'])
self.expose_attribute(local_name="zfb_match_value", remote_name="ZFBMatchValue", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="bios_release_date", remote_name="BIOSReleaseDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="bios_version", remote_name="BIOSVersion", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="cpu_type", remote_name="CPUType", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="uuid", remote_name="UUID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
self.expose_attribute(local_name="family", remote_name="family", attribute_type=str, is_required=False, is_unique=False, choices=[u'ANY', u'NSG_AMI', u'NSG_AZ', u'NSG_C', u'NSG_E', u'NSG_E200', u'NSG_E300', u'NSG_V', u'NSG_X', u'NSG_X200', u'VRS'])
self.expose_attribute(local_name="management_id", remote_name="managementID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="datapath_id", remote_name="datapathID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="patches", remote_name="patches", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_connected", remote_name="gatewayConnected", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_model", remote_name="gatewayModel", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="gateway_version", remote_name="gatewayVersion", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="redundancy_group_id", remote_name="redundancyGroupID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="peer", remote_name="peer", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="template_id", remote_name="templateID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="pending", remote_name="pending", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="vendor", remote_name="vendor", attribute_type=str, is_required=False, is_unique=False, choices=[u'CISCO'])
self.expose_attribute(local_name="serial_number", remote_name="serialNumber", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="permitted_action", remote_name="permittedAction", attribute_type=str, is_required=False, is_unique=False, choices=[u'ALL', u'DEPLOY', u'EXTEND', u'INSTANTIATE', u'READ', u'USE'])
self.expose_attribute(local_name="personality", remote_name="personality", attribute_type=str, is_required=False, is_unique=False, choices=[u'DC7X50', u'EVDF', u'EVDFB', u'HARDWARE_VTEP', u'NETCONF_7X50', u'NETCONF_THIRDPARTY_HW_VTEP', u'NUAGE_210_WBX_32_Q', u'NUAGE_210_WBX_48_S', u'OTHER', u'VDFG', u'VRSB', u'VRSG', u'VSA', u'VSG'])
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="libraries", remote_name="libraries", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="location_id", remote_name="locationID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="bootstrap_id", remote_name="bootstrapID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="bootstrap_status", remote_name="bootstrapStatus", attribute_type=str, is_required=False, is_unique=False, choices=[u'ACTIVE', u'CERTIFICATE_SIGNED', u'INACTIVE', u'NOTIFICATION_APP_REQ_ACK', u'NOTIFICATION_APP_REQ_SENT'])
self.expose_attribute(local_name="product_name", remote_name="productName", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="use_gateway_vlanvnid", remote_name="useGatewayVLANVNID", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_gateway_security_id", remote_name="associatedGatewaySecurityID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_gateway_security_profile_id", remote_name="associatedGatewaySecurityProfileID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_nsg_info_id", remote_name="associatedNSGInfoID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="associated_netconf_profile_id", remote_name="associatedNetconfProfileID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="vtep", remote_name="vtep", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="auto_disc_gateway_id", remote_name="autoDiscGatewayID", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.expose_attribute(local_name="system_id", remote_name="systemID", attribute_type=str, is_required=False, is_unique=False)
# Fetchers
self.l2_domains = NUL2DomainsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.mac_filter_profiles = NUMACFilterProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.sap_egress_qo_s_profiles = NUSAPEgressQoSProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.sap_ingress_qo_s_profiles = NUSAPIngressQoSProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.gateway_securities = NUGatewaySecuritiesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.patnat_pools = NUPATNATPoolsFetcher.fetcher_with_object(parent_object=self, relationship="member")
self.deployment_failures = NUDeploymentFailuresFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.wan_services = NUWANServicesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.egress_profiles = NUEgressProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.alarms = NUAlarmsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.infrastructure_configs = NUInfrastructureConfigsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.ingress_profiles = NUIngressProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.enterprise_permissions = NUEnterprisePermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.jobs = NUJobsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.locations = NULocationsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.domains = NUDomainsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.bootstraps = NUBootstrapsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.bootstrap_activations = NUBootstrapActivationsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.ports = NUPortsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.ip_filter_profiles = NUIPFilterProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.ipv6_filter_profiles = NUIPv6FilterProfilesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.subnets = NUSubnetsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
# Properties
@property
def mac_address(self):
""" Get mac_address value.
Notes:
MAC Address of the first interface
This attribute is named `MACAddress` in VSD API.
"""
return self._mac_address
@mac_address.setter
def mac_address(self, value):
""" Set mac_address value.
Notes:
MAC Address of the first interface
This attribute is named `MACAddress` in VSD API.
"""
self._mac_address = value
@property
def zfb_match_attribute(self):
""" Get zfb_match_attribute value.
Notes:
The Zero Factor Bootstrapping (ZFB) Attribute that should be used to match the gateway on when it tries to bootstrap.
This attribute is named `ZFBMatchAttribute` in VSD API.
"""
return self._zfb_match_attribute
@zfb_match_attribute.setter
def zfb_match_attribute(self, value):
""" Set zfb_match_attribute value.
Notes:
The Zero Factor Bootstrapping (ZFB) Attribute that should be used to match the gateway on when it tries to bootstrap.
This attribute is named `ZFBMatchAttribute` in VSD API.
"""
self._zfb_match_attribute = value
@property
def zfb_match_value(self):
""" Get zfb_match_value value.
Notes:
The Zero Factor Bootstrapping (ZFB) value that needs to match with the gateway during the bootstrap attempt. This value needs to match with the ZFB Match Attribute.
This attribute is named `ZFBMatchValue` in VSD API.
"""
return self._zfb_match_value
@zfb_match_value.setter
def zfb_match_value(self, value):
""" Set zfb_match_value value.
Notes:
The Zero Factor Bootstrapping (ZFB) value that needs to match with the gateway during the bootstrap attempt. This value needs to match with the ZFB Match Attribute.
This attribute is named `ZFBMatchValue` in VSD API.
"""
self._zfb_match_value = value
@property
def bios_release_date(self):
""" Get bios_release_date value.
Notes:
Release Date of the BIOS. The format can vary based on the manufacturer but normally includes year/month/day or year/week details (eg. 01/01/2011 or 2018/06/15 or 2018/22)
This attribute is named `BIOSReleaseDate` in VSD API.
"""
return self._bios_release_date
@bios_release_date.setter
def bios_release_date(self, value):
""" Set bios_release_date value.
Notes:
Release Date of the BIOS. The format can vary based on the manufacturer but normally includes year/month/day or year/week details (eg. 01/01/2011 or 2018/06/15 or 2018/22)
This attribute is named `BIOSReleaseDate` in VSD API.
"""
self._bios_release_date = value
@property
def bios_version(self):
""" Get bios_version value.
Notes:
BIOS Version (eg. 0.5.1)
This attribute is named `BIOSVersion` in VSD API.
"""
return self._bios_version
@bios_version.setter
def bios_version(self, value):
""" Set bios_version value.
Notes:
BIOS Version (eg. 0.5.1)
This attribute is named `BIOSVersion` in VSD API.
"""
self._bios_version = value
@property
def cpu_type(self):
""" Get cpu_type value.
Notes:
The Processor Type as reported during bootstrapping.
This attribute is named `CPUType` in VSD API.
"""
return self._cpu_type
@cpu_type.setter
def cpu_type(self, value):
""" Set cpu_type value.
Notes:
The Processor Type as reported during bootstrapping.
This attribute is named `CPUType` in VSD API.
"""
self._cpu_type = value
@property
def uuid(self):
""" Get uuid value.
Notes:
UUID of the device
This attribute is named `UUID` in VSD API.
"""
return self._uuid
@uuid.setter
def uuid(self, value):
""" Set uuid value.
Notes:
UUID of the device
This attribute is named `UUID` in VSD API.
"""
self._uuid = value
@property
def name(self):
""" Get name value.
Notes:
Name of the Gateway
"""
return self._name
@name.setter
def name(self, value):
""" Set name value.
Notes:
Name of the Gateway
"""
self._name = value
@property
def family(self):
""" Get family value.
Notes:
The family type of the gateway based on common characteristics with other members of a particular variation of an NSG hardware or of a virtual deployment.
"""
return self._family
@family.setter
def family(self, value):
""" Set family value.
Notes:
The family type of the gateway based on common characteristics with other members of a particular variation of an NSG hardware or of a virtual deployment.
"""
self._family = value
@property
def management_id(self):
""" Get management_id value.
Notes:
The identifier of this gateway's management interface.
This attribute is named `managementID` in VSD API.
"""
return self._management_id
@management_id.setter
def management_id(self, value):
""" Set management_id value.
Notes:
The identifier of this gateway's management interface.
This attribute is named `managementID` in VSD API.
"""
self._management_id = value
@property
def last_updated_by(self):
    """ID of the user who last updated the object (`lastUpdatedBy` in VSD API)."""
    return self._last_updated_by

@last_updated_by.setter
def last_updated_by(self, value):
    """Set last_updated_by (`lastUpdatedBy` in VSD API)."""
    self._last_updated_by = value

@property
def datapath_id(self):
    """Identifier of the Gateway, based on the systemID generated when the
    instance is created in VSD (`datapathID` in VSD API)."""
    return self._datapath_id

@datapath_id.setter
def datapath_id(self, value):
    """Set datapath_id (`datapathID` in VSD API)."""
    self._datapath_id = value

@property
def patches(self):
    """Patches that have been installed on the NSG."""
    return self._patches

@patches.setter
def patches(self, value):
    """Set patches."""
    self._patches = value

@property
def gateway_connected(self):
    """A boolean flag indicating the status of the gateway
    (`gatewayConnected` in VSD API)."""
    return self._gateway_connected

@gateway_connected.setter
def gateway_connected(self, value):
    """Set gateway_connected (`gatewayConnected` in VSD API)."""
    self._gateway_connected = value

@property
def gateway_model(self):
    """The model string of the gateway; applicable to netconf managed
    gateways (`gatewayModel` in VSD API)."""
    return self._gateway_model

@gateway_model.setter
def gateway_model(self, value):
    """Set gateway_model (`gatewayModel` in VSD API)."""
    self._gateway_model = value

@property
def gateway_version(self):
    """The Gateway software version as reported during bootstrapping
    (`gatewayVersion` in VSD API)."""
    return self._gateway_version

@gateway_version.setter
def gateway_version(self, value):
    """Set gateway_version (`gatewayVersion` in VSD API)."""
    self._gateway_version = value

@property
def redundancy_group_id(self):
    """The Redundancy Gateway Group associated with this Gateway instance.
    Read-only attribute (`redundancyGroupID` in VSD API)."""
    return self._redundancy_group_id

@redundancy_group_id.setter
def redundancy_group_id(self, value):
    """Set redundancy_group_id (`redundancyGroupID` in VSD API)."""
    self._redundancy_group_id = value

@property
def peer(self):
    """The System ID of the peer gateway associated with this Gateway
    instance when it is discovered by the network manager (VSD) as being
    redundant."""
    return self._peer

@peer.setter
def peer(self, value):
    """Set peer."""
    self._peer = value
@property
def template_id(self):
    """The ID of the template that this Gateway was created from; set when
    instantiating a Gateway (`templateID` in VSD API)."""
    return self._template_id

@template_id.setter
def template_id(self, value):
    """Set template_id (`templateID` in VSD API)."""
    self._template_id = value

@property
def pending(self):
    """Indicates that this gateway is in pending state; when pending it
    cannot be modified from REST."""
    return self._pending

@pending.setter
def pending(self, value):
    """Set pending."""
    self._pending = value

@property
def vendor(self):
    """The vendor of the gateway; applicable to netconf managed gateways."""
    return self._vendor

@vendor.setter
def vendor(self, value):
    """Set vendor."""
    self._vendor = value

@property
def serial_number(self):
    """The device's serial number (`serialNumber` in VSD API)."""
    return self._serial_number

@serial_number.setter
def serial_number(self, value):
    """Set serial_number (`serialNumber` in VSD API)."""
    self._serial_number = value

@property
def permitted_action(self):
    """The permitted action to USE/EXTEND this Gateway
    (`permittedAction` in VSD API)."""
    return self._permitted_action

@permitted_action.setter
def permitted_action(self, value):
    """Set permitted_action (`permittedAction` in VSD API)."""
    self._permitted_action = value

@property
def personality(self):
    """Personality of the Gateway; cannot be changed after creation."""
    return self._personality

@personality.setter
def personality(self, value):
    """Set personality."""
    self._personality = value

@property
def description(self):
    """A description of the Gateway."""
    return self._description

@description.setter
def description(self, value):
    """Set description."""
    self._description = value

@property
def libraries(self):
    """Versions of monitored libraries currently installed on the Gateway."""
    return self._libraries

@libraries.setter
def libraries(self, value):
    """Set libraries."""
    self._libraries = value

@property
def enterprise_id(self):
    """The enterprise associated with this Gateway. Read-only attribute
    (`enterpriseID` in VSD API)."""
    return self._enterprise_id

@enterprise_id.setter
def enterprise_id(self, value):
    """Set enterprise_id (`enterpriseID` in VSD API)."""
    self._enterprise_id = value
@property
def entity_scope(self):
    """Specify if scope of entity is Data center or Enterprise level
    (`entityScope` in VSD API)."""
    return self._entity_scope

@entity_scope.setter
def entity_scope(self, value):
    """Set entity_scope (`entityScope` in VSD API)."""
    self._entity_scope = value

@property
def location_id(self):
    """Association to an object which contains location information about
    this gateway instance (`locationID` in VSD API)."""
    return self._location_id

@location_id.setter
def location_id(self, value):
    """Set location_id (`locationID` in VSD API)."""
    self._location_id = value

@property
def bootstrap_id(self):
    """The bootstrap details associated with this Gateway. NOTE: read-only;
    it can only be set during creation of a gateway
    (`bootstrapID` in VSD API)."""
    return self._bootstrap_id

@bootstrap_id.setter
def bootstrap_id(self, value):
    """Set bootstrap_id (`bootstrapID` in VSD API)."""
    self._bootstrap_id = value

@property
def bootstrap_status(self):
    """The bootstrap status of this Gateway. NOTE: read-only property
    (`bootstrapStatus` in VSD API)."""
    return self._bootstrap_status

@bootstrap_status.setter
def bootstrap_status(self, value):
    """Set bootstrap_status (`bootstrapStatus` in VSD API)."""
    self._bootstrap_status = value

@property
def product_name(self):
    """Product Name as reported during bootstrapping
    (`productName` in VSD API)."""
    return self._product_name

@product_name.setter
def product_name(self, value):
    """Set product_name (`productName` in VSD API)."""
    self._product_name = value

@property
def use_gateway_vlanvnid(self):
    """When set, VLAN-VNID mapping must be unique for all the vports of the
    gateway (`useGatewayVLANVNID` in VSD API)."""
    return self._use_gateway_vlanvnid

@use_gateway_vlanvnid.setter
def use_gateway_vlanvnid(self, value):
    """Set use_gateway_vlanvnid (`useGatewayVLANVNID` in VSD API)."""
    self._use_gateway_vlanvnid = value

@property
def associated_gateway_security_id(self):
    """Read-only ID of the associated gateway security object
    (`associatedGatewaySecurityID` in VSD API)."""
    return self._associated_gateway_security_id

@associated_gateway_security_id.setter
def associated_gateway_security_id(self, value):
    """Set associated_gateway_security_id
    (`associatedGatewaySecurityID` in VSD API)."""
    self._associated_gateway_security_id = value

@property
def associated_gateway_security_profile_id(self):
    """Read-only ID of the associated gateway security profile object
    (`associatedGatewaySecurityProfileID` in VSD API)."""
    return self._associated_gateway_security_profile_id

@associated_gateway_security_profile_id.setter
def associated_gateway_security_profile_id(self, value):
    """Set associated_gateway_security_profile_id
    (`associatedGatewaySecurityProfileID` in VSD API)."""
    self._associated_gateway_security_profile_id = value
@property
def associated_nsg_info_id(self):
    """Read-only ID of the associated gateway information object
    (`associatedNSGInfoID` in VSD API)."""
    return self._associated_nsg_info_id

@associated_nsg_info_id.setter
def associated_nsg_info_id(self, value):
    """Set associated_nsg_info_id (`associatedNSGInfoID` in VSD API)."""
    self._associated_nsg_info_id = value

@property
def associated_netconf_profile_id(self):
    """UUID of the Netconf Profile associated to this gateway
    (`associatedNetconfProfileID` in VSD API)."""
    return self._associated_netconf_profile_id

@associated_netconf_profile_id.setter
def associated_netconf_profile_id(self, value):
    """Set associated_netconf_profile_id
    (`associatedNetconfProfileID` in VSD API)."""
    self._associated_netconf_profile_id = value

@property
def vtep(self):
    """The system ID or Virtual IP of a service used by a Gateway (VSG for
    now) to establish a tunnel with a remote VSG or hypervisor. The format
    of this field is consistent with an IP address."""
    return self._vtep

@vtep.setter
def vtep(self, value):
    """Set vtep."""
    self._vtep = value

@property
def auto_disc_gateway_id(self):
    """The Auto Discovered Gateway associated with this Gateway instance
    (`autoDiscGatewayID` in VSD API)."""
    return self._auto_disc_gateway_id

@auto_disc_gateway_id.setter
def auto_disc_gateway_id(self, value):
    """Set auto_disc_gateway_id (`autoDiscGatewayID` in VSD API)."""
    self._auto_disc_gateway_id = value

@property
def external_id(self):
    """External object ID, used for integration with third party systems
    (`externalID` in VSD API)."""
    return self._external_id

@external_id.setter
def external_id(self, value):
    """Set external_id (`externalID` in VSD API)."""
    self._external_id = value

@property
def system_id(self):
    """Identifier of the Gateway; cannot be modified after creation
    (`systemID` in VSD API)."""
    return self._system_id

@system_id.setter
def system_id(self, value):
    """Set system_id (`systemID` in VSD API)."""
    self._system_id = value
## Custom methods
def is_template(self):
    """Tell whether this object is a template.

    Returns:
        (bool): always False — this entity is a concrete instance,
        never a template.
    """
    return False
def is_from_template(self):
    """Verify if the object has been instantiated from a template.

    Note:
        The object has to be fetched. Otherwise, it does not
        have information from its parent.

    Returns:
        (bool): True if the object was created from a template.
    """
    # The docstring promises a bool, but the raw attribute holds the
    # template's ID (or None/empty). Coerce so callers get a real boolean
    # instead of leaking the ID value.
    return bool(self.template_id)
| nuagenetworks/vspk-python | vspk/v5_0/nugateway.py | Python | bsd-3-clause | 45,810 |
# -*- coding: utf-8 -*-
from setuptools import setup
def readme():
    """Return the contents of README.rst for use as the long description.

    The encoding is pinned to UTF-8 because the README contains non-ASCII
    characters (the author's name); relying on the platform default
    encoding (e.g. cp1252 on Windows) can raise UnicodeDecodeError.
    """
    with open('README.rst', encoding='utf-8') as f:
        return f.read()
# Package metadata for PyPI / pip. Version 0.0.0 and the
# "Development Status :: 1 - Planning" classifier mark this as a
# pre-release placeholder.
setup(
    name='torrentsearch',
    version='0.0.0',
    description='Torrent search library.',
    long_description=readme(),
    url='https://github.com/romanpitak/torrentsearch',
    author='Roman Piták',
    author_email='roman@pitak.net',
    license='MIT',
    classifiers=[
        'Development Status :: 1 - Planning',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP :: Indexing/Search',
    ],
    keywords='torrent',
    packages=['torrentsearch'],
    # Tests are collected by nose (`python setup.py test`).
    test_suite='nose.collector',
    tests_require=['nose'],
    zip_safe=False,
)
| romanpitak/torrentsearch | setup.py | Python | mit | 761 |
## dea_bandindices.py
'''
Description: This file contains a set of python functions for computing
remote sensing band indices on Digital Earth Australia data.
License: The code in this notebook is licensed under the Apache License,
Version 2.0 (https://www.apache.org/licenses/LICENSE-2.0). Digital Earth
Australia data is licensed under the Creative Commons by Attribution 4.0
license (https://creativecommons.org/licenses/by/4.0/).
Contact: If you need assistance, please post a question on the Open Data
Cube Slack channel (http://slack.opendatacube.org/) or on the GIS Stack
Exchange (https://gis.stackexchange.com/questions/ask?tags=open-data-cube)
using the `open-data-cube` tag (you can view previously asked questions
here: https://gis.stackexchange.com/questions/tagged/open-data-cube).
If you would like to report an issue with this script, you can file one
on Github (https://github.com/GeoscienceAustralia/dea-notebooks/issues/new).
Last modified: March 2021
'''
# Import required packages
import warnings
import numpy as np
# Define custom functions
def calculate_indices(ds,
                      index=None,
                      collection=None,
                      custom_varname=None,
                      normalise=True,
                      drop=False,
                      inplace=False):
    """
    Takes an xarray dataset containing spectral bands, calculates one of
    a set of remote sensing indices, and adds the resulting array as a
    new variable in the original dataset.

    Note: by default, this function will create a new copy of the data
    in memory. This can be a memory-expensive operation, so to avoid
    this, set `inplace=True`.

    Last modified: March 2021

    Parameters
    ----------
    ds : xarray Dataset
        A two-dimensional or multi-dimensional array with containing the
        spectral bands required to calculate the index. These bands are
        used as inputs to calculate the selected water index.
    index : str or list of strs
        A string giving the name of the index to calculate or a list of
        strings giving the names of the indices to calculate:

        'AWEI_ns' (Automated Water Extraction Index,
                   no shadows, Feyisa 2014)
        'AWEI_sh' (Automated Water Extraction Index,
                   shadows, Feyisa 2014)
        'BAEI' (Built-Up Area Extraction Index, Bouzekri et al. 2015)
        'BAI' (Burn Area Index, Martin 1998)
        'BSI' (Bare Soil Index, Rikimaru et al. 2002)
        'BUI' (Built-Up Index, He et al. 2010)
        'CMR' (Clay Minerals Ratio, Drury 1987)
        'EVI' (Enhanced Vegetation Index, Huete 2002)
        'FMR' (Ferrous Minerals Ratio, Segal 1982)
        'IOR' (Iron Oxide Ratio, Segal 1982)
        'LAI' (Leaf Area Index, Boegh 2002)
        'MNDWI' (Modified Normalised Difference Water Index, Xu 2006)
        'MSAVI' (Modified Soil Adjusted Vegetation Index,
                 Qi et al. 1994)
        'NBI' (New Built-Up Index, Jieli et al. 2010)
        'NBR' (Normalised Burn Ratio, Lopez Garcia 1991)
        'NDBI' (Normalised Difference Built-Up Index, Zha 2003)
        'NDCI' (Normalised Difference Chlorophyll Index,
                Mishra & Mishra, 2012)
        'NDMI' (Normalised Difference Moisture Index, Gao 1996)
        'NDSI' (Normalised Difference Snow Index, Hall 1995)
        'NDTI' (Normalised Difference Tillage Index,
                Van Deventer et al. 1997)
        'NDVI' (Normalised Difference Vegetation Index, Rouse 1973)
        'NDWI' (Normalised Difference Water Index, McFeeters 1996)
        'SAVI' (Soil Adjusted Vegetation Index, Huete 1988)
        'TCB' (Tasseled Cap Brightness, Crist 1985)
        'TCG' (Tasseled Cap Greeness, Crist 1985)
        'TCW' (Tasseled Cap Wetness, Crist 1985)
        'TCB_GSO' (Tasseled Cap Brightness, Nedkov 2017)
        'TCG_GSO' (Tasseled Cap Greeness, Nedkov 2017)
        'TCW_GSO' (Tasseled Cap Wetness, Nedkov 2017)
        'WI' (Water Index, Fisher 2016)
        'kNDVI' (Non-linear Normalised Difference Vegation Index,
                 Camps-Valls et al. 2021)
    collection : str
        An string that tells the function what data collection is
        being used to calculate the index. This is necessary because
        different collections use different names for bands covering
        a similar spectra. Valid options are 'ga_ls_2' (for GA
        Landsat Collection 2), 'ga_ls_3' (for GA Landsat Collection 3)
        and 'ga_s2_1' (for GA Sentinel 2 Collection 1).
    custom_varname : str, optional
        By default, the original dataset will be returned with
        a new index variable named after `index` (e.g. 'NDVI'). To
        specify a custom name instead, you can supply e.g.
        `custom_varname='custom_name'`. Defaults to None, which uses
        `index` to name the variable.
    normalise : bool, optional
        Some coefficient-based indices (e.g. 'WI', 'BAEI', 'AWEI_ns',
        'AWEI_sh', 'TCW', 'TCG', 'TCB', 'TCW_GSO', 'TCG_GSO', 'TCB_GSO',
        'EVI', 'LAI', 'SAVI', 'MSAVI') produce different results if
        surface reflectance values are not scaled between 0.0 and 1.0
        prior to calculating the index. Setting `normalise=True` first
        scales values to a 0.0-1.0 range by dividing by 10000.0.
        Defaults to True.
    drop : bool, optional
        Provides the option to drop the original input data, thus saving
        space. if drop = True, returns only the index and its values.
    inplace: bool, optional
        If `inplace=True`, calculate_indices will modify the original
        array in-place, adding bands to the input dataset. The default
        is `inplace=False`, which will instead make a new copy of the
        original data (and use twice the memory).

    Returns
    -------
    ds : xarray Dataset
        The original xarray Dataset inputted into the function, with a
        new varible containing the remote sensing index as a DataArray.
        If drop = True, the new variable/s as DataArrays in the
        original Dataset.
    """

    # Set ds equal to a copy of itself in order to prevent the function
    # from editing the input dataset. This can prevent unexpected
    # behaviour though it uses twice as much memory.
    if not inplace:
        ds = ds.copy(deep=True)

    # Capture input band names in order to drop these if drop=True
    if drop:
        bands_to_drop = list(ds.data_vars)
        print(f'Dropping bands {bands_to_drop}')

    # Dictionary containing remote sensing index band recipes
    index_dict = {
                  # Normalised Difference Vegation Index, Rouse 1973
                  'NDVI': lambda ds: (ds.nir - ds.red) /
                                     (ds.nir + ds.red),

                  # Non-linear Normalised Difference Vegation Index,
                  # Camps-Valls et al. 2021
                  'kNDVI': lambda ds: np.tanh(((ds.nir - ds.red) /
                                               (ds.nir + ds.red)) ** 2),

                  # Enhanced Vegetation Index, Huete 2002
                  'EVI': lambda ds: ((2.5 * (ds.nir - ds.red)) /
                                     (ds.nir + 6 * ds.red -
                                      7.5 * ds.blue + 1)),

                  # Leaf Area Index, Boegh 2002
                  'LAI': lambda ds: (3.618 * ((2.5 * (ds.nir - ds.red)) /
                                              (ds.nir + 6 * ds.red -
                                               7.5 * ds.blue + 1)) - 0.118),

                  # Soil Adjusted Vegetation Index, Huete 1988
                  'SAVI': lambda ds: ((1.5 * (ds.nir - ds.red)) /
                                      (ds.nir + ds.red + 0.5)),

                  # Mod. Soil Adjusted Vegetation Index, Qi et al. 1994
                  'MSAVI': lambda ds: ((2 * ds.nir + 1 -
                                        ((2 * ds.nir + 1)**2 -
                                         8 * (ds.nir - ds.red))**0.5) / 2),

                  # Normalised Difference Moisture Index, Gao 1996
                  'NDMI': lambda ds: (ds.nir - ds.swir1) /
                                     (ds.nir + ds.swir1),

                  # Normalised Burn Ratio, Lopez Garcia 1991
                  'NBR': lambda ds: (ds.nir - ds.swir2) /
                                    (ds.nir + ds.swir2),

                  # Burn Area Index, Martin 1998
                  'BAI': lambda ds: (1.0 / ((0.10 - ds.red) ** 2 +
                                            (0.06 - ds.nir) ** 2)),

                  # Normalised Difference Chlorophyll Index,
                  # (Mishra & Mishra, 2012)
                  'NDCI': lambda ds: (ds.red_edge_1 - ds.red) /
                                     (ds.red_edge_1 + ds.red),

                  # Normalised Difference Snow Index, Hall 1995
                  'NDSI': lambda ds: (ds.green - ds.swir1) /
                                     (ds.green + ds.swir1),

                  # Normalised Difference Tillage Index,
                  # Van Deventer et al. 1997
                  'NDTI': lambda ds: (ds.swir1 - ds.swir2) /
                                     (ds.swir1 + ds.swir2),

                  # Normalised Difference Water Index, McFeeters 1996
                  'NDWI': lambda ds: (ds.green - ds.nir) /
                                     (ds.green + ds.nir),

                  # Modified Normalised Difference Water Index, Xu 2006
                  'MNDWI': lambda ds: (ds.green - ds.swir1) /
                                      (ds.green + ds.swir1),

                  # Normalised Difference Built-Up Index, Zha 2003
                  'NDBI': lambda ds: (ds.swir1 - ds.nir) /
                                     (ds.swir1 + ds.nir),

                  # Built-Up Index, He et al. 2010
                  'BUI': lambda ds: ((ds.swir1 - ds.nir) /
                                     (ds.swir1 + ds.nir)) -
                                    ((ds.nir - ds.red) /
                                     (ds.nir + ds.red)),

                  # Built-up Area Extraction Index, Bouzekri et al. 2015
                  'BAEI': lambda ds: (ds.red + 0.3) /
                                     (ds.green + ds.swir1),

                  # New Built-up Index, Jieli et al. 2010
                  'NBI': lambda ds: (ds.swir1 + ds.red) / ds.nir,

                  # Bare Soil Index, Rikimaru et al. 2002
                  'BSI': lambda ds: ((ds.swir1 + ds.red) -
                                     (ds.nir + ds.blue)) /
                                    ((ds.swir1 + ds.red) +
                                     (ds.nir + ds.blue)),

                  # Automated Water Extraction Index (no shadows), Feyisa 2014.
                  # NOTE: fixed an operator typo (`ds.nir * + 2.75 * ds.swir2`)
                  # that multiplied the NIR and SWIR2 terms instead of adding
                  # them; Feyisa 2014 defines 4*(G-SWIR1) - (0.25*NIR + 2.75*SWIR2).
                  'AWEI_ns': lambda ds: (4 * (ds.green - ds.swir1) -
                                         (0.25 * ds.nir + 2.75 * ds.swir2)),

                  # Automated Water Extraction Index (shadows), Feyisa 2014
                  'AWEI_sh': lambda ds: (ds.blue + 2.5 * ds.green -
                                         1.5 * (ds.nir + ds.swir1) -
                                         0.25 * ds.swir2),

                  # Water Index, Fisher 2016
                  'WI': lambda ds: (1.7204 + 171 * ds.green + 3 * ds.red -
                                    70 * ds.nir - 45 * ds.swir1 -
                                    71 * ds.swir2),

                  # Tasseled Cap Wetness, Crist 1985
                  'TCW': lambda ds: (0.0315 * ds.blue + 0.2021 * ds.green +
                                     0.3102 * ds.red + 0.1594 * ds.nir +
                                     -0.6806 * ds.swir1 + -0.6109 * ds.swir2),

                  # Tasseled Cap Greeness, Crist 1985
                  'TCG': lambda ds: (-0.1603 * ds.blue + -0.2819 * ds.green +
                                     -0.4934 * ds.red + 0.7940 * ds.nir +
                                     -0.0002 * ds.swir1 + -0.1446 * ds.swir2),

                  # Tasseled Cap Brightness, Crist 1985
                  'TCB': lambda ds: (0.2043 * ds.blue + 0.4158 * ds.green +
                                     0.5524 * ds.red + 0.5741 * ds.nir +
                                     0.3124 * ds.swir1 + -0.2303 * ds.swir2),

                  # Tasseled Cap Transformations with Sentinel-2 coefficients
                  # after Nedkov 2017 using Gram-Schmidt orthogonalization (GSO)
                  # Tasseled Cap Wetness, Nedkov 2017
                  'TCW_GSO': lambda ds: (0.0649 * ds.blue + 0.2802 * ds.green +
                                         0.3072 * ds.red + -0.0807 * ds.nir +
                                         -0.4064 * ds.swir1 + -0.5602 * ds.swir2),

                  # Tasseled Cap Greeness, Nedkov 2017
                  'TCG_GSO': lambda ds: (-0.0635 * ds.blue + -0.168 * ds.green +
                                         -0.348 * ds.red + 0.3895 * ds.nir +
                                         -0.4587 * ds.swir1 + -0.4064 * ds.swir2),

                  # Tasseled Cap Brightness, Nedkov 2017
                  'TCB_GSO': lambda ds: (0.0822 * ds.blue + 0.136 * ds.green +
                                         0.2611 * ds.red + 0.5741 * ds.nir +
                                         0.3882 * ds.swir1 + 0.1366 * ds.swir2),

                  # Clay Minerals Ratio, Drury 1987
                  'CMR': lambda ds: (ds.swir1 / ds.swir2),

                  # Ferrous Minerals Ratio, Segal 1982
                  'FMR': lambda ds: (ds.swir1 / ds.nir),

                  # Iron Oxide Ratio, Segal 1982
                  'IOR': lambda ds: (ds.red / ds.blue)
    }

    # If index supplied is not a list, convert to list. This allows us to
    # iterate through either multiple or single indices in the loop below
    indices = index if isinstance(index, list) else [index]

    # Calculate each index in the list of indices supplied
    for index in indices:

        # Select an index function from the dictionary
        index_func = index_dict.get(str(index))

        # If no index is provided or if no function is returned due to an
        # invalid option being provided, raise an exception informing user to
        # choose from the list of valid options
        if index is None:
            raise ValueError(f"No remote sensing `index` was provided. Please "
                             "refer to the function \ndocumentation for a full "
                             "list of valid options for `index` (e.g. 'NDVI')")

        elif (index in ['WI', 'BAEI', 'AWEI_ns', 'AWEI_sh', 'TCW',
                        'TCG', 'TCB', 'TCW_GSO', 'TCG_GSO', 'TCB_GSO',
                        'EVI', 'LAI', 'SAVI', 'MSAVI']
              and not normalise):
            warnings.warn(f"\nA coefficient-based index ('{index}') normally "
                          "applied to surface reflectance values in the \n"
                          "0.0-1.0 range was applied to values in the 0-10000 "
                          "range. This can produce unexpected results; \nif "
                          "required, resolve this by setting `normalise=True`")

        elif index_func is None:
            raise ValueError(f"The selected index '{index}' is not one of the "
                             "valid remote sensing index options. \nPlease "
                             "refer to the function documentation for a full "
                             "list of valid options for `index`")

        # Rename bands to a consistent format if depending on what collection
        # is specified in `collection`. This allows the same index calculations
        # to be applied to all collections. If no collection was provided,
        # raise an exception.
        if collection is None:
            raise ValueError("'No `collection` was provided. Please specify "
                             "either 'ga_ls_2', 'ga_ls_3' or 'ga_s2_1' \nto "
                             "ensure the function calculates indices using the "
                             "correct spectral bands")

        elif collection == 'ga_ls_3':
            # Dictionary mapping full data names to simpler 'red' alias names
            bandnames_dict = {
                'nbart_nir': 'nir',
                'nbart_red': 'red',
                'nbart_green': 'green',
                'nbart_blue': 'blue',
                'nbart_swir_1': 'swir1',
                'nbart_swir_2': 'swir2',
                'nbar_red': 'red',
                'nbar_green': 'green',
                'nbar_blue': 'blue',
                'nbar_nir': 'nir',
                'nbar_swir_1': 'swir1',
                'nbar_swir_2': 'swir2'
            }

            # Rename bands in dataset to use simple names (e.g. 'red')
            bands_to_rename = {
                a: b for a, b in bandnames_dict.items() if a in ds.variables
            }

        elif collection == 'ga_s2_1':
            # Dictionary mapping full data names to simpler 'red' alias names
            bandnames_dict = {
                'nbart_red': 'red',
                'nbart_green': 'green',
                'nbart_blue': 'blue',
                'nbart_nir_1': 'nir',
                'nbart_red_edge_1': 'red_edge_1',
                'nbart_red_edge_2': 'red_edge_2',
                'nbart_swir_2': 'swir1',
                'nbart_swir_3': 'swir2',
                'nbar_red': 'red',
                'nbar_green': 'green',
                'nbar_blue': 'blue',
                'nbar_nir_1': 'nir',
                'nbar_red_edge_1': 'red_edge_1',
                'nbar_red_edge_2': 'red_edge_2',
                'nbar_swir_2': 'swir1',
                'nbar_swir_3': 'swir2'
            }

            # Rename bands in dataset to use simple names (e.g. 'red')
            bands_to_rename = {
                a: b for a, b in bandnames_dict.items() if a in ds.variables
            }

        elif collection == 'ga_ls_2':
            # Pass an empty dict as no bands need renaming
            bands_to_rename = {}

        # Raise error if no valid collection name is provided:
        else:
            raise ValueError(f"'{collection}' is not a valid option for "
                             "`collection`. Please specify either \n"
                             "'ga_ls_2', 'ga_ls_3' or 'ga_s2_1'")

        # Apply index function
        try:
            # If normalised=True, divide data by 10,000 before applying func
            mult = 10000.0 if normalise else 1.0
            index_array = index_func(ds.rename(bands_to_rename) / mult)
        except AttributeError:
            raise ValueError(f'Please verify that all bands required to '
                             f'compute {index} are present in `ds`. \n'
                             f'These bands may vary depending on the `collection` '
                             f'(e.g. the Landsat `nbart_nir` band \n'
                             f'is equivelent to `nbart_nir_1` for Sentinel 2)')

        # Add as a new variable in dataset
        output_band_name = custom_varname if custom_varname else index
        ds[output_band_name] = index_array

    # Once all indexes are calculated, drop input bands if inplace=False
    if drop and not inplace:
        ds = ds.drop(bands_to_drop)

    # If inplace == True, delete bands in-place instead of using drop
    if drop and inplace:
        for band_to_drop in bands_to_drop:
            del ds[band_to_drop]

    # Return input dataset with added water index variable
    return ds
| ceos-seo/data_cube_utilities | dea_tools/dea_tools/bandindices.py | Python | apache-2.0 | 19,668 |
#! /usr/bin/python
# -*- coding: utf-8 -*-
# import funkcí z jiného adresáře
import os.path
path_to_script = os.path.dirname(os.path.abspath(__file__))
# sys.path.append(os.path.join(path_to_script, "../extern/pyseg_base/src/"))
import unittest
import numpy as np
import os
from imtools import qmisc
from imtools import misc
#
class QmiscTest(unittest.TestCase):
    """Tests for imtools.misc / imtools.qmisc helpers."""

    interactivetTest = False
    # interactivetTest = True

    def test_suggest_filename(self):
        """
        Testing some files. Not testing recursion in filenames. It is situation
        if there exist file0, file1, file2 and input file is file
        """
        filename = "mujsoubor"
        # import ipdb; ipdb.set_trace() # BREAKPOINT
        new_filename = misc.suggest_filename(filename, exists=True)
        # self.assertTrue(new_filename == "mujsoubor2")
        self.assertEqual(new_filename, "mujsoubor_2")

        filename = "mujsoubor_112"
        new_filename = misc.suggest_filename(filename, exists=True)
        self.assertTrue(new_filename == "mujsoubor_113")

        filename = "mujsoubor_2.txt"
        new_filename = misc.suggest_filename(filename, exists=True)
        self.assertTrue(new_filename == "mujsoubor_3.txt")

        filename = "mujsoubor27.txt"
        new_filename = misc.suggest_filename(filename, exists=True)
        self.assertTrue(new_filename == "mujsoubor27_2.txt")

        filename = "mujsoubor-a24.txt"
        new_filename = misc.suggest_filename(filename, exists=False)
        self.assertEqual(new_filename, "mujsoubor-a24.txt", "Rewrite")

    @unittest.skip("getVersionString is not used anymore")
    def test_getVersionString(self):
        """
        getVersionString is not used anymore
        """
        vfn = "../__VERSION__"
        # Track whether THIS test created the version file, so we only
        # clean up a file we made. (Previous code initialised the flag to
        # False in both branches, so the created file was never removed.)
        created = False
        if not os.path.exists(vfn):
            with open(vfn, 'a') as the_file:
                the_file.write('1.1.1\n')
            created = True
        verstr = qmisc.getVersionString()

        self.assertTrue(type(verstr) == str)
        if created:
            os.remove(vfn)

    def test_obj_to_and_from_file_yaml(self):
        """Round-trip a dict with a numpy array through YAML serialisation."""
        testdata = np.random.random([4, 4, 3])
        test_object = {'a': 1, 'data': testdata}

        filename = 'test_obj_to_and_from_file.yaml'
        misc.obj_to_file(test_object, filename, 'yaml')
        saved_object = misc.obj_from_file(filename, 'yaml')

        self.assertTrue(saved_object['a'] == 1)
        self.assertTrue(saved_object['data'][1, 1, 1] == testdata[1, 1, 1])

        os.remove(filename)

    def test_obj_to_and_from_file_pickle(self):
        """Round-trip a dict with a numpy array through pickle serialisation."""
        testdata = np.random.random([4, 4, 3])
        test_object = {'a': 1, 'data': testdata}

        filename = 'test_obj_to_and_from_file.pkl'
        misc.obj_to_file(test_object, filename, 'pickle')
        saved_object = misc.obj_from_file(filename, 'pickle')

        self.assertTrue(saved_object['a'] == 1)
        self.assertTrue(saved_object['data'][1, 1, 1] == testdata[1, 1, 1])

        os.remove(filename)

    # def test_obj_to_and_from_file_exeption(self):
    #     test_object = [1]
    #     filename = 'test_obj_to_and_from_file_exeption'
    #     self.assertRaises(misc.obj_to_file(test_object, filename ,'yaml'))

    def test_obj_to_and_from_file_with_directories(self):
        """obj_to_file should create intermediate directories as needed."""
        import shutil
        testdata = np.random.random([4, 4, 3])
        test_object = {'a': 1, 'data': testdata}

        dirname = '__test_write_and_read'
        filename = '__test_write_and_read/test_obj_to_and_from_file.pkl'
        misc.obj_to_file(test_object, filename, 'pickle')
        saved_object = misc.obj_from_file(filename, 'pickle')

        self.assertTrue(saved_object['a'] == 1)
        self.assertTrue(saved_object['data'][1, 1, 1] == testdata[1, 1, 1])

        shutil.rmtree(dirname)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| mjirik/imtools | tests/qmisc_test.py | Python | mit | 3,908 |
import db_utils

# NOTE(review): the test database is initialised BEFORE models.user is
# imported — presumably so the model module binds to the test DB at import
# time; verify against db_utils/models before reordering these lines.
db_utils.init_db(db_utils.db_info_test)

import unittest
import models.user as user
class TestUserFunctions(unittest.TestCase):
    """Registration / authentication tests for models.user."""

    def setUp(self):
        pass

    def test_init(self):
        """Register, re-register (duplicate), fetch info, and authenticate."""
        # First registration succeeds.
        code, msg = user.register_user(email="c4pt0r@126.com", password="shit")
        self.assertEqual(code, user.UserErrorCode.ERR_OK)

        # Registering the same email again must be rejected.
        code, msg = user.register_user(email="c4pt0r@126.com", password="shit")
        self.assertEqual(code, user.UserErrorCode.ERR_EMAIL_DUPLICATED)

        # First user created gets uid 1.
        # (Removed a stale copy-pasted assertion here that re-checked the
        # previous call's `code` instead of anything about get_user_info.)
        info = user.get_user_info("c4pt0r@126.com", [])
        self.assertEqual(info['uid'], 1)

        # Correct credentials authenticate.
        code, msg = user.auth("c4pt0r@126.com", "shit")
        self.assertEqual(code, user.UserErrorCode.ERR_OK)

        # Wrong password is rejected.
        code, msg = user.auth("c4pt0r@126.com", "shit2")
        self.assertEqual(code, user.UserErrorCode.ERR_PASSWORD_ERROR)

        # NOTE(review): an unknown email also reports ERR_PASSWORD_ERROR —
        # presumably deliberate (don't reveal which accounts exist); confirm
        # against models.user.
        code, msg = user.auth("c4pt01r@126.com", "shit")
        self.assertEqual(code, user.UserErrorCode.ERR_PASSWORD_ERROR)

    def tearDown(self):
        # Reset state between tests by dropping the user collection.
        db_utils.db.drop_collection('user')
        pass
| c4pt0r/tornado-user | tests/user_test.py | Python | mit | 1,119 |
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
import Autodesk
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
def DuplicateView(view, name, doc):
    # Duplicate a Revit view (including detailing) and try to rename the copy.
    # Bare excepts are deliberate best-effort Dynamo style: a failed rename
    # still returns the duplicate, a failed duplication returns None.
    try:
        newViewId = view.Duplicate(Autodesk.Revit.DB.ViewDuplicateOption.WithDetailing)
        newView = doc.GetElement(newViewId)
        try: newView.Name = name
        except: pass
        return newView
    except: return None
doc = DocumentManager.Instance.CurrentDBDocument
views = UnwrapElement(IN[0])
TransactionManager.Instance.EnsureInTransaction(doc)
if isinstance(IN[0], list):
OUT = []
for view, name in zip(views, IN[1]):
if isinstance(name, list): OUT.append([DuplicateView(view, x, doc) for x in name])
else: OUT.append(DuplicateView(view, name, doc))
else:
if isinstance(IN[1], list): OUT = [DuplicateView(views, x, doc) for x in IN[1]]
else: OUT = DuplicateView(views, IN[1], doc)
TransactionManager.Instance.TransactionTaskDone() | andydandy74/ClockworkForDynamo | nodes/2.x/python/View.DuplicateWithDetailing.py | Python | mit | 1,027 |
# -*- coding: utf-8 -*-
from datetime import datetime
from app import db
from app.models import components_tags
from app.users.models import User
from app.tags.models import Tag
from app.util import unix_time
class WebComponent(db.Model):
    # SQLAlchemy model for a registered web component, owned by a User and
    # tagged via the components_tags association table.
    __tablename__ = 'web_component'
    id = db.Column(db.Integer, primary_key=True)
    # Set once in __init__; creation timestamp.
    created = db.Column(db.DateTime)
    name = db.Column(
        db.String,
        index=True,
        unique=True)
    description = db.Column(db.String)
    owner_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    owner = db.relationship(
        User, backref=db.backref('web_components', lazy='dynamic'))
    repository_url = db.Column(db.String(256))
    tags = db.relationship(
        Tag,
        secondary=components_tags,
        backref=db.backref('web_components', lazy='dynamic'))
    def __init__(
            self,
            name,
            description,
            owner,
            repository_url):
        # `created` is stamped with the local server time at construction.
        self.created = datetime.now()
        self.name = name
        self.description = description
        self.owner = owner
        self.repository_url = repository_url
    def __iter__(self):
        # Yields (key, value) pairs so dict(instance) produces a JSON-friendly
        # representation; Python 2 only (iteritems).
        return {
            'id': self.id,
            'created': unix_time(self.created),
            'name': self.name,
            'description': self.description,
            'owner': dict(self.owner),
            'repository_url': self.repository_url,
            'tags': [dict(tag) for tag in self.tags]
        }.iteritems()
    def __repr__(self):
        return '<WebComponent:%s>' % self.name
#!/usr/bin/env python
# Flowtools - a suite of tools for handling and drawing flow data
# Copyright (C) 2013 Petter Johansson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import numpy as np
import pylab as plt
from flowtools.draw import plot_line
from flowtools.datamaps import Spread
from flowtools.utils import combine_spread, get_colours, get_labels, get_linestyles, get_shift
def com_plot(args):
"""Draw the center of mass height as a function of time."""
# Get colours, labels and line styles from default
colours = get_colours(args.colour, len(args.spreading))
labels, draw_legend = get_labels(args.label, len(args.spreading))
linestyles = {}
linestyles['line'] = get_linestyles(args.linestyle, len(args.spreading))
linestyles['error'] = get_linestyles(args.errorstyle, len(args.spreading),
'dashed')
# Find shift array for synchronisation
shift_array = get_shift(args.spreading, sync=args.sync)
for i, spread_list in enumerate(args.spreading):
spread, data = combine_spread(spread_list, shift=shift_array[i])
# Check if error bars need to be included
error = args.error and len(spread_list) > 1
# Create graph
if args.nomean:
label = labels[i]
for spread_data in data:
domain = spread_data.times
line = spread_data.dist
plot_line(line=line, domain=domain, color=colours[i],
label=label, linestyle=linestyles['line'][i])
label = '_nolegend_'
else:
domain = spread.times
line = spread.dist
plot_line(line=line, domain=domain, color=colours[i],
label=labels[i], linestyle=linestyles['line'][i])
if error:
domain = spread.times
line = list(np.array(spread.dist)
+ np.array(spread.spread['dist']['std_error'])
* args.sigma)
plot_line(line=line, domain=domain, color=colours[i],
linestyle=linestyles['error'][i])
line = list(np.array(spread.dist)
- np.array(spread.spread['dist']['std_error'])
* args.sigma)
plot_line(line=line, domain=domain, color=colours[i],
linestyle=linestyles['error'][i])
plt.title(args.title)
plt.xlabel(args.xlabel)
plt.ylabel(args.ylabel)
plt.axis('normal')
plt.xlim([args.t0, args.tend])
if draw_legend:
plt.legend()
# Finish by saving and / or showing
if args.save:
plt.savefig(args.save)
if args.show:
plt.show()
return None
if __name__ == '__main__':
# Initiate subparsers for operations
parser = argparse.ArgumentParser()
line = parser.add_argument_group(title="Main")
line.add_argument('-f', '--files', dest='spreading', action='append',
nargs='+', metavar="FILES", required=True,
help="list of spreading data files")
line.add_argument('-s', '--save', metavar="PATH",
help="optionally save output image to path")
# --show or --noshow for drawing figure
line_show = line.add_mutually_exclusive_group()
line_show.add_argument('--show', action='store_true', dest='show',
default=True,
help=("show graph (default: true, --noshow to turn off)"))
line_show.add_argument('--noshow', action='store_false', dest='show',
help=argparse.SUPPRESS)
line.add_argument('--sync', choices=['com', 'impact'], default='com',
help="synchronise times of different data to a common distance "
"to the center of mass ('com', default), or to time of impact "
"('impact')")
# Specifially add --nomean to plot
line.add_argument('--nomean', action='store_true',
help="don't take the mean of spread lines, "
"instead draw individually")
# Error plotting
error = parser.add_argument_group(title="Error",
description="options for error of data")
# --error or --noerror for drawing error bars
error_group = error.add_mutually_exclusive_group()
error_group.add_argument('--error', action='store_true', dest='error',
default=True,
help=("show error for lines if multiple files entered "
"(default: true, --noerror to turn off)"))
error_group.add_argument('--noerror', action='store_false',
dest='error', help=argparse.SUPPRESS)
error.add_argument('--sigma', type=float, default=1.,
help="number of standard deviations from mean, or Z-score, "
"for error lines (default: 1)")
# Decoration options
decoration = parser.add_argument_group(title="Graph decoration",
description="options for decorating the graph")
decoration.add_argument('-c', '--colour', action='append', default=[],
help="line colour, add once per line")
decoration.add_argument('-l', '--label', action='append', default=[],
help="line label, add once per line")
decoration.add_argument('--linestyle', action='append', default=[],
choices=['solid', 'dashed', 'dashdot', 'dotted'],
help="line style (default: solid)")
decoration.add_argument('--errorstyle', action='append', default=[],
choices=['solid', 'dashed', 'dashdot', 'dotted'],
help="error line style (default: dashed)")
decoration.add_argument('-t0', type=float, default=None, metavar="TIME",
dest='t0', help="start time of graph")
decoration.add_argument('-tend', type=float, default=None, metavar="TIME",
dest='tend', help="maximum time of graph")
decoration.add_argument('--title',
default="Spreading of droplet on substrate", help="graph title")
decoration.add_argument('--xlabel', metavar="LABEL",
default="Time (ps)", help="label of x axis")
decoration.add_argument('--ylabel', metavar="LABEL",
default="Spreading from center of mass (nm)",
help="label of y axis")
# Parse arguments and call function
args = parser.parse_args()
com_plot(args)
| pjohansson/flowtools | scripts/f_spread_com.py | Python | gpl-3.0 | 6,847 |
# Legobot
# Copyright (C) 2016 Brenton Briggs, Kevin McCabe, and Drew Bronson
import logging
from Legobot.Lego import Lego
logger = logging.getLogger(__name__)
class Help(Lego):
    """Lego that answers '!help': lists all available legos, or shows the
    help text of the lego named in the argument (e.g. '!help foo')."""

    @staticmethod
    def listening_for(message):
        """Return True when the message text begins with '!help'."""
        if message['text'] is not None:
            try:
                return message['text'].split()[0] == '!help'
            except Exception as e:
                logger.error(
                    'Help lego failed to check message text: {0!s}'.format(e))
        return False

    def handle(self, message):
        """Reply with the list of lego names, or one lego's help string."""
        logger.info(message)
        try:
            target = message['metadata']['source_channel']
        except (KeyError, IndexError):
            # Bug fix: dict lookups raise KeyError; the original caught only
            # IndexError, leaving `target` unbound and crashing with a
            # NameError further down. Without a source channel there is
            # nowhere to send the reply, so bail out.
            logger.error('Could not identify message source in message: {0!s}'
                         .format(str(message)))
            return
        try:
            function = message['text'].split()[1]
        except IndexError:
            # Bare '!help' with no argument.
            function = None
        baseplate_proxy = self.baseplate.proxy()
        legos = baseplate_proxy.children.get()
        help_str = 'No help is available. Sorry.'
        if not function:
            # No argument: list every lego that advertises a name.
            lego_names = []
            for lego in legos:
                lego_proxy = lego.proxy()
                if lego_proxy.get_name().get() is not None:
                    lego_names.append(lego_proxy.get_name().get())
            help_str = 'Available functions: ' + ', '.join(lego_names)
        if function:
            # Argument given: look up that lego's own help text.
            for lego in legos:
                lego_proxy = lego.proxy()
                if lego_proxy.get_name().get() == function:
                    help_str = lego_proxy.get_help().get()
        opts = {'target': target}
        self.reply(message, help_str, opts=opts)

    @staticmethod
    def get_name():
        # The Help lego itself is not listed among the available functions.
        return None
| p4rsec/Legobot | Legobot/Legos/Help.py | Python | gpl-2.0 | 1,759 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-13 08:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial schema for the 'manager' app (Cluster, DataCenter,
    # Manager, StorageDomain, VM, VmBackups). Django compares this file against
    # recorded migration state -- do not edit operations by hand.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Cluster',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('clid', models.CharField(max_length=128, null=True)),
                ('name', models.CharField(max_length=256)),
                ('discovered', models.DateTimeField(editable=False)),
                ('updated', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='DataCenter',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('dcid', models.CharField(max_length=128, null=True)),
                ('name', models.CharField(max_length=256)),
                ('discovered', models.DateTimeField(editable=False)),
                ('updated', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='Manager',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=256)),
                ('fqdn', models.CharField(max_length=256)),
                ('url', models.CharField(max_length=512)),
                ('username', models.CharField(max_length=64)),
                ('password', models.CharField(max_length=64)),
                ('version', models.CharField(max_length=16)),
                ('discovered', models.DateTimeField(editable=False)),
                ('updated', models.DateTimeField()),
            ],
        ),
        migrations.CreateModel(
            name='StorageDomain',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('sdid', models.CharField(max_length=128)),
                ('name', models.CharField(max_length=256)),
                ('type', models.CharField(max_length=16)),
                ('status', models.CharField(max_length=16)),
            ],
        ),
        migrations.CreateModel(
            name='VM',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=256)),
                ('vmid', models.CharField(max_length=128)),
                ('discovered', models.DateTimeField(editable=False)),
                ('updated', models.DateTimeField()),
                ('status', models.CharField(max_length=16)),
                ('protected', models.BooleanField(default=False)),
                ('last_backup', models.DateTimeField(null=True)),
                ('cluster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='manager.Cluster')),
            ],
        ),
        migrations.CreateModel(
            name='VmBackups',
            fields=[
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=256)),
                ('export', models.CharField(max_length=64)),
                ('status', models.IntegerField(default=3)),
                ('start', models.DateTimeField(null=True)),
                ('end', models.DateTimeField(null=True)),
                ('updated', models.DateTimeField(auto_now=True)),
                ('size', models.IntegerField(default=0)),
                ('log', models.TextField(null=True)),
                ('vmid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='manager.VM')),
            ],
        ),
        migrations.AddField(
            model_name='datacenter',
            name='manager',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='manager.Manager'),
        ),
        migrations.AddField(
            model_name='cluster',
            name='dc',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='manager.DataCenter'),
        ),
    ]
| openbacchus/bacchus | manager/migrations/0001_initial.py | Python | apache-2.0 | 4,200 |
"""
Module for miscellaneous functions.
"""
import random
from collections import defaultdict
from string import letters
from lxml.builder import ElementMaker
from lxml import etree
def host_and_page(url):
    """Split a URL into its host name and the page path (with leading '/')."""
    remainder = url.split('//')[1]
    host, _, tail = remainder.partition('/')
    return host, '/' + tail
def read_from_url(url):
    """ GET this `url` and read the response body as a string. """
    # Python 2 only: httplib was renamed http.client in Python 3.
    import httplib
    host, page = host_and_page(url)
    conn = httplib.HTTPConnection(host)
    conn.request("GET", page)
    response = conn.getresponse()
    # NOTE(review): the connection is never closed explicitly; relies on GC.
    return response.read()
def write_tmx(stream, sentence_pairs, language_a, language_b):
    """ Writes the SentencePair's out in tmx format, """
    maker = ElementMaker()
    # Random placeholder tokens are serialized through lxml and then
    # text-replaced afterwards -- presumably to emit a literal "xml:lang"
    # attribute and the raw sentence text without lxml's namespace handling
    # and escaping. TODO confirm this is the intent.
    token = "".join(random.sample(letters * 3, 50))
    token_a = "".join(random.sample(letters * 3, 50))
    token_b = "".join(random.sample(letters * 3, 50))
    header = maker.header(srclang=language_a,
                          segtype="sentence",
                          creationtool="MTrans",
                          datatype="PlainText")
    stream.write("<?xml version=\"1.0\" ?>\n")
    stream.write("<!DOCTYPE tmx SYSTEM \"tmx14.dtd\">\n")
    stream.write("<tmx version=\"1.4\">\n")
    stream.write(etree.tostring(header, encoding="utf-8"))
    stream.write("\n<body>\n")
    for sentence_a, sentence_b in sentence_pairs:
        src_tuv = maker.tuv({token: language_a}, maker.seg(token_a))
        tgt_tuv = maker.tuv({token: language_b}, maker.seg(token_b))
        tu = maker.tu(src_tuv, tgt_tuv)
        tu_text = etree.tostring(tu, encoding="utf-8",
                                 pretty_print=True)
        tu_text = tu_text.replace(token, "xml:lang")
        # Segment text placeholders are only filled when both sentences exist;
        # otherwise the random tokens remain as the seg contents.
        if sentence_a and sentence_b:
            tu_text = tu_text.replace(token_a, sentence_a.to_text())
            tu_text = tu_text.replace(token_b, sentence_b.to_text())
        stream.write(tu_text)
    stream.write("</body>\n</tmx>")
class CacheOfSizeOne(object):
    """Wrap a function and cache the result of the most recent call.

    Calling the wrapper with the same arguments as the previous call returns
    the cached result instead of re-invoking the function. Any other
    attribute access is delegated to the wrapped function.
    """
    f = None

    def __init__(self, f):
        self.f = f
        self.args = None
        self.kwargs = None

    def __call__(self, *args, **kwargs):
        # Hit only when both positional and keyword arguments match the
        # previous invocation exactly.
        if self.args == args and self.kwargs == kwargs:
            return self.result
        self.result = self.f(*args, **kwargs)
        self.args = args
        self.kwargs = kwargs
        return self.result

    def __getattr__(self, name):
        # Fall back to the wrapped function for unknown attributes.
        return getattr(self.f, name)
class Memoized(defaultdict):
    """defaultdict variant whose factory receives the missing *key*.

    On a missing lookup the value is computed as ``default_factory(key)``,
    stored back into the mapping, and returned.
    """
    def __missing__(self, key):
        value = self.default_factory(key)
        self[key] = value
        return value
| pombredanne/yalign | yalign/utils.py | Python | bsd-3-clause | 2,689 |
"""runpy.py - locating and running Python code using the module namespace
Provides support for locating and running Python scripts using the Python
module namespace instead of the native filesystem.
This allows Python code to play nicely with non-filesystem based PEP 302
importers when locating support scripts as well as when importing modules.
"""
# Written by Nick Coghlan <ncoghlan at gmail.com>
# to implement PEP 338 (Executing Modules as Scripts)
import sys
import imp
try:
from imp import get_loader
except ImportError:
from pkgutil import get_loader
__all__ = [
"run_module",
]
def _run_code(code, run_globals, init_globals=None,
              mod_name=None, mod_fname=None,
              mod_loader=None, pkg_name=None):
    """Helper for _run_module_code"""
    # Populate the namespace with caller-supplied globals first, then force
    # the module-identity dunders so the code sees itself as `mod_name`.
    if init_globals is not None:
        run_globals.update(init_globals)
    run_globals.update(__name__ = mod_name,
                       __file__ = mod_fname,
                       __loader__ = mod_loader,
                       __package__ = pkg_name)
    # Python 2 exec-statement form.
    exec code in run_globals
    return run_globals
def _run_module_code(code, init_globals=None,
                     mod_name=None, mod_fname=None,
                     mod_loader=None, pkg_name=None):
    """Helper for run_module"""
    # Set up the top level namespace dictionary
    temp_module = imp.new_module(mod_name)
    mod_globals = temp_module.__dict__
    # Modify sys.argv[0] and sys.module[mod_name]
    saved_argv0 = sys.argv[0]
    restore_module = mod_name in sys.modules
    if restore_module:
        saved_module = sys.modules[mod_name]
    sys.argv[0] = mod_fname
    sys.modules[mod_name] = temp_module
    try:
        _run_code(code, mod_globals, init_globals,
                  mod_name, mod_fname,
                  mod_loader, pkg_name)
    finally:
        # Always restore the caller-visible interpreter state, even when the
        # executed code raises.
        sys.argv[0] = saved_argv0
        if restore_module:
            sys.modules[mod_name] = saved_module
        else:
            del sys.modules[mod_name]
    # Copy the globals of the temporary module, as they
    # may be cleared when the temporary module goes away
    return mod_globals.copy()
# This helper is needed due to a missing component in the PEP 302
# loader protocol (specifically, "get_filename" is non-standard)
def _get_filename(loader, mod_name):
try:
get_filename = loader.get_filename
except AttributeError:
return None
else:
return get_filename(mod_name)
# Helper to get the loader, code and filename for a module
def _get_module_details(mod_name):
    # Resolve a module name to its (loader, code object, filename) triple,
    # raising ImportError for missing modules, packages, and loaders that
    # cannot produce a code object.
    loader = get_loader(mod_name)
    if loader is None:
        raise ImportError("No module named %s" % mod_name)
    if loader.is_package(mod_name):
        raise ImportError(("%s is a package and cannot " +
                           "be directly executed") % mod_name)
    code = loader.get_code(mod_name)
    if code is None:
        raise ImportError("No code object available for %s" % mod_name)
    # filename may be None -- get_filename is a non-standard loader method.
    filename = _get_filename(loader, mod_name)
    return loader, code, filename
# XXX ncoghlan: Should this be documented and made public?
# (Current thoughts: don't repeat the mistake that lead to its
# creation when run_module() no longer met the needs of
# mainmodule.c, but couldn't be changed because it was public)
def _run_module_as_main(mod_name, set_argv0=True):
    """Runs the designated module in the __main__ namespace.

    These __*__ magic variables will be overwritten:
        __file__
        __loader__
    """
    try:
        loader, code, fname = _get_module_details(mod_name)
    except ImportError as exc:
        # Try to provide a good error message
        # for directories, zip files and the -m switch
        if set_argv0:
            # For -m switch, just disply the exception
            info = str(exc)
        else:
            # For directories/zipfiles, let the user
            # know what the code was looking for
            info = "can't find '__main__.py' in %r" % sys.argv[0]
        msg = "%s: %s" % (sys.executable, info)
        sys.exit(msg)
    pkg_name = mod_name.rpartition('.')[0]
    # Execute directly in the real __main__ namespace, not a copy.
    main_globals = sys.modules["__main__"].__dict__
    if set_argv0:
        sys.argv[0] = fname
    return _run_code(code, main_globals, None,
                     "__main__", fname, loader, pkg_name)
def run_module(mod_name, init_globals=None,
               run_name=None, alter_sys=False):
    """Execute a module's code without importing it.

    Returns the resulting top level namespace dictionary.
    """
    loader, code, fname = _get_module_details(mod_name)
    if run_name is None:
        run_name = mod_name
    pkg_name = mod_name.rpartition('.')[0]
    if alter_sys:
        # Temporarily installs the module in sys.modules / sys.argv[0].
        return _run_module_code(code, init_globals, run_name,
                                fname, loader, pkg_name)
    else:
        # Leave the sys module alone
        return _run_code(code, {}, init_globals, run_name,
                         fname, loader, pkg_name)
if __name__ == "__main__":
    # Run the module specified as the next command line argument
    # (Python 2 print-statement syntax below.)
    if len(sys.argv) < 2:
        print >> sys.stderr, "No module specified for execution"
    else:
        del sys.argv[0] # Make the requested module sys.argv[0]
        _run_module_as_main(sys.argv[0])
| deanhiller/databus | webapp/play1.3.x/python/Lib/runpy.py | Python | mpl-2.0 | 5,404 |
from autobahn.twisted.websocket import WampWebSocketClientFactory
from autobahn.twisted.websocket import WampWebSocketClientProtocol
from autobahn.twisted.websocket import WebSocketClientProtocol
from autobahn.twisted.websocket import WebSocketClientFactory
from autobahn.twisted.websocket import connectWS
from autobahn.twisted.wamp import ApplicationSession
from autobahn.twisted.wamp import ApplicationSessionFactory
from twisted.internet import reactor
#from twisted.python import log
#import sys
#log.startLogging(sys.stdout)

import threading
import Queue
# ----- twisted ----------
class MyAppComponent(ApplicationSession):
    """WAMP session that registers itself on its factory so other threads can
    reach the live session through factory._myAppSession.

    Fix: ApplicationSession was referenced here but never imported anywhere in
    the module (NameError at class-definition time); the import is added to
    the module's import block.
    """
    def onJoin(self, details):
        # Keep only the first session that joins the realm.
        if not self.factory._myAppSession:
            self.factory._myAppSession = self
    def onLeave(self, details):
        # Clear the slot only if we are the registered session.
        if self.factory._myAppSession == self:
            self.factory._myAppSession = None
#-------------------------------------------------------
class _WampClientProtocol(WampWebSocketClientProtocol):
    # NOTE(review): __init__ overrides the base class without chaining to
    # its __init__ -- verify this is intended for autobahn protocols.
    def __init__(self, factory):
        self.factory = factory
    def onOpen(self):
        #log.msg("Client connected")
        # Publish the live protocol and unblock threads waiting on the
        # connection event in BaseWBClient._check_connection().
        self.factory.protocol_instance = self
        self.factory.base_client._connected_event.set()
#--------------------------------------------------------
class _WampClientFactory(WampWebSocketClientFactory):
    # Factory that remembers the most recent protocol instance and a back
    # reference to the owning BaseWBClient.
    def __init__(self, factory, *args, **kwargs):
        WampWebSocketClientFactory.__init__(self, factory, *args, **kwargs)
        self.protocol_instance = None
        self.base_client = None
    def buildProtocol(self, addr):
        return _WampClientProtocol(self)
#------------------------------------------------------------
'''
class _WebSocketClientProtocol(WebSocketClientProtocol):
def __init__(self, factory):
self.factory = factory
def onOpen(self):
#log.debug("Client connected")
self.factory.protocol_instance = self
self.factory.base_client._connected_event.set()
class _WebSocketClientFactory(WebSocketClientFactory):
def __init__(self, *args, **kwargs):
WebSocketClientFactory.__init__(self, *args, **kwargs)
self.protocol_instance = None
self.base_client = None
def buildProtocol(self, addr):
return _WebSocketClientProtocol(self)
'''
# ------ end twisted -------
class BaseWBClient(object):
    # Thread-friendly WAMP/WebSocket client: connect() runs the twisted
    # reactor on a daemon thread; send_message() can be called from any
    # thread and hands work to the reactor via callFromThread.
    def __init__(self, websocket_settings):
        # NOTE(review): websocket_settings is currently unused -- the
        # connection URL is hard-coded in connect(); verify intended.
        #self.settings = websocket_settings
        # instance to be set by the own factory
        self.factory = None
        # this event will be triggered on onOpen()
        self._connected_event = threading.Event()
        # queue to hold not yet dispatched messages
        self._send_queue = Queue.Queue()
        self._reactor_thread = None
        self.session_factory = ApplicationSessionFactory()
    def connect(self):
        #log.msg("Connecting to 172.17.3.139:8181")
        self.factory = _WampClientFactory(self.session_factory,
                                          "ws://172.17.3.139:8181/ws",
                                          debug_wamp=True)
        self.factory.base_client = self
        c = connectWS(self.factory)
        # Run the reactor on a daemon thread so it dies with the process.
        self._reactor_thread = threading.Thread(target=reactor.run,
                                                args=(False,))
        self._reactor_thread.daemon = True
        self._reactor_thread.start()
    def send_message(self, body):
        # Queue the message and let the reactor thread do the actual send.
        if not self._check_connection():
            return
        #log.msg("Queing send")
        self._send_queue.put(body)
        reactor.callFromThread(self._dispatch)
    def _check_connection(self):
        # Blocks up to 10 s for onOpen(); gives up and closes on timeout.
        if not self._connected_event.wait(timeout=10):
            #log.err("Unable to connect to server")
            self.close()
            return False
        return True
    def _dispatch(self):
        #log.msg("Dispatching")
        # Runs on the reactor thread: drain the queue without blocking.
        while True:
            try:
                body = self._send_queue.get(block=False)
            except Queue.Empty:
                break
            self.factory.protocol_instance.sendMessage(body)
    def close(self):
        reactor.callFromThread(reactor.stop)
import time
def Ppippo(coda):
    # Demo helper: send a test message on the given client every 5 seconds,
    # forever.
    while True:
        coda.send_message('YOOOOOOOO')
        time.sleep(5)
if __name__ == '__main__':
    ws_setting = {'host':'172.17.3.139', 'port':8080}
    client = BaseWBClient(ws_setting)
    # Bug fix: the original wrote threading.Thread(client.connect()), which
    # called connect() immediately on the main thread and passed its None
    # return value as Thread's first positional argument ('group'), so the
    # thread itself had no target and did nothing. Pass the callable instead.
    t1 = threading.Thread(target=client.connect)
    #t11 = threading.Thread(target=Ppippo, args=(client,))
    #t11.start()
    t1.start()
    #client.connect()
    #client.send_message('pippo')
| arduino-org/s4t-iotronic | lib/test_autobahn_wamp_client.py | Python | apache-2.0 | 4,467 |
# Copyright (C) 2010-2018 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Visualization sample for particle dumbbells in the constant-temperature, constant-pressure ensemble.
"""
from __future__ import print_function
import numpy as np
from threading import Thread
import espressomd
from espressomd import thermostat
from espressomd.interactions import HarmonicBond
import espressomd.visualization_opengl
required_features = ["NPT", "LENNARD_JONES"]
espressomd.assert_features(required_features)
box_l = 10
system = espressomd.System(box_l=[box_l] * 3)
system.set_random_state_PRNG()
np.random.seed(seed=system.seed)
visualizer = espressomd.visualization_opengl.openGLLive(
system, background_color=[1, 1, 1], bond_type_radius=[0.2])
system.time_step = 0.0005
system.cell_system.skin = 0.1
system.box_l = [box_l, box_l, box_l]
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=2, sigma=1,
cutoff=3, shift="auto")
system.bonded_inter[0] = HarmonicBond(k=5.0, r_0=1.0)
n_part = 200
for i in range(n_part):
system.part.add(id=i, pos=np.random.random(3) * system.box_l)
for i in range(0, n_part - 1, 2):
system.part[i].add_bond((system.bonded_inter[0], system.part[i + 1].id))
print("E before minimization:", system.analysis.energy()["total"])
system.minimize_energy.init(f_max=0.0, gamma=30.0,
max_steps=10000, max_displacement=0.1)
system.minimize_energy.minimize()
print("E after minimization:", system.analysis.energy()["total"])
system.thermostat.set_npt(kT=2.0, gamma0=1.0, gammav=0.01)
system.integrator.set_isotropic_npt(ext_pressure=1.0, piston=0.01)
def main():
    # Integration loop for the background thread: advance the NPT simulation
    # one step at a time, print the running average pressure every 10000
    # steps, and refresh the OpenGL visualizer after each step.
    cnt = 0
    P = 0
    while True:
        system.integrator.run(1)
        P += system.analysis.pressure()['total']
        if cnt > 10000:
            print("Pressure:", P / cnt, "Box:", system.box_l)
            cnt = 0
            P = 0
        visualizer.update()
        cnt += 1
# Start simulation in seperate thread
t = Thread(target=main)
t.daemon = True
t.start()
visualizer.start()
| hmenke/espresso | samples/visualization_npt.py | Python | gpl-3.0 | 2,684 |
from setuptools import setup, find_packages
def read(filename):
    """Return the text of *filename*, resolved relative to this setup.py.

    Fixes two defects in the original: ``os`` was used without being
    imported (NameError on the first call), and the opened file handle was
    never closed.
    """
    import os
    path = os.path.join(os.path.dirname(__file__), filename)
    with open(path) as f:
        return f.read()
# Package metadata; long_description is loaded from README.rst at build time.
setup(
    name = 'dtmc',
    version = '0.0.1',
    description = 'Discrete Time Markov Chain analysis and simulation',
    long_description = read('README.rst'),
    author = 'Andrew Walker',
    author_email = 'walker.ab@gmail.com',
    packages = find_packages(),
    url = "http://github.com/AndrewWalker/dtmc",
    license = "MIT",
    classifiers = [
        'Development Status :: 1 - Planning',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Topic :: Scientific/Engineering :: Mathematics',
    ]
)
| AndrewWalker/dtmc | setup.py | Python | mit | 788 |
'''
read the input data, parse to int list;
create mappings of user -> reviews, item -> reviews
The companion iterator reads thru the input file sequentially, yielding the data of the form: user word id list, item word id list, rating (float)
@author: roseck
@date Feb 28, 2017
'''
from __builtin__ import dict
import gzip
from DatasetUtils.Review import Review
class DataMgr():
    # Python 2 only (has_key, print statements). Loads tab-separated review
    # data (user, item, rating, review text) and builds user->reviews and
    # item->reviews maps.
    def _int_list(self,int_str):
        '''utility fn for converting an int string to a list of int
        '''
        return [int(w) for w in int_str.split()]
    def __init__(self, filename, empty_user = set()):
        '''
        filename: inits the UBRR data from the input file
        empty_user: skip the reviews by this user (keeps the ratings)
        '''
        # NOTE(review): empty_user=set() is a mutable default argument; it is
        # only read here, but callers must not mutate the shared default.
        self.empty_user = empty_user
        ur_map = dict()
        br_map = dict()
        cnt = 0
        skipped = 0
        #read the file
        if filename.endswith('.gz'):
            f = gzip.open(filename, 'r')
        else:
            f = open(filename, 'r')
        for line in f:
            # Expected columns: user, business, rating, review text.
            vals = line.split("\t")
            if len(vals) == 0:
                continue
            u = vals[0]
            b = vals[1]
            r = float(vals[2])
            d = vals[3].strip()
            if u in self.empty_user:
                #we are skipping this review (keep the rating, blank the text)
                d = ''
                skipped += 1
            rev = Review(u, b, r, d) #review obj
            #store biz -> list of reviews
            if not br_map.has_key(b):
                br_map[b] = []
            br_map[b].append(rev)
            #store user -> list of reviews
            if not ur_map.has_key(u):
                ur_map[u] = []
            ur_map[u].append(rev)
            cnt += 1
        self.biz_map = br_map
        self.user_map = ur_map
        f.close()
        print 'Review Data Manager Initialized with ', cnt, ' reviews'
        print 'Number of skipped users = ', len(self.empty_user)
        print 'Number of skipped reviews = ', skipped
| rosecatherinek/TransNets | src/DatasetUtils/DataMgr.py | Python | gpl-3.0 | 2,239 |
import logging
import urllib
import os
import sqlaload as sl
from offenesparlament.core import archive_path
log = logging.getLogger(__name__)
def load_document(link):
    # Download one DIP21 document into the archive. Existing files are kept;
    # downloads go to a .tmp file first so the final rename is atomic.
    # Python 2 only ('except Exception, ex' syntax, urllib.urlretrieve).
    log.info("Fetching %s...", link)
    destination = archive_path('documents', link.split('/dip21/')[-1])
    try:
        if os.path.isfile(destination):
            return
        temp = destination + '.tmp'
        urllib.urlretrieve(link, temp)
        os.rename(temp, destination)
    except Exception, ex:
        # Best effort: log the failure and let the caller continue.
        log.exception(ex)
def load_documents(engine):
    # Fetch every distinct document link recorded in the 'referenz' table.
    refs = sl.get_table(engine, 'referenz')
    for ref in sl.distinct(engine, refs, 'link'):
        link = ref.get('link')
        if link is None:
            continue
        load_document(link)
| bundestag/ablaeufe-scraper | bulk.py | Python | mit | 740 |
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from . import business_requirement
from . import sale_order_line
from . import sale_order
| OCA/business-requirement | business_requirement_sale/models/__init__.py | Python | agpl-3.0 | 156 |
import unittest
from django.db import connection
from django.conf import settings
from django.core.management import call_command
from django.db.models import loading
# Only perform encrypted fields tests if keyczar is present
# Resolves http://github.com/django-extensions/django-extensions/issues/#issue/17
try:
from keyczar import keyczar
from django_extensions.tests.models import Secret
from django_extensions.db.fields.encrypted import EncryptedTextField, EncryptedCharField
keyczar_active = True
except ImportError:
keyczar_active = False
class EncryptedFieldsTestCase(unittest.TestCase):
    # Round-trip tests for EncryptedCharField/EncryptedTextField. Each test
    # silently passes (early return) when keyczar is not installed.
    def __init__(self, *args, **kwargs):
        if keyczar_active:
            self.crypt = keyczar.Crypter.Read(settings.ENCRYPTED_FIELD_KEYS_DIR)
        super(EncryptedFieldsTestCase, self).__init__(*args, **kwargs)
    def setUp(self):
        # Register the test app and sync its tables for each test.
        self.old_installed_apps = settings.INSTALLED_APPS
        settings.INSTALLED_APPS.append('django_extensions.tests')
        loading.cache.loaded = False
        call_command('syncdb', verbosity=0)
    def tearDown(self):
        settings.INSTALLED_APPS = self.old_installed_apps
    def testCharFieldCreate(self):
        if not keyczar_active:
            return
        # Read the raw DB value and verify it decrypts to the plaintext.
        test_val = "Test Secret"
        secret = Secret.objects.create(name=test_val)
        cursor = connection.cursor()
        query = "SELECT name FROM %s WHERE id = %d" % (Secret._meta.db_table, secret.id)
        cursor.execute(query)
        db_val, = cursor.fetchone()
        decrypted_val = self.crypt.Decrypt(db_val[len(EncryptedCharField.prefix):])
        self.assertEqual(test_val, decrypted_val)
    def testCharFieldRead(self):
        if not keyczar_active:
            return
        # The ORM round trip should transparently decrypt.
        test_val = "Test Secret"
        secret = Secret.objects.create(name=test_val)
        retrieved_secret = Secret.objects.get(id=secret.id)
        self.assertEqual(test_val, retrieved_secret.name)
    def testTextFieldCreate(self):
        if not keyczar_active:
            return
        test_val = "Test Secret"
        secret = Secret.objects.create(text=test_val)
        cursor = connection.cursor()
        query = "SELECT text FROM %s WHERE id = %d" % (Secret._meta.db_table, secret.id)
        cursor.execute(query)
        db_val, = cursor.fetchone()
        # NOTE(review): uses EncryptedCharField.prefix for a text field --
        # presumably both field types share the same prefix; verify.
        decrypted_val = self.crypt.Decrypt(db_val[len(EncryptedCharField.prefix):])
        self.assertEqual(test_val, decrypted_val)
    def testTextFieldRead(self):
        if not keyczar_active:
            return
        test_val = "Test Secret"
        secret = Secret.objects.create(text=test_val)
        retrieved_secret = Secret.objects.get(id=secret.id)
        self.assertEqual(test_val, retrieved_secret.text)
| mzdaniel/oh-mainline | vendor/packages/django-extensions/django_extensions/tests/encrypted_fields.py | Python | agpl-3.0 | 2,728 |
# Copyright 2017 Balazs Nemeth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import networkx as nx
import UnifyExceptionTypes as uet
class ChainStub(object):
  """
  Provides a common base class for chain types.

  All instances share one integer identifier space, tracked by the class
  attribute `current_maximal_chain_id`.
  """
  # shows the highest integer ID used by any in/output (sub)chain in the system.
  current_maximal_chain_id = 0

  def __init__(self, chain, link_ids, id=None):
    """
    Constructs a chain stub.

    :param chain: Node ID list of the request graph showing chain path
    :param link_ids: Link ID list of the request graph showing chain path
    :param id: integer identifier of the subchain; auto-assigned when None
    """
    if id is None:
      # Allocate the next free identifier.
      self.id = ChainStub.current_maximal_chain_id
      ChainStub.current_maximal_chain_id += 1
    elif id >= ChainStub.current_maximal_chain_id:
      # Keep the global counter ahead of every explicitly supplied ID so
      # auto-assigned IDs never collide with explicit ones.
      ChainStub.current_maximal_chain_id = id + 1
      self.id = id
    else:
      self.id = id
    self.chain = chain
    self.link_ids = link_ids

  @classmethod
  def getNextChainID(cls):
    """
    Maintains the maximal chain ID, can be called if a unique chain ID is needed

    :return: unique chain ID
    """
    # FIX: the first parameter of a classmethod is the class itself; it was
    # previously (confusingly) named `self`. The counter is still referenced
    # via ChainStub explicitly so subclasses never shadow it.
    ChainStub.current_maximal_chain_id += 1
    return ChainStub.current_maximal_chain_id

  def __getitem__(self, item):
    """
    Return the attribute of the element given by ``item``.

    :param item: attribute name
    :type item: str or int
    :return: attribute
    :rtype: object
    :raises KeyError: if no such attribute exists
    """
    if hasattr(self, item):
      return getattr(self, item)
    else:
      raise KeyError(
        "%s object has no key: %s" % (self.__class__.__name__, item))

  def __repr__(self):
    # NOTE: intentionally no opening '<' -- subclasses prepend "<TypeName on ".
    return "path: %s, id: %s>"%(self.chain, self.id)
class Subchain(ChainStub):
  """
  Maintains a structure for handling chain pieces, defining an order of the
  greedy backtracking orchestration procedure.
  """

  def __init__(self, chain, link_ids, subgraph, id=None):
    """
    Constructs a subchain.

    :param chain: node ID list of the request graph showing the chain path
    :param link_ids: link ID list of the request graph showing the chain path
    :param subgraph: reference to a subgraph of the original resource
    :type subgraph: NFFG object
    :param id: optional integer identifier
    """
    super(Subchain, self).__init__(chain, link_ids, id)
    # Precompute (src, dst, link_id) triples for convenient edge iteration.
    self.subchain = zip(chain[:-1], chain[1:], link_ids)
    self.subgraph = subgraph

  def __repr__(self):
    return "<Subchain on %s" % super(Subchain, self).__repr__()
class EndToEndChain(ChainStub):
  """
  Maintains a structure for end-to-end chain requirement handling
  """

  def __init__(self, chain, link_ids, id, bandwidth, delay):
    """
    Constructs an end-to-end subchain.

    :param chain: node ID list of the request graph
    :param link_ids: link ID list of the request graph
    :param id: integer identifier
    :param bandwidth: required bandwidth of the chain
    :param delay: end-to-end latency requirement
    """
    super(EndToEndChain, self).__init__(chain, link_ids, id)
    self.bandwidth = bandwidth
    # A literal 0 delay requirement is used to infer collocation on an Infra
    # node with 0-delay forwarding, but back and forward stepping may
    # introduce some float error -- clamp near-zero values to a small
    # positive epsilon.
    if delay > 1e-8:
      self.delay = delay
    else:
      self.delay = 1e-3
    # latency budget consumed/restored during the greedy backtrack search
    self.avail_latency = delay

  def __repr__(self):
    return "<E2E chain with delay %s on %s" % (
      self.delay, super(EndToEndChain, self).__repr__())
class ChainSubchainDependencyAdministrator(object):
  """
  Administrates the connections between subchains and end-to-end chains.
  Subchains are defined as the longest connected pieces in the service graph
  which have the set of end-to-end chains going through them.
  This bidirectional connection is maintained as latency budget of the
  end-to-end chains are greedily used by the subchains.
  """

  def __init__(self, e2e_chains, overall_highest_delay, log):
    """
    Constructs the administrator instance. Saves E2E chain ID-s.

    :param e2e_chains: iterable of EndToEndChain objects
    :param overall_highest_delay: fallback latency budget for E2E chains
      that carry no explicit delay requirement
    :param log: parent logger; a child logger is created for this instance
    """
    self.log = log.getChild(self.__class__.__name__)
    self.log.setLevel(log.getEffectiveLevel())
    # Bipartite graph: E2E chain IDs on one side, subchain IDs on the other.
    self.chain_subchain = nx.Graph()
    self.all_e2e_chain_ids = []
    # subchains will be added later with their e2e chain dependency
    self.all_subchain_objects = []
    for e2e_chain in e2e_chains:
      if type(e2e_chain) is EndToEndChain:
        if e2e_chain['delay'] is None:
          # No explicit requirement: give the chain the loosest budget.
          e2e_chain.delay = overall_highest_delay
          e2e_chain.avail_latency = overall_highest_delay
        self.all_e2e_chain_ids.append(e2e_chain.id)
      else:
        raise uet.InternalAlgorithmException("Chain - Subchain dependency "
              "administrator shouldn't receive non E2E chains: %s"%e2e_chain)
    # store the stub object in the NX graphs data
    self.chain_subchain.add_nodes_from((e2echain['id'], {'stub': e2echain})
                                       for e2echain in e2e_chains)

  def addDependency(self, subchain, e2e_chain_ids):
    """
    Adds a dependency of subchain to all the E2E chains stored in e2e_chain_ids.

    :param subchain: Subchain object to be added
    :param e2e_chain_ids: List of E2E chain IDs
    :return:
    """
    self.chain_subchain.add_node(subchain.id, {'stub': subchain})
    if len(e2e_chain_ids) == 0:
      self.log.debug("Adding best effort chain %s to chain - subchain dependency"
                     " structure without E2E chain dependency!"%subchain)
    for cid in e2e_chain_ids:
      # the common chain ID for all best-effort subchains
      # (self.max_input_chainid) shouldn't be in chainids in any case!
      if cid not in self.chain_subchain:
        # BUGFIX: the message previously read "% is" -- '%' + space-flag + 'i'
        # is an *integer* conversion, which mangled the message for int IDs
        # and raised TypeError for string IDs. '%s' is intended.
        raise uet.InternalAlgorithmException(
          "Invalid E2E chain identifier %s is referred in E2E chain - "
          "subchain dependency!"%cid)
      else:
        self.chain_subchain.add_edge(cid, subchain.id)
    self.all_subchain_objects.append(subchain)
    self.log.debug("%s dependency to E2E chain IDs %s are added"%
                   (subchain, e2e_chain_ids))

  def getInvolvedE2EChains(self, subchain_id):
    """
    Returns all the E2E chains which uses the subchain with the given ID

    :param subchain_id: subchain identifier
    :return: list of EndToEndChain objects
    """
    involved_e2e_chains = []
    for e2e_chain in self.chain_subchain.neighbors(subchain_id):
      # In a valid bipartite structure every neighbor of a subchain node
      # must be an E2E chain node.
      if type(self.chain_subchain.node[e2e_chain]['stub']) is not EndToEndChain:
        raise uet.InternalAlgorithmException(
          "Subchain-subchain connection is not allowed in chain-subchain "
          "bipartie graph!")
      involved_e2e_chains.append(self.chain_subchain.node[e2e_chain]['stub'])
    return involved_e2e_chains

  def updateRemainingLatencyInfo(self, subchain_id, used_lat):
    """
    Subtracts the used latency from the available latency of all E2E chains
    which are involved in this subchain.

    :param subchain_id: subchain identifier
    :param used_lat: latency consumed by the mapping of this subchain
    :return:
    """
    for e2e_chain_id in self.chain_subchain.neighbors_iter(subchain_id):
      # feasibility already checked by the core algorithm
      e2e_chain_object = self.chain_subchain.node[e2e_chain_id]['stub']
      e2e_chain_object.avail_latency -= used_lat
      new_avail_lat = e2e_chain_object.avail_latency
      # Allow a 0.1% tolerance in both directions for float rounding.
      if new_avail_lat > 1.001 * e2e_chain_object.delay or \
            new_avail_lat <= -0.001 * e2e_chain_object.delay:
        raise uet.InternalAlgorithmException("MappingManager error: End-to-End"
                                             " available latency cannot "
                                             "exceed maximal permitted or got "
                                             "below zero!")

  def getTerminatorAndRequestPathOfStrictestChain(self, subchain_id):
    """
    Explores which is the strictest (lowest remaining latency) E2E chain
    going through this subchain. Returns the last node ID of that chain in
    the request graph and its link ID list. If no E2E chains are involved
    then the best-effort subchain's own ending and path are returned.

    :type subchain_id: integer
    :param subchain_id: the subchain id
    :return: (terminating node ID, link ID list)
    """
    strictest_cid = subchain_id
    # it can be a best-effort subchain, in this case it doesn't have any
    # neighbours in the dependency graph
    if len(self.chain_subchain[subchain_id].keys()) > 0:
      strictest_cid = min(self.chain_subchain[subchain_id].keys(),
                          key=lambda sc, graph=self.chain_subchain: \
                          graph.node[sc]['stub'].avail_latency)
    # return the last element of that subchain or
    # the end of an E2E chain if there was one.
    return self.chain_subchain.node[strictest_cid]['stub'].chain[-1], \
           self.chain_subchain.node[strictest_cid]['stub'].link_ids

  def getSubchains(self):
    """
    Returns all subchains which have any dependency to E2E chains.

    :return: list of Subchain objects
    """
    return self.all_subchain_objects

  def getNextChainID(self):
    """
    Accesses the class variable of the maximal chain ID

    :return: unique chain ID
    """
    return ChainStub.getNextChainID()

  def getRemainingE2ELatency(self, chain_id):
    """
    Returns the the currently remaining latency for the specified E2E chain

    :param chain_id: E2E chain identifier
    :return: remaining latency budget (None for non-E2E nodes)
    """
    if type(self.chain_subchain.node[chain_id]['stub']) is EndToEndChain:
      return self.chain_subchain.node[chain_id]['stub'].avail_latency
from django.core.management.base import BaseCommand, CommandError
from first_app.models import Cell
class Command(BaseCommand):
    """Generate a square world of wr_sz x wr_sz cells."""

    def add_arguments(self, parser):
        # Side length of the (square) world to generate.
        parser.add_argument('wr_sz', type=int)

    def handle(self, *args, **options):
        side = options['wr_sz']
        for x in range(side):
            for y in range(side):
                new_cell = Cell()
                new_cell.x = x
                new_cell.y = y
                new_cell.save()
        print('World of %s cells succesfully generated' % side ** 2)
| CONSOLNY/rglk | first_app/management/commands/genwr.py | Python | mit | 533 |
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Entry point for both build and try bots.
This script is invoked from XXX, usually without arguments
to package an SDK. It automatically determines whether
this SDK is for mac, win, linux.
The script inspects the following environment variables:
BUILDBOT_BUILDERNAME to determine whether the script is run locally
and whether it should upload an SDK to file storage (GSTORE)
"""
# pylint: disable=W0621
# std python includes
import argparse
import datetime
import glob
import os
import re
import sys
if sys.version_info < (2, 7, 0):
sys.stderr.write("python 2.7 or later is required run this script\n")
sys.exit(1)
# local includes
import buildbot_common
import build_projects
import build_updater
import build_version
import generate_notice
import manifest_util
import parse_dsc
import verify_filelist
from build_paths import SCRIPT_DIR, SDK_SRC_DIR, SRC_DIR, NACL_DIR, OUT_DIR
from build_paths import GSTORE, GONACL_APPENGINE_SRC_DIR
# Add SDK make tools scripts to the python path.
sys.path.append(os.path.join(SDK_SRC_DIR, 'tools'))
sys.path.append(os.path.join(NACL_DIR, 'build'))
import getos
import oshelpers
# Commonly used paths inside the NaCl checkout.
BUILD_DIR = os.path.join(NACL_DIR, 'build')
NACL_TOOLCHAIN_DIR = os.path.join(NACL_DIR, 'toolchain')
NACL_TOOLCHAINTARS_DIR = os.path.join(NACL_TOOLCHAIN_DIR, '.tars')
# Helper scripts invoked as subprocesses.
CYGTAR = os.path.join(BUILD_DIR, 'cygtar.py')
PKGVER = os.path.join(BUILD_DIR, 'package_version', 'package_version.py')
# Prefix of the per-architecture gn output directories under OUT_DIR.
GNBUILD_DIR = 'gnbuild'
# Parsed command-line options; populated by main() and read by the build
# helpers below (e.g. GnNinjaBuild, GnNinjaInstall).
options = None
# Map of: ToolchainName: (PackageName, SDKDir, arch).
TOOLCHAIN_PACKAGE_MAP = {
    'arm_glibc': ('nacl_arm_glibc', '%(platform)s_arm_glibc', 'arm'),
    'x86_glibc': ('nacl_x86_glibc', '%(platform)s_x86_glibc', 'x86'),
    'pnacl': ('pnacl_newlib', '%(platform)s_pnacl', 'pnacl')
    }
def GetToolchainDirName(tcname):
  """Return the directory name for a given toolchain"""
  dir_template = TOOLCHAIN_PACKAGE_MAP[tcname][1]
  return dir_template % {'platform': getos.GetPlatform()}
def GetToolchainDir(pepperdir, tcname):
  """Return the full path to a given toolchain within a given sdk root"""
  toolchain_subdir = GetToolchainDirName(tcname)
  return os.path.join(pepperdir, 'toolchain', toolchain_subdir)
def GetToolchainLibc(tcname):
  """Return the name of the C library used by the given toolchain.

  PNaCl always builds against newlib; otherwise the libc name is embedded
  in the toolchain name itself.  Returns None for unrecognized names.
  """
  if tcname == 'pnacl':
    return 'newlib'
  return next((libc for libc in ('glibc', 'newlib', 'host') if libc in tcname),
              None)
def GetToolchainNaClInclude(pepperdir, tcname, arch=None):
  """Return the NaCl include directory for a toolchain/arch combination.

  When `arch` is omitted it defaults to the toolchain's native arch from
  TOOLCHAIN_PACKAGE_MAP.  Exits the build for unknown architectures.
  """
  tcpath = GetToolchainDir(pepperdir, tcname)
  if arch is None:
    arch = TOOLCHAIN_PACKAGE_MAP[tcname][2]
  include_subdirs = {'x86': 'x86_64-nacl',
                     'pnacl': 'le32-nacl',
                     'arm': 'arm-nacl'}
  if arch in include_subdirs:
    return os.path.join(tcpath, include_subdirs[arch], 'include')
  buildbot_common.ErrorExit('Unknown architecture: %s' % arch)
def GetNinjaOutDir(arch):
  """Return the per-architecture gn/ninja output directory under OUT_DIR."""
  return os.path.join(OUT_DIR, '%s-%s' % (GNBUILD_DIR, arch))
def GetGnBuiltLib(tc, arch):
  """Return the directory holding gn-built libraries for (toolchain, arch).

  NOTE: all untrusted code is built inside the x64 output directory.
  """
  if 'glibc' in tc:
    subdir = 'glibc_%s' % arch
  elif arch == 'pnacl':
    subdir = 'newlib_pnacl'
  else:
    subdir = 'clang_newlib_%s' % arch
  return os.path.join(GetNinjaOutDir('x64'), subdir)
def GetToolchainNaClLib(tcname, tcpath, arch):
  """Return the NaCl library directory for the given toolchain and arch.

  Returns None for combinations that have no library directory.
  """
  arch_subpaths = {
      'x86': ('x86_64-nacl', 'lib32'),
      'x64': ('x86_64-nacl', 'lib'),
      'arm': ('arm-nacl', 'lib'),
  }
  if arch in arch_subpaths:
    subdir, libdir = arch_subpaths[arch]
    return os.path.join(tcpath, subdir, libdir)
  if tcname == 'pnacl':
    return os.path.join(tcpath, 'le32-nacl', 'lib')
def GetOutputToolchainLib(pepperdir, tcname, arch):
  """Return the destination library directory inside the output SDK."""
  toolchain_root = os.path.join(pepperdir, 'toolchain',
                                GetToolchainDirName(tcname))
  return GetToolchainNaClLib(tcname, toolchain_root, arch)
def GetPNaClTranslatorLib(tcpath, arch):
  """Return the PNaCl translator library directory for the given arch.

  Exits the build for architectures other than arm/x86/x64.
  """
  translator_arch = {'arm': 'arm', 'x86': 'x86-32', 'x64': 'x86-64'}
  if arch not in translator_arch:
    buildbot_common.ErrorExit('Unknown architecture %s.' % arch)
  return os.path.join(tcpath, 'translator', translator_arch[arch], 'lib')
def BuildStepDownloadToolchains(toolchains):
  """Sync and extract the NaCl core SDK toolchain packages.

  NOTE(review): the `toolchains` argument is currently unused -- the
  package_version.py invocation always syncs the full nacl_core_sdk set;
  confirm whether per-toolchain filtering was intended.
  """
  buildbot_common.BuildStep('Running package_version.py')
  args = [sys.executable, PKGVER, '--mode', 'nacl_core_sdk']
  args.extend(['sync', '--extract'])
  buildbot_common.Run(args, cwd=NACL_DIR)
def BuildStepCleanPepperDirs(pepperdir, pepperdir_old):
  """Remove the current and previous pepper dirs (plus the arm_trusted
  staging dir) and recreate an empty pepper dir."""
  buildbot_common.BuildStep('Clean Pepper Dirs')
  for stale_dir in (pepperdir,
                    pepperdir_old,
                    os.path.join(OUT_DIR, 'arm_trusted')):
    if os.path.exists(stale_dir):
      buildbot_common.RemoveDir(stale_dir)
  buildbot_common.MakeDir(pepperdir)
def BuildStepMakePepperDirs(pepperdir, subdirs):
  """Create each of the given subdirectories under the pepper dir."""
  for name in subdirs:
    buildbot_common.MakeDir(os.path.join(pepperdir, name))
# Documentation/licensing text files copied verbatim from the SDK source
# tree into the root of the packaged SDK (see BuildStepCopyTextFiles).
TEXT_FILES = [
  'AUTHORS',
  'COPYING',
  'LICENSE',
  'README.Makefiles',
  'getting_started/README',
]
def BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision,
                           nacl_revision):
  """Copy the top-level text files into the SDK and expand README templates.

  The README contains ${VERSION}-style placeholders which are substituted
  with the concrete version/revision information of this build.
  """
  buildbot_common.BuildStep('Add Text Files')
  InstallFiles(SDK_SRC_DIR, pepperdir, TEXT_FILES)

  # Replace a few placeholders in README.
  # FIX: use context managers so the file handles are closed deterministically
  # (previously both files were opened without ever being closed explicitly).
  with open(os.path.join(SDK_SRC_DIR, 'README')) as readme_in:
    readme_text = readme_in.read()
  readme_text = readme_text.replace('${VERSION}', pepper_ver)
  readme_text = readme_text.replace('${CHROME_REVISION}', chrome_revision)
  readme_text = readme_text.replace('${CHROME_COMMIT_POSITION}',
                                    build_version.ChromeCommitPosition())
  readme_text = readme_text.replace('${NACL_REVISION}', nacl_revision)

  # Year/Month/Day Hour:Minute:Second
  time_format = '%Y/%m/%d %H:%M:%S'
  readme_text = readme_text.replace('${DATE}',
      datetime.datetime.now().strftime(time_format))

  with open(os.path.join(pepperdir, 'README'), 'w') as readme_out:
    readme_out.write(readme_text)
def BuildStepUntarToolchains(pepperdir, toolchains):
  """Extract the prebuilt toolchain packages into pepperdir/toolchain.

  Packages are first extracted into a temporary directory, then moved to
  their final per-toolchain destinations.
  """
  buildbot_common.BuildStep('Untar Toolchains')
  platform = getos.GetPlatform()
  build_platform = '%s_x86' % platform
  tmpdir = os.path.join(OUT_DIR, 'tc_temp')
  # Start from a clean scratch directory.
  buildbot_common.RemoveDir(tmpdir)
  buildbot_common.MakeDir(tmpdir)

  # Create a list of extract packages tuples, the first part should be
  # "$PACKAGE_TARGET/$PACKAGE". The second part should be the destination
  # directory relative to pepperdir/toolchain.
  extract_packages = []
  for toolchain in toolchains:
    # Toolchains without a package mapping (e.g. 'host') are skipped.
    toolchain_map = TOOLCHAIN_PACKAGE_MAP.get(toolchain, None)
    if toolchain_map:
      package_name, tcdir, _ = toolchain_map
      package_tuple = (os.path.join(build_platform, package_name),
                       tcdir % {'platform': platform})
      extract_packages.append(package_tuple)

  # On linux we also want to extract the arm_trusted package which contains
  # the ARM libraries we ship in support of sel_ldr_arm.
  if platform == 'linux':
    extract_packages.append((os.path.join(build_platform, 'arm_trusted'),
                             'arm_trusted'))

  if extract_packages:
    # Extract all of the packages into the temp directory.
    package_names = [package_tuple[0] for package_tuple in extract_packages]
    buildbot_common.Run([sys.executable, PKGVER,
                         '--packages', ','.join(package_names),
                         '--tar-dir', NACL_TOOLCHAINTARS_DIR,
                         '--dest-dir', tmpdir,
                         'extract'])

    # Move all the packages we extracted to the correct destination.
    for package_name, dest_dir in extract_packages:
      full_src_dir = os.path.join(tmpdir, package_name)
      full_dst_dir = os.path.join(pepperdir, 'toolchain', dest_dir)
      buildbot_common.Move(full_src_dir, full_dst_dir)

  # Cleanup the temporary directory we are no longer using.
  buildbot_common.RemoveDir(tmpdir)
# List of toolchain headers to install.
# Source is relative to top of Chromium tree, destination is relative
# to the toolchain header directory.
# Keyed by libc name as returned by GetToolchainLibc(); a destination
# ending in '/' keeps the source basename (see InstallFiles).
NACL_HEADER_MAP = {
  'newlib': [
      ('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
      ('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
      ('native_client/src/untrusted/irt/irt.h', ''),
      ('native_client/src/untrusted/irt/irt_dev.h', ''),
      ('native_client/src/untrusted/irt/irt_extension.h', ''),
      ('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
      ('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
      ('native_client/src/untrusted/pthread/pthread.h', ''),
      ('native_client/src/untrusted/pthread/semaphore.h', ''),
      ('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
      ('ppapi/nacl_irt/public/irt_ppapi.h', ''),
  ],
  'glibc': [
      ('native_client/src/include/nacl/nacl_exception.h', 'nacl/'),
      ('native_client/src/include/nacl/nacl_minidump.h', 'nacl/'),
      ('native_client/src/untrusted/irt/irt.h', ''),
      ('native_client/src/untrusted/irt/irt_dev.h', ''),
      ('native_client/src/untrusted/irt/irt_extension.h', ''),
      ('native_client/src/untrusted/nacl/nacl_dyncode.h', 'nacl/'),
      ('native_client/src/untrusted/nacl/nacl_startup.h', 'nacl/'),
      ('native_client/src/untrusted/valgrind/dynamic_annotations.h', 'nacl/'),
      ('ppapi/nacl_irt/public/irt_ppapi.h', ''),
  ],
}
def InstallFiles(src_root, dest_root, file_list):
  """Copy a set of files from src_root to dest_root according
  to the given mapping. This allows files to be copied from
  to a location in the destination tree that is different to the
  location in the source tree.

  If the destination mapping ends with a '/' then the destination
  basename is inherited from the the source file.

  Wildcards can be used in the source list but it is not recommended
  as this can end up adding things to the SDK unintentionally.
  """
  for file_spec in file_list:
    # Entries are either a plain name (same relative path on both sides)
    # or a (source, destination) pair.
    if isinstance(file_spec, str):
      src_file = dest_file = file_spec
    else:
      src_file, dest_file = file_spec
    src_file = os.path.join(src_root, src_file)

    # Expand sources files using glob; a non-matching pattern is passed
    # through untouched (the copy step will then report the error).
    sources = glob.glob(src_file) or [src_file]
    if len(sources) > 1 and not dest_file.endswith('/'):
      buildbot_common.ErrorExit("Target file must end in '/' when "
                                "using globbing to install multiple files")

    for source in sources:
      if dest_file.endswith('/'):
        dest = os.path.join(dest_file, os.path.basename(source))
      else:
        dest = dest_file
      dest = os.path.join(dest_root, dest)
      dest_parent = os.path.dirname(dest)
      if not os.path.isdir(dest_parent):
        buildbot_common.MakeDir(dest_parent)
      buildbot_common.CopyFile(source, dest)
def InstallNaClHeaders(tc_dst_inc, tcname):
  """Copies NaCl headers to expected locations in the toolchain."""
  libc = GetToolchainLibc(tcname)
  InstallFiles(SRC_DIR, tc_dst_inc, NACL_HEADER_MAP[libc])
def GnNinjaInstall(pepperdir, toolchains):
  """Copy gn/ninja build outputs (tools, IRTs, loaders, libs) into the SDK.

  Reads the global `options` for platform-specific tweaks
  (e.g. --no-arm-trusted).
  """
  tools_files_x86 = [
    ['sel_ldr', 'sel_ldr_x86_32'],
  ]
  tools_files_x64 = [
    ['sel_ldr', 'sel_ldr_x86_64'],
    ['ncval_new', 'ncval'],
    ['clang_newlib_arm/elf_loader.nexe', 'elf_loader_arm.nexe'],
    ['irt_x86/irt_core.nexe', 'irt_core_x86_32.nexe'],
    ['irt_x64/irt_core.nexe', 'irt_core_x86_64.nexe'],
  ]
  tools_files_arm = []

  platform = getos.GetPlatform()

  # TODO(binji): dump_syms doesn't currently build on Windows. See
  # http://crbug.com/245456
  if platform != 'win':
    tools_files_x64 += [
      ['dump_syms', 'dump_syms'],
      ['minidump_dump', 'minidump_dump'],
      ['minidump_stackwalk', 'minidump_stackwalk']
    ]

  if platform == 'linux':
    tools_files_x86 += [['nonsfi_loader', 'nonsfi_loader_x86_32'],
                        ['nacl_helper_bootstrap',
                         'nacl_helper_bootstrap_x86_32']]
    tools_files_x64 += [['nacl_helper_bootstrap',
                         'nacl_helper_bootstrap_x86_64']]

    # Add ARM trusted binaries (linux only)
    if not options.no_arm_trusted:
      tools_files_x64 += [
        ['irt_arm/irt_core.nexe', 'irt_core_arm.nexe'],
      ]
      tools_files_arm += [
        ['nacl_helper_bootstrap', 'nacl_helper_bootstrap_arm'],
        ['nonsfi_loader', 'nonsfi_loader_arm'],
        ['sel_ldr', 'sel_ldr_arm']
      ]

  tools_dir = os.path.join(pepperdir, 'tools')
  buildbot_common.MakeDir(tools_dir)

  # Add .exe extensions to all windows tools
  for pair in tools_files_x86 + tools_files_x64:
    if platform == 'win' and not os.path.splitext(pair[0])[1]:
      pair[0] += '.exe'
      pair[1] += '.exe'

  InstallFiles(GetNinjaOutDir('x64'), tools_dir, tools_files_x64)
  InstallFiles(GetNinjaOutDir('x86'), tools_dir, tools_files_x86)
  if platform == 'linux':
    InstallFiles(GetNinjaOutDir('arm'), tools_dir, tools_files_arm)

  stub_dir = os.path.join(SRC_DIR, 'ppapi/native_client/src/untrusted/irt_stub')

  for tc in toolchains:
    if tc in ('host', 'clang-newlib'):
      continue
    elif tc == 'pnacl':
      xarches = ('pnacl', 'x86', 'x64', 'arm')
    elif tc == 'x86_glibc':
      # BUGFIX: was `tc in ('x86_glibc')` -- parentheses around a single
      # string do not make a tuple, so this tested *substring* membership
      # (e.g. 'x86' or 'glibc' would also have matched). Use equality.
      xarches = ('x86', 'x64')
    elif tc == 'arm_glibc':
      xarches = ('arm',)
    else:
      raise AssertionError('unexpected toolchain value: %s' % tc)

    for xarch in xarches:
      src_dir = GetGnBuiltLib(tc, xarch)
      src_dir = os.path.join(src_dir, 'obj', 'ppapi', 'native_client', 'src',
                             'untrusted', 'irt_stub')
      dst_dir = GetOutputToolchainLib(pepperdir, tc, xarch)
      InstallFiles(src_dir, dst_dir, ['libppapi_stub.a'])
      InstallFiles(stub_dir, dst_dir, ['libppapi.a'])
      if 'glibc' in tc:
        InstallFiles(stub_dir, dst_dir, ['libppapi.so'])
def GnNinjaBuildAll(rel_out_dir):
  """Run the gn/ninja builds for every architecture this host supports."""
  def rel_path(suffix):
    # gn is invoked from SRC_DIR, so express the out dir relative to it.
    return os.path.join(os.path.relpath(OUT_DIR, SRC_DIR), rel_out_dir + suffix)

  GnNinjaBuild('x64', rel_path('-x64'), ['nacl_sdk_untrusted=true'])
  GnNinjaBuild('x86', rel_path('-x86'))
  if getos.GetPlatform() == 'linux':
    GnNinjaBuild('arm', rel_path('-arm'))
def GetGNExecutable(platform):
  """Return the path of the checked-in gn binary for the given platform."""
  # TODO(sbc): Remove this code, which is duplicated from mb.py and simply
  # rely on the depot_tools gn wrapper which should be in the PATH.
  # http://crbug.com/588794
  platform_binaries = {'linux': ('linux64', 'gn'), 'mac': ('mac', 'gn')}
  subdir, exe = platform_binaries.get(platform, ('win', 'gn.exe'))
  return os.path.join(SRC_DIR, 'buildtools', subdir, exe)
def GnNinjaBuild(arch, out_dir, extra_gn_args=None):
  """Run `gn gen` followed by `ninja nacl_core_sdk` for one target_cpu.

  Reads the global `options` (mac_sdk, no_arm_trusted) set up by main().

  :param arch: gn target_cpu value, or None to use gn's default.
  :param out_dir: build output directory, relative to SRC_DIR.
  :param extra_gn_args: optional extra `key=value` strings for gn.
  """
  gn_args = ['is_debug=false']
  if extra_gn_args is not None:
    gn_args += extra_gn_args
  platform = getos.GetPlatform()
  if platform == 'mac':
    if options.mac_sdk:
      gn_args.append('mac_sdk_min="%s"' % options.mac_sdk)
    if arch == 'arm':
      # Without this the target_cpu='arm' build complains about missing code
      # signing identity
      gn_args.append('ios_enable_code_signing=false')

  gn_exe = GetGNExecutable(platform)

  if arch is not None:
    gn_args.append('target_cpu="%s"' % arch)
    if arch == 'arm':
      if options.no_arm_trusted:
        gn_args.append('enable_cross_trusted=false')

  # gn expects a single space-separated --args string.
  gn_args = ' '.join(gn_args)
  buildbot_common.Run([gn_exe, 'gen', '--args=%s' % gn_args, out_dir],
                      cwd=SRC_DIR)
  buildbot_common.Run(['ninja', '-C', out_dir, 'nacl_core_sdk'], cwd=SRC_DIR)
def BuildStepBuildToolchains(pepperdir, toolchains, build, clean):
  """Build (or reuse) the gn outputs and install them into the SDK tree.

  :param build: when False (and clean is False) skip the gn/ninja build and
    only perform the install/copy steps.
  :param clean: wipe all gn build directories first; implies a full rebuild.
  """
  buildbot_common.BuildStep('SDK Items')

  if clean:
    for dirname in glob.glob(os.path.join(OUT_DIR, GNBUILD_DIR + '*')):
      buildbot_common.RemoveDir(dirname)
    build = True

  if build:
    GnNinjaBuildAll(GNBUILD_DIR)

  GnNinjaInstall(pepperdir, toolchains)

  for toolchain in toolchains:
    # 'host' and 'clang-newlib' have no NaCl include tree of their own.
    if toolchain not in ('host', 'clang-newlib'):
      InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, toolchain),
                         toolchain)

  if 'pnacl' in toolchains:
    # NOTE: gn build all untrusted code in the x86 build
    build_dir = GetNinjaOutDir('x64')
    nacl_arches = ['x86', 'x64', 'arm']
    for nacl_arch in nacl_arches:
      shim_file = os.path.join(build_dir, 'clang_newlib_' + nacl_arch, 'obj',
                               'ppapi', 'native_client', 'src', 'untrusted',
                               'pnacl_irt_shim', 'libpnacl_irt_shim.a')

      pnacldir = GetToolchainDir(pepperdir, 'pnacl')
      pnacl_translator_lib_dir = GetPNaClTranslatorLib(pnacldir, nacl_arch)
      if not os.path.isdir(pnacl_translator_lib_dir):
        buildbot_common.ErrorExit('Expected %s directory to exist.' %
                                  pnacl_translator_lib_dir)

      buildbot_common.CopyFile(shim_file, pnacl_translator_lib_dir)

    InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, 'pnacl', 'x86'),
                       'pnacl')
    InstallNaClHeaders(GetToolchainNaClInclude(pepperdir, 'pnacl', 'arm'),
                       'pnacl')
def MakeDirectoryOrClobber(pepperdir, dirname, clobber):
  """Ensure pepperdir/dirname exists, optionally wiping it first.

  Returns the full path of the directory.
  """
  target = os.path.join(pepperdir, dirname)
  if clobber:
    buildbot_common.RemoveDir(target)
  buildbot_common.MakeDir(target)
  return target
def BuildStepUpdateHelpers(pepperdir, clobber):
  """Regenerate the shared project helper files inside the SDK tree."""
  buildbot_common.BuildStep('Update project helpers')
  build_projects.UpdateHelpers(pepperdir, clobber=clobber)
def BuildStepUpdateUserProjects(pepperdir, toolchains,
                                build_experimental, clobber):
  """Regenerate the examples, demos, tutorials and libraries in the SDK."""
  buildbot_common.BuildStep('Update examples and libraries')

  filters = {}
  if not build_experimental:
    filters['EXPERIMENTAL'] = False

  # Translate build toolchain names into the names used by the .dsc files:
  # 'x86_glibc'/'arm_glibc' both become 'glibc' (deduplicated) and 'host'
  # becomes the concrete platform name.
  dsc_toolchains = []
  for tc in toolchains:
    if tc.startswith('x86_') or tc.startswith('arm_'):
      stripped = tc[4:]
      if stripped not in dsc_toolchains:
        dsc_toolchains.append(stripped)
    elif tc == 'host':
      dsc_toolchains.append(getos.GetPlatform())
    else:
      dsc_toolchains.append(tc)

  filters['TOOLS'] = dsc_toolchains

  # Update examples and libraries
  filters['DEST'] = ['getting_started',
                     'examples/api',
                     'examples/demo',
                     'examples/tutorial',
                     'src']

  tree = parse_dsc.LoadProjectTree(SDK_SRC_DIR, include=filters)
  build_projects.UpdateProjects(pepperdir, tree, clobber=clobber,
                                toolchains=dsc_toolchains)
def BuildStepMakeAll(pepperdir, directory, step_name,
                     deps=True, clean=False, config='Debug', args=None):
  """Build every project under `directory` in the SDK as one build step."""
  buildbot_common.BuildStep(step_name)
  build_projects.BuildProjectsBranch(pepperdir, directory, clean,
                                     deps, config, args)
def BuildStepBuildLibraries(pepperdir, args=None):
  """Build the SDK libraries in both Debug and Release configurations."""
  for config in ('Debug', 'Release'):
    BuildStepMakeAll(pepperdir, 'src', 'Build Libraries %s' % config,
                     clean=True, config=config, args=args)

  # Cleanup .pyc file generated while building libraries. Without
  # this we would end up shipping the pyc in the SDK tarball.
  buildbot_common.RemoveFile(os.path.join(pepperdir, 'tools', '*.pyc'))
def GenerateNotice(fileroot, output_filename='NOTICE', extra_files=None):
# Look for LICENSE files
license_filenames_re = re.compile('LICENSE|COPYING|COPYRIGHT')
license_files = []
for root, _, files in os.walk(fileroot):
for filename in files:
if license_filenames_re.match(filename):
path = os.path.join(root, filename)
license_files.append(path)
if extra_files:
license_files += [os.path.join(fileroot, f) for f in extra_files]
print '\n'.join(license_files)
if not os.path.isabs(output_filename):
output_filename = os.path.join(fileroot, output_filename)
generate_notice.Generate(output_filename, fileroot, license_files)
def BuildStepVerifyFilelist(pepperdir):
  """Check the packaged SDK tree against the canonical sdk_files.list.

  Exits the build with a diagnostic message if the list fails to parse or
  if the on-disk tree diverges from it.
  """
  buildbot_common.BuildStep('Verify SDK Files')
  file_list_path = os.path.join(SCRIPT_DIR, 'sdk_files.list')
  try:
    print 'SDK directory: %s' % pepperdir
    verify_filelist.Verify(file_list_path, pepperdir)
    print 'OK'
  except verify_filelist.ParseException, e:
    buildbot_common.ErrorExit('Parsing sdk_files.list failed:\n\n%s' % e)
  except verify_filelist.VerifyException, e:
    # Build a self-contained "how to reproduce" message using paths
    # relative to the current working directory.
    file_list_rel = os.path.relpath(file_list_path)
    verify_filelist_py = os.path.splitext(verify_filelist.__file__)[0] + '.py'
    verify_filelist_py = os.path.relpath(verify_filelist_py)
    pepperdir_rel = os.path.relpath(pepperdir)

    msg = """\
SDK verification failed:
%s
Add/remove files from %s to fix.
Run:
    ./%s %s %s
to test.""" % (e, file_list_rel, verify_filelist_py, file_list_rel,
               pepperdir_rel)
    buildbot_common.ErrorExit(msg)
def BuildStepTarBundle(pepper_ver, tarfile):
  """Create the bzip2-compressed SDK tarball for this pepper version."""
  buildbot_common.BuildStep('Tar Pepper Bundle')
  buildbot_common.MakeDir(os.path.dirname(tarfile))
  cmd = [sys.executable, CYGTAR, '-C', OUT_DIR, '-cjf', tarfile,
         'pepper_' + pepper_ver]
  buildbot_common.Run(cmd, cwd=NACL_DIR)
def GetManifestBundle(pepper_ver, chrome_revision, nacl_revision, tarfile,
                      archive_url):
  """Describe an SDK tarball as a manifest_util.Bundle with one Archive."""
  # Hash the tarball to fill in the archive's size/checksum fields.
  with open(tarfile, 'rb') as tarfile_stream:
    digest, num_bytes = manifest_util.DownloadAndComputeHash(tarfile_stream)

  archive = manifest_util.Archive(manifest_util.GetHostOS())
  archive.checksum = digest
  archive.size = num_bytes
  archive.url = archive_url

  bundle_name = 'pepper_' + pepper_ver
  description = ('Chrome %s bundle. Chrome revision: %s. NaCl revision: %s' % (
      pepper_ver, chrome_revision, nacl_revision))

  bundle = manifest_util.Bundle(bundle_name)
  for attr, value in (('revision', int(chrome_revision)),
                      ('repath', bundle_name),
                      ('version', int(pepper_ver)),
                      ('description', description),
                      ('stability', 'dev'),
                      ('recommended', 'no'),
                      ('archives', [archive])):
    setattr(bundle, attr, value)
  return bundle
def Archive(filename, from_directory, step_link=True):
  """Upload a file to the Cloud Storage bucket appropriate for this build."""
  # Real SDK builders publish to the public mirror; other bots use the
  # test bucket.
  if buildbot_common.IsSDKBuilder():
    prefix = 'nativeclient-mirror/nacl/nacl_sdk/'
  else:
    prefix = 'nativeclient-mirror/nacl/nacl_sdk_test/'
  bucket_path = prefix + build_version.ChromeVersion()
  buildbot_common.Archive(filename, bucket_path, from_directory, step_link)
def BuildStepArchiveBundle(name, pepper_ver, chrome_revision, nacl_revision,
                           tarfile):
  """Upload an SDK tarball together with its generated manifest snippet."""
  buildbot_common.BuildStep('Archive %s' % name)
  tarname = os.path.basename(tarfile)
  Archive(tarname, os.path.dirname(tarfile))

  # generate "manifest snippet" for this archive.
  archive_url = GSTORE + 'nacl_sdk/%s/%s' % (
      build_version.ChromeVersion(), tarname)
  bundle = GetManifestBundle(pepper_ver, chrome_revision, nacl_revision,
                             tarfile, archive_url)

  manifest_snippet_file = os.path.join(OUT_DIR, tarname + '.json')
  with open(manifest_snippet_file, 'wb') as manifest_snippet_stream:
    manifest_snippet_stream.write(bundle.GetDataAsString())

  Archive(tarname + '.json', OUT_DIR, step_link=False)
def _MakeComponentVersion(version, revision):
  """Return a chrome-extension-style version string '0.<ver>.<major>.<minor>'.

  Extension manifest version components must be integers without leading
  zeros and must not overflow, so a long revision string is split into a
  major/minor pair (the last four digits become the minor component).
  """
  if len(revision) > 4:
    rev_minor = int(revision[-4:])
    rev_major = int(revision[:-4])
    return "0.%s.%s.%s" % (version, rev_major, rev_minor)
  return "0.%s.0.%s" % (version, revision)


def BuildStepBuildPNaClComponent(version, revision):
  """Build the PNaCl multi-CRX component zip for the given version/revision."""
  # Sadly revision can go backwords for a given version since when a version
  # is built from master, revision will be a huge number (in the hundreds of
  # thousands. Once the branch happens the revision will reset to zero.
  # TODO(sbc): figure out how to compensate for this in some way such that
  # revisions always go forward for a given version.
  buildbot_common.BuildStep('PNaCl Component')
  # Version numbers must follow the format specified in:
  # https://developer.chrome.com/extensions/manifest/version
  # So ensure that rev_major/rev_minor don't overflow and ensure there
  # are no leading zeros.
  buildbot_common.Run(['./make_pnacl_component.sh',
                       'pnacl_multicrx_%s.zip' % revision,
                       _MakeComponentVersion(version, revision)],
                      cwd=SCRIPT_DIR)
def BuildStepArchivePNaClComponent(revision):
  """Upload the PNaCl component zip built for this revision."""
  buildbot_common.BuildStep('Archive PNaCl Component')
  Archive('pnacl_multicrx_%s.zip' % revision, OUT_DIR)
def BuildStepArchiveSDKTools():
  """Build and upload the self-updating sdk_tools packages."""
  buildbot_common.BuildStep('Build SDK Tools')
  build_updater.BuildUpdater(OUT_DIR)

  buildbot_common.BuildStep('Archive SDK Tools')
  for artifact in ('sdk_tools.tgz', 'nacl_sdk.zip'):
    Archive(artifact, OUT_DIR, step_link=False)
def BuildStepBuildAppEngine(pepperdir, chrome_revision):
  """Build the projects found in src/gonacl_appengine/src"""
  buildbot_common.BuildStep('Build GoNaCl AppEngine Projects')
  # Point the appengine makefiles at the SDK we just assembled.
  env = dict(os.environ,
             NACL_SDK_ROOT=pepperdir,
             NACLPORTS_NO_ANNOTATE="1")
  buildbot_common.Run(['make', 'upload', 'REVISION=%s' % chrome_revision],
                      env=env, cwd=GONACL_APPENGINE_SRC_DIR)
def main(args):
  """Parses arguments and drives the full SDK build pipeline.

  Returns:
    0 on success. Fatal problems exit via buildbot_common / parser.error.
  """
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('--qemu', help='Add qemu for ARM.',
                      action='store_true')
  parser.add_argument('--tar', help='Force the tar step.',
                      action='store_true')
  parser.add_argument('--archive', help='Force the archive step.',
                      action='store_true')
  parser.add_argument('--release', help='PPAPI release version.',
                      dest='release', default=None)
  parser.add_argument('--build-app-engine',
                      help='Build AppEngine demos.', action='store_true')
  parser.add_argument('--experimental',
                      help='build experimental examples and libraries',
                      action='store_true',
                      dest='build_experimental')
  parser.add_argument('--skip-toolchain', help='Skip toolchain untar',
                      action='store_true')
  parser.add_argument('--no-clean', dest='clean', action='store_false',
                      help="Don't clean gn build directories")
  parser.add_argument('--mac-sdk',
                      help='Set the mac-sdk (e.g. 10.6) to use when building with ninja.')
  parser.add_argument('--no-arm-trusted', action='store_true',
                      help='Disable building of ARM trusted components (sel_ldr, etc).')
  parser.add_argument('--no-use-sysroot', action='store_true',
                      help='Disable building against sysroot.')

  # To setup bash completion for this command first install optcomplete
  # and then add this line to your .bashrc:
  #  complete -F _optcomplete build_sdk.py
  try:
    import optcomplete
    optcomplete.autocomplete(parser)
  except ImportError:
    pass

  # Parsed options live in a module-level global so the BuildStep* helpers
  # can consult them without explicit plumbing.
  global options
  options = parser.parse_args(args)

  buildbot_common.BuildStep('build_sdk')

  # On the official SDK builders, force the tar + archive steps.
  if buildbot_common.IsSDKBuilder():
    options.archive = True
    # TODO(binji): re-enable app_engine build when the linux builder stops
    # breaking when trying to git clone from github.
    # See http://crbug.com/412969.
    options.build_app_engine = False
    options.tar = True

  # NOTE: order matters here. This will be the order that is specified in the
  # Makefiles; the first toolchain will be the default.
  toolchains = ['pnacl', 'x86_glibc', 'arm_glibc', 'clang-newlib', 'host']

  print 'Building: ' + ' '.join(toolchains)
  platform = getos.GetPlatform()

  # Archiving requires a tarball to upload.
  if options.archive and not options.tar:
    parser.error('Incompatible arguments with archive.')

  chrome_version = int(build_version.ChromeMajorVersion())
  chrome_revision = build_version.ChromeRevision()
  nacl_revision = build_version.NaClRevision()
  pepper_ver = str(chrome_version)
  pepper_old = str(chrome_version - 1)
  pepperdir = os.path.join(OUT_DIR, 'pepper_' + pepper_ver)
  pepperdir_old = os.path.join(OUT_DIR, 'pepper_' + pepper_old)
  tarname = 'naclsdk_%s.tar.bz2' % platform
  tarfile = os.path.join(OUT_DIR, tarname)

  if options.release:
    pepper_ver = options.release
  print 'Building PEPPER %s at %s' % (pepper_ver, chrome_revision)

  if 'NACL_SDK_ROOT' in os.environ:
    # We don't want the currently configured NACL_SDK_ROOT to have any effect
    # of the build.
    del os.environ['NACL_SDK_ROOT']

  if platform == 'linux':
    # Linux-only: make sure the debian/stable sysroot image is installed
    install_script = os.path.join(SRC_DIR, 'build', 'linux', 'sysroot_scripts',
                                  'install-sysroot.py')

    buildbot_common.Run([sys.executable, install_script, '--arch=arm'])
    buildbot_common.Run([sys.executable, install_script, '--arch=i386'])
    buildbot_common.Run([sys.executable, install_script, '--arch=amd64'])

  if not options.skip_toolchain:
    BuildStepCleanPepperDirs(pepperdir, pepperdir_old)
    BuildStepMakePepperDirs(pepperdir, ['include', 'toolchain', 'tools'])
    BuildStepDownloadToolchains(toolchains)
    BuildStepUntarToolchains(pepperdir, toolchains)
    if platform == 'linux':
      buildbot_common.Move(os.path.join(pepperdir, 'toolchain', 'arm_trusted'),
                           os.path.join(OUT_DIR, 'arm_trusted'))

  if platform == 'linux':
    # Linux-only: Copy arm libraries from the arm_trusted package. These are
    # needed to be able to run sel_ldr_arm under qemu.
    arm_libs = [
        'lib/arm-linux-gnueabihf/librt.so.1',
        'lib/arm-linux-gnueabihf/libdl.so.2',
        'lib/arm-linux-gnueabihf/libpthread.so.0',
        'lib/arm-linux-gnueabihf/libgcc_s.so.1',
        'lib/arm-linux-gnueabihf/libc.so.6',
        'lib/arm-linux-gnueabihf/ld-linux-armhf.so.3',
        'lib/arm-linux-gnueabihf/libm.so.6',
    ]
    arm_lib_dir = os.path.join(pepperdir, 'tools', 'lib', 'arm_trusted', 'lib')
    buildbot_common.MakeDir(arm_lib_dir)
    for arm_lib in arm_libs:
      arm_lib = os.path.join(OUT_DIR, 'arm_trusted', arm_lib)
      buildbot_common.CopyFile(arm_lib, arm_lib_dir)
    buildbot_common.CopyFile(os.path.join(OUT_DIR, 'arm_trusted', 'qemu-arm'),
                             os.path.join(pepperdir, 'tools'))

  BuildStepBuildToolchains(pepperdir, toolchains,
                           not options.skip_toolchain,
                           options.clean)

  BuildStepUpdateHelpers(pepperdir, True)
  BuildStepUpdateUserProjects(pepperdir, toolchains,
                              options.build_experimental, True)

  BuildStepCopyTextFiles(pepperdir, pepper_ver, chrome_revision, nacl_revision)

  # Ship with libraries prebuilt, so run that first.
  BuildStepBuildLibraries(pepperdir)
  GenerateNotice(pepperdir)

  # Verify the SDK contains what we expect.
  BuildStepVerifyFilelist(pepperdir)

  if options.tar:
    BuildStepTarBundle(pepper_ver, tarfile)

  if platform == 'linux':
    BuildStepBuildPNaClComponent(pepper_ver, chrome_revision)

  if options.build_app_engine and platform == 'linux':
    BuildStepBuildAppEngine(pepperdir, chrome_revision)

  if options.qemu:
    qemudir = os.path.join(NACL_DIR, 'toolchain', 'linux_arm-trusted')
    oshelpers.Copy(['-r', qemudir, pepperdir])

  # Archive the results on Google Cloud Storage.
  if options.archive:
    BuildStepArchiveBundle('build', pepper_ver, chrome_revision, nacl_revision,
                           tarfile)
    # Only archive sdk_tools/naclport/pnacl_component on linux.
    if platform == 'linux':
      BuildStepArchiveSDKTools()
      BuildStepArchivePNaClComponent(chrome_revision)

  return 0
if __name__ == '__main__':
  try:
    sys.exit(main(sys.argv[1:]))
  except KeyboardInterrupt:
    # Report Ctrl-C as a buildbot error instead of a raw traceback.
    buildbot_common.ErrorExit('build_sdk: interrupted')
| nwjs/chromium.src | native_client_sdk/src/build_tools/build_sdk.py | Python | bsd-3-clause | 31,155 |
GUIDE_NAME = 'plum'


class Plum(object):
    """Manages a single vertical guide (named ``plum``) centered on a glyph.

    The wrapped ``glyph`` is expected to expose the RoboFont glyph API
    (``guides``, ``addGuide``, ``removeGuide``, ``width``, ``getParent``)
    -- TODO confirm against the host editor's object model.
    """

    def __init__(self, glyph):
        self.glyph = glyph

    def toggle(self):
        """Remove the plum guide if present; otherwise create it."""
        if self.plum_guide:
            self.destroy()
        else:
            self.create()

    def create(self):
        """Add a centered plum guide to the glyph (no-op without a glyph)."""
        if self.glyph_exists:
            self.glyph.addGuide(self.position, self.angle, name=GUIDE_NAME)

    def destroy(self):
        """Remove the plum guide from the glyph (no-op without a glyph)."""
        if self.glyph_exists:
            self.glyph.removeGuide(self.plum_guide)

    def update(self):
        """Re-center the existing guide, e.g. after the glyph width changed."""
        if self.glyph_exists:
            guide = self.plum_guide
            if guide:
                guide.x = self.horizontal_center

    @property
    def guides(self):
        return self.glyph.guides if self.glyph_exists else []

    @property
    def plum_guides(self):
        # Bug fix: compare names with `==`, not `is`. Identity comparison
        # only matched when the guide's name string happened to be interned,
        # so guides loaded from a UFO could silently be missed.
        return (guide for guide in self.guides if guide.name == GUIDE_NAME)

    @property
    def plum_guide(self):
        # First plum guide, or None when the glyph has none.
        return next(self.plum_guides, None)

    @property
    def angle(self):
        # Vertical guide, slanted to follow the font's italic angle if any.
        return 90 + (self.font.info.italicAngle or 0)

    @property
    def position(self):
        return (self.glyph.width / 2, 0)

    @property
    def horizontal_center(self):
        return self.position[0]

    @property
    def font(self):
        return self.glyph.getParent()

    @property
    def glyph_exists(self):
        return self.glyph is not None
| jackjennings/Plum | Plum.roboFontExt/lib/plum/plum.py | Python | mit | 1,320 |
#!/usr/bin/env python
'''
Copyright (C) 2015, Digium, Inc.
Ashley Sanders <asanders@digium.com>
This program is free software, distributed under the terms of
the GNU General Public License Version 2.
'''
import logging
from sipp_scenario import SIPpScenarioWrapper
from twisted.internet import defer
LOGGER = logging.getLogger(__name__)
def get_friendly_scenario_type(scenario_type):
    """Returns the logger friendly name for the given scenario type.

    Keyword Arguments:
    scenario_type -- The type of scenario.
    """
    friendly_names = {'SIPpScenarioWrapper': 'SIPp scenario'}
    return friendly_names.get(scenario_type, 'Unknown type scenario')
class TcpAuthLimitTestModule(object):
    """The test module.

    This class serves as a harness for the test scenarios. It manages the
    life-cycle of the objects needed to execute the test plan.
    """

    def __init__(self, config, test_object):
        """Constructor.

        Keyword Arguments:
        config -- The YAML configuration for this test.
        test_object -- The TestCaseModule instance for this test.
        """
        self.__test_object = test_object
        self.__remote_host = config['remote-host'][0]
        self.__tcpauthlimit = config['tcpauthlimit']
        self.__scenarios = self.__build_scenarios(config['test-scenarios'])

        self.__test_object.register_stop_observer(self.__on_asterisk_stop)
        self.__test_object.register_ami_observer(self.__on_ami_connected)

    def __build_scenarios(self, config_scenarios):
        """Builds the scenarios.

        Keyword Arguments:
        config_scenarios -- The test-scenarios section from the YAML
                            configuration.

        Returns:
        A list of scenarios on success. None on error.
        """
        scenarios = list()
        msg = '{0} Building test scenarios.'
        LOGGER.debug(msg.format(self))

        # Note: only the remote address is needed per-scenario; the port and
        # tcpauthlimit values from config are consumed elsewhere.
        remote_address = self.__remote_host['address']
        for config_scenario in config_scenarios:
            scenario_type = config_scenario['type']
            scenario_id = config_scenario.get('scenario-id') or None
            if scenario_type.lower() == 'sipp-scenario':
                key_args = config_scenario['key-args']
                ordered_args = config_scenario.get('ordered-args') or []
                target = config_scenario.get('target') or remote_address
                scenario = SIPpScenarioWrapper(self.__test_object.test_name,
                                               key_args,
                                               ordered_args,
                                               target,
                                               scenario_id)
            else:
                msg = '{0} [{1}] is not a recognized scenario type.'
                LOGGER.error(msg.format(self, scenario_type))
                return None
            scenarios.append(scenario)

        if len(scenarios) == 0:
            msg = '{0} Failing the test. No scenarios registered.'
            LOGGER.error(msg.format(self))
            self.__test_object.set_passed(False)
            self.__test_object.stop_reactor()
        return scenarios

    def __evaluate_scenario_results(self, scenario_type):
        """Evaluates the results for the given scenario type.

        For SIPpScenarioWrapper scenario type evaluations, the scenarios are
        polled to determine if the number of those scenarios that passed equals
        the tcpauthlimit (maximum number of connections permitted). Because
        more scenarios are executed than the number of connections permitted,
        some of these scenarios are expected to fail.

        Keyword Arguments:
        scenario_type -- The type of scenario instances to analyze.

        Returns True on success, False otherwise.
        """
        def __get_scenarios(scenario_type):
            """Creates a scenario generator for the given scenario type.

            Keyword Arguments:
            scenario_type -- The type of scenario instance for the
                             generator to return.

            Returns a generator for the scenarios found matching the given
            scenario type.
            """
            for scenario in self.__scenarios:
                if scenario.__class__.__name__ == scenario_type:
                    yield scenario

        friendly_type = get_friendly_scenario_type(scenario_type)
        scenario_count = sum(1 for s in __get_scenarios(scenario_type))
        msg = '{0} Evaluating {1} results...'.format(self, friendly_type)
        LOGGER.debug(msg)
        if scenario_count == 0:
            msg = '{0} No {1} results to evaluate.'
            LOGGER.debug(msg.format(self, friendly_type))
            return True
        else:
            actual = 0
            msg = '{0} {1} \'{2}\' {3}.'
            if scenario_type == 'SIPpScenarioWrapper':
                # Only up to tcpauthlimit connections can succeed; any
                # scenarios beyond the limit are expected to fail.
                expected = (
                    scenario_count if scenario_count < self.__tcpauthlimit
                    else self.__tcpauthlimit)
            else:
                expected = 1
            for scenario in __get_scenarios(scenario_type):
                if scenario.passed:
                    actual += 1
                if scenario_type == 'SIPpScenarioWrapper':
                    if actual <= expected:
                        scenario.adjust_result()
                    else:
                        scenario.adjust_result(255)
                if scenario.passed:
                    LOGGER.debug(msg.format(self,
                                            friendly_type,
                                            scenario.scenario_id,
                                            'passed'))
                else:
                    LOGGER.debug(msg.format(self,
                                            friendly_type,
                                            scenario.scenario_id,
                                            'failed'))
            if actual != expected:
                msg = '{0} One or more {1}s failed.'
                LOGGER.error(msg.format(self, friendly_type))
                return False

        msg = '{0} All {1}s passed.'
        LOGGER.debug(msg.format(self, friendly_type))
        return True

    def __evaluate_test_results(self):
        """Evaluates the test results.

        First, the method analyzes the SIPpScenarioWrapper instances (if any)
        then analyzes the remaining TCP client scenarios (if any).

        Returns True on success, False otherwise.
        """
        LOGGER.debug('{0} Evaluating test results...'.format(self))
        return self.__evaluate_scenario_results('SIPpScenarioWrapper')

    def __format__(self, format_spec):
        """Overrides default format handling for 'self'."""
        return self.__class__.__name__ + ':'

    def __on_ami_connected(self, ami):
        """Handler for the AMI connect event.

        Keyword Arguments:
        ami -- The AMI instance that raised this event.
        """
        self.__run_scenarios()

    def __on_asterisk_stop(self, result):
        """Determines the overall pass/fail state for the test prior to
        shutting down the reactor.

        Keyword Arguments:
        result -- A twisted deferred instance.

        Returns:
        A twisted deferred instance.
        """
        self.__test_object.set_passed(self.__evaluate_test_results())
        msg = '{0} Test {1}.'
        if self.__test_object.passed:
            LOGGER.info(msg.format(self, 'passed'))
        else:
            LOGGER.error(msg.format(self, 'failed'))
        return result

    def __run_scenarios(self):
        """Executes the scenarios."""
        def __tear_down_test(message):
            """Tears down the test.

            Keyword Arguments:
            message -- The event payload.
            """
            LOGGER.debug('{0} Stopping reactor.'.format(self))
            self.__test_object.stop_reactor()
            return message

        LOGGER.debug('{0} Running test scenarios.'.format(self))
        deferreds = [scenario.run() for scenario in self.__scenarios]
        defer.DeferredList(deferreds).addCallback(__tear_down_test)
| asterisk/testsuite | tests/channels/SIP/tcpauthlimit/tcpauthlimit.py | Python | gpl-2.0 | 8,489 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional foreign key from S3File to the Dataset it belongs to.

    dependencies = [
        ('dataset', '0040_auto_20151216_2203'),
        ('log_parse', '0002_auto_20160120_2149'),
    ]

    operations = [
        migrations.AddField(
            model_name='s3file',
            name='dataset',
            field=models.ForeignKey(to='dataset.Dataset', null=True),
        ),
    ]
| poldracklab/open_fmri | open_fmri/apps/log_parse/migrations/0003_s3file_dataset.py | Python | bsd-3-clause | 470 |
from datetime import datetime
import unittest
from emma import exceptions as ex
from emma.model.account import Account
from emma.model.trigger import Trigger
from emma.model import SERIALIZED_DATETIME_FORMAT
from emma.model.mailing import Mailing
from tests.model import MockAdapter
class TriggerTest(unittest.TestCase):
    """Unit tests for the Trigger model: field parsing, delete, and save."""

    def setUp(self):
        # Route all account API calls through the mock adapter so no real
        # HTTP requests are made.
        Account.default_adapter = MockAdapter
        self.trigger = Trigger(
            Account(account_id="100", public_key="xxx", private_key="yyy"),
            {
                'trigger_id':200,
                'start_ts': datetime.now().strftime(SERIALIZED_DATETIME_FORMAT),
                'deleted_at':None,
                'parent_mailing': {
                    'mailing_type': "m",
                    'send_started': None,
                    'signup_form_id': None,
                    'mailing_id': 1024,
                    'plaintext': "Hello [% member:first_name %]!",
                    'recipient_count': 0,
                    'year': None,
                    'account_id': 100,
                    'month': None,
                    'disabled': False,
                    'parent_mailing_id': None,
                    'started_or_finished': None,
                    'name': "Sample Mailing",
                    'mailing_status': "c",
                    'plaintext_only': False,
                    'sender': "Test Sender",
                    'send_finished': None,
                    'send_at': None,
                    'reply_to': None,
                    'subject': "Parent Mailing",
                    'archived_ts': None,
                    'html_body': "<p>Hello [% member:first_name %]!</p>"
                }
            }
        )

    def test_can_parse_special_fields_correctly(self):
        self.assertIsInstance(self.trigger['start_ts'], datetime)
        self.assertIsInstance(self.trigger['parent_mailing'], Mailing)
        self.assertIsNone(self.trigger.get('deleted_at'))

    def test_can_delete_a_trigger(self):
        # Deleting without a trigger_id must raise and make no API calls.
        del self.trigger['trigger_id']

        with self.assertRaises(ex.NoTriggerIdError):
            self.trigger.delete()
        self.assertEqual(self.trigger.account.adapter.called, 0)
        self.assertFalse(self.trigger.is_deleted())

    def test_can_delete_a_trigger2(self):
        # An already-deleted trigger short-circuits: no API call is made.
        self.trigger['deleted_at'] = datetime.now()

        result = self.trigger.delete()
        self.assertIsNone(result)
        self.assertEqual(self.trigger.account.adapter.called, 0)
        self.assertTrue(self.trigger.is_deleted())

    def test_can_delete_a_trigger3(self):
        MockAdapter.expected = True

        result = self.trigger.delete()

        self.assertIsNone(result)
        self.assertEqual(self.trigger.account.adapter.called, 1)
        self.assertEqual(
            self.trigger.account.adapter.call,
            ('DELETE', '/triggers/200', {}))
        self.assertTrue(self.trigger.is_deleted())

    def test_can_save_a_trigger(self):
        # Saving a new trigger POSTs and stores the returned trigger_id.
        trigger = Trigger(
            self.trigger.account,
            {'name':u"Test Trigger"}
        )
        MockAdapter.expected = 1024

        result = trigger.save()

        self.assertIsNone(result)
        self.assertEqual(trigger.account.adapter.called, 1)
        self.assertEqual(
            trigger.account.adapter.call,
            (
                'POST',
                '/triggers',
                {
                    'name': u"Test Trigger"
                }))
        self.assertEqual(1024, trigger['trigger_id'])

    def test_can_save_a_trigger2(self):
        # Saving an existing trigger issues a PUT against its id.
        MockAdapter.expected = True
        self.trigger['name'] = u"Renamed Trigger"

        result = self.trigger.save()

        self.assertIsNone(result)
        self.assertEqual(self.trigger.account.adapter.called, 1)
        self.assertEqual(
            self.trigger.account.adapter.call,
            ('PUT', '/triggers/200', {'name': u"Renamed Trigger"}))
class TriggerMailingCollectionTest(unittest.TestCase):
    """Unit tests for the mailings collection attached to a Trigger."""

    def setUp(self):
        Account.default_adapter = MockAdapter
        self.mailings = Trigger(
            Account(account_id="100", public_key="xxx", private_key="yyy"),
            {'trigger_id': 1024}
        ).mailings

    def test_can_fetch_all_mailings(self):
        # Fetching without a trigger_id must raise and make no API calls.
        del self.mailings.trigger['trigger_id']

        with self.assertRaises(ex.NoSearchIdError):
            self.mailings.fetch_all()
        self.assertEqual(self.mailings.trigger.account.adapter.called, 0)

    def test_can_fetch_all_mailings2(self):
        # Setup
        MockAdapter.expected = [
            {'mailing_id': 200},
            {'mailing_id': 201},
            {'mailing_id': 202}
        ]

        mailings = self.mailings.fetch_all()

        self.assertEqual(self.mailings.trigger.account.adapter.called, 1)
        self.assertEqual(
            self.mailings.trigger.account.adapter.call,
            ('GET', '/triggers/1024/mailings', {}))
        self.assertIsInstance(mailings, dict)
        self.assertEqual(3, len(mailings))
        self.assertEqual(3, len(self.mailings))
        self.assertIsInstance(self.mailings[200], Mailing)
        self.assertIsInstance(self.mailings[201], Mailing)
        self.assertIsInstance(self.mailings[202], Mailing)
| myemma/EmmaPython | tests/model/trigger_test.py | Python | mit | 5,248 |
import unittest
from streamlink.plugins.gulli import Gulli
class TestPluginGulli(unittest.TestCase):
    def test_can_handle_url(self):
        # URLs the plugin should claim: the live stream plus VOD pages in
        # each supported section.
        valid_urls = [
            "http://replay.gulli.fr/Direct",
            "http://replay.gulli.fr/dessins-animes/My-Little-Pony-les-amies-c-est-magique/VOD68328764799000",
            "https://replay.gulli.fr/emissions/In-Ze-Boite2/VOD68639028668000",
            "https://replay.gulli.fr/series/Power-Rangers-Dino-Super-Charge/VOD68612908435000",
        ]
        # URLs the plugin should ignore: landing/category pages and other
        # sites entirely.
        invalid_urls = [
            "http://replay.gulli.fr/",
            "http://replay.gulli.fr/dessins-animes",
            "http://replay.gulli.fr/emissions",
            "http://replay.gulli.fr/series",
            "http://www.tvcatchup.com/",
            "http://www.youtube.com/",
        ]
        for url in valid_urls:
            self.assertTrue(Gulli.can_handle_url(url))
        for url in invalid_urls:
            self.assertFalse(Gulli.can_handle_url(url))
| wlerin/streamlink | tests/plugins/test_gulli.py | Python | bsd-2-clause | 1,131 |
import psycopg2
from utils.db_connection import get_connection,close_connection
def main():
    """Drops the main polygon and point geographies tables if they exist.

    Note: the original docstring claimed this *creates* the tables; the SQL
    below only drops them.
    """
    conn = get_connection()
    cur = conn.cursor()
    statements = [
        """DROP TABLE IF EXISTS polygon_geographies;""",
        """DROP TABLE IF EXISTS point_geographies;"""]
    try:
        for statement in statements:
            cur.execute(statement)
        conn.commit()
    finally:
        # Always release the connection, even if a statement fails.
        close_connection(conn, cur)
if __name__ == '__main__':
main() | UrbanCCD-UChicago/sustainableSystems | src/drop_geographies.py | Python | mit | 456 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-12-18 00:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Makes Profile.tracked_gyms optional in forms (adds blank=True).

    dependencies = [
        ('app', '0014_auto_20171218_0008'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='tracked_gyms',
            field=models.ManyToManyField(blank=True, to='app.Gym'),
        ),
    ]
| Gimpneek/exclusive-raid-gym-tracker | app/migrations/0015_auto_20171218_0008.py | Python | gpl-3.0 | 471 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-12-16 12:23
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
    # Alters Profile.member_id: optional, unique, and validated against the
    # sss-oo-mmmmmm pattern described in the help_text.

    dependencies = [
        ('server', '0028_auto_20181211_1333'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='member_id',
            field=models.CharField(help_text='Format:sss-oo-mmmmmm s=Sektionsnummer(008) o=Ortsgruppe(00|01) m=Mitgliedsnummer', max_length=13, null=True, unique=True, blank=True, validators=[django.core.validators.RegexValidator(re.compile('\\d{3}-\\d{2}-\\d{6}', 32), 'Bitte auf den richtigen Aufbau achten')], verbose_name='MitgliedsNr'),
        ),
    ]
| wodo/WebTool3 | webtool/server/migrations/0029_auto_20181216_1323.py | Python | bsd-2-clause | 785 |
# pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow pandas.DataFrame."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pandas is an optional dependency: import it if available and record the
# outcome so callers can gate pandas-specific behavior on HAS_PANDAS.
try:
  # pylint: disable=g-import-not-at-top
  import pandas as pd
  HAS_PANDAS = True
except ImportError:
  HAS_PANDAS = False

# Pandas dtype names accepted as model input. Only the *keys* are consulted
# in this module; the mapped values appear unused here (note the odd
# 'bool' -> 'i' entry -- presumably a typo for 'bool', TODO confirm before
# relying on the values).
PANDAS_DTYPES = {
    'int8': 'int',
    'int16': 'int',
    'int32': 'int',
    'int64': 'int',
    'uint8': 'int',
    'uint16': 'int',
    'uint32': 'int',
    'uint64': 'int',
    'float16': 'float',
    'float32': 'float',
    'float64': 'float',
    'bool': 'i'
}
def extract_pandas_data(data):
  """Extract data from pandas.DataFrame for predictors.

  Non-DataFrame inputs are passed through unchanged; DataFrames with only
  numeric/bool dtypes are converted to a float ndarray, anything else raises.
  """
  if not isinstance(data, pd.DataFrame):
    return data

  unsupported = [dtype for dtype in data.dtypes
                 if dtype.name not in PANDAS_DTYPES]
  if unsupported:
    raise ValueError('Data types for data must be int, float, or bool.')
  return data.values.astype('float')
def extract_pandas_matrix(data):
  """Extracts numpy matrix from pandas DataFrame.

  Non-DataFrame inputs are returned unchanged; DataFrames are converted to
  a numpy ndarray of their values.
  """
  if not isinstance(data, pd.DataFrame):
    return data

  # DataFrame.as_matrix() was deprecated and later removed from pandas;
  # .values is the long-supported, backward-compatible equivalent.
  return data.values
def extract_pandas_labels(labels):
  """Extract data from pandas.DataFrame for labels.

  A single-column DataFrame with numeric/bool dtypes is converted to a
  float array; multi-column or unsupported-dtype frames raise ValueError;
  every other input is returned unchanged.
  """
  # NOTE(review): pandas.Series is *not* a DataFrame subclass, so Series
  # inputs take the else-branch and are returned as-is (the original comment
  # here claimed otherwise) -- confirm callers expect that.
  if isinstance(labels,
                pd.DataFrame):
    if len(labels.columns) > 1:
      raise ValueError('Only one column for labels is allowed.')

    if all(dtype.name in PANDAS_DTYPES for dtype in labels.dtypes):
      return labels.values.astype('float')
    else:
      raise ValueError('Data types for labels must be int, float, or bool.')
  else:
    return labels
| sachinpro/sachinpro.github.io | tensorflow/contrib/learn/python/learn/io/pandas_io.py | Python | apache-2.0 | 2,295 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.